diff --git a/Gopkg.lock b/Gopkg.lock index 7124bbe582a..97db97b4c0d 100644 --- a/Gopkg.lock +++ b/Gopkg.lock @@ -412,9 +412,11 @@ revision = "0798caf39368f3cfb506eb126dbbeee5e738846c" [[projects]] - digest = "1:35a1d3d1166a3a74d696180e74d6fd0e5179747d2626ca4586bdc9ca45641544" + digest = "1:5543d01772b4474e08f1554ea53cf3eb12660deed37470888cfc1cd66748cab0" name = "github.com/openshift/cluster-api" packages = [ + "pkg/apis/cluster/common", + "pkg/apis/cluster/v1alpha1", "pkg/apis/machine/common", "pkg/apis/machine/v1beta1", ] @@ -881,14 +883,14 @@ [[projects]] branch = "master" - digest = "1:53e688169eb9b22a6a6d1d69f8987e85331bb185e3f3691ef4182e18f6bdd3a0" + digest = "1:4c282f9a6274e8614446fbbe4faad65ea1588ebff812eaae286355f56fd81236" name = "sigs.k8s.io/cluster-api-provider-openstack" packages = [ "pkg/apis", "pkg/apis/openstackproviderconfig/v1alpha1", ] pruneopts = "NUT" - revision = "581696acc2e79f9cc1755a9cb1d8b6d512298883" + revision = "2b8ee6bc02ae0c13e88e6484b8097adba4e7a894" source = "https://github.com/openshift/cluster-api-provider-openstack.git" [[projects]] diff --git a/data/data/openstack/main.tf b/data/data/openstack/main.tf index c0b7984d866..77f47cc01d7 100644 --- a/data/data/openstack/main.tf +++ b/data/data/openstack/main.tf @@ -36,6 +36,7 @@ module "service" { service_port_id = "${module.topology.service_port_id}" master_ips = "${module.topology.master_ips}" master_port_names = "${module.topology.master_port_names}" + bootstrap_ip = "${module.topology.bootstrap_port_ip}" } module "bootstrap" { @@ -68,12 +69,13 @@ module "masters" { module "topology" { source = "./topology" - cidr_block = "${var.machine_cidr}" - cluster_id = "${var.cluster_id}" - external_network = "${var.openstack_external_network}" - masters_count = "${var.master_count}" - lb_floating_ip = "${var.openstack_lb_floating_ip}" - trunk_support = "${var.openstack_trunk_support}" + cidr_block = "${var.machine_cidr}" + cluster_id = "${var.cluster_id}" + external_network = "${var.openstack_external_network}" + external_network_id = "${var.openstack_external_network_id}" + masters_count = "${var.master_count}" + lb_floating_ip = "${var.openstack_lb_floating_ip}" + trunk_support = "${var.openstack_trunk_support}" } resource "openstack_objectstorage_container_v1" "container" { diff --git a/data/data/openstack/service/main.tf b/data/data/openstack/service/main.tf index deca0b51c4e..09d2ab90e29 100644 --- a/data/data/openstack/service/main.tf +++ b/data/data/openstack/service/main.tf @@ -69,29 +69,55 @@ data "ignition_file" "haproxy_watcher_script" { set -x +# NOTE(flaper87): We're doing this here for now +# because our current vendored verison for terraform +# doesn't support appending to an ignition_file. 
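The go-template handed to `oc get nodes -ogo-template` in the watcher script above is hard to read once it is escaped for the Terraform heredoc. Below is a minimal standalone sketch of what it evaluates to, shown unescaped: `text/template` walking the JSON form of a NodeList (approximately what `oc` renders against) and printing the InternalIP of every Ready node. The sample NodeList is invented for illustration; only the template string comes from the script.

```go
// Standalone sketch: render the node-address template against sample data.
package main

import (
	"encoding/json"
	"os"
	"text/template"
)

// nodeTemplate is the script's TEMPLATE value with the heredoc escaping removed.
const nodeTemplate = `{{range .items}}{{$addresses:=.status.addresses}}{{range .status.conditions}}{{if eq .type "Ready"}}{{if eq .status "True"}}{{range $addresses}}{{if eq .type "InternalIP"}}{{.address}}{{end}}{{end}}{{end}}{{end}}{{end}} {{end}}`

// sampleNodeList is made up for illustration only.
const sampleNodeList = `{
  "items": [
    {
      "status": {
        "addresses":  [{"type": "InternalIP", "address": "10.0.0.11"}],
        "conditions": [{"type": "Ready", "status": "True"}]
      }
    }
  ]
}`

func main() {
	var nodes map[string]interface{}
	if err := json.Unmarshal([]byte(sampleNodeList), &nodes); err != nil {
		panic(err)
	}
	// Prints "10.0.0.11 "; the watcher script then splits this on whitespace.
	tmpl := template.Must(template.New("ips").Parse(nodeTemplate))
	if err := tmpl.Execute(os.Stdout, nodes); err != nil {
		panic(err)
	}
}
```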
This +# is coming in 2.3 +grep -qxF "127.0.0.1 api.${var.cluster_domain}" /etc/hosts || echo "127.0.0.1 api.${var.cluster_domain}" | sudo tee -a /etc/hosts + +mkdir -p /etc/haproxy export KUBECONFIG=/opt/openshift/auth/kubeconfig -TEMPLATE="{{range .items}}{{\$name:=.metadata.name}}{{range .status.conditions}}{{if eq .type \"Ready\"}}{{if eq .status \"True\" }}{{\$name}}{{end}}{{end}}{{end}} {{end}}" +TEMPLATE="{{range .items}}{{\$addresses:=.status.addresses}}{{range .status.conditions}}{{if eq .type \"Ready\"}}{{if eq .status \"True\" }}{{range \$addresses}}{{if eq .type \"InternalIP\"}}{{.address}}{{end}}{{end}}{{end}}{{end}}{{end}} {{end}}" MASTERS=$(oc get nodes -l node-role.kubernetes.io/master -ogo-template="$TEMPLATE") WORKERS=$(oc get nodes -l node-role.kubernetes.io/worker -ogo-template="$TEMPLATE") +update_cfg_and_restart() { + CHANGED=$(diff /etc/haproxy/haproxy.cfg /etc/haproxy/haproxy.cfg.new) + + if [[ ! -f /etc/haproxy/haproxy.cfg ]] || [[ ! $CHANGED -eq "" ]]; + then + cp /etc/haproxy/haproxy.cfg /etc/haproxy/haproxy.cfg.backup || true + cp /etc/haproxy/haproxy.cfg.new /etc/haproxy/haproxy.cfg + systemctl restart haproxy + fi +} + if [[ $MASTERS -eq "" ]]; then - MASTER_LINES=" - server ${var.cluster_id}-bootstrap-22623 ${var.cluster_id}-bootstrap.${var.cluster_domain} check port 22623 - server ${var.cluster_id}-bootstrap-6443 ${var.cluster_id}-bootstrap.${var.cluster_domain} check port 6443" - MASTERS="${var.cluster_id}-master-0 ${var.cluster_id}-master-1 ${var.cluster_id}-master-2" +cat > /etc/haproxy/haproxy.cfg.new << EOF +listen ${var.cluster_id}-api-masters + bind 0.0.0.0:6443 + bind 0.0.0.0:22623 + mode tcp + balance roundrobin + server bootstrap-22623 ${var.bootstrap_ip} check port 22623 + server bootstrap-6443 ${var.bootstrap_ip} check port 6443 + ${replace(join("\n ", formatlist("server master-%s %s check port 6443", var.master_port_names, var.master_ips)), "master-port-", "")} +EOF + update_cfg_and_restart + exit 0 fi for master in $MASTERS; do MASTER_LINES="$MASTER_LINES - server $master $master.${var.cluster_domain} check port 6443" + server $master $master check port 6443" done for worker in $WORKERS; do WORKER_LINES="$WORKER_LINES - server $worker $worker.${var.cluster_domain} check port 443" + server $worker $worker check port 443" done cat > /etc/haproxy/haproxy.cfg.new << EOF @@ -108,16 +134,7 @@ listen ${var.cluster_id}-api-workers balance roundrobin$WORKER_LINES EOF - -mkdir -p /etc/haproxy -CHANGED=$(diff /etc/haproxy/haproxy.cfg /etc/haproxy/haproxy.cfg.new) - -if [[ ! -f /etc/haproxy/haproxy.cfg ]] || [[ ! $CHANGED -eq "" ]]; -then - cp /etc/haproxy/haproxy.cfg /etc/haproxy/haproxy.cfg.backup || true - cp /etc/haproxy/haproxy.cfg.new /etc/haproxy/haproxy.cfg - systemctl restart haproxy -fi +update_cfg_and_restart TFEOF } } @@ -140,8 +157,15 @@ ${length(var.lb_floating_ip) == 0 ? "" : " file /etc/coredns/db.${var.cluster file /etc/coredns/db.${var.cluster_domain} _etcd-server-ssl._tcp.${var.cluster_domain} { } + file /etc/coredns/db.${var.cluster_domain} bootstrap.${var.cluster_domain} { + upstream /etc/resolv.conf + } + +${replace(join("\n", formatlist(" file /etc/coredns/db.${var.cluster_domain} master-%s.${var.cluster_domain} {\n upstream /etc/resolv.conf\n }\n", var.master_port_names)), "master-port-", "")} + ${replace(join("\n", formatlist(" file /etc/coredns/db.${var.cluster_domain} etcd-%s.${var.cluster_domain} {\n upstream /etc/resolv.conf\n }\n", var.master_port_names)), "master-port-", "")} + forward . 
/etc/resolv.conf { } } @@ -179,9 +203,11 @@ $ORIGIN ${var.cluster_domain}. ${length(var.lb_floating_ip) == 0 ? "" : "api IN A ${var.lb_floating_ip}"} ${length(var.lb_floating_ip) == 0 ? "" : "*.apps IN A ${var.lb_floating_ip}"} -${replace(join("\n", formatlist("etcd-%s IN CNAME master-%s", var.master_port_names, var.master_port_names)), "master-port-", "")} +bootstrap.${var.cluster_domain} IN A ${var.bootstrap_ip} +${replace(join("\n", formatlist("master-%s IN A %s", var.master_port_names, var.master_ips)), "master-port-", "")} -${replace(join("\n", formatlist("_etcd-server-ssl._tcp.${var.cluster_domain} 8640 IN SRV 0 10 2380 etcd-%s.${var.cluster_domain}.", var.master_port_names)), "master-port-", "")} +${replace(join("\n", formatlist("etcd-%s IN A %s", var.master_port_names, var.master_ips)), "master-port-", "")} +${replace(join("\n", formatlist("_etcd-server-ssl._tcp 8640 IN SRV 0 10 2380 etcd-%s.${var.cluster_domain}.", var.master_port_names)), "master-port-", "")} EOF } } diff --git a/data/data/openstack/service/variables.tf b/data/data/openstack/service/variables.tf index fe1c766a491..35f12a4dcf3 100644 --- a/data/data/openstack/service/variables.tf +++ b/data/data/openstack/service/variables.tf @@ -42,6 +42,10 @@ variable "master_port_names" { type = "list" } +variable "bootstrap_ip" { + type = "string" +} + variable "lb_floating_ip" { type = "string" } diff --git a/data/data/openstack/topology/outputs.tf b/data/data/openstack/topology/outputs.tf index 8e0c7e1aa14..6a78f4d0517 100644 --- a/data/data/openstack/topology/outputs.tf +++ b/data/data/openstack/topology/outputs.tf @@ -6,6 +6,10 @@ output "bootstrap_port_id" { value = "${openstack_networking_port_v2.bootstrap_port.id}" } +output "bootstrap_port_ip" { + value = "${openstack_networking_port_v2.bootstrap_port.all_fixed_ips[0]}" +} + output "master_ips" { value = "${flatten(openstack_networking_port_v2.masters.*.all_fixed_ips)}" } diff --git a/data/data/openstack/topology/private-network.tf b/data/data/openstack/topology/private-network.tf index b2a5a8b0dcb..b910e67188b 100644 --- a/data/data/openstack/topology/private-network.tf +++ b/data/data/openstack/topology/private-network.tf @@ -76,8 +76,9 @@ resource "openstack_networking_port_v2" "service_port" { } data "openstack_networking_network_v2" "external_network" { - name = "${var.external_network}" - external = true + name = "${var.external_network}" + network_id = "${var.external_network_id}" + external = true } resource "openstack_networking_floatingip_associate_v2" "service_fip" { diff --git a/data/data/openstack/topology/variables.tf b/data/data/openstack/topology/variables.tf index c91ce4c07d0..0c042914f52 100644 --- a/data/data/openstack/topology/variables.tf +++ b/data/data/openstack/topology/variables.tf @@ -7,6 +7,12 @@ variable "cluster_id" { } variable "external_network" { + description = "Name of the external network providing Floating IP addresses." + type = "string" + default = "" +} + +variable "external_network_id" { description = "UUID of the external network providing Floating IP addresses." 
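The new `external_network_id` variable exists because an external network *name* may be ambiguous in a given OpenStack cloud. The actual resolution happens inside the Terraform provider's `openstack_networking_network_v2` data source, but a rough gophercloud sketch of the same "prefer an explicit UUID, otherwise fail on a duplicated name" check looks like this (client setup from environment variables; error handling trimmed):

```go
// Illustrative only — not the installer's code path.
package main

import (
	"fmt"

	"github.com/gophercloud/gophercloud"
	"github.com/gophercloud/gophercloud/openstack"
	"github.com/gophercloud/gophercloud/openstack/networking/v2/networks"
)

func findExternalNetwork(name, id string) (string, error) {
	opts, err := openstack.AuthOptionsFromEnv()
	if err != nil {
		return "", err
	}
	provider, err := openstack.AuthenticatedClient(opts)
	if err != nil {
		return "", err
	}
	client, err := openstack.NewNetworkV2(provider, gophercloud.EndpointOpts{})
	if err != nil {
		return "", err
	}

	if id != "" {
		// An explicit UUID (TF_VAR_openstack_external_network_id) wins.
		n, err := networks.Get(client, id).Extract()
		if err != nil {
			return "", err
		}
		return n.ID, nil
	}

	pages, err := networks.List(client, networks.ListOpts{Name: name}).AllPages()
	if err != nil {
		return "", err
	}
	all, err := networks.ExtractNetworks(pages)
	if err != nil {
		return "", err
	}
	switch len(all) {
	case 0:
		return "", fmt.Errorf("no network named %q", name)
	case 1:
		return all[0].ID, nil
	default:
		return "", fmt.Errorf("%d networks named %q, pass a UUID instead", len(all), name)
	}
}
```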
type = "string" default = "" diff --git a/data/data/openstack/variables-openstack.tf b/data/data/openstack/variables-openstack.tf index 60e58cb9034..c52e8da8348 100644 --- a/data/data/openstack/variables-openstack.tf +++ b/data/data/openstack/variables-openstack.tf @@ -194,9 +194,21 @@ variable "openstack_external_network" { type = "string" default = "" + description = <-api` +## Disambiguating the External Network + +The installer assumes that the name of the external network is unique. In case +there is more than one network with the same name as the desired external +network, it’s possible to provide a UUID to specify which network should be +used. + +``` +$ env TF_VAR_openstack_external_network_id="6a32627e-d98d-40d8-9324-5da7cf1452fc" \ +> bin/openshift-install create cluster +``` + ## Troubleshooting See the [troubleshooting installer issues in OpenStack](./troubleshooting.md) guide. diff --git a/pkg/asset/machines/openstack/machines.go b/pkg/asset/machines/openstack/machines.go index 9d5e8afeb7f..f4f650c9d65 100644 --- a/pkg/asset/machines/openstack/machines.go +++ b/pkg/asset/machines/openstack/machines.go @@ -78,6 +78,7 @@ func Machines(clusterID string, config *types.InstallConfig, pool *types.Machine } func provider(clusterID string, platform *openstack.Platform, mpool *openstack.MachinePool, osImage string, az string, role, userDataSecret string) (*openstackprovider.OpenstackProviderSpec, error) { + return &openstackprovider.OpenstackProviderSpec{ TypeMeta: metav1.TypeMeta{ APIVersion: "openstackproviderconfig.k8s.io/v1alpha1", @@ -94,8 +95,13 @@ func provider(clusterID string, platform *openstack.Platform, mpool *openstack.M UserDataSecret: &corev1.SecretReference{Name: userDataSecret}, Networks: []openstackprovider.NetworkParam{ { - Filter: openstackprovider.Filter{ - Tags: fmt.Sprintf("%s=%s", "openshiftClusterID", clusterID), + Subnets: []openstackprovider.SubnetParam{ + { + Filter: openstackprovider.SubnetFilter{ + Name: "nodes", + Tags: fmt.Sprintf("%s=%s", "openshiftClusterID", clusterID), + }, + }, }, }, }, diff --git a/pkg/asset/machines/userdata.go b/pkg/asset/machines/userdata.go index 6bc350a98d0..9a666b53c69 100644 --- a/pkg/asset/machines/userdata.go +++ b/pkg/asset/machines/userdata.go @@ -23,6 +23,7 @@ items: namespace: openshift-machine-api type: Opaque data: + disableTemplating: "dHJ1ZQo=" userData: {{$content}} {{- end}} `)) diff --git a/pkg/types/openstack/platform.go b/pkg/types/openstack/platform.go index 8265375dcf8..2308f0e50b9 100644 --- a/pkg/types/openstack/platform.go +++ b/pkg/types/openstack/platform.go @@ -17,7 +17,7 @@ type Platform struct { Cloud string `json:"cloud"` // ExternalNetwork - // The OpenStack external network to be used for installation. + // The OpenStack external network name to be used for installation. ExternalNetwork string `json:"externalNetwork"` // FlavorName diff --git a/vendor/github.com/openshift/cluster-api/pkg/apis/cluster/common/consts.go b/vendor/github.com/openshift/cluster-api/pkg/apis/cluster/common/consts.go new file mode 100644 index 00000000000..287480250d5 --- /dev/null +++ b/vendor/github.com/openshift/cluster-api/pkg/apis/cluster/common/consts.go @@ -0,0 +1,114 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package common + +// Constants aren't automatically generated for unversioned packages. +// Instead share the same constant for all versioned packages +type MachineStatusError string + +const ( + // Represents that the combination of configuration in the MachineSpec + // is not supported by this cluster. This is not a transient error, but + // indicates a state that must be fixed before progress can be made. + // + // Example: the ProviderSpec specifies an instance type that doesn't exist, + InvalidConfigurationMachineError MachineStatusError = "InvalidConfiguration" + + // This indicates that the MachineSpec has been updated in a way that + // is not supported for reconciliation on this cluster. The spec may be + // completely valid from a configuration standpoint, but the controller + // does not support changing the real world state to match the new + // spec. + // + // Example: the responsible controller is not capable of changing the + // container runtime from docker to rkt. + UnsupportedChangeMachineError MachineStatusError = "UnsupportedChange" + + // This generally refers to exceeding one's quota in a cloud provider, + // or running out of physical machines in an on-premise environment. + InsufficientResourcesMachineError MachineStatusError = "InsufficientResources" + + // There was an error while trying to create a Node to match this + // Machine. This may indicate a transient problem that will be fixed + // automatically with time, such as a service outage, or a terminal + // error during creation that doesn't match a more specific + // MachineStatusError value. + // + // Example: timeout trying to connect to GCE. + CreateMachineError MachineStatusError = "CreateError" + + // An error was encountered while trying to delete the Node that this + // Machine represents. This could be a transient or terminal error, but + // will only be observable if the provider's Machine controller has + // added a finalizer to the object to more gracefully handle deletions. + // + // Example: cannot resolve EC2 IP address. + DeleteMachineError MachineStatusError = "DeleteError" + + // This error indicates that the machine did not join the cluster + // as a new node within the expected timeframe after instance + // creation at the provider succeeded + // + // Example use case: A controller that deletes Machines which do + // not result in a Node joining the cluster within a given timeout + // and that are managed by a MachineSet + JoinClusterTimeoutMachineError = "JoinClusterTimeoutError" +) + +type ClusterStatusError string + +const ( + // InvalidConfigurationClusterError indicates that the cluster + // configuration is invalid. + InvalidConfigurationClusterError ClusterStatusError = "InvalidConfiguration" + + // UnsupportedChangeClusterError indicates that the cluster + // spec has been updated in an unsupported way. That cannot be + // reconciled. + UnsupportedChangeClusterError ClusterStatusError = "UnsupportedChange" + + // CreateClusterError indicates that an error was encountered + // when trying to create the cluster. 
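These constants are mostly consumed by controllers deciding whether a machine can recover on its own. A hypothetical helper (not part of the vendored package) that branches on them:

```go
// Hypothetical helper, shown only to illustrate the constants above.
package example

import "github.com/openshift/cluster-api/pkg/apis/cluster/common"

// needsSpecChange reports whether the error describes a state that will not
// clear up on its own, per the comments on the constants.
func needsSpecChange(reason common.MachineStatusError) bool {
	switch reason {
	case common.InvalidConfigurationMachineError, common.UnsupportedChangeMachineError:
		return true
	default:
		// CreateError, DeleteError, InsufficientResources and the join-timeout
		// case may resolve without touching the spec (quota freed, outage over).
		return false
	}
}
```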
+ CreateClusterError ClusterStatusError = "CreateError" + + // UpdateClusterError indicates that an error was encountered + // when trying to update the cluster. + UpdateClusterError ClusterStatusError = "UpdateError" + + // DeleteClusterError indicates that an error was encountered + // when trying to delete the cluster. + DeleteClusterError ClusterStatusError = "DeleteError" +) + +type MachineSetStatusError string + +const ( + // Represents that the combination of configuration in the MachineTemplateSpec + // is not supported by this cluster. This is not a transient error, but + // indicates a state that must be fixed before progress can be made. + // + // Example: the ProviderSpec specifies an instance type that doesn't exist. + InvalidConfigurationMachineSetError MachineSetStatusError = "InvalidConfiguration" +) + +type MachineDeploymentStrategyType string + +const ( + // Replace the old MachineSet by new one using rolling update + // i.e. gradually scale down the old MachineSet and scale up the new one. + RollingUpdateMachineDeploymentStrategyType MachineDeploymentStrategyType = "RollingUpdate" +) diff --git a/vendor/github.com/openshift/cluster-api/pkg/apis/cluster/common/plugins.go b/vendor/github.com/openshift/cluster-api/pkg/apis/cluster/common/plugins.go new file mode 100644 index 00000000000..8c2083d88c0 --- /dev/null +++ b/vendor/github.com/openshift/cluster-api/pkg/apis/cluster/common/plugins.go @@ -0,0 +1,51 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package common + +import ( + "sync" + + "github.com/pkg/errors" + "k8s.io/klog" +) + +var ( + providersMutex sync.Mutex + providers = make(map[string]interface{}) +) + +// RegisterClusterProvisioner registers a ClusterProvisioner by name. This +// is expected to happen during app startup. +func RegisterClusterProvisioner(name string, provisioner interface{}) { + providersMutex.Lock() + defer providersMutex.Unlock() + if _, found := providers[name]; found { + klog.Fatalf("Cluster provisioner %q was registered twice", name) + } + klog.V(1).Infof("Registered cluster provisioner %q", name) + providers[name] = provisioner +} + +func ClusterProvisioner(name string) (interface{}, error) { + providersMutex.Lock() + defer providersMutex.Unlock() + provisioner, found := providers[name] + if !found { + return nil, errors.Errorf("unable to find provisioner for %s", name) + } + return provisioner, nil +} diff --git a/vendor/github.com/openshift/cluster-api/pkg/apis/cluster/v1alpha1/cluster_types.go b/vendor/github.com/openshift/cluster-api/pkg/apis/cluster/v1alpha1/cluster_types.go new file mode 100644 index 00000000000..dae8b49cebe --- /dev/null +++ b/vendor/github.com/openshift/cluster-api/pkg/apis/cluster/v1alpha1/cluster_types.go @@ -0,0 +1,165 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + + "github.com/openshift/cluster-api/pkg/apis/cluster/common" + "k8s.io/apimachinery/pkg/util/validation/field" +) + +const ClusterFinalizer = "cluster.cluster.k8s.io" + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +/// [Cluster] +// Cluster is the Schema for the clusters API +// +k8s:openapi-gen=true +// +kubebuilder:subresource:status +type Cluster struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec ClusterSpec `json:"spec,omitempty"` + Status ClusterStatus `json:"status,omitempty"` +} + +/// [Cluster] + +/// [ClusterSpec] +// ClusterSpec defines the desired state of Cluster +type ClusterSpec struct { + // Cluster network configuration + ClusterNetwork ClusterNetworkingConfig `json:"clusterNetwork"` + + // Provider-specific serialized configuration to use during + // cluster creation. It is recommended that providers maintain + // their own versioned API types that should be + // serialized/deserialized from this field. + // +optional + ProviderSpec ProviderSpec `json:"providerSpec,omitempty"` +} + +/// [ClusterSpec] + +/// [ClusterNetworkingConfig] +// ClusterNetworkingConfig specifies the different networking +// parameters for a cluster. +type ClusterNetworkingConfig struct { + // The network ranges from which service VIPs are allocated. + Services NetworkRanges `json:"services"` + + // The network ranges from which Pod networks are allocated. + Pods NetworkRanges `json:"pods"` + + // Domain name for services. + ServiceDomain string `json:"serviceDomain"` +} + +/// [ClusterNetworkingConfig] + +/// [NetworkRanges] +// NetworkRanges represents ranges of network addresses. +type NetworkRanges struct { + CIDRBlocks []string `json:"cidrBlocks"` +} + +/// [NetworkRanges] + +/// [ClusterStatus] +// ClusterStatus defines the observed state of Cluster +type ClusterStatus struct { + // APIEndpoint represents the endpoint to communicate with the IP. + // +optional + APIEndpoints []APIEndpoint `json:"apiEndpoints,omitempty"` + + // NB: Eventually we will redefine ErrorReason as ClusterStatusError once the + // following issue is fixed. + // https://github.com/kubernetes-incubator/apiserver-builder/issues/176 + + // If set, indicates that there is a problem reconciling the + // state, and will be set to a token value suitable for + // programmatic interpretation. + // +optional + ErrorReason common.ClusterStatusError `json:"errorReason,omitempty"` + + // If set, indicates that there is a problem reconciling the + // state, and will be set to a descriptive error message. + // +optional + ErrorMessage string `json:"errorMessage,omitempty"` + + // Provider-specific status. + // It is recommended that providers maintain their + // own versioned API types that should be + // serialized/deserialized from this field. 
+ // +optional + ProviderStatus *runtime.RawExtension `json:"providerStatus,omitempty"` +} + +/// [ClusterStatus] + +/// [APIEndpoint] +// APIEndpoint represents a reachable Kubernetes API endpoint. +type APIEndpoint struct { + // The hostname on which the API server is serving. + Host string `json:"host"` + + // The port on which the API server is serving. + Port int `json:"port"` +} + +/// [APIEndpoint] + +func (o *Cluster) Validate() field.ErrorList { + errors := field.ErrorList{} + // perform validation here and add to errors using field.Invalid + if o.Spec.ClusterNetwork.ServiceDomain == "" { + errors = append(errors, field.Invalid( + field.NewPath("Spec", "ClusterNetwork", "ServiceDomain"), + o.Spec.ClusterNetwork.ServiceDomain, + "invalid cluster configuration: missing Cluster.Spec.ClusterNetwork.ServiceDomain")) + } + if len(o.Spec.ClusterNetwork.Pods.CIDRBlocks) == 0 { + errors = append(errors, field.Invalid( + field.NewPath("Spec", "ClusterNetwork", "Pods"), + o.Spec.ClusterNetwork.Pods, + "invalid cluster configuration: missing Cluster.Spec.ClusterNetwork.Pods")) + } + if len(o.Spec.ClusterNetwork.Services.CIDRBlocks) == 0 { + errors = append(errors, field.Invalid( + field.NewPath("Spec", "ClusterNetwork", "Services"), + o.Spec.ClusterNetwork.Services, + "invalid cluster configuration: missing Cluster.Spec.ClusterNetwork.Services")) + } + return errors +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ClusterList contains a list of Cluster +type ClusterList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Cluster `json:"items"` +} + +func init() { + SchemeBuilder.Register(&Cluster{}, &ClusterList{}) +} diff --git a/vendor/github.com/openshift/cluster-api/pkg/apis/cluster/v1alpha1/common_types.go b/vendor/github.com/openshift/cluster-api/pkg/apis/cluster/v1alpha1/common_types.go new file mode 100644 index 00000000000..7a9c647fe29 --- /dev/null +++ b/vendor/github.com/openshift/cluster-api/pkg/apis/cluster/v1alpha1/common_types.go @@ -0,0 +1,58 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + corev1 "k8s.io/api/core/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// ProviderSpec defines the configuration to use during node creation. +type ProviderSpec struct { + + // No more than one of the following may be specified. + + // Value is an inlined, serialized representation of the resource + // configuration. It is recommended that providers maintain their own + // versioned API types that should be serialized/deserialized from this + // field, akin to component config. + // +optional + Value *runtime.RawExtension `json:"value,omitempty"` + + // Source for the provider configuration. Cannot be used if value is + // not empty. + // +optional + ValueFrom *ProviderSpecSource `json:"valueFrom,omitempty"` +} + +// ProviderSpecSource represents a source for the provider-specific +// resource configuration. 
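`ProviderSpec.Value` is the slot that carries a provider-specific spec as an opaque `runtime.RawExtension`. A minimal sketch of that wrapping, using a stand-in struct rather than the real OpenstackProviderSpec built in `pkg/asset/machines/openstack` earlier in this diff:

```go
// Sketch only: wrap a provider-specific payload into ProviderSpec.Value.
package example

import (
	"encoding/json"

	clusterv1 "github.com/openshift/cluster-api/pkg/apis/cluster/v1alpha1"
	"k8s.io/apimachinery/pkg/runtime"
)

// fakeProviderSpec is illustrative; field names are invented.
type fakeProviderSpec struct {
	Flavor string `json:"flavor"`
	Image  string `json:"image"`
}

func wrapProviderSpec(p fakeProviderSpec) (clusterv1.ProviderSpec, error) {
	raw, err := json.Marshal(p)
	if err != nil {
		return clusterv1.ProviderSpec{}, err
	}
	return clusterv1.ProviderSpec{Value: &runtime.RawExtension{Raw: raw}}, nil
}
```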
+type ProviderSpecSource struct { + // The machine class from which the provider config should be sourced. + // +optional + MachineClass *MachineClassRef `json:"machineClass,omitempty"` +} + +// MachineClassRef is a reference to the MachineClass object. Controllers should find the right MachineClass using this reference. +type MachineClassRef struct { + // +optional + *corev1.ObjectReference `json:",inline"` + + // Provider is the name of the cloud-provider which MachineClass is intended for. + // +optional + Provider string `json:"provider,omitempty"` +} diff --git a/vendor/github.com/openshift/cluster-api/pkg/apis/cluster/v1alpha1/defaults.go b/vendor/github.com/openshift/cluster-api/pkg/apis/cluster/v1alpha1/defaults.go new file mode 100644 index 00000000000..a64e64ab375 --- /dev/null +++ b/vendor/github.com/openshift/cluster-api/pkg/apis/cluster/v1alpha1/defaults.go @@ -0,0 +1,74 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + "github.com/openshift/cluster-api/pkg/apis/cluster/common" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/intstr" +) + +// PopulateDefaultsMachineDeployment fills in default field values +// Currently it is called after reading objects, but it could be called in an admission webhook also +func PopulateDefaultsMachineDeployment(d *MachineDeployment) { + if d.Spec.Replicas == nil { + d.Spec.Replicas = new(int32) + *d.Spec.Replicas = 1 + } + + if d.Spec.MinReadySeconds == nil { + d.Spec.MinReadySeconds = new(int32) + *d.Spec.MinReadySeconds = 0 + } + + if d.Spec.RevisionHistoryLimit == nil { + d.Spec.RevisionHistoryLimit = new(int32) + *d.Spec.RevisionHistoryLimit = 1 + } + + if d.Spec.ProgressDeadlineSeconds == nil { + d.Spec.ProgressDeadlineSeconds = new(int32) + *d.Spec.ProgressDeadlineSeconds = 600 + } + + if d.Spec.Strategy == nil { + d.Spec.Strategy = &MachineDeploymentStrategy{} + } + + if d.Spec.Strategy.Type == "" { + d.Spec.Strategy.Type = common.RollingUpdateMachineDeploymentStrategyType + } + + // Default RollingUpdate strategy only if strategy type is RollingUpdate. 
+ if d.Spec.Strategy.Type == common.RollingUpdateMachineDeploymentStrategyType { + if d.Spec.Strategy.RollingUpdate == nil { + d.Spec.Strategy.RollingUpdate = &MachineRollingUpdateDeployment{} + } + if d.Spec.Strategy.RollingUpdate.MaxSurge == nil { + ios1 := intstr.FromInt(1) + d.Spec.Strategy.RollingUpdate.MaxSurge = &ios1 + } + if d.Spec.Strategy.RollingUpdate.MaxUnavailable == nil { + ios0 := intstr.FromInt(0) + d.Spec.Strategy.RollingUpdate.MaxUnavailable = &ios0 + } + } + + if len(d.Namespace) == 0 { + d.Namespace = metav1.NamespaceDefault + } +} diff --git a/vendor/github.com/openshift/cluster-api/pkg/apis/cluster/v1alpha1/doc.go b/vendor/github.com/openshift/cluster-api/pkg/apis/cluster/v1alpha1/doc.go new file mode 100644 index 00000000000..9bd92e43525 --- /dev/null +++ b/vendor/github.com/openshift/cluster-api/pkg/apis/cluster/v1alpha1/doc.go @@ -0,0 +1,23 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package v1alpha1 contains API Schema definitions for the cluster v1alpha1 API group +// +k8s:openapi-gen=true +// +k8s:deepcopy-gen=package,register +// +k8s:conversion-gen=sigs.k8s.io/cluster-api/pkg/apis/cluster +// +k8s:defaulter-gen=TypeMeta +// +groupName=cluster.k8s.io +package v1alpha1 diff --git a/vendor/github.com/openshift/cluster-api/pkg/apis/cluster/v1alpha1/machine_types.go b/vendor/github.com/openshift/cluster-api/pkg/apis/cluster/v1alpha1/machine_types.go new file mode 100644 index 00000000000..6c6cc4a3c25 --- /dev/null +++ b/vendor/github.com/openshift/cluster-api/pkg/apis/cluster/v1alpha1/machine_types.go @@ -0,0 +1,227 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
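A quick usage sketch of the defaulting in `PopulateDefaultsMachineDeployment` above: an otherwise empty MachineDeployment comes back with one replica and a RollingUpdate strategy of maxSurge=1 / maxUnavailable=0.

```go
// Usage sketch for the vendored defaulting helper.
package example

import (
	"fmt"

	clusterv1 "github.com/openshift/cluster-api/pkg/apis/cluster/v1alpha1"
)

func defaultsDemo() {
	d := &clusterv1.MachineDeployment{}
	clusterv1.PopulateDefaultsMachineDeployment(d)

	fmt.Println(*d.Spec.Replicas)                                      // 1
	fmt.Println(d.Spec.Strategy.Type)                                  // RollingUpdate
	fmt.Println(d.Spec.Strategy.RollingUpdate.MaxSurge.String())       // 1
	fmt.Println(d.Spec.Strategy.RollingUpdate.MaxUnavailable.String()) // 0
}
```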
+*/ + +package v1alpha1 + +import ( + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + + "github.com/openshift/cluster-api/pkg/apis/cluster/common" +) + +// Finalizer is set on PrepareForCreate callback +const MachineFinalizer = "machine.cluster.k8s.io" + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +/// [Machine] +// Machine is the Schema for the machines API +// +k8s:openapi-gen=true +// +kubebuilder:subresource:status +type Machine struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec MachineSpec `json:"spec,omitempty"` + Status MachineStatus `json:"status,omitempty"` +} + +/// [Machine] + +/// [MachineSpec] +// MachineSpec defines the desired state of Machine +type MachineSpec struct { + // ObjectMeta will autopopulate the Node created. Use this to + // indicate what labels, annotations, name prefix, etc., should be used + // when creating the Node. + // +optional + metav1.ObjectMeta `json:"metadata,omitempty"` + + // Taints is the full, authoritative list of taints to apply to the corresponding + // Node. This list will overwrite any modifications made to the Node on + // an ongoing basis. + // +optional + Taints []corev1.Taint `json:"taints,omitempty"` + + // ProviderSpec details Provider-specific configuration to use during node creation. + // +optional + ProviderSpec ProviderSpec `json:"providerSpec"` + + // Versions of key software to use. This field is optional at cluster + // creation time, and omitting the field indicates that the cluster + // installation tool should select defaults for the user. These + // defaults may differ based on the cluster installer, but the tool + // should populate the values it uses when persisting Machine objects. + // A Machine spec missing this field at runtime is invalid. + // +optional + Versions MachineVersionInfo `json:"versions,omitempty"` + + // ConfigSource is used to populate in the associated Node for dynamic kubelet config. This + // field already exists in Node, so any updates to it in the Machine + // spec will be automatically copied to the linked NodeRef from the + // status. The rest of dynamic kubelet config support should then work + // as-is. + // +optional + ConfigSource *corev1.NodeConfigSource `json:"configSource,omitempty"` +} + +/// [MachineSpec] + +/// [MachineStatus] +// MachineStatus defines the observed state of Machine +type MachineStatus struct { + // NodeRef will point to the corresponding Node if it exists. + // +optional + NodeRef *corev1.ObjectReference `json:"nodeRef,omitempty"` + + // LastUpdated identifies when this status was last observed. + // +optional + LastUpdated *metav1.Time `json:"lastUpdated,omitempty"` + + // Versions specifies the current versions of software on the corresponding Node (if it + // exists). This is provided for a few reasons: + // + // 1) It is more convenient than checking the NodeRef, traversing it to + // the Node, and finding the appropriate field in Node.Status.NodeInfo + // (which uses different field names and formatting). + // 2) It removes some of the dependency on the structure of the Node, + // so that if the structure of Node.Status.NodeInfo changes, only + // machine controllers need to be updated, rather than every client + // of the Machines API. + // 3) There is no other simple way to check the control plane + // version. 
A client would have to connect directly to the apiserver + // running on the target node in order to find out its version. + // +optional + Versions *MachineVersionInfo `json:"versions,omitempty"` + + // ErrorReason will be set in the event that there is a terminal problem + // reconciling the Machine and will contain a succinct value suitable + // for machine interpretation. + // + // This field should not be set for transitive errors that a controller + // faces that are expected to be fixed automatically over + // time (like service outages), but instead indicate that something is + // fundamentally wrong with the Machine's spec or the configuration of + // the controller, and that manual intervention is required. Examples + // of terminal errors would be invalid combinations of settings in the + // spec, values that are unsupported by the controller, or the + // responsible controller itself being critically misconfigured. + // + // Any transient errors that occur during the reconciliation of Machines + // can be added as events to the Machine object and/or logged in the + // controller's output. + // +optional + ErrorReason *common.MachineStatusError `json:"errorReason,omitempty"` + + // ErrorMessage will be set in the event that there is a terminal problem + // reconciling the Machine and will contain a more verbose string suitable + // for logging and human consumption. + // + // This field should not be set for transitive errors that a controller + // faces that are expected to be fixed automatically over + // time (like service outages), but instead indicate that something is + // fundamentally wrong with the Machine's spec or the configuration of + // the controller, and that manual intervention is required. Examples + // of terminal errors would be invalid combinations of settings in the + // spec, values that are unsupported by the controller, or the + // responsible controller itself being critically misconfigured. + // + // Any transient errors that occur during the reconciliation of Machines + // can be added as events to the Machine object and/or logged in the + // controller's output. + // +optional + ErrorMessage *string `json:"errorMessage,omitempty"` + + // ProviderStatus details a Provider-specific status. + // It is recommended that providers maintain their + // own versioned API types that should be + // serialized/deserialized from this field. + // +optional + ProviderStatus *runtime.RawExtension `json:"providerStatus,omitempty"` + + // Addresses is a list of addresses assigned to the machine. Queried from cloud provider, if available. + // +optional + Addresses []corev1.NodeAddress `json:"addresses,omitempty"` + + // Conditions lists the conditions synced from the node conditions of the corresponding node-object. + // Machine-controller is responsible for keeping conditions up-to-date. + // MachineSet controller will be taking these conditions as a signal to decide if + // machine is healthy or needs to be replaced. + // Refer: https://kubernetes.io/docs/concepts/architecture/nodes/#condition + // +optional + Conditions []corev1.NodeCondition `json:"conditions,omitempty"` + + // LastOperation describes the last-operation performed by the machine-controller. + // This API should be useful as a history in terms of the latest operation performed on the + // specific machine. It should also convey the state of the latest-operation for example if + // it is still on-going, failed or completed successfully. 
+ // +optional + LastOperation *LastOperation `json:"lastOperation,omitempty"` + + // Phase represents the current phase of machine actuation. + // E.g. Pending, Running, Terminating, Failed etc. + // +optional + Phase *string `json:"phase,omitempty"` +} + +// LastOperation represents the detail of the last performed operation on the MachineObject. +type LastOperation struct { + // Description is the human-readable description of the last operation. + Description *string `json:"description,omitempty"` + + // LastUpdated is the timestamp at which LastOperation API was last-updated. + LastUpdated *metav1.Time `json:"lastUpdated,omitempty"` + + // State is the current status of the last performed operation. + // E.g. Processing, Failed, Successful etc + State *string `json:"state,omitempty"` + + // Type is the type of operation which was last performed. + // E.g. Create, Delete, Update etc + Type *string `json:"type,omitempty"` +} + +/// [MachineStatus] + +/// [MachineVersionInfo] +type MachineVersionInfo struct { + // Kubelet is the semantic version of kubelet to run + Kubelet string `json:"kubelet"` + + // ControlPlane is the semantic version of the Kubernetes control plane to + // run. This should only be populated when the machine is a + // control plane. + // +optional + ControlPlane string `json:"controlPlane,omitempty"` +} + +/// [MachineVersionInfo] + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// MachineList contains a list of Machine +type MachineList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Machine `json:"items"` +} + +func init() { + SchemeBuilder.Register(&Machine{}, &MachineList{}) +} diff --git a/vendor/github.com/openshift/cluster-api/pkg/apis/cluster/v1alpha1/machineclass_types.go b/vendor/github.com/openshift/cluster-api/pkg/apis/cluster/v1alpha1/machineclass_types.go new file mode 100644 index 00000000000..7844f6cfa46 --- /dev/null +++ b/vendor/github.com/openshift/cluster-api/pkg/apis/cluster/v1alpha1/machineclass_types.go @@ -0,0 +1,78 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" +) + +// +genclient +// +genclient:noStatus +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +/// [MachineClass] +// MachineClass can be used to templatize and re-use provider configuration +// across multiple Machines / MachineSets / MachineDeployments. +// +k8s:openapi-gen=true +// +resource:path=machineclasses +type MachineClass struct { + metav1.TypeMeta `json:",inline"` + // +optional + metav1.ObjectMeta `json:"metadata,omitempty"` + + // The total capacity available on this machine type (cpu/memory/disk). 
+ // + // WARNING: It is up to the creator of the MachineClass to ensure that + // this field is consistent with the underlying machine that will + // be provisioned when this class is used, to inform higher level + // automation (e.g. the cluster autoscaler). + // TODO(hardikdr) Add allocatable field once requirements are clear from autoscaler-clusterapi // integration topic. + // Capacity corev1.ResourceList `json:"capacity"` + + // How much capacity is actually allocatable on this machine. + // Must be equal to or less than the capacity, and when less + // indicates the resources reserved for system overhead. + // + // WARNING: It is up to the creator of the MachineClass to ensure that + // this field is consistent with the underlying machine that will + // be provisioned when this class is used, to inform higher level + // automation (e.g. the cluster autoscaler). + // TODO(hardikdr) Add allocatable field once requirements are clear from autoscaler-clusterapi // integration topic. + // Allocatable corev1.ResourceList `json:"allocatable"` + + // Provider-specific configuration to use during node creation. + ProviderSpec runtime.RawExtension `json:"providerSpec"` + + // TODO: should this use an api.ObjectReference to a 'MachineTemplate' instead? + // A link to the MachineTemplate that will be used to create provider + // specific configuration for Machines of this class. + // MachineTemplate corev1.ObjectReference `json:machineTemplate` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// MachineClassList contains a list of MachineClasses +type MachineClassList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []MachineClass `json:"items"` +} + +func init() { + SchemeBuilder.Register(&MachineClass{}, &MachineClassList{}) +} diff --git a/vendor/github.com/openshift/cluster-api/pkg/apis/cluster/v1alpha1/machinedeployment_types.go b/vendor/github.com/openshift/cluster-api/pkg/apis/cluster/v1alpha1/machinedeployment_types.go new file mode 100644 index 00000000000..be0cc25ff6d --- /dev/null +++ b/vendor/github.com/openshift/cluster-api/pkg/apis/cluster/v1alpha1/machinedeployment_types.go @@ -0,0 +1,194 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/intstr" + + "github.com/openshift/cluster-api/pkg/apis/cluster/common" +) + +/// [MachineDeploymentSpec] +// MachineDeploymentSpec defines the desired state of MachineDeployment +type MachineDeploymentSpec struct { + // Number of desired machines. Defaults to 1. + // This is a pointer to distinguish between explicit zero and not specified. + Replicas *int32 `json:"replicas,omitempty"` + + // Label selector for machines. Existing MachineSets whose machines are + // selected by this will be the ones affected by this deployment. + // It must match the machine template's labels. 
+ Selector metav1.LabelSelector `json:"selector"` + + // Template describes the machines that will be created. + Template MachineTemplateSpec `json:"template"` + + // The deployment strategy to use to replace existing machines with + // new ones. + // +optional + Strategy *MachineDeploymentStrategy `json:"strategy,omitempty"` + + // Minimum number of seconds for which a newly created machine should + // be ready. + // Defaults to 0 (machine will be considered available as soon as it + // is ready) + // +optional + MinReadySeconds *int32 `json:"minReadySeconds,omitempty"` + + // The number of old MachineSets to retain to allow rollback. + // This is a pointer to distinguish between explicit zero and not specified. + // Defaults to 1. + // +optional + RevisionHistoryLimit *int32 `json:"revisionHistoryLimit,omitempty"` + + // Indicates that the deployment is paused. + // +optional + Paused bool `json:"paused,omitempty"` + + // The maximum time in seconds for a deployment to make progress before it + // is considered to be failed. The deployment controller will continue to + // process failed deployments and a condition with a ProgressDeadlineExceeded + // reason will be surfaced in the deployment status. Note that progress will + // not be estimated during the time a deployment is paused. Defaults to 600s. + ProgressDeadlineSeconds *int32 `json:"progressDeadlineSeconds,omitempty"` +} + +/// [MachineDeploymentSpec] + +/// [MachineDeploymentStrategy] +// MachineDeploymentStrategy describes how to replace existing machines +// with new ones. +type MachineDeploymentStrategy struct { + // Type of deployment. Currently the only supported strategy is + // "RollingUpdate". + // Default is RollingUpdate. + // +optional + Type common.MachineDeploymentStrategyType `json:"type,omitempty"` + + // Rolling update config params. Present only if + // MachineDeploymentStrategyType = RollingUpdate. + // +optional + RollingUpdate *MachineRollingUpdateDeployment `json:"rollingUpdate,omitempty"` +} + +/// [MachineDeploymentStrategy] + +/// [MachineRollingUpdateDeployment] +// Spec to control the desired behavior of rolling update. +type MachineRollingUpdateDeployment struct { + // The maximum number of machines that can be unavailable during the update. + // Value can be an absolute number (ex: 5) or a percentage of desired + // machines (ex: 10%). + // Absolute number is calculated from percentage by rounding down. + // This can not be 0 if MaxSurge is 0. + // Defaults to 0. + // Example: when this is set to 30%, the old MachineSet can be scaled + // down to 70% of desired machines immediately when the rolling update + // starts. Once new machines are ready, old MachineSet can be scaled + // down further, followed by scaling up the new MachineSet, ensuring + // that the total number of machines available at all times + // during the update is at least 70% of desired machines. + // +optional + MaxUnavailable *intstr.IntOrString `json:"maxUnavailable,omitempty" protobuf:"bytes,1,opt,name=maxUnavailable"` + + // The maximum number of machines that can be scheduled above the + // desired number of machines. + // Value can be an absolute number (ex: 5) or a percentage of + // desired machines (ex: 10%). + // This can not be 0 if MaxUnavailable is 0. + // Absolute number is calculated from percentage by rounding up. + // Defaults to 1. 
+ // Example: when this is set to 30%, the new MachineSet can be scaled + // up immediately when the rolling update starts, such that the total + // number of old and new machines do not exceed 130% of desired + // machines. Once old machines have been killed, new MachineSet can + // be scaled up further, ensuring that total number of machines running + // at any time during the update is at most 130% of desired machines. + // +optional + MaxSurge *intstr.IntOrString `json:"maxSurge,omitempty" protobuf:"bytes,2,opt,name=maxSurge"` +} + +/// [MachineRollingUpdateDeployment] + +/// [MachineDeploymentStatus] +// MachineDeploymentStatus defines the observed state of MachineDeployment +type MachineDeploymentStatus struct { + // The generation observed by the deployment controller. + // +optional + ObservedGeneration int64 `json:"observedGeneration,omitempty" protobuf:"varint,1,opt,name=observedGeneration"` + + // Total number of non-terminated machines targeted by this deployment + // (their labels match the selector). + // +optional + Replicas int32 `json:"replicas,omitempty" protobuf:"varint,2,opt,name=replicas"` + + // Total number of non-terminated machines targeted by this deployment + // that have the desired template spec. + // +optional + UpdatedReplicas int32 `json:"updatedReplicas,omitempty" protobuf:"varint,3,opt,name=updatedReplicas"` + + // Total number of ready machines targeted by this deployment. + // +optional + ReadyReplicas int32 `json:"readyReplicas,omitempty" protobuf:"varint,7,opt,name=readyReplicas"` + + // Total number of available machines (ready for at least minReadySeconds) + // targeted by this deployment. + // +optional + AvailableReplicas int32 `json:"availableReplicas,omitempty" protobuf:"varint,4,opt,name=availableReplicas"` + + // Total number of unavailable machines targeted by this deployment. + // This is the total number of machines that are still required for + // the deployment to have 100% available capacity. They may either + // be machines that are running but not yet available or machines + // that still have not been created. 
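The percentage wording for MaxUnavailable and MaxSurge comes down to the `intstr` helpers in k8s.io/apimachinery; a small worked example for 10 desired machines (assuming the long-standing `GetValueFromIntOrPercent` helper, with rounding directions chosen per the comments above — surge up, unavailability down):

```go
// Worked example of the rolling-update budget arithmetic.
package example

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/intstr"
)

func rollingUpdateBudget() {
	desired := 10
	maxSurge := intstr.FromString("30%")
	maxUnavailable := intstr.FromInt(0)

	surge, _ := intstr.GetValueFromIntOrPercent(&maxSurge, desired, true)              // 3
	unavailable, _ := intstr.GetValueFromIntOrPercent(&maxUnavailable, desired, false) // 0

	// With 10 desired machines the controller may run up to 13 at once and
	// must keep at least 10 available throughout the rollout.
	fmt.Println(surge, unavailable)
}
```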
+ // +optional + UnavailableReplicas int32 `json:"unavailableReplicas,omitempty" protobuf:"varint,5,opt,name=unavailableReplicas"` +} + +/// [MachineDeploymentStatus] + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +/// [MachineDeployment] +// MachineDeployment is the Schema for the machinedeployments API +// +k8s:openapi-gen=true +// +kubebuilder:subresource:status +// +kubebuilder:subresource:scale:specpath=.spec.replicas,statuspath=.status.replicas,selectorpath=.status.labelSelector +type MachineDeployment struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec MachineDeploymentSpec `json:"spec,omitempty"` + Status MachineDeploymentStatus `json:"status,omitempty"` +} + +/// [MachineDeployment] + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// MachineDeploymentList contains a list of MachineDeployment +type MachineDeploymentList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []MachineDeployment `json:"items"` +} + +func init() { + SchemeBuilder.Register(&MachineDeployment{}, &MachineDeploymentList{}) +} diff --git a/vendor/github.com/openshift/cluster-api/pkg/apis/cluster/v1alpha1/machineset_types.go b/vendor/github.com/openshift/cluster-api/pkg/apis/cluster/v1alpha1/machineset_types.go new file mode 100644 index 00000000000..95644395ace --- /dev/null +++ b/vendor/github.com/openshift/cluster-api/pkg/apis/cluster/v1alpha1/machineset_types.go @@ -0,0 +1,187 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + "log" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + metav1validation "k8s.io/apimachinery/pkg/apis/meta/v1/validation" + + "github.com/openshift/cluster-api/pkg/apis/cluster/common" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/util/validation/field" +) + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +/// [MachineSet] +// MachineSet ensures that a specified number of machines replicas are running at any given time. +// +k8s:openapi-gen=true +// +kubebuilder:subresource:status +// +kubebuilder:subresource:scale:specpath=.spec.replicas,statuspath=.status.replicas,selectorpath=.status.labelSelector +type MachineSet struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec MachineSetSpec `json:"spec,omitempty"` + Status MachineSetStatus `json:"status,omitempty"` +} + +/// [MachineSet] + +/// [MachineSetSpec] +// MachineSetSpec defines the desired state of MachineSet +type MachineSetSpec struct { + // Replicas is the number of desired replicas. + // This is a pointer to distinguish between explicit zero and unspecified. + // Defaults to 1. + // +optional + Replicas *int32 `json:"replicas,omitempty"` + + // MinReadySeconds is the minimum number of seconds for which a newly created machine should be ready. 
+ // Defaults to 0 (machine will be considered available as soon as it is ready) + // +optional + MinReadySeconds int32 `json:"minReadySeconds,omitempty"` + + // Selector is a label query over machines that should match the replica count. + // Label keys and values that must match in order to be controlled by this MachineSet. + // It must match the machine template's labels. + // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors + Selector metav1.LabelSelector `json:"selector"` + + // Template is the object that describes the machine that will be created if + // insufficient replicas are detected. + // +optional + Template MachineTemplateSpec `json:"template,omitempty"` +} + +/// [MachineSetSpec] // doxygen marker + +/// [MachineTemplateSpec] // doxygen marker +// MachineTemplateSpec describes the data needed to create a Machine from a template +type MachineTemplateSpec struct { + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty"` + + // Specification of the desired behavior of the machine. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // +optional + Spec MachineSpec `json:"spec,omitempty"` +} + +/// [MachineTemplateSpec] + +/// [MachineSetStatus] +// MachineSetStatus defines the observed state of MachineSet +type MachineSetStatus struct { + // Replicas is the most recently observed number of replicas. + Replicas int32 `json:"replicas"` + + // The number of replicas that have labels matching the labels of the machine template of the MachineSet. + // +optional + FullyLabeledReplicas int32 `json:"fullyLabeledReplicas,omitempty"` + + // The number of ready replicas for this MachineSet. A machine is considered ready when the node has been created and is "Ready". + // +optional + ReadyReplicas int32 `json:"readyReplicas,omitempty"` + + // The number of available replicas (ready for at least minReadySeconds) for this MachineSet. + // +optional + AvailableReplicas int32 `json:"availableReplicas,omitempty"` + + // ObservedGeneration reflects the generation of the most recently observed MachineSet. + // +optional + ObservedGeneration int64 `json:"observedGeneration,omitempty"` + + // In the event that there is a terminal problem reconciling the + // replicas, both ErrorReason and ErrorMessage will be set. ErrorReason + // will be populated with a succinct value suitable for machine + // interpretation, while ErrorMessage will contain a more verbose + // string suitable for logging and human consumption. + // + // These fields should not be set for transitive errors that a + // controller faces that are expected to be fixed automatically over + // time (like service outages), but instead indicate that something is + // fundamentally wrong with the MachineTemplate's spec or the configuration of + // the machine controller, and that manual intervention is required. Examples + // of terminal errors would be invalid combinations of settings in the + // spec, values that are unsupported by the machine controller, or the + // responsible machine controller itself being critically misconfigured. + // + // Any transient errors that occur during the reconciliation of Machines + // can be added as events to the MachineSet object and/or logged in the + // controller's output. 
+ // +optional + ErrorReason *common.MachineSetStatusError `json:"errorReason,omitempty"` + // +optional + ErrorMessage *string `json:"errorMessage,omitempty"` +} + +/// [MachineSetStatus] + +func (machineSet *MachineSet) Validate() field.ErrorList { + errors := field.ErrorList{} + + // validate spec.selector and spec.template.labels + fldPath := field.NewPath("spec") + errors = append(errors, metav1validation.ValidateLabelSelector(&machineSet.Spec.Selector, fldPath.Child("selector"))...) + if len(machineSet.Spec.Selector.MatchLabels)+len(machineSet.Spec.Selector.MatchExpressions) == 0 { + errors = append(errors, field.Invalid(fldPath.Child("selector"), machineSet.Spec.Selector, "empty selector is not valid for MachineSet.")) + } + selector, err := metav1.LabelSelectorAsSelector(&machineSet.Spec.Selector) + if err != nil { + errors = append(errors, field.Invalid(fldPath.Child("selector"), machineSet.Spec.Selector, "invalid label selector.")) + } else { + labels := labels.Set(machineSet.Spec.Template.Labels) + if !selector.Matches(labels) { + errors = append(errors, field.Invalid(fldPath.Child("template", "metadata", "labels"), machineSet.Spec.Template.Labels, "`selector` does not match template `labels`")) + } + } + + return errors +} + +// DefaultingFunction sets default MachineSet field values +func (obj *MachineSet) Default() { + log.Printf("Defaulting fields for MachineSet %s\n", obj.Name) + + if obj.Spec.Replicas == nil { + obj.Spec.Replicas = new(int32) + *obj.Spec.Replicas = 1 + } + + if len(obj.Namespace) == 0 { + obj.Namespace = metav1.NamespaceDefault + } +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// MachineSetList contains a list of MachineSet +type MachineSetList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []MachineSet `json:"items"` +} + +func init() { + SchemeBuilder.Register(&MachineSet{}, &MachineSetList{}) +} diff --git a/vendor/github.com/openshift/cluster-api/pkg/apis/cluster/v1alpha1/register.go b/vendor/github.com/openshift/cluster-api/pkg/apis/cluster/v1alpha1/register.go new file mode 100644 index 00000000000..d4b8529670d --- /dev/null +++ b/vendor/github.com/openshift/cluster-api/pkg/apis/cluster/v1alpha1/register.go @@ -0,0 +1,48 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// NOTE: Boilerplate only. Ignore this file. 
+ +// Package v1alpha1 contains API Schema definitions for the cluster v1alpha1 API group +// +k8s:openapi-gen=true +// +k8s:deepcopy-gen=package,register +// +k8s:conversion-gen=sigs.k8s.io/cluster-api/pkg/apis/cluster +// +k8s:defaulter-gen=TypeMeta +// +groupName=cluster.k8s.io +package v1alpha1 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/runtime/scheme" +) + +var ( + // SchemeGroupVersion is group version used to register these objects + SchemeGroupVersion = schema.GroupVersion{Group: "cluster.k8s.io", Version: "v1alpha1"} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: SchemeGroupVersion} + + // Required by pkg/client/... + // TODO(pwittrock): Remove this after removing pkg/client/... + AddToScheme = SchemeBuilder.AddToScheme +) + +// Required by pkg/client/listers/... +// TODO(pwittrock): Remove this after removing pkg/client/... +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} diff --git a/vendor/github.com/openshift/cluster-api/pkg/apis/cluster/v1alpha1/zz_generated.deepcopy.go b/vendor/github.com/openshift/cluster-api/pkg/apis/cluster/v1alpha1/zz_generated.deepcopy.go new file mode 100644 index 00000000000..fd8e1f63a9c --- /dev/null +++ b/vendor/github.com/openshift/cluster-api/pkg/apis/cluster/v1alpha1/zz_generated.deepcopy.go @@ -0,0 +1,821 @@ +// +build !ignore_autogenerated + +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by main. DO NOT EDIT. + +package v1alpha1 + +import ( + common "github.com/openshift/cluster-api/pkg/apis/cluster/common" + v1 "k8s.io/api/core/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + intstr "k8s.io/apimachinery/pkg/util/intstr" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *APIEndpoint) DeepCopyInto(out *APIEndpoint) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new APIEndpoint. +func (in *APIEndpoint) DeepCopy() *APIEndpoint { + if in == nil { + return nil + } + out := new(APIEndpoint) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Cluster) DeepCopyInto(out *Cluster) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Cluster. +func (in *Cluster) DeepCopy() *Cluster { + if in == nil { + return nil + } + out := new(Cluster) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *Cluster) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterList) DeepCopyInto(out *ClusterList) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Cluster, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterList. +func (in *ClusterList) DeepCopy() *ClusterList { + if in == nil { + return nil + } + out := new(ClusterList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ClusterList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterNetworkingConfig) DeepCopyInto(out *ClusterNetworkingConfig) { + *out = *in + in.Services.DeepCopyInto(&out.Services) + in.Pods.DeepCopyInto(&out.Pods) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterNetworkingConfig. +func (in *ClusterNetworkingConfig) DeepCopy() *ClusterNetworkingConfig { + if in == nil { + return nil + } + out := new(ClusterNetworkingConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterSpec) DeepCopyInto(out *ClusterSpec) { + *out = *in + in.ClusterNetwork.DeepCopyInto(&out.ClusterNetwork) + in.ProviderSpec.DeepCopyInto(&out.ProviderSpec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterSpec. +func (in *ClusterSpec) DeepCopy() *ClusterSpec { + if in == nil { + return nil + } + out := new(ClusterSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterStatus) DeepCopyInto(out *ClusterStatus) { + *out = *in + if in.APIEndpoints != nil { + in, out := &in.APIEndpoints, &out.APIEndpoints + *out = make([]APIEndpoint, len(*in)) + copy(*out, *in) + } + if in.ProviderStatus != nil { + in, out := &in.ProviderStatus, &out.ProviderStatus + *out = new(runtime.RawExtension) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterStatus. +func (in *ClusterStatus) DeepCopy() *ClusterStatus { + if in == nil { + return nil + } + out := new(ClusterStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LastOperation) DeepCopyInto(out *LastOperation) { + *out = *in + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.LastUpdated != nil { + in, out := &in.LastUpdated, &out.LastUpdated + *out = (*in).DeepCopy() + } + if in.State != nil { + in, out := &in.State, &out.State + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LastOperation. +func (in *LastOperation) DeepCopy() *LastOperation { + if in == nil { + return nil + } + out := new(LastOperation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Machine) DeepCopyInto(out *Machine) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Machine. +func (in *Machine) DeepCopy() *Machine { + if in == nil { + return nil + } + out := new(Machine) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Machine) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MachineClass) DeepCopyInto(out *MachineClass) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.ProviderSpec.DeepCopyInto(&out.ProviderSpec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachineClass. +func (in *MachineClass) DeepCopy() *MachineClass { + if in == nil { + return nil + } + out := new(MachineClass) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *MachineClass) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MachineClassList) DeepCopyInto(out *MachineClassList) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]MachineClass, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachineClassList. +func (in *MachineClassList) DeepCopy() *MachineClassList { + if in == nil { + return nil + } + out := new(MachineClassList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *MachineClassList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MachineClassRef) DeepCopyInto(out *MachineClassRef) { + *out = *in + if in.ObjectReference != nil { + in, out := &in.ObjectReference, &out.ObjectReference + *out = new(v1.ObjectReference) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachineClassRef. +func (in *MachineClassRef) DeepCopy() *MachineClassRef { + if in == nil { + return nil + } + out := new(MachineClassRef) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MachineDeployment) DeepCopyInto(out *MachineDeployment) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + out.Status = in.Status + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachineDeployment. +func (in *MachineDeployment) DeepCopy() *MachineDeployment { + if in == nil { + return nil + } + out := new(MachineDeployment) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *MachineDeployment) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MachineDeploymentList) DeepCopyInto(out *MachineDeploymentList) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]MachineDeployment, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachineDeploymentList. +func (in *MachineDeploymentList) DeepCopy() *MachineDeploymentList { + if in == nil { + return nil + } + out := new(MachineDeploymentList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *MachineDeploymentList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MachineDeploymentSpec) DeepCopyInto(out *MachineDeploymentSpec) { + *out = *in + if in.Replicas != nil { + in, out := &in.Replicas, &out.Replicas + *out = new(int32) + **out = **in + } + in.Selector.DeepCopyInto(&out.Selector) + in.Template.DeepCopyInto(&out.Template) + if in.Strategy != nil { + in, out := &in.Strategy, &out.Strategy + *out = new(MachineDeploymentStrategy) + (*in).DeepCopyInto(*out) + } + if in.MinReadySeconds != nil { + in, out := &in.MinReadySeconds, &out.MinReadySeconds + *out = new(int32) + **out = **in + } + if in.RevisionHistoryLimit != nil { + in, out := &in.RevisionHistoryLimit, &out.RevisionHistoryLimit + *out = new(int32) + **out = **in + } + if in.ProgressDeadlineSeconds != nil { + in, out := &in.ProgressDeadlineSeconds, &out.ProgressDeadlineSeconds + *out = new(int32) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachineDeploymentSpec. 
+func (in *MachineDeploymentSpec) DeepCopy() *MachineDeploymentSpec { + if in == nil { + return nil + } + out := new(MachineDeploymentSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MachineDeploymentStatus) DeepCopyInto(out *MachineDeploymentStatus) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachineDeploymentStatus. +func (in *MachineDeploymentStatus) DeepCopy() *MachineDeploymentStatus { + if in == nil { + return nil + } + out := new(MachineDeploymentStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MachineDeploymentStrategy) DeepCopyInto(out *MachineDeploymentStrategy) { + *out = *in + if in.RollingUpdate != nil { + in, out := &in.RollingUpdate, &out.RollingUpdate + *out = new(MachineRollingUpdateDeployment) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachineDeploymentStrategy. +func (in *MachineDeploymentStrategy) DeepCopy() *MachineDeploymentStrategy { + if in == nil { + return nil + } + out := new(MachineDeploymentStrategy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MachineList) DeepCopyInto(out *MachineList) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Machine, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachineList. +func (in *MachineList) DeepCopy() *MachineList { + if in == nil { + return nil + } + out := new(MachineList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *MachineList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MachineRollingUpdateDeployment) DeepCopyInto(out *MachineRollingUpdateDeployment) { + *out = *in + if in.MaxUnavailable != nil { + in, out := &in.MaxUnavailable, &out.MaxUnavailable + *out = new(intstr.IntOrString) + **out = **in + } + if in.MaxSurge != nil { + in, out := &in.MaxSurge, &out.MaxSurge + *out = new(intstr.IntOrString) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachineRollingUpdateDeployment. +func (in *MachineRollingUpdateDeployment) DeepCopy() *MachineRollingUpdateDeployment { + if in == nil { + return nil + } + out := new(MachineRollingUpdateDeployment) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MachineSet) DeepCopyInto(out *MachineSet) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachineSet. +func (in *MachineSet) DeepCopy() *MachineSet { + if in == nil { + return nil + } + out := new(MachineSet) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *MachineSet) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MachineSetList) DeepCopyInto(out *MachineSetList) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]MachineSet, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachineSetList. +func (in *MachineSetList) DeepCopy() *MachineSetList { + if in == nil { + return nil + } + out := new(MachineSetList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *MachineSetList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MachineSetSpec) DeepCopyInto(out *MachineSetSpec) { + *out = *in + if in.Replicas != nil { + in, out := &in.Replicas, &out.Replicas + *out = new(int32) + **out = **in + } + in.Selector.DeepCopyInto(&out.Selector) + in.Template.DeepCopyInto(&out.Template) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachineSetSpec. +func (in *MachineSetSpec) DeepCopy() *MachineSetSpec { + if in == nil { + return nil + } + out := new(MachineSetSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MachineSetStatus) DeepCopyInto(out *MachineSetStatus) { + *out = *in + if in.ErrorReason != nil { + in, out := &in.ErrorReason, &out.ErrorReason + *out = new(common.MachineSetStatusError) + **out = **in + } + if in.ErrorMessage != nil { + in, out := &in.ErrorMessage, &out.ErrorMessage + *out = new(string) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachineSetStatus. +func (in *MachineSetStatus) DeepCopy() *MachineSetStatus { + if in == nil { + return nil + } + out := new(MachineSetStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MachineSpec) DeepCopyInto(out *MachineSpec) { + *out = *in + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + if in.Taints != nil { + in, out := &in.Taints, &out.Taints + *out = make([]v1.Taint, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + in.ProviderSpec.DeepCopyInto(&out.ProviderSpec) + out.Versions = in.Versions + if in.ConfigSource != nil { + in, out := &in.ConfigSource, &out.ConfigSource + *out = new(v1.NodeConfigSource) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachineSpec. +func (in *MachineSpec) DeepCopy() *MachineSpec { + if in == nil { + return nil + } + out := new(MachineSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MachineStatus) DeepCopyInto(out *MachineStatus) { + *out = *in + if in.NodeRef != nil { + in, out := &in.NodeRef, &out.NodeRef + *out = new(v1.ObjectReference) + **out = **in + } + if in.LastUpdated != nil { + in, out := &in.LastUpdated, &out.LastUpdated + *out = (*in).DeepCopy() + } + if in.Versions != nil { + in, out := &in.Versions, &out.Versions + *out = new(MachineVersionInfo) + **out = **in + } + if in.ErrorReason != nil { + in, out := &in.ErrorReason, &out.ErrorReason + *out = new(common.MachineStatusError) + **out = **in + } + if in.ErrorMessage != nil { + in, out := &in.ErrorMessage, &out.ErrorMessage + *out = new(string) + **out = **in + } + if in.ProviderStatus != nil { + in, out := &in.ProviderStatus, &out.ProviderStatus + *out = new(runtime.RawExtension) + (*in).DeepCopyInto(*out) + } + if in.Addresses != nil { + in, out := &in.Addresses, &out.Addresses + *out = make([]v1.NodeAddress, len(*in)) + copy(*out, *in) + } + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]v1.NodeCondition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.LastOperation != nil { + in, out := &in.LastOperation, &out.LastOperation + *out = new(LastOperation) + (*in).DeepCopyInto(*out) + } + if in.Phase != nil { + in, out := &in.Phase, &out.Phase + *out = new(string) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachineStatus. +func (in *MachineStatus) DeepCopy() *MachineStatus { + if in == nil { + return nil + } + out := new(MachineStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MachineTemplateSpec) DeepCopyInto(out *MachineTemplateSpec) { + *out = *in + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachineTemplateSpec. +func (in *MachineTemplateSpec) DeepCopy() *MachineTemplateSpec { + if in == nil { + return nil + } + out := new(MachineTemplateSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MachineVersionInfo) DeepCopyInto(out *MachineVersionInfo) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachineVersionInfo. 
+func (in *MachineVersionInfo) DeepCopy() *MachineVersionInfo { + if in == nil { + return nil + } + out := new(MachineVersionInfo) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NetworkRanges) DeepCopyInto(out *NetworkRanges) { + *out = *in + if in.CIDRBlocks != nil { + in, out := &in.CIDRBlocks, &out.CIDRBlocks + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkRanges. +func (in *NetworkRanges) DeepCopy() *NetworkRanges { + if in == nil { + return nil + } + out := new(NetworkRanges) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProviderSpec) DeepCopyInto(out *ProviderSpec) { + *out = *in + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(runtime.RawExtension) + (*in).DeepCopyInto(*out) + } + if in.ValueFrom != nil { + in, out := &in.ValueFrom, &out.ValueFrom + *out = new(ProviderSpecSource) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProviderSpec. +func (in *ProviderSpec) DeepCopy() *ProviderSpec { + if in == nil { + return nil + } + out := new(ProviderSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProviderSpecSource) DeepCopyInto(out *ProviderSpecSource) { + *out = *in + if in.MachineClass != nil { + in, out := &in.MachineClass, &out.MachineClass + *out = new(MachineClassRef) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProviderSpecSource. +func (in *ProviderSpecSource) DeepCopy() *ProviderSpecSource { + if in == nil { + return nil + } + out := new(ProviderSpecSource) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/sigs.k8s.io/cluster-api-provider-openstack/pkg/apis/openstackproviderconfig/v1alpha1/doc.go b/vendor/sigs.k8s.io/cluster-api-provider-openstack/pkg/apis/openstackproviderconfig/v1alpha1/doc.go index 7a8ffa99e45..11bae1ff56f 100644 --- a/vendor/sigs.k8s.io/cluster-api-provider-openstack/pkg/apis/openstackproviderconfig/v1alpha1/doc.go +++ b/vendor/sigs.k8s.io/cluster-api-provider-openstack/pkg/apis/openstackproviderconfig/v1alpha1/doc.go @@ -15,7 +15,7 @@ limitations under the License. 
*/ // +k8s:deepcopy-gen=package,register -// +k8s:conversion-gen=sigs.k8s.io/cluster-api/cluster-api-provider-openstack/pkg/apis/openstackproviderconfig +// +k8s:conversion-gen=github.com/openshift/cluster-api/cluster-api-provider-openstack/pkg/apis/openstackproviderconfig // +k8s:openapi-gen=true // +k8s:defaulter-gen=TypeMeta // +groupName=openstackproviderconfig.k8s.io diff --git a/vendor/sigs.k8s.io/cluster-api-provider-openstack/pkg/apis/openstackproviderconfig/v1alpha1/register.go b/vendor/sigs.k8s.io/cluster-api-provider-openstack/pkg/apis/openstackproviderconfig/v1alpha1/register.go index 35ceb6e2de7..035613e1eb1 100644 --- a/vendor/sigs.k8s.io/cluster-api-provider-openstack/pkg/apis/openstackproviderconfig/v1alpha1/register.go +++ b/vendor/sigs.k8s.io/cluster-api-provider-openstack/pkg/apis/openstackproviderconfig/v1alpha1/register.go @@ -24,7 +24,8 @@ import ( "k8s.io/apimachinery/pkg/util/json" "sigs.k8s.io/yaml" - clusterv1 "sigs.k8s.io/cluster-api/pkg/apis/cluster/v1alpha1" + clusterv1 "github.com/openshift/cluster-api/pkg/apis/cluster/v1alpha1" + machinev1 "github.com/openshift/cluster-api/pkg/apis/machine/v1beta1" "sigs.k8s.io/controller-runtime/pkg/runtime/scheme" ) @@ -67,7 +68,7 @@ func ClusterStatusFromProviderStatus(extension *runtime.RawExtension) (*Openstac // This is the same as ClusterSpecFromProviderSpec but we // expect there to be a specific Spec type for Machines soon -func MachineSpecFromProviderSpec(providerSpec clusterv1.ProviderSpec) (*OpenstackProviderSpec, error) { +func MachineSpecFromProviderSpec(providerSpec machinev1.ProviderSpec) (*OpenstackProviderSpec, error) { if providerSpec.Value == nil { return nil, errors.New("no such providerSpec found in manifest") } diff --git a/vendor/sigs.k8s.io/cluster-api-provider-openstack/pkg/apis/openstackproviderconfig/v1alpha1/securitygroup_types.go b/vendor/sigs.k8s.io/cluster-api-provider-openstack/pkg/apis/openstackproviderconfig/v1alpha1/securitygroup_types.go new file mode 100644 index 00000000000..3318c660c07 --- /dev/null +++ b/vendor/sigs.k8s.io/cluster-api-provider-openstack/pkg/apis/openstackproviderconfig/v1alpha1/securitygroup_types.go @@ -0,0 +1,51 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +// SecurityGroup represents the basic information of the associated +// OpenStack Neutron Security Group. +type SecurityGroup struct { + Name string `json:"name"` + ID string `json:"id"` + Rules []SecurityGroupRule `json:"rules"` +} + +// SecurityGroupRule represent the basic information of the associated OpenStack +// Security Group Role. 
+type SecurityGroupRule struct { + ID string `json:"name"` + Direction string `json:"direction"` + EtherType string `json:"etherType"` + SecurityGroupID string `json:"securityGroupID"` + PortRangeMin int `json:"portRangeMin"` + PortRangeMax int `json:"portRangeMax"` + Protocol string `json:"protocol"` + RemoteGroupID string `json:"remoteGroupID"` + RemoteIPPrefix string `json:"remoteIPPrefix"` +} + +// Equal checks if two SecurityGroupRules are the same. +func (r SecurityGroupRule) Equal(x SecurityGroupRule) bool { + return (r.Direction == x.Direction && + r.EtherType == x.EtherType && + r.PortRangeMin == x.PortRangeMin && + r.PortRangeMax == x.PortRangeMax && + r.Protocol == x.Protocol && + r.RemoteGroupID == x.RemoteGroupID && + r.RemoteIPPrefix == x.RemoteIPPrefix) + +} diff --git a/vendor/sigs.k8s.io/cluster-api-provider-openstack/pkg/apis/openstackproviderconfig/v1alpha1/types.go b/vendor/sigs.k8s.io/cluster-api-provider-openstack/pkg/apis/openstackproviderconfig/v1alpha1/types.go index 2741250cd1b..93a895206b5 100644 --- a/vendor/sigs.k8s.io/cluster-api-provider-openstack/pkg/apis/openstackproviderconfig/v1alpha1/types.go +++ b/vendor/sigs.k8s.io/cluster-api-provider-openstack/pkg/apis/openstackproviderconfig/v1alpha1/types.go @@ -27,8 +27,10 @@ import ( // OpenstackProviderSpec is the type that will be embedded in a Machine.Spec.ProviderSpec field // for an OpenStack Instance. It is used by the Openstack machine actuator to create a single machine instance. // TODO(cglaubitz): We might consider to change this to OpenstackMachineProviderSpec +// +k8s:openapi-gen=true type OpenstackProviderSpec struct { - metav1.TypeMeta `json:",inline"` + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` // The name of the secret containing the openstack credentials CloudsSecret *corev1.SecretReference `json:"cloudsSecret"` @@ -63,6 +65,9 @@ type OpenstackProviderSpec struct { // The name of the secret containing the user data (startup script in most cases) UserDataSecret *corev1.SecretReference `json:"userDataSecret,omitempty"` + // Whether the server instance is created on a trunk port or not. + Trunk bool `json:"trunk,omitempty"` + RootVolume RootVolume `json:"root_volume,omitempty"` } @@ -73,6 +78,8 @@ type NetworkParam struct { FixedIp string `json:"fixed_ip,omitempty"` // Filters for optional network query Filter Filter `json:"filter,omitempty"` + // Subnet within a network to use + Subnets []SubnetParam `json:"subnets,omitempty"` } type Filter struct { @@ -93,6 +100,37 @@ type Filter struct { NotTagsAny string `json:"not-tags-any,omitempty"` } +type SubnetParam struct { + // The UUID of the network. Required if you omit the port attribute. 
+ UUID string `json:"uuid,omitempty"` + + // Filters for optional network query + Filter SubnetFilter `json:"filter,omitempty"` +} + +type SubnetFilter struct { + Name string `json:"name,omitempty"` + EnableDHCP *bool `json:"enable_dhcp,omitempty"` + NetworkID string `json:"network_id,omitempty"` + TenantID string `json:"tenant_id,omitempty"` + ProjectID string `json:"project_id,omitempty"` + IPVersion int `json:"ip_version,omitempty"` + GatewayIP string `json:"gateway_ip,omitempty"` + CIDR string `json:"cidr,omitempty"` + IPv6AddressMode string `json:"ipv6_address_mode,omitempty"` + IPv6RAMode string `json:"ipv6_ra_mode,omitempty"` + ID string `json:"id,omitempty"` + SubnetPoolID string `json:"subnetpool_id,omitempty"` + Limit int `json:"limit,omitempty"` + Marker string `json:"marker,omitempty"` + SortKey string `json:"sort_key,omitempty"` + SortDir string `json:"sort_dir,omitempty"` + Tags string `json:"tags,omitempty"` + TagsAny string `json:"tags-any,omitempty"` + NotTags string `json:"not-tags,omitempty"` + NotTagsAny string `json:"not-tags-any,omitempty"` +} + type RootVolume struct { VolumeType string `json:"volumeType"` Size int `json:"diskSize,omitempty"` @@ -111,9 +149,17 @@ type OpenstackClusterProviderSpec struct { // network, a subnet with NodeCIDR, and a router connected to this subnet. // If you leave this empty, no network will be created. NodeCIDR string `json:"nodeCidr,omitempty"` + // DNSNameservers is the list of nameservers for OpenStack Subnet being created. + DNSNameservers []string `json:"dnsNameservers,omitempty"` // ExternalNetworkID is the ID of an external OpenStack Network. This is necessary // to get public internet to the VMs. ExternalNetworkID string `json:"externalNetworkId,omitempty"` + + // ManagedSecurityGroups defines that kubernetes manages the OpenStack security groups + // for now, that means that we'll create two security groups, one allowing SSH + // and API access from everywhere, and another one that allows all traffic to/from + // machines belonging to that group. In the future, we could make this more flexible. + ManagedSecurityGroups bool `json:"managedSecurityGroups"` } // +genclient @@ -135,6 +181,15 @@ type OpenstackClusterProviderStatus struct { // Network contains all information about the created OpenStack Network. // It includes Subnets and Router. Network *Network `json:"network,omitempty"` + + // ControlPlaneSecurityGroups contains all the information about the OpenStack + // Security Group that needs to be applied to control plane nodes. + // TODO: Maybe instead of two properties, we add a property to the group? + ControlPlaneSecurityGroup *SecurityGroup `json:"controlPlaneSecurityGroup,omitempty"` + + // GlobalSecurityGroup contains all the information about the OpenStack Security + // Group that needs to be applied to all nodes, both control plane and worker nodes. 
+ GlobalSecurityGroup *SecurityGroup `json:"globalSecurityGroup,omitempty"` } // Network represents basic information about the associated OpenStach Neutron Network diff --git a/vendor/sigs.k8s.io/cluster-api-provider-openstack/pkg/apis/openstackproviderconfig/v1alpha1/zz_generated.deepcopy.go b/vendor/sigs.k8s.io/cluster-api-provider-openstack/pkg/apis/openstackproviderconfig/v1alpha1/zz_generated.deepcopy.go index f9cb99f3cb3..abe7b821c0d 100644 --- a/vendor/sigs.k8s.io/cluster-api-provider-openstack/pkg/apis/openstackproviderconfig/v1alpha1/zz_generated.deepcopy.go +++ b/vendor/sigs.k8s.io/cluster-api-provider-openstack/pkg/apis/openstackproviderconfig/v1alpha1/zz_generated.deepcopy.go @@ -81,6 +81,13 @@ func (in *Network) DeepCopy() *Network { func (in *NetworkParam) DeepCopyInto(out *NetworkParam) { *out = *in in.Filter.DeepCopyInto(&out.Filter) + if in.Subnets != nil { + in, out := &in.Subnets, &out.Subnets + *out = make([]SubnetParam, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } return } @@ -99,6 +106,11 @@ func (in *OpenstackClusterProviderSpec) DeepCopyInto(out *OpenstackClusterProvid *out = *in out.TypeMeta = in.TypeMeta in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + if in.DNSNameservers != nil { + in, out := &in.DNSNameservers, &out.DNSNameservers + *out = make([]string, len(*in)) + copy(*out, *in) + } return } @@ -140,6 +152,16 @@ func (in *OpenstackClusterProviderStatus) DeepCopyInto(out *OpenstackClusterProv *out = new(Network) (*in).DeepCopyInto(*out) } + if in.ControlPlaneSecurityGroup != nil { + in, out := &in.ControlPlaneSecurityGroup, &out.ControlPlaneSecurityGroup + *out = new(SecurityGroup) + (*in).DeepCopyInto(*out) + } + if in.GlobalSecurityGroup != nil { + in, out := &in.GlobalSecurityGroup, &out.GlobalSecurityGroup + *out = new(SecurityGroup) + (*in).DeepCopyInto(*out) + } return } @@ -165,6 +187,12 @@ func (in *OpenstackClusterProviderStatus) DeepCopyObject() runtime.Object { func (in *OpenstackProviderSpec) DeepCopyInto(out *OpenstackProviderSpec) { *out = *in out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + if in.CloudsSecret != nil { + in, out := &in.CloudsSecret, &out.CloudsSecret + *out = new(v1.SecretReference) + **out = **in + } if in.Networks != nil { in, out := &in.Networks, &out.Networks *out = make([]NetworkParam, len(*in)) @@ -236,6 +264,43 @@ func (in *Router) DeepCopy() *Router { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SecurityGroup) DeepCopyInto(out *SecurityGroup) { + *out = *in + if in.Rules != nil { + in, out := &in.Rules, &out.Rules + *out = make([]SecurityGroupRule, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecurityGroup. +func (in *SecurityGroup) DeepCopy() *SecurityGroup { + if in == nil { + return nil + } + out := new(SecurityGroup) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SecurityGroupRule) DeepCopyInto(out *SecurityGroupRule) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecurityGroupRule. 
+func (in *SecurityGroupRule) DeepCopy() *SecurityGroupRule { + if in == nil { + return nil + } + out := new(SecurityGroupRule) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *Subnet) DeepCopyInto(out *Subnet) { *out = *in @@ -251,3 +316,41 @@ func (in *Subnet) DeepCopy() *Subnet { in.DeepCopyInto(out) return out } + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SubnetFilter) DeepCopyInto(out *SubnetFilter) { + *out = *in + if in.EnableDHCP != nil { + in, out := &in.EnableDHCP, &out.EnableDHCP + *out = new(bool) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SubnetFilter. +func (in *SubnetFilter) DeepCopy() *SubnetFilter { + if in == nil { + return nil + } + out := new(SubnetFilter) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SubnetParam) DeepCopyInto(out *SubnetParam) { + *out = *in + in.Filter.DeepCopyInto(&out.Filter) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SubnetParam. +func (in *SubnetParam) DeepCopy() *SubnetParam { + if in == nil { + return nil + } + out := new(SubnetParam) + in.DeepCopyInto(out) + return out +}
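
For reviewers, a minimal illustrative sketch (not part of the diff itself) of how two of the newly vendored helpers behave: MachineSet.Default()/Validate() from the vendored cluster/v1alpha1 package, and SecurityGroupRule.Equal() from the OpenStack provider types added above. The object name, labels, and rule values are made up for illustration.

package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	clusterv1 "github.com/openshift/cluster-api/pkg/apis/cluster/v1alpha1"
	openstackv1 "sigs.k8s.io/cluster-api-provider-openstack/pkg/apis/openstackproviderconfig/v1alpha1"
)

func main() {
	// MachineSet defaulting and validation from the newly vendored package:
	// Default() fills Spec.Replicas (to 1) and Namespace when unset, and
	// Validate() requires a non-empty selector that matches the template labels.
	ms := &clusterv1.MachineSet{
		ObjectMeta: metav1.ObjectMeta{Name: "example"},
		Spec: clusterv1.MachineSetSpec{
			Selector: metav1.LabelSelector{MatchLabels: map[string]string{"set": "example"}},
			Template: clusterv1.MachineTemplateSpec{
				ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{"set": "example"}},
			},
		},
	}
	ms.Default()
	fmt.Println(*ms.Spec.Replicas, ms.Namespace, len(ms.Validate())) // 1 default 0

	// SecurityGroupRule.Equal compares only the rule semantics (direction,
	// ether type, port range, protocol, remote group/prefix); it ignores the
	// identity fields, so two rules differing only in ID/SecurityGroupID are equal.
	a := openstackv1.SecurityGroupRule{
		ID:             "rule-a",
		Direction:      "ingress",
		EtherType:      "IPv4",
		PortRangeMin:   6443,
		PortRangeMax:   6443,
		Protocol:       "tcp",
		RemoteIPPrefix: "0.0.0.0/0",
	}
	b := a
	b.ID = "rule-b"
	b.SecurityGroupID = "sg-other"
	fmt.Println(a.Equal(b)) // true
}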