diff --git a/Gopkg.lock b/Gopkg.lock index f76feb635da..1da98786b8f 100644 --- a/Gopkg.lock +++ b/Gopkg.lock @@ -241,6 +241,14 @@ revision = "d2133a1ce379ef6fa992b0514a77146c60db9d1c" version = "v1.1.0" +[[projects]] + digest = "1:4304cca260ab815326ca42d9c28fb843342748267034c51963e13f5e54e727d1" + name = "github.com/evanphx/json-patch" + packages = ["."] + pruneopts = "NUT" + revision = "026c730a0dcc5d11f93f1cf1cc65b01247ea7b6f" + version = "v4.5.0" + [[projects]] digest = "1:81466b4218bf6adddac2572a30ac733a9255919bc2f470b4827a317bd4ee1756" name = "github.com/ghodss/yaml" @@ -495,6 +503,30 @@ revision = "6ca4dbf54d38eea1a992b3c722a76a5d1c4cb25c" version = "v0.0.4" +[[projects]] + branch = "master" + digest = "1:e1dc7d07ec1dd382dd1e3710d0ff9eec0815375fb5eebb748426413aeeb5b703" + name = "github.com/metal3-io/baremetal-operator" + packages = [ + "pkg/bmc", + "pkg/hardware", + ] + pruneopts = "NUT" + revision = "0cd412515cf97e2943ca823bc5db630b2ecdc340" + source = "https://github.com/openshift/baremetal-operator" + +[[projects]] + branch = "master" + digest = "1:7f1f10ab08636d2d0fd260e43a0700a8905588eadd89d4a3eaefac18cce7bd5b" + name = "github.com/metal3-io/cluster-api-provider-baremetal" + packages = [ + "pkg/apis", + "pkg/apis/baremetal/v1alpha1", + ] + pruneopts = "NUT" + revision = "53df0c29f8e20d5ff6a42e95589969b59359eab6" + source = "https://github.com/openshift/cluster-api-provider-baremetal/pkg/apis" + [[projects]] branch = "master" digest = "1:063d55b87e200bced5e2be658cc70acafb4c5bbc4afa04d4b82f66298b73d089" @@ -575,11 +607,12 @@ [[projects]] branch = "master" - digest = "1:5449643b322144dad676e6c0efa0e554fc7b7552b5665468a48d8669b570ad1b" + digest = "1:a0fe86cf6f3fca907a3aa850cd5f54e73fd2e915cc73367097c28408bfbc12ac" name = "github.com/openshift/cloud-credential-operator" packages = [ "pkg/apis/cloudcredential/v1", "pkg/aws", + "pkg/controller/utils", "version", ] pruneopts = "NUT" @@ -1117,10 +1150,11 @@ version = "kubernetes-1.12.2" [[projects]] - digest = 
"1:08027a07c9a263deb5f51fcc801996e7d453b1ed1318c6c5eb7affd084bff3a9" + digest = "1:d4077b20d8ba3b64ff57af190a5aee42167d7cd6f3788ad0d67bdc7b427f4697" name = "k8s.io/client-go" packages = [ "discovery", + "dynamic", "kubernetes", "kubernetes/scheme", "kubernetes/typed/admissionregistration/v1alpha1", @@ -1161,6 +1195,7 @@ "plugin/pkg/client/auth/exec", "rest", "rest/watch", + "restmapper", "tools/auth", "tools/cache", "tools/clientcmd", @@ -1237,9 +1272,13 @@ [[projects]] branch = "release-0.2" - digest = "1:c4e8f6509ee8cec911985a812ed3f9cc024404acc45ac613fa103f735cf3222a" + digest = "1:44615562b7b4f5f638f9e56d0f51b787fdf0e24ad0dd90177d77d9cee0d0af27" name = "sigs.k8s.io/controller-runtime" - packages = ["pkg/scheme"] + packages = [ + "pkg/client", + "pkg/client/apiutil", + "pkg/scheme", + ] pruneopts = "NUT" revision = "f60c87ec713cb8da81257228530605457ebf7220" @@ -1307,6 +1346,10 @@ "github.com/gophercloud/gophercloud/openstack/objectstorage/v1/objects", "github.com/gophercloud/utils/openstack/clientconfig", "github.com/libvirt/libvirt-go", + "github.com/metal3-io/baremetal-operator/pkg/bmc", + "github.com/metal3-io/baremetal-operator/pkg/hardware", + "github.com/metal3-io/cluster-api-provider-baremetal/pkg/apis", + "github.com/metal3-io/cluster-api-provider-baremetal/pkg/apis/baremetal/v1alpha1", "github.com/openshift/api/config/v1", "github.com/openshift/api/operator/v1alpha1", "github.com/openshift/client-go/config/clientset/versioned", diff --git a/Gopkg.toml b/Gopkg.toml index 63999c64cbb..10d2360f003 100644 --- a/Gopkg.toml +++ b/Gopkg.toml @@ -123,3 +123,12 @@ required = [ [[constraint]] name = "github.com/containers/image" version = "2.0.0" + +[[constraint]] + name = "github.com/metal3-io/cluster-api-provider-baremetal" + source = "https://github.com/openshift/cluster-api-provider-baremetal/pkg/apis" + +[[constraint]] + branch = "master" + name = "github.com/metal3-io/baremetal-operator" + source = "https://github.com/openshift/baremetal-operator" diff 
--git a/README.md b/README.md index 61d5c2f98cb..c7fc329956b 100644 --- a/README.md +++ b/README.md @@ -3,7 +3,8 @@ ## Supported Platforms * [AWS](docs/user/aws/README.md) -* [Bare-metal](docs/user/metal/install_upi.md) +* [Bare Metal (UPI)](docs/user/metal/install_upi.md) +* [Bare Metal (IPI) (Experimental)](docs/user/metal/install_ipi.md) * [Libvirt with KVM](docs/dev/libvirt/README.md) (development only) * [OpenStack (experimental)](docs/user/openstack/README.md) * [vSphere](docs/user/vsphere/install_upi.md) diff --git a/cmd/openshift-install/destroy.go b/cmd/openshift-install/destroy.go index f56918d02e2..a9fb56c0aaf 100644 --- a/cmd/openshift-install/destroy.go +++ b/cmd/openshift-install/destroy.go @@ -9,6 +9,7 @@ import ( "github.com/openshift/installer/pkg/destroy" _ "github.com/openshift/installer/pkg/destroy/aws" _ "github.com/openshift/installer/pkg/destroy/azure" + _ "github.com/openshift/installer/pkg/destroy/baremetal" "github.com/openshift/installer/pkg/destroy/bootstrap" _ "github.com/openshift/installer/pkg/destroy/gcp" _ "github.com/openshift/installer/pkg/destroy/libvirt" diff --git a/docs/user/metal/README.md b/docs/user/metal/README.md new file mode 100644 index 00000000000..04984f0741d --- /dev/null +++ b/docs/user/metal/README.md @@ -0,0 +1,22 @@ +# Support for Bare Metal Environments + +OpenShift has support for bare metal deployments with either [User +provided infrastructure (UPI)](install_upi.md), or [Installer-provisioned +infrastructure (IPI)](install_ipi.md). + +The following is a summary of key differences: + +* UPI bare metal + * Provisioning hosts is an external requirement + * Requires extra DNS configuration + * Requires setup of load balancers + * Offers more control and choice over infrastructure + +* IPI bare metal + * Has built-in hardware provisioning components, will provision nodes with RHCOS automatically, + and supports the Machine API for ongoing management of these hosts. 
+ * Automates internal DNS requirements + * Automates setup of self-hosted load balancers + * Supports “openshift-install create cluster” for bare metal environments + using this infrastructure automation, but requires the use of compatible + hardware, as described in [install_ipi.md](install_ipi.md). diff --git a/docs/user/metal/install_ipi.md b/docs/user/metal/install_ipi.md new file mode 100644 index 00000000000..610086a67ff --- /dev/null +++ b/docs/user/metal/install_ipi.md @@ -0,0 +1,225 @@ +# Bare Metal IPI (Installer Provisioned Infrastructure) Overview + +Current Status: **Experimental** + +This document discusses the installer support for an IPI (Installer Provisioned +Infrastructure) install for bare metal hosts. This includes platform support +for the management of bare metal hosts, as well as some automation of DNS and +load balancing to bring up the cluster. + +The upstream project that provides Kubernetes-native management of bare metal +hosts is [metal3.io](http://metal3.io). + +For UPI (User Provisioned Infrastructure) based instructions for bare metal +deployments, see [install_upi.md](install_upi.md). + +## Prerequisites + +### Ironic + +Currently, the `baremetal` platform requires an existing Ironic environment. +This will eventually be handled by `openshift-install`, with Ironic being +deployed onto the bootstrap node. Until then, users of the `baremetal` platform +should use the +[openshift-metal3/dev-scripts](https://github.com/openshift-metal3/dev-scripts) +repository to handle configuration of Ironic. + +The following PR contains the WIP changes for automating Ironic from +`openshift-install`: https://github.com/openshift-metal3/kni-installer/pull/100 + +### Network Requirements + +It is assumed that all hosts have at least 2 NICs, used for the following +purposes: + +* **NIC #1 - External Network** + * This network is the main network used by the cluster, including API traffic + and application traffic. 
+ * ***DHCP*** + * External DHCP is assumed on this network. It is **strongly** recommended + to set up DHCP reservations for each of the hosts in the cluster to + ensure that they retain stable IP addresses. + * A pool of dynamic addresses should also be available on this network, as + the provisioning host and temporary bootstrap VM will also need addresses + on this network. + * ***NTP*** + * A time source must be accessible from this network. + * ***Reserved VIPs (Virtual IPs)*** - 3 IP addresses must be reserved on this + network for use by the cluster. Specifically, these IPs will serve the + following purposes: + * API - This IP will be used to reach the cluster API. + * Ingress - This IP will be used by cluster ingress traffic + * DNS - This IP will be used internally by the cluster for automating + internal DNS requirements. + * ***External DNS*** - While the cluster automates the internal DNS + requirements, two external DNS records must be created in whatever DNS + server is appropriate for this environment. + * `api..` - pointing to the API VIP + * `*.apps..` - pointing to the Ingress VIP + +* **NIC #2 - Provisioning Network** + * A private, non-routed network, used for PXE based provisioning. + * DHCP is automated for this network. + * Addressing for this network is currently hard coded as `172.22.0.0/24`, but + will be made configurable in the future. + +* **Out-of-band Management Network** + * Servers will typically have an additional NIC used by the onboard + management controllers (BMCs). These BMCs must be accessible and routed to + the host. + +### Provisioning Host + +The installer must be run from a host that is attached to the same networks as +the cluster, as described in the previous section. We refer to this host as +the *provisioning host*. The easiest way to provide a provisioning host is to +use one of the hosts that is intended to later become a worker node in the same +cluster. That way it is already connected to the proper networks. 
+ +It is recommended that the provisioning host be a bare metal host, as it must be +able to use libvirt to launch the OpenShift bootstrap VM locally. + +### Supported Hardware + +The architecture is intended to support a wide variety of hardware. This was +one of the reasons Ironic is used as an underlying technology. However, so far +development and testing has focused on PXE based provisioning using IPMI for +out-of-band management of hosts. Other provisioning approaches will be added, +tested, and documented over time. + +## Installation Process + +Once an environment has been prepared according to the documented +pre-requisites, the install process is the same as other IPI based platforms. + +`openshift-install create cluster` + +However, it is recommended to prepare an `install-config.yaml` file in advance, +containing all of the details of the bare metal hosts to be provisioned. + +### Install Config + +The `install-config.yaml` file requires some additional details. Most of the +information is teaching the installer and the resulting cluster enough about +the available hardware so that it is able to fully manage it. + +Here is an example `install-config.yaml` with the required `baremetal` platform +details. + +**IMPORTANT NOTE:** The current install configuration for the `baremetal` +platform should be considered experimental and still subject to change without +backwards compatibility. In particular, some items likely to change soon +include: + +* The `image` section will get completely removed. + +* The `hardwareProfile` is currently exposed as a way to allow specifying + different hardware parameters for deployment. By default, we will deploy + RHCOS to the first disk, but that may not be appropriate for all hardware. + The `hardwareProfile` is the field we have available to change that. This + interface is subject to change. 
In the meantime, hardware profiles can be + found here: + https://github.com/metal3-io/baremetal-operator/blob/master/pkg/hardware/profile.go#L48 + +```yaml +apiVersion: v1beta4 +baseDomain: test.metalkube.org +metadata: + name: ostest +compute: +- name: worker + replicas: 1 +controlPlane: + name: master + replicas: 3 + platform: + baremetal: {} +platform: + baremetal: + apiVIP: 192.168.111.5 + hosts: + - name: openshift-master-0 + role: master + bmc: + address: ipmi://192.168.111.1:6230 + username: admin + password: password + bootMACAddress: 00:11:07:4e:f6:68 + hardwareProfile: default + - name: openshift-master-1 + role: master + bmc: + address: ipmi://192.168.111.1:6231 + username: admin + password: password + bootMACAddress: 00:11:07:4e:f6:6c + hardwareProfile: default + - name: openshift-master-2 + role: master + bmc: + address: ipmi://192.168.111.1:6232 + username: admin + password: password + bootMACAddress: 00:11:07:4e:f6:70 + hardwareProfile: default + - name: openshift-worker-0 + role: worker + bmc: + address: ipmi://192.168.111.1:6233 + username: admin + password: password + bootMACAddress: 00:11:07:4e:f6:71 + hardwareProfile: default + image: + source: "http://172.22.0.1/images/rhcos-ootpa-latest.qcow2" + checksum: 2b3b1e19e18627d89da400b63430d5bb + deployKernel: http://172.22.0.1/images/ironic-python-agent.kernel + deployRamdisk: http://172.22.0.1/images/ironic-python-agent.initramfs +pullSecret: ... +sshKey: ... +``` + +## Work in Progress + +Integration of the `baremetal` platform is still a work-in-progress across +various parts of OpenShift. This section discusses key items that are not yet +fully integrated, and their workarounds. + +Note that once this work moves into the `openshift/installer` repository, new +issues will get created or existing issues will be moved to track these gaps +instead of leaving the existing issues against the KNI fork of the installer. 
+ +### Deployment of the `baremetal-operator` + +The `baremetal-operator` provides the server side of the API used by the +`baremetal` platform `Machine` actuator +([cluster-api-provider-baremetal](https://github.com/metal3-io/cluster-api-provider-baremetal)). +This is currently handled by the +[08_deploy_bmo.sh](https://github.com/openshift-metal3/dev-scripts/blob/master/08_deploy_bmo.sh) +script. + +This will be replaced by `machine-api-operator` integration and the following +PR: https://github.com/openshift/machine-api-operator/pull/302 + +### `BareMetalHost` registration by the Installer + +`openshift-install` needs to create the `BareMetalHost` objects that represent +the inventory of hardware under management. This is currently handled by the +[11_register_hosts.sh](https://github.com/openshift-metal3/dev-scripts/blob/master/11_register_hosts.sh) +script. + +https://github.com/openshift-metal3/kni-installer/issues/46 + +### `destroy cluster` support + +`openshift-install destroy cluster` is not supported for the `baremetal` +platform. + +https://github.com/openshift-metal3/kni-installer/issues/74 + +### install gather not supported + +When an installation fails, `openshift-install` will attempt to gather debug +information from hosts. This is not yet supported by the `baremetal` platform. + +https://github.com/openshift-metal3/kni-installer/issues/79 diff --git a/images/baremetal/Dockerfile.ci b/images/baremetal/Dockerfile.ci new file mode 100644 index 00000000000..8fc266c2315 --- /dev/null +++ b/images/baremetal/Dockerfile.ci @@ -0,0 +1,24 @@ +# This Dockerfile is a used by CI to publish an installer image +# It builds an image containing only the openshift-install. + +FROM registry.svc.ci.openshift.org/openshift/release:golang-1.10 AS builder +WORKDIR /go/src/github.com/openshift/installer +COPY . . 
+RUN TAGS="libvirt baremetal" hack/build.sh + + +FROM registry.svc.ci.openshift.org/origin/4.1:base +COPY --from=builder /go/src/github.com/openshift/installer/bin/openshift-install /bin/openshift-install + +RUN yum update -y && \ + yum install --setopt=tsflags=nodocs -y \ + libvirt-libs && \ + yum clean all && \ + rm -rf /var/cache/yum/* + +RUN mkdir /output && chown 1000:1000 /output +USER 1000:1000 +ENV PATH /bin +ENV HOME /output +WORKDIR /output +ENTRYPOINT ["/bin/openshift-install"] diff --git a/pkg/asset/cluster/baremetal/OWNERS b/pkg/asset/cluster/baremetal/OWNERS new file mode 100644 index 00000000000..11af0bba91f --- /dev/null +++ b/pkg/asset/cluster/baremetal/OWNERS @@ -0,0 +1,5 @@ +# See the OWNERS docs: https://git.k8s.io/community/contributors/guide/owners.md +# This file just uses aliases defined in OWNERS_ALIASES. + +reviewers: + - baremetal-reviewers diff --git a/pkg/asset/cluster/baremetal/baremetal.go b/pkg/asset/cluster/baremetal/baremetal.go new file mode 100644 index 00000000000..54c42accf8b --- /dev/null +++ b/pkg/asset/cluster/baremetal/baremetal.go @@ -0,0 +1,16 @@ +// Package baremetal extracts bare metal metadata from install +// configurations. +package baremetal + +import ( + "github.com/openshift/installer/pkg/types" + "github.com/openshift/installer/pkg/types/baremetal" +) + +// Metadata converts an install configuration to bare metal metadata. 
+func Metadata(config *types.InstallConfig) *baremetal.Metadata { + return &baremetal.Metadata{ + LibvirtURI: config.Platform.BareMetal.LibvirtURI, + IronicURI: config.Platform.BareMetal.IronicURI, + } +} diff --git a/pkg/asset/cluster/metadata.go b/pkg/asset/cluster/metadata.go index 7cfbd20a84d..76c95546b0d 100644 --- a/pkg/asset/cluster/metadata.go +++ b/pkg/asset/cluster/metadata.go @@ -8,6 +8,7 @@ import ( "github.com/openshift/installer/pkg/asset" "github.com/openshift/installer/pkg/asset/cluster/aws" "github.com/openshift/installer/pkg/asset/cluster/azure" + "github.com/openshift/installer/pkg/asset/cluster/baremetal" "github.com/openshift/installer/pkg/asset/cluster/gcp" "github.com/openshift/installer/pkg/asset/cluster/libvirt" "github.com/openshift/installer/pkg/asset/cluster/openstack" @@ -15,6 +16,7 @@ import ( "github.com/openshift/installer/pkg/types" awstypes "github.com/openshift/installer/pkg/types/aws" azuretypes "github.com/openshift/installer/pkg/types/azure" + baremetaltypes "github.com/openshift/installer/pkg/types/baremetal" gcptypes "github.com/openshift/installer/pkg/types/gcp" libvirttypes "github.com/openshift/installer/pkg/types/libvirt" nonetypes "github.com/openshift/installer/pkg/types/none" @@ -71,6 +73,8 @@ func (m *Metadata) Generate(parents asset.Parents) (err error) { metadata.ClusterPlatformMetadata.Azure = azure.Metadata(installConfig.Config) case gcptypes.Name: metadata.ClusterPlatformMetadata.GCP = gcp.Metadata(installConfig.Config) + case baremetaltypes.Name: + metadata.ClusterPlatformMetadata.BareMetal = baremetal.Metadata(installConfig.Config) case nonetypes.Name, vspheretypes.Name: default: return errors.Errorf("no known platform") diff --git a/pkg/asset/cluster/tfvars.go b/pkg/asset/cluster/tfvars.go index 5770bc3e10a..d0090a4caf0 100644 --- a/pkg/asset/cluster/tfvars.go +++ b/pkg/asset/cluster/tfvars.go @@ -28,11 +28,13 @@ import ( "github.com/openshift/installer/pkg/tfvars" awstfvars 
"github.com/openshift/installer/pkg/tfvars/aws" azuretfvars "github.com/openshift/installer/pkg/tfvars/azure" + baremetaltfvars "github.com/openshift/installer/pkg/tfvars/baremetal" gcptfvars "github.com/openshift/installer/pkg/tfvars/gcp" libvirttfvars "github.com/openshift/installer/pkg/tfvars/libvirt" openstacktfvars "github.com/openshift/installer/pkg/tfvars/openstack" "github.com/openshift/installer/pkg/types/aws" "github.com/openshift/installer/pkg/types/azure" + "github.com/openshift/installer/pkg/types/baremetal" "github.com/openshift/installer/pkg/types/gcp" "github.com/openshift/installer/pkg/types/libvirt" "github.com/openshift/installer/pkg/types/none" @@ -255,6 +257,23 @@ func (t *TerraformVariables) Generate(parents asset.Parents) error { Filename: fmt.Sprintf(TfPlatformVarsFileName, platform), Data: data, }) + case baremetal.Name: + data, err = baremetaltfvars.TFVars( + installConfig.Config.Platform.BareMetal.LibvirtURI, + installConfig.Config.Platform.BareMetal.IronicURI, + string(*rhcosImage), + "baremetal", + "provisioning", + installConfig.Config.Platform.BareMetal.Hosts, + installConfig.Config.Platform.BareMetal.Image, + ) + if err != nil { + return errors.Wrapf(err, "failed to get %s Terraform variables", platform) + } + t.FileList = append(t.FileList, &asset.File{ + Filename: fmt.Sprintf(TfPlatformVarsFileName, platform), + Data: data, + }) default: logrus.Warnf("unrecognized platform %s", platform) } diff --git a/pkg/asset/ignition/machine/node.go b/pkg/asset/ignition/machine/node.go index 04f7b0f2acb..d1930d25559 100644 --- a/pkg/asset/ignition/machine/node.go +++ b/pkg/asset/ignition/machine/node.go @@ -8,11 +8,22 @@ import ( "github.com/vincent-petithory/dataurl" "github.com/openshift/installer/pkg/types" + baremetaltypes "github.com/openshift/installer/pkg/types/baremetal" ) // pointerIgnitionConfig generates a config which references the remote config // served by the machine config server. 
func pointerIgnitionConfig(installConfig *types.InstallConfig, rootCA []byte, role string) *ignition.Config { + var ignitionHost string + switch installConfig.Platform.Name() { + case baremetaltypes.Name: + // Baremetal needs to point directly at the VIP because we don't have a + // way to configure DNS before Ignition runs. + ignitionHost = fmt.Sprintf("%s:22623", installConfig.BareMetal.APIVIP) + default: + ignitionHost = fmt.Sprintf("api-int.%s:22623", installConfig.ClusterDomain()) + } + return &ignition.Config{ Ignition: ignition.Ignition{ Version: ignition.MaxVersion.String(), @@ -21,7 +32,7 @@ func pointerIgnitionConfig(installConfig *types.InstallConfig, rootCA []byte, ro Source: func() *url.URL { return &url.URL{ Scheme: "https", - Host: fmt.Sprintf("api-int.%s:22623", installConfig.ClusterDomain()), + Host: ignitionHost, Path: fmt.Sprintf("/config/%s", role), } }().String(), diff --git a/pkg/asset/installconfig/installconfig.go b/pkg/asset/installconfig/installconfig.go index f7e8903ace1..2c8f3cfc25a 100644 --- a/pkg/asset/installconfig/installconfig.go +++ b/pkg/asset/installconfig/installconfig.go @@ -73,6 +73,7 @@ func (a *InstallConfig) Generate(parents asset.Parents) error { a.Config.VSphere = platform.VSphere a.Config.Azure = platform.Azure a.Config.GCP = platform.GCP + a.Config.BareMetal = platform.BareMetal if err := a.setDefaults(); err != nil { return errors.Wrap(err, "failed to set defaults for install config") diff --git a/pkg/asset/installconfig/platformcredscheck.go b/pkg/asset/installconfig/platformcredscheck.go index 5a9e72145b4..62b3e58d772 100644 --- a/pkg/asset/installconfig/platformcredscheck.go +++ b/pkg/asset/installconfig/platformcredscheck.go @@ -11,6 +11,7 @@ import ( gcpconfig "github.com/openshift/installer/pkg/asset/installconfig/gcp" "github.com/openshift/installer/pkg/types/aws" "github.com/openshift/installer/pkg/types/azure" + "github.com/openshift/installer/pkg/types/baremetal" 
"github.com/openshift/installer/pkg/types/gcp" "github.com/openshift/installer/pkg/types/libvirt" "github.com/openshift/installer/pkg/types/none" @@ -59,7 +60,7 @@ func (a *PlatformCredsCheck) Generate(dependencies asset.Parents) error { opts := new(clientconfig.ClientOpts) opts.Cloud = ic.Config.Platform.OpenStack.Cloud _, err = clientconfig.GetCloudFromYAML(opts) - case libvirt.Name, none.Name, vsphere.Name: + case baremetal.Name, libvirt.Name, none.Name, vsphere.Name: // no creds to check case azure.Name: _, err = azureconfig.GetSession() diff --git a/pkg/asset/machines/baremetal/OWNERS b/pkg/asset/machines/baremetal/OWNERS new file mode 100644 index 00000000000..11af0bba91f --- /dev/null +++ b/pkg/asset/machines/baremetal/OWNERS @@ -0,0 +1,5 @@ +# See the OWNERS docs: https://git.k8s.io/community/contributors/guide/owners.md +# This file just uses aliases defined in OWNERS_ALIASES. + +reviewers: + - baremetal-reviewers diff --git a/pkg/asset/machines/baremetal/machines.go b/pkg/asset/machines/baremetal/machines.go new file mode 100644 index 00000000000..e6ad5808428 --- /dev/null +++ b/pkg/asset/machines/baremetal/machines.go @@ -0,0 +1,71 @@ +// Package baremetal generates Machine objects for bare metal. +package baremetal + +import ( + "fmt" + + baremetalprovider "github.com/metal3-io/cluster-api-provider-baremetal/pkg/apis/baremetal/v1alpha1" + + machineapi "github.com/openshift/cluster-api/pkg/apis/machine/v1beta1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + + "github.com/openshift/installer/pkg/types" + "github.com/openshift/installer/pkg/types/baremetal" +) + +// Machines returns a list of machines for a machinepool. 
+func Machines(clusterID string, config *types.InstallConfig, pool *types.MachinePool, role, userDataSecret string) ([]machineapi.Machine, error) { + if configPlatform := config.Platform.Name(); configPlatform != baremetal.Name { + return nil, fmt.Errorf("non bare metal configuration: %q", configPlatform) + } + if poolPlatform := pool.Platform.Name(); poolPlatform != baremetal.Name { + return nil, fmt.Errorf("non bare metal machine-pool: %q", poolPlatform) + } + clustername := config.ObjectMeta.Name + platform := config.Platform.BareMetal + + total := int64(1) + if pool.Replicas != nil { + total = *pool.Replicas + } + provider := provider(clustername, config.Networking.MachineCIDR.String(), platform, userDataSecret) + var machines []machineapi.Machine + for idx := int64(0); idx < total; idx++ { + machine := machineapi.Machine{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "machine.openshift.io/v1beta1", + Kind: "Machine", + }, + ObjectMeta: metav1.ObjectMeta{ + Namespace: "openshift-machine-api", + Name: fmt.Sprintf("%s-%s-%d", clustername, pool.Name, idx), + Labels: map[string]string{ + "machine.openshift.io/cluster-api-cluster": clustername, + "machine.openshift.io/cluster-api-machine-role": role, + "machine.openshift.io/cluster-api-machine-type": role, + }, + }, + Spec: machineapi.MachineSpec{ + ProviderSpec: machineapi.ProviderSpec{ + Value: &runtime.RawExtension{Object: provider}, + }, + // we don't need to set Versions, because we control those via cluster operators. 
+ }, + } + machines = append(machines, machine) + } + + return machines, nil +} + +func provider(clusterName string, networkInterfaceAddress string, platform *baremetal.Platform, userDataSecret string) *baremetalprovider.BareMetalMachineProviderSpec { + return &baremetalprovider.BareMetalMachineProviderSpec{ + Image: baremetalprovider.Image{ + URL: platform.Image.Source, + Checksum: platform.Image.Checksum, + }, + UserData: &corev1.SecretReference{Name: userDataSecret}, + } +} diff --git a/pkg/asset/machines/baremetal/machinesets.go b/pkg/asset/machines/baremetal/machinesets.go new file mode 100644 index 00000000000..aa226fa61e8 --- /dev/null +++ b/pkg/asset/machines/baremetal/machinesets.go @@ -0,0 +1,79 @@ +// Package baremetal generates Machine objects for bare metal. +package baremetal + +import ( + "fmt" + + machineapi "github.com/openshift/cluster-api/pkg/apis/machine/v1beta1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/utils/pointer" + + "github.com/openshift/installer/pkg/types" + "github.com/openshift/installer/pkg/types/baremetal" +) + +// MachineSets returns a list of machinesets for a machinepool. +func MachineSets(clusterID string, config *types.InstallConfig, pool *types.MachinePool, role, userDataSecret string) ([]*machineapi.MachineSet, error) { + if configPlatform := config.Platform.Name(); configPlatform != baremetal.Name { + return nil, fmt.Errorf("non bare metal configuration: %q", configPlatform) + } + // FIXME: empty is a valid case for bare metal as we don't use it? + if poolPlatform := pool.Platform.Name(); poolPlatform != "" && poolPlatform != baremetal.Name { + return nil, fmt.Errorf("non bare metal machine-pool: %q", poolPlatform) + } + clustername := config.ObjectMeta.Name + platform := config.Platform.BareMetal + // FIXME: bare metal actuator does not support any options from machinepool. 
+ // mpool := pool.Platform.BareMetal + + total := int64(0) + if pool.Replicas != nil { + total = *pool.Replicas + } + + provider := provider(clustername, config.Networking.MachineCIDR.String(), platform, userDataSecret) + name := fmt.Sprintf("%s-%s-%d", clustername, pool.Name, 0) + mset := &machineapi.MachineSet{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "machine.openshift.io/v1beta1", + Kind: "MachineSet", + }, + ObjectMeta: metav1.ObjectMeta{ + Namespace: "openshift-machine-api", + Name: name, + Labels: map[string]string{ + "machine.openshift.io/cluster-api-cluster": clustername, + "machine.openshift.io/cluster-api-machine-role": role, + "machine.openshift.io/cluster-api-machine-type": role, + }, + }, + Spec: machineapi.MachineSetSpec{ + Replicas: pointer.Int32Ptr(int32(total)), + Selector: metav1.LabelSelector{ + MatchLabels: map[string]string{ + "machine.openshift.io/cluster-api-machineset": name, + "machine.openshift.io/cluster-api-cluster": clustername, + }, + }, + Template: machineapi.MachineTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{ + "machine.openshift.io/cluster-api-machineset": name, + "machine.openshift.io/cluster-api-cluster": clustername, + "machine.openshift.io/cluster-api-machine-role": role, + "machine.openshift.io/cluster-api-machine-type": role, + }, + }, + Spec: machineapi.MachineSpec{ + ProviderSpec: machineapi.ProviderSpec{ + Value: &runtime.RawExtension{Object: provider}, + }, + // we don't need to set Versions, because we control those via cluster operators. 
+ }, + }, + }, + } + + return []*machineapi.MachineSet{mset}, nil +} diff --git a/pkg/asset/machines/master.go b/pkg/asset/machines/master.go index 91fb32f6490..97ae3b35cc1 100644 --- a/pkg/asset/machines/master.go +++ b/pkg/asset/machines/master.go @@ -6,6 +6,8 @@ import ( "path/filepath" "github.com/ghodss/yaml" + baremetalapi "github.com/metal3-io/cluster-api-provider-baremetal/pkg/apis" + baremetalprovider "github.com/metal3-io/cluster-api-provider-baremetal/pkg/apis/baremetal/v1alpha1" gcpapi "github.com/openshift/cluster-api-provider-gcp/pkg/apis" gcpprovider "github.com/openshift/cluster-api-provider-gcp/pkg/apis/gcpprovider/v1beta1" libvirtapi "github.com/openshift/cluster-api-provider-libvirt/pkg/apis" @@ -27,6 +29,7 @@ import ( "github.com/openshift/installer/pkg/asset/installconfig" "github.com/openshift/installer/pkg/asset/machines/aws" "github.com/openshift/installer/pkg/asset/machines/azure" + "github.com/openshift/installer/pkg/asset/machines/baremetal" "github.com/openshift/installer/pkg/asset/machines/gcp" "github.com/openshift/installer/pkg/asset/machines/libvirt" "github.com/openshift/installer/pkg/asset/machines/machineconfig" @@ -37,6 +40,7 @@ import ( awsdefaults "github.com/openshift/installer/pkg/types/aws/defaults" azuretypes "github.com/openshift/installer/pkg/types/azure" azuredefaults "github.com/openshift/installer/pkg/types/azure/defaults" + baremetaltypes "github.com/openshift/installer/pkg/types/baremetal" gcptypes "github.com/openshift/installer/pkg/types/gcp" libvirttypes "github.com/openshift/installer/pkg/types/libvirt" nonetypes "github.com/openshift/installer/pkg/types/none" @@ -186,6 +190,15 @@ func (m *Master) Generate(dependencies asset.Parents) error { return errors.Wrap(err, "failed to create master machine objects") } azure.ConfigMasters(machines, clusterID.InfraID) + case baremetaltypes.Name: + mpool := defaultBareMetalMachinePoolPlatform() + mpool.Set(ic.Platform.BareMetal.DefaultMachinePlatform) + 
mpool.Set(pool.Platform.BareMetal) + pool.Platform.BareMetal = &mpool + machines, err = baremetal.Machines(clusterID.InfraID, ic, pool, "master", "master-user-data") + if err != nil { + return errors.Wrap(err, "failed to create master machine objects") + } case nonetypes.Name, vspheretypes.Name: default: return fmt.Errorf("invalid Platform") @@ -273,12 +286,14 @@ func (m *Master) Machines() ([]machineapi.Machine, error) { scheme := runtime.NewScheme() awsapi.AddToScheme(scheme) azureapi.AddToScheme(scheme) + baremetalapi.AddToScheme(scheme) gcpapi.AddToScheme(scheme) libvirtapi.AddToScheme(scheme) openstackapi.AddToScheme(scheme) decoder := serializer.NewCodecFactory(scheme).UniversalDecoder( awsprovider.SchemeGroupVersion, azureprovider.SchemeGroupVersion, + baremetalprovider.SchemeGroupVersion, gcpprovider.SchemeGroupVersion, libvirtprovider.SchemeGroupVersion, openstackprovider.SchemeGroupVersion, diff --git a/pkg/asset/machines/worker.go b/pkg/asset/machines/worker.go index f86248a7662..e2d14911e0e 100644 --- a/pkg/asset/machines/worker.go +++ b/pkg/asset/machines/worker.go @@ -6,6 +6,8 @@ import ( "path/filepath" "github.com/ghodss/yaml" + baremetalapi "github.com/metal3-io/cluster-api-provider-baremetal/pkg/apis" + baremetalprovider "github.com/metal3-io/cluster-api-provider-baremetal/pkg/apis/baremetal/v1alpha1" gcpapi "github.com/openshift/cluster-api-provider-gcp/pkg/apis" gcpprovider "github.com/openshift/cluster-api-provider-gcp/pkg/apis/gcpprovider/v1beta1" libvirtapi "github.com/openshift/cluster-api-provider-libvirt/pkg/apis" @@ -27,6 +29,7 @@ import ( "github.com/openshift/installer/pkg/asset/installconfig" "github.com/openshift/installer/pkg/asset/machines/aws" "github.com/openshift/installer/pkg/asset/machines/azure" + "github.com/openshift/installer/pkg/asset/machines/baremetal" "github.com/openshift/installer/pkg/asset/machines/gcp" "github.com/openshift/installer/pkg/asset/machines/libvirt" 
"github.com/openshift/installer/pkg/asset/machines/machineconfig" @@ -37,6 +40,7 @@ import ( awsdefaults "github.com/openshift/installer/pkg/types/aws/defaults" azuretypes "github.com/openshift/installer/pkg/types/azure" azuredefaults "github.com/openshift/installer/pkg/types/azure/defaults" + baremetaltypes "github.com/openshift/installer/pkg/types/baremetal" gcptypes "github.com/openshift/installer/pkg/types/gcp" libvirttypes "github.com/openshift/installer/pkg/types/libvirt" nonetypes "github.com/openshift/installer/pkg/types/none" @@ -91,6 +95,10 @@ func defaultOpenStackMachinePoolPlatform(flavor string) openstacktypes.MachinePo } } +func defaultBareMetalMachinePoolPlatform() baremetaltypes.MachinePool { + return baremetaltypes.MachinePool{} +} + // Worker generates the machinesets for `worker` machine pool. type Worker struct { UserDataFile *asset.File @@ -191,6 +199,18 @@ func (w *Worker) Generate(dependencies asset.Parents) error { for _, set := range sets { machineSets = append(machineSets, set) } + case baremetaltypes.Name: + mpool := defaultBareMetalMachinePoolPlatform() + mpool.Set(ic.Platform.BareMetal.DefaultMachinePlatform) + mpool.Set(pool.Platform.BareMetal) + pool.Platform.BareMetal = &mpool + sets, err := baremetal.MachineSets(clusterID.InfraID, ic, &pool, "worker", "worker-user-data") + if err != nil { + return errors.Wrap(err, "failed to create worker machine objects") + } + for _, set := range sets { + machineSets = append(machineSets, set) + } case gcptypes.Name: mpool := defaultGCPMachinePoolPlatform() mpool.Set(ic.Platform.GCP.DefaultMachinePlatform) @@ -235,7 +255,6 @@ func (w *Worker) Generate(dependencies asset.Parents) error { for _, set := range sets { machineSets = append(machineSets, set) } - case nonetypes.Name, vspheretypes.Name: default: return fmt.Errorf("invalid Platform") @@ -315,12 +334,14 @@ func (w *Worker) MachineSets() ([]machineapi.MachineSet, error) { scheme := runtime.NewScheme() awsapi.AddToScheme(scheme) 
azureapi.AddToScheme(scheme) + baremetalapi.AddToScheme(scheme) gcpapi.AddToScheme(scheme) libvirtapi.AddToScheme(scheme) openstackapi.AddToScheme(scheme) decoder := serializer.NewCodecFactory(scheme).UniversalDecoder( awsprovider.SchemeGroupVersion, azureprovider.SchemeGroupVersion, + baremetalprovider.SchemeGroupVersion, gcpprovider.SchemeGroupVersion, libvirtprovider.SchemeGroupVersion, openstackprovider.SchemeGroupVersion, diff --git a/pkg/asset/manifests/cloudproviderconfig.go b/pkg/asset/manifests/cloudproviderconfig.go index 4a690945284..a35253d3b7e 100644 --- a/pkg/asset/manifests/cloudproviderconfig.go +++ b/pkg/asset/manifests/cloudproviderconfig.go @@ -17,6 +17,7 @@ import ( vspheremanifests "github.com/openshift/installer/pkg/asset/manifests/vsphere" awstypes "github.com/openshift/installer/pkg/types/aws" azuretypes "github.com/openshift/installer/pkg/types/azure" + baremetaltypes "github.com/openshift/installer/pkg/types/baremetal" gcptypes "github.com/openshift/installer/pkg/types/gcp" libvirttypes "github.com/openshift/installer/pkg/types/libvirt" nonetypes "github.com/openshift/installer/pkg/types/none" @@ -78,7 +79,7 @@ func (cpc *CloudProviderConfig) Generate(dependencies asset.Parents) error { } switch installConfig.Config.Platform.Name() { - case awstypes.Name, libvirttypes.Name, nonetypes.Name: + case awstypes.Name, libvirttypes.Name, nonetypes.Name, baremetaltypes.Name: return nil case openstacktypes.Name: cm.Data[cloudProviderConfigDataKey] = openstackmanifests.CloudProviderConfig() diff --git a/pkg/asset/manifests/dns.go b/pkg/asset/manifests/dns.go index d0d29d49f3a..94b6bfd01ce 100644 --- a/pkg/asset/manifests/dns.go +++ b/pkg/asset/manifests/dns.go @@ -19,6 +19,7 @@ import ( icgcp "github.com/openshift/installer/pkg/asset/installconfig/gcp" awstypes "github.com/openshift/installer/pkg/types/aws" azuretypes "github.com/openshift/installer/pkg/types/azure" + baremetaltypes "github.com/openshift/installer/pkg/types/baremetal" gcptypes 
"github.com/openshift/installer/pkg/types/gcp" libvirttypes "github.com/openshift/installer/pkg/types/libvirt" nonetypes "github.com/openshift/installer/pkg/types/none" @@ -106,7 +107,7 @@ func (d *DNS) Generate(dependencies asset.Parents) error { } config.Spec.PublicZone = &configv1.DNSZone{ID: zone.Name} config.Spec.PrivateZone = &configv1.DNSZone{ID: fmt.Sprintf("%s-private-zone", clusterID.InfraID)} - case libvirttypes.Name, openstacktypes.Name, nonetypes.Name, vspheretypes.Name: + case libvirttypes.Name, openstacktypes.Name, baremetaltypes.Name, nonetypes.Name, vspheretypes.Name: default: return errors.New("invalid Platform") } diff --git a/pkg/asset/manifests/infrastructure.go b/pkg/asset/manifests/infrastructure.go index 09967643c36..202eab420cd 100644 --- a/pkg/asset/manifests/infrastructure.go +++ b/pkg/asset/manifests/infrastructure.go @@ -14,6 +14,7 @@ import ( "github.com/openshift/installer/pkg/types/aws" "github.com/openshift/installer/pkg/types/azure" + "github.com/openshift/installer/pkg/types/baremetal" "github.com/openshift/installer/pkg/types/gcp" "github.com/openshift/installer/pkg/types/libvirt" "github.com/openshift/installer/pkg/types/none" @@ -84,6 +85,8 @@ func (i *Infrastructure) Generate(dependencies asset.Parents) error { config.Status.PlatformStatus.Azure = &configv1.AzurePlatformStatus{ ResourceGroupName: fmt.Sprintf("%s-rg", clusterID.InfraID), } + case baremetal.Name: + config.Status.PlatformStatus.Type = configv1.BareMetalPlatformType case gcp.Name: config.Status.PlatformStatus.Type = configv1.GCPPlatformType config.Status.PlatformStatus.GCP = &configv1.GCPPlatformStatus{ diff --git a/pkg/asset/rhcos/image.go b/pkg/asset/rhcos/image.go index 81a8f8ceb0e..85f786abb56 100644 --- a/pkg/asset/rhcos/image.go +++ b/pkg/asset/rhcos/image.go @@ -14,6 +14,7 @@ import ( "github.com/openshift/installer/pkg/rhcos" "github.com/openshift/installer/pkg/types/aws" "github.com/openshift/installer/pkg/types/azure" + 
"github.com/openshift/installer/pkg/types/baremetal" "github.com/openshift/installer/pkg/types/gcp" "github.com/openshift/installer/pkg/types/libvirt" "github.com/openshift/installer/pkg/types/none" @@ -67,6 +68,8 @@ func (i *Image) Generate(p asset.Parents) error { case azure.Name: //TODO(serbrech): change to right image once available. osimage = "/resourceGroups/rhcos_images/providers/Microsoft.Compute/images/rhcostestimage" + case baremetal.Name: + osimage, err = rhcos.QEMU(ctx) case none.Name, vsphere.Name: default: return errors.New("invalid Platform") diff --git a/pkg/asset/tls/mcscertkey.go b/pkg/asset/tls/mcscertkey.go index 8d6f8015a88..cd0a5c79b9f 100644 --- a/pkg/asset/tls/mcscertkey.go +++ b/pkg/asset/tls/mcscertkey.go @@ -3,9 +3,11 @@ package tls import ( "crypto/x509" "crypto/x509/pkix" + "net" "github.com/openshift/installer/pkg/asset" "github.com/openshift/installer/pkg/asset/installconfig" + baremetaltypes "github.com/openshift/installer/pkg/types/baremetal" ) // MCSCertKey is the asset that generates the MCS key/cert pair. 
@@ -37,7 +39,14 @@ func (a *MCSCertKey) Generate(dependencies asset.Parents) error { Subject: pkix.Name{CommonName: hostname}, ExtKeyUsages: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth}, Validity: ValidityTenYears, - DNSNames: []string{hostname}, + } + + switch installConfig.Config.Platform.Name() { + case baremetaltypes.Name: + cfg.IPAddresses = []net.IP{net.ParseIP(installConfig.Config.BareMetal.APIVIP)} + cfg.DNSNames = []string{hostname, installConfig.Config.BareMetal.APIVIP} + default: + cfg.DNSNames = []string{hostname} } return a.SignedCertKey.Generate(cfg, ca, "machine-config-server", DoNotAppendParent) diff --git a/pkg/destroy/baremetal/OWNERS b/pkg/destroy/baremetal/OWNERS new file mode 100644 index 00000000000..11af0bba91f --- /dev/null +++ b/pkg/destroy/baremetal/OWNERS @@ -0,0 +1,5 @@ +# See the OWNERS docs: https://git.k8s.io/community/contributors/guide/owners.md +# This file just uses aliases defined in OWNERS_ALIASES. + +reviewers: + - baremetal-reviewers diff --git a/pkg/destroy/baremetal/baremetal.go b/pkg/destroy/baremetal/baremetal.go new file mode 100644 index 00000000000..cf00d3ac00d --- /dev/null +++ b/pkg/destroy/baremetal/baremetal.go @@ -0,0 +1,43 @@ +// +build baremetal + +package baremetal + +import ( + "github.com/libvirt/libvirt-go" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" + + "github.com/openshift/installer/pkg/destroy/providers" + "github.com/openshift/installer/pkg/types" +) + +// ClusterUninstaller holds the various options for the cluster we want to delete. +type ClusterUninstaller struct { + LibvirtURI string + IronicURI string + Logger logrus.FieldLogger +} + +// Run is the entrypoint to start the uninstall process. 
+func (o *ClusterUninstaller) Run() error { + o.Logger.Debug("Deleting bare metal resources") + + // FIXME: close the connection + _, err := libvirt.NewConnect(o.LibvirtURI) + if err != nil { + return errors.Wrap(err, "failed to connect to Libvirt daemon") + } + + o.Logger.Debug("FIXME: delete resources!") + + return nil +} + +// New returns bare metal Uninstaller from ClusterMetadata. +func New(logger logrus.FieldLogger, metadata *types.ClusterMetadata) (providers.Destroyer, error) { + return &ClusterUninstaller{ + LibvirtURI: metadata.ClusterPlatformMetadata.BareMetal.LibvirtURI, + IronicURI: metadata.ClusterPlatformMetadata.BareMetal.IronicURI, + Logger: logger, + }, nil +} diff --git a/pkg/destroy/baremetal/doc.go b/pkg/destroy/baremetal/doc.go new file mode 100644 index 00000000000..e51819329aa --- /dev/null +++ b/pkg/destroy/baremetal/doc.go @@ -0,0 +1,2 @@ +// Package baremetal provides a cluster-destroyer for bare metal clusters. +package baremetal diff --git a/pkg/destroy/baremetal/register.go b/pkg/destroy/baremetal/register.go new file mode 100644 index 00000000000..12ccff422e7 --- /dev/null +++ b/pkg/destroy/baremetal/register.go @@ -0,0 +1,10 @@ +// +build baremetal + +// Package baremetal provides a cluster-destroyer for bare metal clusters. +package baremetal + +import "github.com/openshift/installer/pkg/destroy/providers" + +func init() { + providers.Registry["baremetal"] = New +} diff --git a/pkg/tfvars/baremetal/OWNERS b/pkg/tfvars/baremetal/OWNERS new file mode 100644 index 00000000000..11af0bba91f --- /dev/null +++ b/pkg/tfvars/baremetal/OWNERS @@ -0,0 +1,5 @@ +# See the OWNERS docs: https://git.k8s.io/community/contributors/guide/owners.md +# This file just uses aliases defined in OWNERS_ALIASES. 
+ +reviewers: + - baremetal-reviewers diff --git a/pkg/tfvars/baremetal/baremetal.go b/pkg/tfvars/baremetal/baremetal.go new file mode 100644 index 00000000000..021c6c47ee8 --- /dev/null +++ b/pkg/tfvars/baremetal/baremetal.go @@ -0,0 +1,110 @@ +// Package baremetal contains bare metal specific Terraform-variable logic. +package baremetal + +import ( + "encoding/json" + "github.com/metal3-io/baremetal-operator/pkg/bmc" + "github.com/metal3-io/baremetal-operator/pkg/hardware" + libvirttfvars "github.com/openshift/installer/pkg/tfvars/libvirt" + "github.com/openshift/installer/pkg/types/baremetal" + "github.com/pkg/errors" +) + +type config struct { + LibvirtURI string `json:"libvirt_uri,omitempty"` + IronicURI string `json:"ironic_uri,omitempty"` + Image string `json:"os_image,omitempty"` + ExternalBridge string `json:"external_bridge,omitempty"` + ProvisioningBridge string `json:"provisioning_bridge,omitempty"` + + // Data required for control plane deployment - several maps per host, because of terraform's limitations + Hosts []map[string]interface{} `json:"hosts"` + RootDevices []map[string]interface{} `json:"root_devices"` + Properties []map[string]interface{} `json:"properties"` + DriverInfos []map[string]interface{} `json:"driver_infos"` + InstanceInfos []map[string]interface{} `json:"instance_infos"` +} + +// TFVars generates bare metal specific Terraform variables. 
+func TFVars(libvirtURI, ironicURI, osImage, externalBridge, provisioningBridge string, platformHosts []*baremetal.Host, image baremetal.Image) ([]byte, error) { + osImage, err := libvirttfvars.CachedImage(osImage) + if err != nil { + return nil, errors.Wrap(err, "failed to use cached libvirt image") + } + + var hosts, rootDevices, properties, driverInfos, instanceInfos []map[string]interface{} + + for _, host := range platformHosts { + // Get hardware profile + if host.HardwareProfile == "default" { + host.HardwareProfile = hardware.DefaultProfileName + } + + profile, err := hardware.GetProfile(host.HardwareProfile) + if err != nil { + return nil, err + } + + // BMC Driver Info + accessDetails, err := bmc.NewAccessDetails(host.BMC.Address) + if err != nil { + return nil, err + } + credentials := bmc.Credentials{ + Username: host.BMC.Username, + Password: host.BMC.Password, + } + driverInfo := accessDetails.DriverInfo(credentials) + driverInfo["deploy_kernel"] = image.DeployKernel + driverInfo["deploy_ramdisk"] = image.DeployRamdisk + + // Host Details + hostMap := map[string]interface{}{ + "name": host.Name, + "port_address": host.BootMACAddress, + "driver": accessDetails.Type(), + } + + // Properties + propertiesMap := map[string]interface{}{ + "local_gb": profile.LocalGB, + "cpu_arch": profile.CPUArch, + } + + // Root device hints + rootDevice := make(map[string]interface{}) + if profile.RootDeviceHints.HCTL != "" { + rootDevice["hctl"] = profile.RootDeviceHints.HCTL + } else { + rootDevice["name"] = profile.RootDeviceHints.DeviceName + } + + // Instance Info + instanceInfo := map[string]interface{}{ + "root_gb": 25, // FIXME(stbenjam): Needed until https://storyboard.openstack.org/#!/story/2005165 + "image_source": image.Source, + "image_checksum": image.Checksum, + } + + hosts = append(hosts, hostMap) + properties = append(properties, propertiesMap) + driverInfos = append(driverInfos, driverInfo) + rootDevices = append(rootDevices, rootDevice) + instanceInfos 
= append(instanceInfos, instanceInfo) + } + + cfg := &config{ + LibvirtURI: libvirtURI, + IronicURI: ironicURI, + Image: osImage, + ExternalBridge: externalBridge, + ProvisioningBridge: provisioningBridge, + Hosts: hosts, + Properties: properties, + DriverInfos: driverInfos, + RootDevices: rootDevices, + InstanceInfos: instanceInfos, + } + + return json.MarshalIndent(cfg, "", " ") +} diff --git a/pkg/tfvars/libvirt/cache.go b/pkg/tfvars/libvirt/cache.go index 3912d175ec3..afcd19e1947 100644 --- a/pkg/tfvars/libvirt/cache.go +++ b/pkg/tfvars/libvirt/cache.go @@ -14,6 +14,12 @@ import ( "golang.org/x/sys/unix" ) +// CachedImage returns the location of the cached image. +// FIXME: Exported for use by baremetal platform. +func CachedImage(uri string) (string, error) { + return cachedImage(uri) +} + // cachedImage leaves non-file:// image URIs unalterered. // Other URIs are retrieved with a local cache at // $XDG_CACHE_HOME/openshift-install/libvirt [1]. This allows you to diff --git a/pkg/types/baremetal/OWNERS b/pkg/types/baremetal/OWNERS new file mode 100644 index 00000000000..11af0bba91f --- /dev/null +++ b/pkg/types/baremetal/OWNERS @@ -0,0 +1,5 @@ +# See the OWNERS docs: https://git.k8s.io/community/contributors/guide/owners.md +# This file just uses aliases defined in OWNERS_ALIASES. + +reviewers: + - baremetal-reviewers diff --git a/pkg/types/baremetal/defaults/platform.go b/pkg/types/baremetal/defaults/platform.go new file mode 100644 index 00000000000..d51ac98f522 --- /dev/null +++ b/pkg/types/baremetal/defaults/platform.go @@ -0,0 +1,67 @@ +package defaults + +import ( + "fmt" + "net" + + "github.com/openshift/installer/pkg/types" + "github.com/openshift/installer/pkg/types/baremetal" +) + +// Defaults for the baremetal platform. 
+const ( + LibvirtURI = "qemu:///system" + IronicURI = "http://localhost:6385/v1" + ExternalBridge = "baremetal" + ProvisioningBridge = "provisioning" + HardwareProfile = "default" + APIVIP = "" + IngressVIP = "" +) + +// SetPlatformDefaults sets the defaults for the platform. +func SetPlatformDefaults(p *baremetal.Platform, c *types.InstallConfig) { + if p.LibvirtURI == "" { + p.LibvirtURI = LibvirtURI + } + + if p.IronicURI == "" { + p.IronicURI = IronicURI + } + + if p.ExternalBridge == "" { + p.ExternalBridge = ExternalBridge + } + + if p.ProvisioningBridge == "" { + p.ProvisioningBridge = ProvisioningBridge + } + + for _, host := range p.Hosts { + if host.HardwareProfile == "" { + host.HardwareProfile = HardwareProfile + } + } + + if p.APIVIP == APIVIP { + // This name should resolve to exactly one address + vip, err := net.LookupHost("api." + c.ClusterDomain()) + if err != nil { + // This will fail validation and abort the install + p.APIVIP = fmt.Sprintf("DNS lookup failure: %s", err.Error()) + } else { + p.APIVIP = vip[0] + } + } + + if p.IngressVIP == IngressVIP { + // This name should resolve to exactly one address + vip, err := net.LookupHost("test.apps." + c.ClusterDomain()) + if err != nil { + // This will fail validation and abort the install + p.IngressVIP = fmt.Sprintf("DNS lookup failure: %s", err.Error()) + } else { + p.IngressVIP = vip[0] + } + } +} diff --git a/pkg/types/baremetal/doc.go b/pkg/types/baremetal/doc.go new file mode 100644 index 00000000000..db078844da8 --- /dev/null +++ b/pkg/types/baremetal/doc.go @@ -0,0 +1,6 @@ +// Package baremetal contains baremetal-specific structures for +// installer configuration and management. +package baremetal + +// Name is the name for the baremetal platform. 
+const Name string = "baremetal" diff --git a/pkg/types/baremetal/machinepool.go b/pkg/types/baremetal/machinepool.go new file mode 100644 index 00000000000..823736388c3 --- /dev/null +++ b/pkg/types/baremetal/machinepool.go @@ -0,0 +1,13 @@ +package baremetal + +// MachinePool stores the configuration for a machine pool installed +// on bare metal. +type MachinePool struct { +} + +// Set sets the values from `required` to `a`. +func (l *MachinePool) Set(required *MachinePool) { + if required == nil || l == nil { + return + } +} diff --git a/pkg/types/baremetal/metadata.go b/pkg/types/baremetal/metadata.go new file mode 100644 index 00000000000..bfa3b5ebc95 --- /dev/null +++ b/pkg/types/baremetal/metadata.go @@ -0,0 +1,7 @@ +package baremetal + +// Metadata contains baremetal metadata (e.g. for uninstalling the cluster). +type Metadata struct { + LibvirtURI string `json:"libvirtURI"` + IronicURI string `json:"ironicURI"` +} diff --git a/pkg/types/baremetal/platform.go b/pkg/types/baremetal/platform.go new file mode 100644 index 00000000000..55511933ea5 --- /dev/null +++ b/pkg/types/baremetal/platform.go @@ -0,0 +1,66 @@ +package baremetal + +// BMC stores the information about a baremetal host's management controller. +type BMC struct { + Username string `json:"username"` + Password string `json:"password"` + Address string `json:"address"` +} + +// Host stores all the configuration data for a baremetal host. +type Host struct { + Name string `json:"name,omitempty"` + BMC BMC `json:"bmc"` + Role string `json:"role"` + BootMACAddress string `json:"bootMACAddress"` + HardwareProfile string `json:"hardwareProfile"` +} + +// Image stores details about the locations of various images needed for deployment. +// FIXME: This should be determined by the installer once Ironic and image downloading occurs in bootstrap VM. 
+type Image struct { + Source string `json:"source"` + Checksum string `json:"checksum"` + DeployKernel string `json:"deployKernel"` + DeployRamdisk string `json:"deployRamdisk"` +} + +// Platform stores all the global configuration that all machinesets use. +type Platform struct { + // LibvirtURI is the identifier for the libvirtd connection. It must be + // reachable from the host where the installer is run. + // +optional + // Default is qemu:///system + LibvirtURI string `json:"libvirtURI,omitempty"` + + // IronicURI is the identifier for the Ironic connection. It must be + // reachable from the host where the installer is run. + // +optional + IronicURI string `json:"ironicURI,omitempty"` + + // External bridge is used for external communication. + // +optional + ExternalBridge string `json:"externalBridge,omitempty"` + + // Provisioning bridge is used for provisioning nodes. + // +optional + ProvisioningBridge string `json:"provisioningBridge,omitempty"` + + // Hosts is the information needed to create the objects in Ironic. + Hosts []*Host `json:"hosts"` + + // Images contains the information needed to provision a host + Image Image `json:"image"` + + // DefaultMachinePlatform is the default configuration used when + // installing on bare metal for machine pools which do not define their own + // platform configuration. 
+ // +optional + DefaultMachinePlatform *MachinePool `json:"defaultMachinePlatform,omitempty"` + + // APIVIP is the VIP to use for internal API communication + APIVIP string `json:"apiVIP"` + + // IngressVIP is the VIP to use for ingress traffic + IngressVIP string `json:"ingressVIP"` +} diff --git a/pkg/types/baremetal/validation/machinepool.go b/pkg/types/baremetal/validation/machinepool.go new file mode 100644 index 00000000000..673d948276e --- /dev/null +++ b/pkg/types/baremetal/validation/machinepool.go @@ -0,0 +1,12 @@ +package validation + +import ( + "k8s.io/apimachinery/pkg/util/validation/field" + + "github.com/openshift/installer/pkg/types/baremetal" +) + +// ValidateMachinePool checks that the specified machine pool is valid. +func ValidateMachinePool(p *baremetal.MachinePool, fldPath *field.Path) field.ErrorList { + return field.ErrorList{} +} diff --git a/pkg/types/baremetal/validation/platform.go b/pkg/types/baremetal/validation/platform.go new file mode 100644 index 00000000000..da03d75f3ed --- /dev/null +++ b/pkg/types/baremetal/validation/platform.go @@ -0,0 +1,44 @@ +package validation + +import ( + "github.com/openshift/installer/pkg/types/baremetal" + "github.com/openshift/installer/pkg/validate" + "k8s.io/apimachinery/pkg/util/validation/field" +) + +// ValidatePlatform checks that the specified platform is valid. 
+func ValidatePlatform(p *baremetal.Platform, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + if err := validate.URI(p.LibvirtURI); err != nil { + allErrs = append(allErrs, field.Invalid(fldPath.Child("libvirtURI"), p.LibvirtURI, err.Error())) + } + + if err := validate.URI(p.IronicURI); err != nil { + allErrs = append(allErrs, field.Invalid(fldPath.Child("ironicURI"), p.IronicURI, err.Error())) + } + + if err := validate.Interface(p.ExternalBridge); err != nil { + allErrs = append(allErrs, field.Invalid(fldPath.Child("externalBridge"), p.ExternalBridge, err.Error())) + } + + if err := validate.Interface(p.ProvisioningBridge); err != nil { + allErrs = append(allErrs, field.Invalid(fldPath.Child("provisioningBridge"), p.ProvisioningBridge, err.Error())) + } + + if p.Hosts == nil { + allErrs = append(allErrs, field.Invalid(fldPath.Child("hosts"), p.Hosts, "bare metal hosts are missing")) + } + + if p.DefaultMachinePlatform != nil { + allErrs = append(allErrs, ValidateMachinePool(p.DefaultMachinePlatform, fldPath.Child("defaultMachinePlatform"))...) 
+ } + + if err := validate.IP(p.APIVIP); err != nil { + allErrs = append(allErrs, field.Invalid(fldPath.Child("apiVIP"), p.APIVIP, err.Error())) + } + + if err := validate.IP(p.IngressVIP); err != nil { + allErrs = append(allErrs, field.Invalid(fldPath.Child("ingressVIP"), p.IngressVIP, err.Error())) + } + return allErrs +} diff --git a/pkg/types/clustermetadata.go b/pkg/types/clustermetadata.go index 49ec26fcd4b..a398e965608 100644 --- a/pkg/types/clustermetadata.go +++ b/pkg/types/clustermetadata.go @@ -3,6 +3,7 @@ package types import ( "github.com/openshift/installer/pkg/types/aws" "github.com/openshift/installer/pkg/types/azure" + "github.com/openshift/installer/pkg/types/baremetal" "github.com/openshift/installer/pkg/types/gcp" "github.com/openshift/installer/pkg/types/libvirt" "github.com/openshift/installer/pkg/types/openstack" @@ -27,6 +28,7 @@ type ClusterPlatformMetadata struct { Libvirt *libvirt.Metadata `json:"libvirt,omitempty"` Azure *azure.Metadata `json:"azure,omitempty"` GCP *gcp.Metadata `json:"gcp,omitempty"` + BareMetal *baremetal.Metadata `json:"baremetal,omitempty"` } // Platform returns a string representation of the platform @@ -51,5 +53,8 @@ func (cpm *ClusterPlatformMetadata) Platform() string { if cpm.GCP != nil { return gcp.Name } + if cpm.BareMetal != nil { + return baremetal.Name + } return "" } diff --git a/pkg/types/defaults/installconfig.go b/pkg/types/defaults/installconfig.go index 8ec411af15e..879fec46ac8 100644 --- a/pkg/types/defaults/installconfig.go +++ b/pkg/types/defaults/installconfig.go @@ -5,6 +5,7 @@ import ( "github.com/openshift/installer/pkg/types" awsdefaults "github.com/openshift/installer/pkg/types/aws/defaults" azuredefaults "github.com/openshift/installer/pkg/types/azure/defaults" + baremetaldefaults "github.com/openshift/installer/pkg/types/baremetal/defaults" gcpdefaults "github.com/openshift/installer/pkg/types/gcp/defaults" libvirtdefaults "github.com/openshift/installer/pkg/types/libvirt/defaults" 
nonedefaults "github.com/openshift/installer/pkg/types/none/defaults" @@ -69,6 +70,8 @@ func SetInstallConfigDefaults(c *types.InstallConfig) { openstackdefaults.SetPlatformDefaults(c.Platform.OpenStack) case c.Platform.VSphere != nil: vspheredefaults.SetPlatformDefaults(c.Platform.VSphere, c) + case c.Platform.BareMetal != nil: + baremetaldefaults.SetPlatformDefaults(c.Platform.BareMetal, c) case c.Platform.None != nil: nonedefaults.SetPlatformDefaults(c.Platform.None) } diff --git a/pkg/types/installconfig.go b/pkg/types/installconfig.go index a2727523706..b6c20e2c977 100644 --- a/pkg/types/installconfig.go +++ b/pkg/types/installconfig.go @@ -6,6 +6,7 @@ import ( "github.com/openshift/installer/pkg/ipnet" "github.com/openshift/installer/pkg/types/aws" "github.com/openshift/installer/pkg/types/azure" + "github.com/openshift/installer/pkg/types/baremetal" "github.com/openshift/installer/pkg/types/gcp" "github.com/openshift/installer/pkg/types/libvirt" "github.com/openshift/installer/pkg/types/none" @@ -34,6 +35,7 @@ var ( // hidden-but-supported platform names. This list isn't presented // to the user in the interactive wizard. HiddenPlatformNames = []string{ + baremetal.Name, none.Name, openstack.Name, vsphere.Name, @@ -99,6 +101,10 @@ type Platform struct { // +optional Azure *azure.Platform `json:"azure,omitempty"` + // BareMetal is the configuration used when installing on bare metal. + // +optional + BareMetal *baremetal.Platform `json:"baremetal,omitempty"` + // GCP is the configuration used when installing on Google Cloud Platform. 
// +optional GCP *gcp.Platform `json:"gcp,omitempty"` @@ -131,6 +137,8 @@ func (p *Platform) Name() string { return aws.Name case p.Azure != nil: return azure.Name + case p.BareMetal != nil: + return baremetal.Name case p.GCP != nil: return gcp.Name case p.Libvirt != nil: diff --git a/pkg/types/machinepools.go b/pkg/types/machinepools.go index 4aa1bbe998f..99b42b6957a 100644 --- a/pkg/types/machinepools.go +++ b/pkg/types/machinepools.go @@ -3,6 +3,7 @@ package types import ( "github.com/openshift/installer/pkg/types/aws" "github.com/openshift/installer/pkg/types/azure" + "github.com/openshift/installer/pkg/types/baremetal" "github.com/openshift/installer/pkg/types/gcp" "github.com/openshift/installer/pkg/types/libvirt" "github.com/openshift/installer/pkg/types/openstack" @@ -48,6 +49,9 @@ type MachinePoolPlatform struct { // Azure is the configuration used when installing on OpenStack. Azure *azure.MachinePool `json:"azure,omitempty"` + // BareMetal is the configuration used when installing on bare metal. 
+ BareMetal *baremetal.MachinePool `json:"baremetal,omitempty"` + // GCP is the configuration used when installing on GCP GCP *gcp.MachinePool `json:"gcp,omitempty"` @@ -72,6 +76,8 @@ func (p *MachinePoolPlatform) Name() string { return aws.Name case p.Azure != nil: return azure.Name + case p.BareMetal != nil: + return baremetal.Name case p.GCP != nil: return gcp.Name case p.Libvirt != nil: diff --git a/pkg/types/validation/installconfig.go b/pkg/types/validation/installconfig.go index 85007d769b5..6f399100f02 100644 --- a/pkg/types/validation/installconfig.go +++ b/pkg/types/validation/installconfig.go @@ -17,6 +17,8 @@ import ( awsvalidation "github.com/openshift/installer/pkg/types/aws/validation" "github.com/openshift/installer/pkg/types/azure" azurevalidation "github.com/openshift/installer/pkg/types/azure/validation" + "github.com/openshift/installer/pkg/types/baremetal" + baremetalvalidation "github.com/openshift/installer/pkg/types/baremetal/validation" "github.com/openshift/installer/pkg/types/gcp" gcpvalidation "github.com/openshift/installer/pkg/types/gcp/validation" "github.com/openshift/installer/pkg/types/libvirt" @@ -240,6 +242,11 @@ func validatePlatform(platform *types.Platform, fldPath *field.Path, openStackVa if platform.VSphere != nil { validate(vsphere.Name, platform.VSphere, func(f *field.Path) field.ErrorList { return vspherevalidation.ValidatePlatform(platform.VSphere, f) }) } + if platform.BareMetal != nil { + validate(baremetal.Name, platform.BareMetal, func(f *field.Path) field.ErrorList { + return baremetalvalidation.ValidatePlatform(platform.BareMetal, f) + }) + } return allErrs } diff --git a/pkg/types/validation/installconfig_test.go b/pkg/types/validation/installconfig_test.go index ea68e4819b6..9c44ff725bc 100644 --- a/pkg/types/validation/installconfig_test.go +++ b/pkg/types/validation/installconfig_test.go @@ -361,7 +361,7 @@ func TestValidateInstallConfig(t *testing.T) { c.Platform = types.Platform{} return c }(), - 
expectedError: `^platform: Invalid value: "": must specify one of the platforms \(aws, azure, gcp, none, openstack, vsphere\)$`, + expectedError: `^platform: Invalid value: "": must specify one of the platforms \(aws, azure, baremetal, gcp, none, openstack, vsphere\)$`, }, { name: "multiple platforms", @@ -392,7 +392,7 @@ func TestValidateInstallConfig(t *testing.T) { } return c }(), - expectedError: `^platform: Invalid value: "libvirt": must specify one of the platforms \(aws, azure, gcp, none, openstack, vsphere\)$`, + expectedError: `^platform: Invalid value: "libvirt": must specify one of the platforms \(aws, azure, baremetal, gcp, none, openstack, vsphere\)$`, }, { name: "invalid libvirt platform", @@ -404,7 +404,7 @@ func TestValidateInstallConfig(t *testing.T) { c.Platform.Libvirt.URI = "" return c }(), - expectedError: `^\[platform: Invalid value: "libvirt": must specify one of the platforms \(aws, azure, gcp, none, openstack, vsphere\), platform\.libvirt\.uri: Invalid value: "": invalid URI "" \(no scheme\)]$`, + expectedError: `^\[platform: Invalid value: "libvirt": must specify one of the platforms \(aws, azure, baremetal, gcp, none, openstack, vsphere\), platform\.libvirt\.uri: Invalid value: "": invalid URI "" \(no scheme\)]$`, }, { name: "valid none platform", diff --git a/pkg/types/validation/machinepools.go b/pkg/types/validation/machinepools.go index 52064f21956..33867a09c14 100644 --- a/pkg/types/validation/machinepools.go +++ b/pkg/types/validation/machinepools.go @@ -10,6 +10,8 @@ import ( awsvalidation "github.com/openshift/installer/pkg/types/aws/validation" "github.com/openshift/installer/pkg/types/azure" azurevalidation "github.com/openshift/installer/pkg/types/azure/validation" + "github.com/openshift/installer/pkg/types/baremetal" + baremetalvalidation "github.com/openshift/installer/pkg/types/baremetal/validation" "github.com/openshift/installer/pkg/types/libvirt" libvirtvalidation 
"github.com/openshift/installer/pkg/types/libvirt/validation" "github.com/openshift/installer/pkg/types/openstack" @@ -71,5 +73,8 @@ func validateMachinePoolPlatform(platform *types.Platform, p *types.MachinePoolP if p.OpenStack != nil { validate(openstack.Name, p.OpenStack, func(f *field.Path) field.ErrorList { return openstackvalidation.ValidateMachinePool(p.OpenStack, f) }) } + if p.BareMetal != nil { + validate(baremetal.Name, p.BareMetal, func(f *field.Path) field.ErrorList { return baremetalvalidation.ValidateMachinePool(p.BareMetal, f) }) + } return allErrs } diff --git a/pkg/validate/validate.go b/pkg/validate/validate.go index 1c1877bed91..03d51b6e696 100644 --- a/pkg/validate/validate.go +++ b/pkg/validate/validate.go @@ -132,3 +132,26 @@ func URIWithProtocol(uri string, protocol string) error { } return nil } + +// IP validates if a string is a valid IP. +func IP(ip string) error { + addr := net.ParseIP(ip) + if addr == nil { + return fmt.Errorf("'%s' is not a valid IP", ip) + } + return nil +} + +// Interface validates if a string is a valid network interface +func Interface(iface string) error { + if _, err := net.InterfaceByName(iface); err != nil { + return fmt.Errorf("%s is not a valid network interface: %s", iface, err) + } + return nil +} + +// MAC validates that a value is a valid mac address +func MAC(addr string) error { + _, err := net.ParseMAC(addr) + return err +} diff --git a/vendor/github.com/evanphx/json-patch/LICENSE b/vendor/github.com/evanphx/json-patch/LICENSE new file mode 100644 index 00000000000..0eb9b72d84d --- /dev/null +++ b/vendor/github.com/evanphx/json-patch/LICENSE @@ -0,0 +1,25 @@ +Copyright (c) 2014, Evan Phoenix +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. 
+* Redistributions in binary form must reproduce the above copyright notice + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. +* Neither the name of the Evan Phoenix nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/evanphx/json-patch/errors.go b/vendor/github.com/evanphx/json-patch/errors.go new file mode 100644 index 00000000000..75304b4437c --- /dev/null +++ b/vendor/github.com/evanphx/json-patch/errors.go @@ -0,0 +1,38 @@ +package jsonpatch + +import "fmt" + +// AccumulatedCopySizeError is an error type returned when the accumulated size +// increase caused by copy operations in a patch operation has exceeded the +// limit. +type AccumulatedCopySizeError struct { + limit int64 + accumulated int64 +} + +// NewAccumulatedCopySizeError returns an AccumulatedCopySizeError. +func NewAccumulatedCopySizeError(l, a int64) *AccumulatedCopySizeError { + return &AccumulatedCopySizeError{limit: l, accumulated: a} +} + +// Error implements the error interface. 
+func (a *AccumulatedCopySizeError) Error() string { + return fmt.Sprintf("Unable to complete the copy, the accumulated size increase of copy is %d, exceeding the limit %d", a.accumulated, a.limit) +} + +// ArraySizeError is an error type returned when the array size has exceeded +// the limit. +type ArraySizeError struct { + limit int + size int +} + +// NewArraySizeError returns an ArraySizeError. +func NewArraySizeError(l, s int) *ArraySizeError { + return &ArraySizeError{limit: l, size: s} +} + +// Error implements the error interface. +func (a *ArraySizeError) Error() string { + return fmt.Sprintf("Unable to create array of size %d, limit is %d", a.size, a.limit) +} diff --git a/vendor/github.com/evanphx/json-patch/merge.go b/vendor/github.com/evanphx/json-patch/merge.go new file mode 100644 index 00000000000..6806c4c200b --- /dev/null +++ b/vendor/github.com/evanphx/json-patch/merge.go @@ -0,0 +1,383 @@ +package jsonpatch + +import ( + "bytes" + "encoding/json" + "fmt" + "reflect" +) + +func merge(cur, patch *lazyNode, mergeMerge bool) *lazyNode { + curDoc, err := cur.intoDoc() + + if err != nil { + pruneNulls(patch) + return patch + } + + patchDoc, err := patch.intoDoc() + + if err != nil { + return patch + } + + mergeDocs(curDoc, patchDoc, mergeMerge) + + return cur +} + +func mergeDocs(doc, patch *partialDoc, mergeMerge bool) { + for k, v := range *patch { + if v == nil { + if mergeMerge { + (*doc)[k] = nil + } else { + delete(*doc, k) + } + } else { + cur, ok := (*doc)[k] + + if !ok || cur == nil { + pruneNulls(v) + (*doc)[k] = v + } else { + (*doc)[k] = merge(cur, v, mergeMerge) + } + } + } +} + +func pruneNulls(n *lazyNode) { + sub, err := n.intoDoc() + + if err == nil { + pruneDocNulls(sub) + } else { + ary, err := n.intoAry() + + if err == nil { + pruneAryNulls(ary) + } + } +} + +func pruneDocNulls(doc *partialDoc) *partialDoc { + for k, v := range *doc { + if v == nil { + delete(*doc, k) + } else { + pruneNulls(v) + } + } + + return doc +} + +func 
pruneAryNulls(ary *partialArray) *partialArray { + newAry := []*lazyNode{} + + for _, v := range *ary { + if v != nil { + pruneNulls(v) + newAry = append(newAry, v) + } + } + + *ary = newAry + + return ary +} + +var errBadJSONDoc = fmt.Errorf("Invalid JSON Document") +var errBadJSONPatch = fmt.Errorf("Invalid JSON Patch") +var errBadMergeTypes = fmt.Errorf("Mismatched JSON Documents") + +// MergeMergePatches merges two merge patches together, such that +// applying this resulting merged merge patch to a document yields the same +// as merging each merge patch to the document in succession. +func MergeMergePatches(patch1Data, patch2Data []byte) ([]byte, error) { + return doMergePatch(patch1Data, patch2Data, true) +} + +// MergePatch merges the patchData into the docData. +func MergePatch(docData, patchData []byte) ([]byte, error) { + return doMergePatch(docData, patchData, false) +} + +func doMergePatch(docData, patchData []byte, mergeMerge bool) ([]byte, error) { + doc := &partialDoc{} + + docErr := json.Unmarshal(docData, doc) + + patch := &partialDoc{} + + patchErr := json.Unmarshal(patchData, patch) + + if _, ok := docErr.(*json.SyntaxError); ok { + return nil, errBadJSONDoc + } + + if _, ok := patchErr.(*json.SyntaxError); ok { + return nil, errBadJSONPatch + } + + if docErr == nil && *doc == nil { + return nil, errBadJSONDoc + } + + if patchErr == nil && *patch == nil { + return nil, errBadJSONPatch + } + + if docErr != nil || patchErr != nil { + // Not an error, just not a doc, so we turn straight into the patch + if patchErr == nil { + if mergeMerge { + doc = patch + } else { + doc = pruneDocNulls(patch) + } + } else { + patchAry := &partialArray{} + patchErr = json.Unmarshal(patchData, patchAry) + + if patchErr != nil { + return nil, errBadJSONPatch + } + + pruneAryNulls(patchAry) + + out, patchErr := json.Marshal(patchAry) + + if patchErr != nil { + return nil, errBadJSONPatch + } + + return out, nil + } + } else { + mergeDocs(doc, patch, mergeMerge) + } + 
+ return json.Marshal(doc) +} + +// resemblesJSONArray indicates whether the byte-slice "appears" to be +// a JSON array or not. +// False-positives are possible, as this function does not check the internal +// structure of the array. It only checks that the outer syntax is present and +// correct. +func resemblesJSONArray(input []byte) bool { + input = bytes.TrimSpace(input) + + hasPrefix := bytes.HasPrefix(input, []byte("[")) + hasSuffix := bytes.HasSuffix(input, []byte("]")) + + return hasPrefix && hasSuffix +} + +// CreateMergePatch will return a merge patch document capable of converting +// the original document(s) to the modified document(s). +// The parameters can be bytes of either two JSON Documents, or two arrays of +// JSON documents. +// The merge patch returned follows the specification defined at http://tools.ietf.org/html/draft-ietf-appsawg-json-merge-patch-07 +func CreateMergePatch(originalJSON, modifiedJSON []byte) ([]byte, error) { + originalResemblesArray := resemblesJSONArray(originalJSON) + modifiedResemblesArray := resemblesJSONArray(modifiedJSON) + + // Do both byte-slices seem like JSON arrays? + if originalResemblesArray && modifiedResemblesArray { + return createArrayMergePatch(originalJSON, modifiedJSON) + } + + // Are both byte-slices are not arrays? Then they are likely JSON objects... + if !originalResemblesArray && !modifiedResemblesArray { + return createObjectMergePatch(originalJSON, modifiedJSON) + } + + // None of the above? Then return an error because of mismatched types. + return nil, errBadMergeTypes +} + +// createObjectMergePatch will return a merge-patch document capable of +// converting the original document to the modified document. 
+func createObjectMergePatch(originalJSON, modifiedJSON []byte) ([]byte, error) { + originalDoc := map[string]interface{}{} + modifiedDoc := map[string]interface{}{} + + err := json.Unmarshal(originalJSON, &originalDoc) + if err != nil { + return nil, errBadJSONDoc + } + + err = json.Unmarshal(modifiedJSON, &modifiedDoc) + if err != nil { + return nil, errBadJSONDoc + } + + dest, err := getDiff(originalDoc, modifiedDoc) + if err != nil { + return nil, err + } + + return json.Marshal(dest) +} + +// createArrayMergePatch will return an array of merge-patch documents capable +// of converting the original document to the modified document for each +// pair of JSON documents provided in the arrays. +// Arrays of mismatched sizes will result in an error. +func createArrayMergePatch(originalJSON, modifiedJSON []byte) ([]byte, error) { + originalDocs := []json.RawMessage{} + modifiedDocs := []json.RawMessage{} + + err := json.Unmarshal(originalJSON, &originalDocs) + if err != nil { + return nil, errBadJSONDoc + } + + err = json.Unmarshal(modifiedJSON, &modifiedDocs) + if err != nil { + return nil, errBadJSONDoc + } + + total := len(originalDocs) + if len(modifiedDocs) != total { + return nil, errBadJSONDoc + } + + result := []json.RawMessage{} + for i := 0; i < len(originalDocs); i++ { + original := originalDocs[i] + modified := modifiedDocs[i] + + patch, err := createObjectMergePatch(original, modified) + if err != nil { + return nil, err + } + + result = append(result, json.RawMessage(patch)) + } + + return json.Marshal(result) +} + +// Returns true if the array matches (must be json types). +// As is idiomatic for go, an empty array is not the same as a nil array. 
+func matchesArray(a, b []interface{}) bool { + if len(a) != len(b) { + return false + } + if (a == nil && b != nil) || (a != nil && b == nil) { + return false + } + for i := range a { + if !matchesValue(a[i], b[i]) { + return false + } + } + return true +} + +// Returns true if the values matches (must be json types) +// The types of the values must match, otherwise it will always return false +// If two map[string]interface{} are given, all elements must match. +func matchesValue(av, bv interface{}) bool { + if reflect.TypeOf(av) != reflect.TypeOf(bv) { + return false + } + switch at := av.(type) { + case string: + bt := bv.(string) + if bt == at { + return true + } + case float64: + bt := bv.(float64) + if bt == at { + return true + } + case bool: + bt := bv.(bool) + if bt == at { + return true + } + case nil: + // Both nil, fine. + return true + case map[string]interface{}: + bt := bv.(map[string]interface{}) + for key := range at { + if !matchesValue(at[key], bt[key]) { + return false + } + } + for key := range bt { + if !matchesValue(at[key], bt[key]) { + return false + } + } + return true + case []interface{}: + bt := bv.([]interface{}) + return matchesArray(at, bt) + } + return false +} + +// getDiff returns the (recursive) difference between a and b as a map[string]interface{}. 
+func getDiff(a, b map[string]interface{}) (map[string]interface{}, error) { + into := map[string]interface{}{} + for key, bv := range b { + av, ok := a[key] + // value was added + if !ok { + into[key] = bv + continue + } + // If types have changed, replace completely + if reflect.TypeOf(av) != reflect.TypeOf(bv) { + into[key] = bv + continue + } + // Types are the same, compare values + switch at := av.(type) { + case map[string]interface{}: + bt := bv.(map[string]interface{}) + dst := make(map[string]interface{}, len(bt)) + dst, err := getDiff(at, bt) + if err != nil { + return nil, err + } + if len(dst) > 0 { + into[key] = dst + } + case string, float64, bool: + if !matchesValue(av, bv) { + into[key] = bv + } + case []interface{}: + bt := bv.([]interface{}) + if !matchesArray(at, bt) { + into[key] = bv + } + case nil: + switch bv.(type) { + case nil: + // Both nil, fine. + default: + into[key] = bv + } + default: + panic(fmt.Sprintf("Unknown type:%T in key %s", av, key)) + } + } + // Now add all deleted values as nil + for key := range a { + _, found := b[key] + if !found { + into[key] = nil + } + } + return into, nil +} diff --git a/vendor/github.com/evanphx/json-patch/patch.go b/vendor/github.com/evanphx/json-patch/patch.go new file mode 100644 index 00000000000..1b5f95e6112 --- /dev/null +++ b/vendor/github.com/evanphx/json-patch/patch.go @@ -0,0 +1,776 @@ +package jsonpatch + +import ( + "bytes" + "encoding/json" + "fmt" + "strconv" + "strings" + + "github.com/pkg/errors" +) + +const ( + eRaw = iota + eDoc + eAry +) + +var ( + // SupportNegativeIndices decides whether to support non-standard practice of + // allowing negative indices to mean indices starting at the end of an array. + // Default to true. + SupportNegativeIndices bool = true + // AccumulatedCopySizeLimit limits the total size increase in bytes caused by + // "copy" operations in a patch. 
+ AccumulatedCopySizeLimit int64 = 0 +) + +var ( + ErrTestFailed = errors.New("test failed") + ErrMissing = errors.New("missing value") + ErrUnknownType = errors.New("unknown object type") + ErrInvalid = errors.New("invalid state detected") + ErrInvalidIndex = errors.New("invalid index referenced") +) + +type lazyNode struct { + raw *json.RawMessage + doc partialDoc + ary partialArray + which int +} + +// Operation is a single JSON-Patch step, such as a single 'add' operation. +type Operation map[string]*json.RawMessage + +// Patch is an ordered collection of Operations. +type Patch []Operation + +type partialDoc map[string]*lazyNode +type partialArray []*lazyNode + +type container interface { + get(key string) (*lazyNode, error) + set(key string, val *lazyNode) error + add(key string, val *lazyNode) error + remove(key string) error +} + +func newLazyNode(raw *json.RawMessage) *lazyNode { + return &lazyNode{raw: raw, doc: nil, ary: nil, which: eRaw} +} + +func (n *lazyNode) MarshalJSON() ([]byte, error) { + switch n.which { + case eRaw: + return json.Marshal(n.raw) + case eDoc: + return json.Marshal(n.doc) + case eAry: + return json.Marshal(n.ary) + default: + return nil, ErrUnknownType + } +} + +func (n *lazyNode) UnmarshalJSON(data []byte) error { + dest := make(json.RawMessage, len(data)) + copy(dest, data) + n.raw = &dest + n.which = eRaw + return nil +} + +func deepCopy(src *lazyNode) (*lazyNode, int, error) { + if src == nil { + return nil, 0, nil + } + a, err := src.MarshalJSON() + if err != nil { + return nil, 0, err + } + sz := len(a) + ra := make(json.RawMessage, sz) + copy(ra, a) + return newLazyNode(&ra), sz, nil +} + +func (n *lazyNode) intoDoc() (*partialDoc, error) { + if n.which == eDoc { + return &n.doc, nil + } + + if n.raw == nil { + return nil, ErrInvalid + } + + err := json.Unmarshal(*n.raw, &n.doc) + + if err != nil { + return nil, err + } + + n.which = eDoc + return &n.doc, nil +} + +func (n *lazyNode) intoAry() (*partialArray, error) { + if 
n.which == eAry { + return &n.ary, nil + } + + if n.raw == nil { + return nil, ErrInvalid + } + + err := json.Unmarshal(*n.raw, &n.ary) + + if err != nil { + return nil, err + } + + n.which = eAry + return &n.ary, nil +} + +func (n *lazyNode) compact() []byte { + buf := &bytes.Buffer{} + + if n.raw == nil { + return nil + } + + err := json.Compact(buf, *n.raw) + + if err != nil { + return *n.raw + } + + return buf.Bytes() +} + +func (n *lazyNode) tryDoc() bool { + if n.raw == nil { + return false + } + + err := json.Unmarshal(*n.raw, &n.doc) + + if err != nil { + return false + } + + n.which = eDoc + return true +} + +func (n *lazyNode) tryAry() bool { + if n.raw == nil { + return false + } + + err := json.Unmarshal(*n.raw, &n.ary) + + if err != nil { + return false + } + + n.which = eAry + return true +} + +func (n *lazyNode) equal(o *lazyNode) bool { + if n.which == eRaw { + if !n.tryDoc() && !n.tryAry() { + if o.which != eRaw { + return false + } + + return bytes.Equal(n.compact(), o.compact()) + } + } + + if n.which == eDoc { + if o.which == eRaw { + if !o.tryDoc() { + return false + } + } + + if o.which != eDoc { + return false + } + + for k, v := range n.doc { + ov, ok := o.doc[k] + + if !ok { + return false + } + + if v == nil && ov == nil { + continue + } + + if !v.equal(ov) { + return false + } + } + + return true + } + + if o.which != eAry && !o.tryAry() { + return false + } + + if len(n.ary) != len(o.ary) { + return false + } + + for idx, val := range n.ary { + if !val.equal(o.ary[idx]) { + return false + } + } + + return true +} + +// Kind reads the "op" field of the Operation. +func (o Operation) Kind() string { + if obj, ok := o["op"]; ok && obj != nil { + var op string + + err := json.Unmarshal(*obj, &op) + + if err != nil { + return "unknown" + } + + return op + } + + return "unknown" +} + +// Path reads the "path" field of the Operation. 
+func (o Operation) Path() (string, error) { + if obj, ok := o["path"]; ok && obj != nil { + var op string + + err := json.Unmarshal(*obj, &op) + + if err != nil { + return "unknown", err + } + + return op, nil + } + + return "unknown", errors.Wrapf(ErrMissing, "operation missing path field") +} + +// From reads the "from" field of the Operation. +func (o Operation) From() (string, error) { + if obj, ok := o["from"]; ok && obj != nil { + var op string + + err := json.Unmarshal(*obj, &op) + + if err != nil { + return "unknown", err + } + + return op, nil + } + + return "unknown", errors.Wrapf(ErrMissing, "operation, missing from field") +} + +func (o Operation) value() *lazyNode { + if obj, ok := o["value"]; ok { + return newLazyNode(obj) + } + + return nil +} + +// ValueInterface decodes the operation value into an interface. +func (o Operation) ValueInterface() (interface{}, error) { + if obj, ok := o["value"]; ok && obj != nil { + var v interface{} + + err := json.Unmarshal(*obj, &v) + + if err != nil { + return nil, err + } + + return v, nil + } + + return nil, errors.Wrapf(ErrMissing, "operation, missing value field") +} + +func isArray(buf []byte) bool { +Loop: + for _, c := range buf { + switch c { + case ' ': + case '\n': + case '\t': + continue + case '[': + return true + default: + break Loop + } + } + + return false +} + +func findObject(pd *container, path string) (container, string) { + doc := *pd + + split := strings.Split(path, "/") + + if len(split) < 2 { + return nil, "" + } + + parts := split[1 : len(split)-1] + + key := split[len(split)-1] + + var err error + + for _, part := range parts { + + next, ok := doc.get(decodePatchKey(part)) + + if next == nil || ok != nil { + return nil, "" + } + + if isArray(*next.raw) { + doc, err = next.intoAry() + + if err != nil { + return nil, "" + } + } else { + doc, err = next.intoDoc() + + if err != nil { + return nil, "" + } + } + } + + return doc, decodePatchKey(key) +} + +func (d *partialDoc) set(key string, 
val *lazyNode) error { + (*d)[key] = val + return nil +} + +func (d *partialDoc) add(key string, val *lazyNode) error { + (*d)[key] = val + return nil +} + +func (d *partialDoc) get(key string) (*lazyNode, error) { + return (*d)[key], nil +} + +func (d *partialDoc) remove(key string) error { + _, ok := (*d)[key] + if !ok { + return errors.Wrapf(ErrMissing, "Unable to remove nonexistent key: %s", key) + } + + delete(*d, key) + return nil +} + +// set should only be used to implement the "replace" operation, so "key" must +// be an already existing index in "d". +func (d *partialArray) set(key string, val *lazyNode) error { + idx, err := strconv.Atoi(key) + if err != nil { + return err + } + (*d)[idx] = val + return nil +} + +func (d *partialArray) add(key string, val *lazyNode) error { + if key == "-" { + *d = append(*d, val) + return nil + } + + idx, err := strconv.Atoi(key) + if err != nil { + return errors.Wrapf(err, "value was not a proper array index: '%s'", key) + } + + sz := len(*d) + 1 + + ary := make([]*lazyNode, sz) + + cur := *d + + if idx >= len(ary) { + return errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx) + } + + if SupportNegativeIndices { + if idx < -len(ary) { + return errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx) + } + + if idx < 0 { + idx += len(ary) + } + } + + copy(ary[0:idx], cur[0:idx]) + ary[idx] = val + copy(ary[idx+1:], cur[idx:]) + + *d = ary + return nil +} + +func (d *partialArray) get(key string) (*lazyNode, error) { + idx, err := strconv.Atoi(key) + + if err != nil { + return nil, err + } + + if idx >= len(*d) { + return nil, errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx) + } + + return (*d)[idx], nil +} + +func (d *partialArray) remove(key string) error { + idx, err := strconv.Atoi(key) + if err != nil { + return err + } + + cur := *d + + if idx >= len(cur) { + return errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx) + } + + if 
SupportNegativeIndices { + if idx < -len(cur) { + return errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx) + } + + if idx < 0 { + idx += len(cur) + } + } + + ary := make([]*lazyNode, len(cur)-1) + + copy(ary[0:idx], cur[0:idx]) + copy(ary[idx:], cur[idx+1:]) + + *d = ary + return nil + +} + +func (p Patch) add(doc *container, op Operation) error { + path, err := op.Path() + if err != nil { + return errors.Wrapf(ErrMissing, "add operation failed to decode path") + } + + con, key := findObject(doc, path) + + if con == nil { + return errors.Wrapf(ErrMissing, "add operation does not apply: doc is missing path: \"%s\"", path) + } + + err = con.add(key, op.value()) + if err != nil { + return errors.Wrapf(err, "error in add for path: '%s'", path) + } + + return nil +} + +func (p Patch) remove(doc *container, op Operation) error { + path, err := op.Path() + if err != nil { + return errors.Wrapf(ErrMissing, "remove operation failed to decode path") + } + + con, key := findObject(doc, path) + + if con == nil { + return errors.Wrapf(ErrMissing, "remove operation does not apply: doc is missing path: \"%s\"", path) + } + + err = con.remove(key) + if err != nil { + return errors.Wrapf(err, "error in remove for path: '%s'", path) + } + + return nil +} + +func (p Patch) replace(doc *container, op Operation) error { + path, err := op.Path() + if err != nil { + return errors.Wrapf(err, "replace operation failed to decode path") + } + + con, key := findObject(doc, path) + + if con == nil { + return errors.Wrapf(ErrMissing, "replace operation does not apply: doc is missing path: %s", path) + } + + _, ok := con.get(key) + if ok != nil { + return errors.Wrapf(ErrMissing, "replace operation does not apply: doc is missing key: %s", path) + } + + err = con.set(key, op.value()) + if err != nil { + return errors.Wrapf(err, "error in remove for path: '%s'", path) + } + + return nil +} + +func (p Patch) move(doc *container, op Operation) error { + from, err := op.From() 
+ if err != nil { + return errors.Wrapf(err, "move operation failed to decode from") + } + + con, key := findObject(doc, from) + + if con == nil { + return errors.Wrapf(ErrMissing, "move operation does not apply: doc is missing from path: %s", from) + } + + val, err := con.get(key) + if err != nil { + return errors.Wrapf(err, "error in move for path: '%s'", key) + } + + err = con.remove(key) + if err != nil { + return errors.Wrapf(err, "error in move for path: '%s'", key) + } + + path, err := op.Path() + if err != nil { + return errors.Wrapf(err, "move operation failed to decode path") + } + + con, key = findObject(doc, path) + + if con == nil { + return errors.Wrapf(ErrMissing, "move operation does not apply: doc is missing destination path: %s", path) + } + + err = con.add(key, val) + if err != nil { + return errors.Wrapf(err, "error in move for path: '%s'", path) + } + + return nil +} + +func (p Patch) test(doc *container, op Operation) error { + path, err := op.Path() + if err != nil { + return errors.Wrapf(err, "test operation failed to decode path") + } + + con, key := findObject(doc, path) + + if con == nil { + return errors.Wrapf(ErrMissing, "test operation does not apply: is missing path: %s", path) + } + + val, err := con.get(key) + if err != nil { + return errors.Wrapf(err, "error in test for path: '%s'", path) + } + + if val == nil { + if op.value().raw == nil { + return nil + } + return errors.Wrapf(ErrTestFailed, "testing value %s failed", path) + } else if op.value() == nil { + return errors.Wrapf(ErrTestFailed, "testing value %s failed", path) + } + + if val.equal(op.value()) { + return nil + } + + return errors.Wrapf(ErrTestFailed, "testing value %s failed", path) +} + +func (p Patch) copy(doc *container, op Operation, accumulatedCopySize *int64) error { + from, err := op.From() + if err != nil { + return errors.Wrapf(err, "copy operation failed to decode from") + } + + con, key := findObject(doc, from) + + if con == nil { + return 
errors.Wrapf(ErrMissing, "copy operation does not apply: doc is missing from path: %s", from) + } + + val, err := con.get(key) + if err != nil { + return errors.Wrapf(err, "error in copy for from: '%s'", from) + } + + path, err := op.Path() + if err != nil { + return errors.Wrapf(ErrMissing, "copy operation failed to decode path") + } + + con, key = findObject(doc, path) + + if con == nil { + return errors.Wrapf(ErrMissing, "copy operation does not apply: doc is missing destination path: %s", path) + } + + valCopy, sz, err := deepCopy(val) + if err != nil { + return errors.Wrapf(err, "error while performing deep copy") + } + + (*accumulatedCopySize) += int64(sz) + if AccumulatedCopySizeLimit > 0 && *accumulatedCopySize > AccumulatedCopySizeLimit { + return NewAccumulatedCopySizeError(AccumulatedCopySizeLimit, *accumulatedCopySize) + } + + err = con.add(key, valCopy) + if err != nil { + return errors.Wrapf(err, "error while adding value during copy") + } + + return nil +} + +// Equal indicates if 2 JSON documents have the same structural equality. +func Equal(a, b []byte) bool { + ra := make(json.RawMessage, len(a)) + copy(ra, a) + la := newLazyNode(&ra) + + rb := make(json.RawMessage, len(b)) + copy(rb, b) + lb := newLazyNode(&rb) + + return la.equal(lb) +} + +// DecodePatch decodes the passed JSON document as an RFC 6902 patch. +func DecodePatch(buf []byte) (Patch, error) { + var p Patch + + err := json.Unmarshal(buf, &p) + + if err != nil { + return nil, err + } + + return p, nil +} + +// Apply mutates a JSON document according to the patch, and returns the new +// document. +func (p Patch) Apply(doc []byte) ([]byte, error) { + return p.ApplyIndent(doc, "") +} + +// ApplyIndent mutates a JSON document according to the patch, and returns the new +// document indented. 
+func (p Patch) ApplyIndent(doc []byte, indent string) ([]byte, error) { + var pd container + if doc[0] == '[' { + pd = &partialArray{} + } else { + pd = &partialDoc{} + } + + err := json.Unmarshal(doc, pd) + + if err != nil { + return nil, err + } + + err = nil + + var accumulatedCopySize int64 + + for _, op := range p { + switch op.Kind() { + case "add": + err = p.add(&pd, op) + case "remove": + err = p.remove(&pd, op) + case "replace": + err = p.replace(&pd, op) + case "move": + err = p.move(&pd, op) + case "test": + err = p.test(&pd, op) + case "copy": + err = p.copy(&pd, op, &accumulatedCopySize) + default: + err = fmt.Errorf("Unexpected kind: %s", op.Kind()) + } + + if err != nil { + return nil, err + } + } + + if indent != "" { + return json.MarshalIndent(pd, "", indent) + } + + return json.Marshal(pd) +} + +// From http://tools.ietf.org/html/rfc6901#section-4 : +// +// Evaluation of each reference token begins by decoding any escaped +// character sequence. This is performed by first transforming any +// occurrence of the sequence '~1' to '/', and then transforming any +// occurrence of the sequence '~0' to '~'. + +var ( + rfc6901Decoder = strings.NewReplacer("~1", "/", "~0", "~") +) + +func decodePatchKey(k string) string { + return rfc6901Decoder.Replace(k) +} diff --git a/vendor/github.com/metal3-io/baremetal-operator/LICENSE b/vendor/github.com/metal3-io/baremetal-operator/LICENSE new file mode 100644 index 00000000000..261eeb9e9f8 --- /dev/null +++ b/vendor/github.com/metal3-io/baremetal-operator/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of 
the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/metal3-io/baremetal-operator/pkg/bmc/access.go b/vendor/github.com/metal3-io/baremetal-operator/pkg/bmc/access.go new file mode 100644 index 00000000000..288395f1171 --- /dev/null +++ b/vendor/github.com/metal3-io/baremetal-operator/pkg/bmc/access.go @@ -0,0 +1,116 @@ +package bmc + +import ( + "net" + "net/url" + "strings" + + "github.com/pkg/errors" +) + +// AccessDetails contains the information about how to get to a BMC. +// +// NOTE(dhellmann): This structure is very likely to change as we +// adapt it to additional types. +type AccessDetails interface { + // Type returns the kind of the BMC, indicating the driver that + // will be used to communicate with it. 
+ Type() string + + // NeedsMAC returns true when the host is going to need a separate + // port created rather than having it discovered. + NeedsMAC() bool + + // The name of the driver to instantiate the BMC with. This may differ + // from the Type - both the ipmi and libvirt types use the ipmi driver. + Driver() string + + // DriverInfo returns a data structure to pass as the DriverInfo + // parameter when creating a node in Ironic. The structure is + // pre-populated with the access information, and the caller is + // expected to add any other information that might be needed + // (such as the kernel and ramdisk locations). + DriverInfo(bmcCreds Credentials) map[string]interface{} + + // Boot interface to set + BootInterface() string +} + +func getTypeHostPort(address string) (bmcType, host, port, path string, err error) { + // Start by assuming "type://host:port" + parsedURL, err := url.Parse(address) + if err != nil { + // We failed to parse the URL, but it may just be a host or + // host:port string (which the URL parser rejects because ":" + // is not allowed in the first segment of a + // path. Unfortunately there is no error class to represent + // that specific error, so we have to guess. + if strings.Contains(address, ":") { + // If we can parse host:port, carry on with those + // values. Otherwise, report the original parser error. 
+ var err2 error + host, port, err2 = net.SplitHostPort(address) + if err2 != nil { + return "", "", "", "", errors.Wrap(err, "failed to parse BMC address information") + } + bmcType = "ipmi" + } else { + bmcType = "ipmi" + host = address + } + } else { + // Successfully parsed the URL + bmcType = parsedURL.Scheme + if parsedURL.Opaque != "" { + parsedURL, err = url.Parse(strings.Replace(address, ":", "://", 1)) + if err != nil { + return "", "", "", "", errors.Wrap(err, "failed to parse BMC address information") + + } + } + port = parsedURL.Port() + host = parsedURL.Hostname() + if parsedURL.Scheme == "" { + bmcType = "ipmi" + if host == "" { + // If there was no scheme at all, the hostname was + // interpreted as a path. + host = parsedURL.Path + } + } else { + path = parsedURL.Path + } + } + return bmcType, host, port, path, nil +} + +// NewAccessDetails creates an AccessDetails structure from the URL +// for a BMC. +func NewAccessDetails(address string) (AccessDetails, error) { + + bmcType, host, port, path, err := getTypeHostPort(address) + if err != nil { + return nil, err + } + + var addr AccessDetails + switch bmcType { + case "ipmi", "libvirt": + addr = &ipmiAccessDetails{ + bmcType: bmcType, + portNum: port, + hostname: host, + } + case "idrac", "idrac+http", "idrac+https": + addr = &iDracAccessDetails{ + bmcType: bmcType, + portNum: port, + hostname: host, + path: path, + } + default: + err = &UnknownBMCTypeError{address, bmcType} + } + + return addr, err +} diff --git a/vendor/github.com/metal3-io/baremetal-operator/pkg/bmc/credentials.go b/vendor/github.com/metal3-io/baremetal-operator/pkg/bmc/credentials.go new file mode 100644 index 00000000000..cbf28a50f24 --- /dev/null +++ b/vendor/github.com/metal3-io/baremetal-operator/pkg/bmc/credentials.go @@ -0,0 +1,18 @@ +package bmc + +// Credentials holds the information for authenticating with the BMC. 
+type Credentials struct { + Username string + Password string +} + +// Validate returns an error if the credentials are invalid +func (creds Credentials) Validate() error { + if creds.Username == "" { + return &CredentialsValidationError{message: "Missing BMC connection detail 'username' in credentials"} + } + if creds.Password == "" { + return &CredentialsValidationError{message: "Missing BMC connection details 'password' in credentials"} + } + return nil +} diff --git a/vendor/github.com/metal3-io/baremetal-operator/pkg/bmc/errors.go b/vendor/github.com/metal3-io/baremetal-operator/pkg/bmc/errors.go new file mode 100644 index 00000000000..e018fa66b66 --- /dev/null +++ b/vendor/github.com/metal3-io/baremetal-operator/pkg/bmc/errors.go @@ -0,0 +1,28 @@ +package bmc + +import ( + "fmt" +) + +// UnknownBMCTypeError is returned when the provided BMC address cannot be +// mapped to a driver. +type UnknownBMCTypeError struct { + address string + bmcType string +} + +func (e UnknownBMCTypeError) Error() string { + return fmt.Sprintf("Unknown BMC type '%s' for address %s", + e.bmcType, e.address) +} + +// CredentialsValidationError is returned when the provided BMC credentials +// are invalid (e.g. 
null) +type CredentialsValidationError struct { + message string +} + +func (e CredentialsValidationError) Error() string { + return fmt.Sprintf("Validation error with BMC credentials: %s", + e.message) +} diff --git a/vendor/github.com/metal3-io/baremetal-operator/pkg/bmc/idrac.go b/vendor/github.com/metal3-io/baremetal-operator/pkg/bmc/idrac.go new file mode 100644 index 00000000000..c907f63f282 --- /dev/null +++ b/vendor/github.com/metal3-io/baremetal-operator/pkg/bmc/idrac.go @@ -0,0 +1,56 @@ +package bmc + +import ( + "strings" +) + +type iDracAccessDetails struct { + bmcType string + portNum string + hostname string + path string +} + +func (a *iDracAccessDetails) Type() string { + return a.bmcType +} + +// NeedsMAC returns true when the host is going to need a separate +// port created rather than having it discovered. +func (a *iDracAccessDetails) NeedsMAC() bool { + return false +} + +func (a *iDracAccessDetails) Driver() string { + return "idrac" +} + +// DriverInfo returns a data structure to pass as the DriverInfo +// parameter when creating a node in Ironic. The structure is +// pre-populated with the access information, and the caller is +// expected to add any other information that might be needed (such as +// the kernel and ramdisk locations). 
+func (a *iDracAccessDetails) DriverInfo(bmcCreds Credentials) map[string]interface{} { + result := map[string]interface{}{ + "drac_username": bmcCreds.Username, + "drac_password": bmcCreds.Password, + "drac_address": a.hostname, + } + + schemes := strings.Split(a.bmcType, "+") + if len(schemes) > 1 { + result["drac_protocol"] = schemes[1] + } + if a.portNum != "" { + result["drac_port"] = a.portNum + } + if a.path != "" { + result["drac_path"] = a.path + } + + return result +} + +func (a *iDracAccessDetails) BootInterface() string { + return "ipxe" +} diff --git a/vendor/github.com/metal3-io/baremetal-operator/pkg/bmc/ipmi.go b/vendor/github.com/metal3-io/baremetal-operator/pkg/bmc/ipmi.go new file mode 100644 index 00000000000..3e4191e2745 --- /dev/null +++ b/vendor/github.com/metal3-io/baremetal-operator/pkg/bmc/ipmi.go @@ -0,0 +1,51 @@ +package bmc + +type ipmiAccessDetails struct { + bmcType string + portNum string + hostname string +} + +const ipmiDefaultPort = "623" + +func (a *ipmiAccessDetails) Type() string { + return a.bmcType +} + +// NeedsMAC returns true when the host is going to need a separate +// port created rather than having it discovered. +func (a *ipmiAccessDetails) NeedsMAC() bool { + // libvirt-based hosts used for dev and testing require a MAC + // address, specified as part of the host, but we don't want the + // provisioner to have to know the rules about which drivers + // require what so we hide that detail inside this class and just + // let the provisioner know that "some" drivers require a MAC and + // it should ask. + return a.bmcType == "libvirt" +} + +func (a *ipmiAccessDetails) Driver() string { + return "ipmi" +} + +// DriverInfo returns a data structure to pass as the DriverInfo +// parameter when creating a node in Ironic. The structure is +// pre-populated with the access information, and the caller is +// expected to add any other information that might be needed (such as +// the kernel and ramdisk locations). 
+func (a *ipmiAccessDetails) DriverInfo(bmcCreds Credentials) map[string]interface{} { + result := map[string]interface{}{ + "ipmi_port": a.portNum, + "ipmi_username": bmcCreds.Username, + "ipmi_password": bmcCreds.Password, + "ipmi_address": a.hostname, + } + if a.portNum == "" { + result["ipmi_port"] = ipmiDefaultPort + } + return result +} + +func (a *ipmiAccessDetails) BootInterface() string { + return "ipxe" +} diff --git a/vendor/github.com/metal3-io/baremetal-operator/pkg/hardware/profile.go b/vendor/github.com/metal3-io/baremetal-operator/pkg/hardware/profile.go new file mode 100644 index 00000000000..ad2808c133a --- /dev/null +++ b/vendor/github.com/metal3-io/baremetal-operator/pkg/hardware/profile.go @@ -0,0 +1,98 @@ +package hardware + +import ( + "fmt" +) + +const ( + // DefaultProfileName is the default hardware profile to use when + // no other profile matches. + DefaultProfileName string = "unknown" +) + +// Profile holds the settings for a class of hardware. +type Profile struct { + // Name holds the profile name + Name string + + // RootDeviceHints holds the suggestions for placing the storage + // for the root filesystem. + RootDeviceHints RootDeviceHints + + // RootGB is the size of the root volume in GB + RootGB int + + // LocalGB is the size of something(?) + LocalGB int + + // CPUArch is the architecture of the CPU. + CPUArch string +} + +// RootDeviceHints holds the hints for specifying the storage location +// for the root filesystem for the image. 
+// +// NOTE(dhellmann): Valid ironic hints are: "vendor, +// wwn_vendor_extension, wwn_with_extension, by_path, serial, wwn, +// size, rotational, name, hctl, model" +type RootDeviceHints struct { + // A device name like "/dev/vda" + DeviceName string + + // A SCSI bus address like 0:0:0:0 + HCTL string +} + +var profiles = make(map[string]Profile) + +func init() { + profiles[DefaultProfileName] = Profile{ + Name: DefaultProfileName, + RootDeviceHints: RootDeviceHints{ + DeviceName: "/dev/sda", + }, + RootGB: 10, + LocalGB: 50, + CPUArch: "x86_64", + } + + profiles["libvirt"] = Profile{ + Name: "libvirt", + RootDeviceHints: RootDeviceHints{ + DeviceName: "/dev/vda", + }, + RootGB: 10, + LocalGB: 50, + CPUArch: "x86_64", + } + + profiles["dell"] = Profile{ + Name: "dell", + RootDeviceHints: RootDeviceHints{ + HCTL: "0:0:0:0", + }, + RootGB: 10, + LocalGB: 50, + CPUArch: "x86_64", + } + + profiles["dell-raid"] = Profile{ + Name: "dell-raid", + RootDeviceHints: RootDeviceHints{ + HCTL: "0:2:0:0", + }, + RootGB: 10, + LocalGB: 50, + CPUArch: "x86_64", + } + +} + +// GetProfile returns the named profile +func GetProfile(name string) (Profile, error) { + profile, ok := profiles[name] + if !ok { + return Profile{}, fmt.Errorf("No hardware profile named %q", name) + } + return profile, nil +} diff --git a/vendor/github.com/metal3-io/cluster-api-provider-baremetal/LICENSE b/vendor/github.com/metal3-io/cluster-api-provider-baremetal/LICENSE new file mode 100644 index 00000000000..261eeb9e9f8 --- /dev/null +++ b/vendor/github.com/metal3-io/cluster-api-provider-baremetal/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. 
+ + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of 
the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/metal3-io/cluster-api-provider-baremetal/pkg/apis/addtoscheme_baremetal_v1alpha1.go b/vendor/github.com/metal3-io/cluster-api-provider-baremetal/pkg/apis/addtoscheme_baremetal_v1alpha1.go new file mode 100644 index 00000000000..5fbf89d0129 --- /dev/null +++ b/vendor/github.com/metal3-io/cluster-api-provider-baremetal/pkg/apis/addtoscheme_baremetal_v1alpha1.go @@ -0,0 +1,26 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package apis + +import ( + "github.com/metal3-io/cluster-api-provider-baremetal/pkg/apis/baremetal/v1alpha1" +) + +func init() { + // Register the types with the Scheme so the components can map objects to GroupVersionKinds and back + AddToSchemes = append(AddToSchemes, v1alpha1.SchemeBuilder.AddToScheme) +} diff --git a/vendor/github.com/metal3-io/cluster-api-provider-baremetal/pkg/apis/apis.go b/vendor/github.com/metal3-io/cluster-api-provider-baremetal/pkg/apis/apis.go new file mode 100644 index 00000000000..956c65ce5aa --- /dev/null +++ b/vendor/github.com/metal3-io/cluster-api-provider-baremetal/pkg/apis/apis.go @@ -0,0 +1,33 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Generate deepcopy for apis +//go:generate go run ../../vendor/k8s.io/code-generator/cmd/deepcopy-gen/main.go -O zz_generated.deepcopy -i ./... -h ../../hack/boilerplate.go.txt + +// Package apis contains Kubernetes API groups. 
+package apis + +import ( + "k8s.io/apimachinery/pkg/runtime" +) + +// AddToSchemes may be used to add all resources defined in the project to a Scheme +var AddToSchemes runtime.SchemeBuilder + +// AddToScheme adds all Resources to the Scheme +func AddToScheme(s *runtime.Scheme) error { + return AddToSchemes.AddToScheme(s) +} diff --git a/vendor/github.com/metal3-io/cluster-api-provider-baremetal/pkg/apis/baremetal/v1alpha1/baremetalmachineproviderspec_types.go b/vendor/github.com/metal3-io/cluster-api-provider-baremetal/pkg/apis/baremetal/v1alpha1/baremetalmachineproviderspec_types.go new file mode 100644 index 00000000000..c54ce442910 --- /dev/null +++ b/vendor/github.com/metal3-io/cluster-api-provider-baremetal/pkg/apis/baremetal/v1alpha1/baremetalmachineproviderspec_types.go @@ -0,0 +1,104 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + "fmt" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/selection" +) + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// BareMetalMachineProviderSpec holds data that the actuator needs to provision +// and manage a Machine. +// +k8s:openapi-gen=true +type BareMetalMachineProviderSpec struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + // Image is the image to be provisioned. 
+ Image Image `json:"image"` + + // UserData references the Secret that holds user data needed by the bare metal + // operator. The Namespace is optional; it will default to the Machine's + // namespace if not specified. + UserData *corev1.SecretReference `json:"userData,omitempty"` + + // HostSelector specifies matching criteria for labels on BareMetalHosts. + // This is used to limit the set of BareMetalHost objects considered for + // claiming for a Machine. + HostSelector HostSelector `json:"hostSelector,omitempty"` +} + +// HostSelector specifies matching criteria for labels on BareMetalHosts. +// This is used to limit the set of BareMetalHost objects considered for +// claiming for a Machine. +type HostSelector struct { + // Key/value pairs of labels that must exist on a chosen BareMetalHost + MatchLabels map[string]string `json:"matchLabels,omitempty"` + + // Label match expressions that must be true on a chosen BareMetalHost + MatchExpressions []HostSelectorRequirement `json:"matchExpressions,omitempty"` +} + +type HostSelectorRequirement struct { + Key string `json:"key"` + Operator selection.Operator `json:"operator"` + Values []string `json:"values"` +} + +// Image holds the details of an image to use during provisioning. +type Image struct { + // URL is a location of an image to deploy. + URL string `json:"url"` + + // Checksum is a md5sum value or a URL to retrieve one. + Checksum string `json:"checksum"` +} + +// IsValid returns an error if the object is not valid, otherwise nil. The +// string representation of the error is suitable for human consumption. 
+func (s *BareMetalMachineProviderSpec) IsValid() error { + missing := []string{} + if s.Image.URL == "" { + missing = append(missing, "Image.URL") + } + if s.Image.Checksum == "" { + missing = append(missing, "Image.Checksum") + } + if len(missing) > 0 { + return fmt.Errorf("Missing fields from ProviderSpec: %v", missing) + } + return nil +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// BareMetalMachineProviderSpecList contains a list of BareMetalMachineProviderSpec +type BareMetalMachineProviderSpecList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []BareMetalMachineProviderSpec `json:"items"` +} + +func init() { + SchemeBuilder.Register(&BareMetalMachineProviderSpec{}, &BareMetalMachineProviderSpecList{}) +} diff --git a/vendor/github.com/metal3-io/cluster-api-provider-baremetal/pkg/apis/baremetal/v1alpha1/baremetalmachineproviderstatus_types.go b/vendor/github.com/metal3-io/cluster-api-provider-baremetal/pkg/apis/baremetal/v1alpha1/baremetalmachineproviderstatus_types.go new file mode 100644 index 00000000000..122ba5f7b55 --- /dev/null +++ b/vendor/github.com/metal3-io/cluster-api-provider-baremetal/pkg/apis/baremetal/v1alpha1/baremetalmachineproviderstatus_types.go @@ -0,0 +1,44 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// BareMetalMachineProviderStatus is the Schema for the baremetalmachineproviderstatuses API +// +k8s:openapi-gen=true +type BareMetalMachineProviderStatus struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// BareMetalMachineProviderStatusList contains a list of BareMetalMachineProviderStatus +type BareMetalMachineProviderStatusList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []BareMetalMachineProviderStatus `json:"items"` +} + +func init() { + SchemeBuilder.Register(&BareMetalMachineProviderStatus{}, &BareMetalMachineProviderStatusList{}) +} diff --git a/vendor/github.com/metal3-io/cluster-api-provider-baremetal/pkg/apis/baremetal/v1alpha1/doc.go b/vendor/github.com/metal3-io/cluster-api-provider-baremetal/pkg/apis/baremetal/v1alpha1/doc.go new file mode 100644 index 00000000000..e88a2570dfc --- /dev/null +++ b/vendor/github.com/metal3-io/cluster-api-provider-baremetal/pkg/apis/baremetal/v1alpha1/doc.go @@ -0,0 +1,23 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +// Package v1alpha1 contains API Schema definitions for the baremetal v1alpha1 API group +// +k8s:openapi-gen=true +// +k8s:deepcopy-gen=package,register +// +k8s:conversion-gen=github.com/metal3-io/cluster-api-provider-baremetal/pkg/apis/baremetal +// +k8s:defaulter-gen=TypeMeta +// +groupName=baremetal.cluster.k8s.io +package v1alpha1 diff --git a/vendor/github.com/metal3-io/cluster-api-provider-baremetal/pkg/apis/baremetal/v1alpha1/register.go b/vendor/github.com/metal3-io/cluster-api-provider-baremetal/pkg/apis/baremetal/v1alpha1/register.go new file mode 100644 index 00000000000..ffbae771a94 --- /dev/null +++ b/vendor/github.com/metal3-io/cluster-api-provider-baremetal/pkg/apis/baremetal/v1alpha1/register.go @@ -0,0 +1,46 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// NOTE: Boilerplate only. Ignore this file. 
+ +// Package v1alpha1 contains API Schema definitions for the baremetal v1alpha1 API group +// +k8s:openapi-gen=true +// +k8s:deepcopy-gen=package,register +// +k8s:conversion-gen=github.com/metal3-io/cluster-api-provider-baremetal/pkg/apis/baremetal +// +k8s:defaulter-gen=TypeMeta +// +groupName=baremetal.cluster.k8s.io +package v1alpha1 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +var ( + // SchemeGroupVersion is group version used to register these objects + SchemeGroupVersion = schema.GroupVersion{Group: "baremetal.cluster.k8s.io", Version: "v1alpha1"} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: SchemeGroupVersion} + + // AddToScheme is required by pkg/client/... + AddToScheme = SchemeBuilder.AddToScheme +) + +// Resource is required by pkg/client/listers/... +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} diff --git a/vendor/github.com/metal3-io/cluster-api-provider-baremetal/pkg/apis/baremetal/v1alpha1/zz_generated.deepcopy.go b/vendor/github.com/metal3-io/cluster-api-provider-baremetal/pkg/apis/baremetal/v1alpha1/zz_generated.deepcopy.go new file mode 100644 index 00000000000..81d9c6a3e4d --- /dev/null +++ b/vendor/github.com/metal3-io/cluster-api-provider-baremetal/pkg/apis/baremetal/v1alpha1/zz_generated.deepcopy.go @@ -0,0 +1,217 @@ +// +build !ignore_autogenerated + +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ +// Code generated by main. DO NOT EDIT. + +package v1alpha1 + +import ( + v1 "k8s.io/api/core/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BareMetalMachineProviderSpec) DeepCopyInto(out *BareMetalMachineProviderSpec) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + out.Image = in.Image + if in.UserData != nil { + in, out := &in.UserData, &out.UserData + *out = new(v1.SecretReference) + **out = **in + } + in.HostSelector.DeepCopyInto(&out.HostSelector) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BareMetalMachineProviderSpec. +func (in *BareMetalMachineProviderSpec) DeepCopy() *BareMetalMachineProviderSpec { + if in == nil { + return nil + } + out := new(BareMetalMachineProviderSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *BareMetalMachineProviderSpec) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BareMetalMachineProviderSpecList) DeepCopyInto(out *BareMetalMachineProviderSpecList) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]BareMetalMachineProviderSpec, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BareMetalMachineProviderSpecList. 
+func (in *BareMetalMachineProviderSpecList) DeepCopy() *BareMetalMachineProviderSpecList { + if in == nil { + return nil + } + out := new(BareMetalMachineProviderSpecList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *BareMetalMachineProviderSpecList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BareMetalMachineProviderStatus) DeepCopyInto(out *BareMetalMachineProviderStatus) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BareMetalMachineProviderStatus. +func (in *BareMetalMachineProviderStatus) DeepCopy() *BareMetalMachineProviderStatus { + if in == nil { + return nil + } + out := new(BareMetalMachineProviderStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *BareMetalMachineProviderStatus) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BareMetalMachineProviderStatusList) DeepCopyInto(out *BareMetalMachineProviderStatusList) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]BareMetalMachineProviderStatus, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BareMetalMachineProviderStatusList. 
+func (in *BareMetalMachineProviderStatusList) DeepCopy() *BareMetalMachineProviderStatusList { + if in == nil { + return nil + } + out := new(BareMetalMachineProviderStatusList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *BareMetalMachineProviderStatusList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HostSelector) DeepCopyInto(out *HostSelector) { + *out = *in + if in.MatchLabels != nil { + in, out := &in.MatchLabels, &out.MatchLabels + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.MatchExpressions != nil { + in, out := &in.MatchExpressions, &out.MatchExpressions + *out = make([]HostSelectorRequirement, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HostSelector. +func (in *HostSelector) DeepCopy() *HostSelector { + if in == nil { + return nil + } + out := new(HostSelector) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HostSelectorRequirement) DeepCopyInto(out *HostSelectorRequirement) { + *out = *in + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HostSelectorRequirement. 
+func (in *HostSelectorRequirement) DeepCopy() *HostSelectorRequirement { + if in == nil { + return nil + } + out := new(HostSelectorRequirement) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Image) DeepCopyInto(out *Image) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Image. +func (in *Image) DeepCopy() *Image { + if in == nil { + return nil + } + out := new(Image) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/github.com/openshift/cloud-credential-operator/pkg/controller/utils/condition_utils.go b/vendor/github.com/openshift/cloud-credential-operator/pkg/controller/utils/condition_utils.go new file mode 100644 index 00000000000..fed97445d44 --- /dev/null +++ b/vendor/github.com/openshift/cloud-credential-operator/pkg/controller/utils/condition_utils.go @@ -0,0 +1,119 @@ +/* +Copyright 2018 The OpenShift Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package utils + +import ( + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + minterv1 "github.com/openshift/cloud-credential-operator/pkg/apis/cloudcredential/v1" +) + +// UpdateConditionCheck tests whether a condition should be updated from the +// old condition to the new condition. Returns true if the condition should +// be updated. 
+type UpdateConditionCheck func(oldReason, oldMessage, newReason, newMessage string) bool + +// UpdateConditionAlways returns true. The condition will always be updated. +func UpdateConditionAlways(_, _, _, _ string) bool { + return true +} + +// UpdateConditionIfReasonOrMessageChange returns true if there is a change +// in the reason or the message of the condition. +func UpdateConditionIfReasonOrMessageChange(oldReason, oldMessage, newReason, newMessage string) bool { + return oldReason != newReason || + oldMessage != newMessage +} + +// UpdateConditionNever return false. The condition will never be updated, +// unless there is a change in the status of the condition. +func UpdateConditionNever(_, _, _, _ string) bool { + return false +} + +// FindCredentialsRequestCondition iterates all conditions on a CredentialsRequest looking for the +// specified condition type. If none exists nil will be returned. +func FindCredentialsRequestCondition(conditions []minterv1.CredentialsRequestCondition, conditionType minterv1.CredentialsRequestConditionType) *minterv1.CredentialsRequestCondition { + for i, condition := range conditions { + if condition.Type == conditionType { + return &conditions[i] + } + } + return nil +} + +func shouldUpdateCondition( + oldStatus corev1.ConditionStatus, oldReason, oldMessage string, + newStatus corev1.ConditionStatus, newReason, newMessage string, + updateConditionCheck UpdateConditionCheck, +) bool { + if oldStatus != newStatus { + return true + } + return updateConditionCheck(oldReason, oldMessage, newReason, newMessage) +} + +// SetCredentialsRequestCondition sets the condition for the CredentialsRequest and returns the new slice of conditions. +// If the CredentialsRequest does not already have a condition with the specified type, +// a condition will be added to the slice if and only if the specified +// status is True. 
+// If the CredentialsRequest does already have a condition with the specified type, +// the condition will be updated if either of the following are true. +// 1) Requested status is different than existing status. +// 2) The updateConditionCheck function returns true. +func SetCredentialsRequestCondition( + conditions []minterv1.CredentialsRequestCondition, + conditionType minterv1.CredentialsRequestConditionType, + status corev1.ConditionStatus, + reason string, + message string, + updateConditionCheck UpdateConditionCheck, +) []minterv1.CredentialsRequestCondition { + now := metav1.Now() + existingCondition := FindCredentialsRequestCondition(conditions, conditionType) + if existingCondition == nil { + if status == corev1.ConditionTrue { + conditions = append( + conditions, + minterv1.CredentialsRequestCondition{ + Type: conditionType, + Status: status, + Reason: reason, + Message: message, + LastTransitionTime: now, + LastProbeTime: now, + }, + ) + } + } else { + if shouldUpdateCondition( + existingCondition.Status, existingCondition.Reason, existingCondition.Message, + status, reason, message, + updateConditionCheck, + ) { + if existingCondition.Status != status { + existingCondition.LastTransitionTime = now + } + existingCondition.Status = status + existingCondition.Reason = reason + existingCondition.Message = message + existingCondition.LastProbeTime = now + } + } + return conditions +} diff --git a/vendor/github.com/openshift/cloud-credential-operator/pkg/controller/utils/utils.go b/vendor/github.com/openshift/cloud-credential-operator/pkg/controller/utils/utils.go new file mode 100644 index 00000000000..cf9530c4f12 --- /dev/null +++ b/vendor/github.com/openshift/cloud-credential-operator/pkg/controller/utils/utils.go @@ -0,0 +1,60 @@ +package utils + +import ( + "context" + "fmt" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/types" + + "sigs.k8s.io/controller-runtime/pkg/client" + + log "github.com/sirupsen/logrus" + + configv1 
"github.com/openshift/api/config/v1" +) + +const ( + awsCredsSecretIDKey = "aws_access_key_id" + awsCredsSecretAccessKey = "aws_secret_access_key" +) + +func LoadCredsFromSecret(kubeClient client.Client, namespace, secretName string) ([]byte, []byte, error) { + + secret := &corev1.Secret{} + err := kubeClient.Get(context.TODO(), + types.NamespacedName{ + Name: secretName, + Namespace: namespace, + }, + secret) + if err != nil { + return nil, nil, err + } + accessKeyID, ok := secret.Data[awsCredsSecretIDKey] + if !ok { + return nil, nil, fmt.Errorf("AWS credentials secret %v did not contain key %v", + secretName, awsCredsSecretIDKey) + } + secretAccessKey, ok := secret.Data[awsCredsSecretAccessKey] + if !ok { + return nil, nil, fmt.Errorf("AWS credentials secret %v did not contain key %v", + secretName, awsCredsSecretAccessKey) + } + return accessKeyID, secretAccessKey, nil +} + +// LoadInfrastructureName loads the cluster Infrastructure config and returns the infra name +// used to identify this cluster, and tag some cloud objects. +func LoadInfrastructureName(c client.Client, logger log.FieldLogger) (string, error) { + infra := &configv1.Infrastructure{} + err := c.Get(context.Background(), types.NamespacedName{Name: "cluster"}, infra) + if err != nil { + logger.WithError(err).Error("error loading Infrastructure config 'cluster'") + return "", err + } + + logger.Debugf("Loaded infrastructure name: %s", infra.Status.InfrastructureName) + return infra.Status.InfrastructureName, nil + +} diff --git a/vendor/k8s.io/client-go/dynamic/interface.go b/vendor/k8s.io/client-go/dynamic/interface.go new file mode 100644 index 00000000000..c457be1780b --- /dev/null +++ b/vendor/k8s.io/client-go/dynamic/interface.go @@ -0,0 +1,59 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package dynamic + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/watch" +) + +type Interface interface { + Resource(resource schema.GroupVersionResource) NamespaceableResourceInterface +} + +type ResourceInterface interface { + Create(obj *unstructured.Unstructured, options metav1.CreateOptions, subresources ...string) (*unstructured.Unstructured, error) + Update(obj *unstructured.Unstructured, options metav1.UpdateOptions, subresources ...string) (*unstructured.Unstructured, error) + UpdateStatus(obj *unstructured.Unstructured, options metav1.UpdateOptions) (*unstructured.Unstructured, error) + Delete(name string, options *metav1.DeleteOptions, subresources ...string) error + DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error + Get(name string, options metav1.GetOptions, subresources ...string) (*unstructured.Unstructured, error) + List(opts metav1.ListOptions) (*unstructured.UnstructuredList, error) + Watch(opts metav1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, options metav1.UpdateOptions, subresources ...string) (*unstructured.Unstructured, error) +} + +type NamespaceableResourceInterface interface { + Namespace(string) ResourceInterface + ResourceInterface +} + +// APIPathResolverFunc knows how to convert a groupVersion to its API path. The Kind field is optional. 
+// TODO find a better place to move this for existing callers +type APIPathResolverFunc func(kind schema.GroupVersionKind) string + +// LegacyAPIPathResolverFunc can resolve paths properly with the legacy API. +// TODO find a better place to move this for existing callers +func LegacyAPIPathResolverFunc(kind schema.GroupVersionKind) string { + if len(kind.Group) == 0 { + return "/api" + } + return "/apis" +} diff --git a/vendor/k8s.io/client-go/dynamic/scheme.go b/vendor/k8s.io/client-go/dynamic/scheme.go new file mode 100644 index 00000000000..c4aa081f91f --- /dev/null +++ b/vendor/k8s.io/client-go/dynamic/scheme.go @@ -0,0 +1,98 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package dynamic + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/runtime/serializer" + "k8s.io/apimachinery/pkg/runtime/serializer/json" + "k8s.io/apimachinery/pkg/runtime/serializer/versioning" +) + +var watchScheme = runtime.NewScheme() +var basicScheme = runtime.NewScheme() +var deleteScheme = runtime.NewScheme() +var parameterScheme = runtime.NewScheme() +var deleteOptionsCodec = serializer.NewCodecFactory(deleteScheme) +var dynamicParameterCodec = runtime.NewParameterCodec(parameterScheme) + +var versionV1 = schema.GroupVersion{Version: "v1"} + +func init() { + metav1.AddToGroupVersion(watchScheme, versionV1) + metav1.AddToGroupVersion(basicScheme, versionV1) + metav1.AddToGroupVersion(parameterScheme, versionV1) + metav1.AddToGroupVersion(deleteScheme, versionV1) +} + +var watchJsonSerializerInfo = runtime.SerializerInfo{ + MediaType: "application/json", + EncodesAsText: true, + Serializer: json.NewSerializer(json.DefaultMetaFactory, watchScheme, watchScheme, false), + PrettySerializer: json.NewSerializer(json.DefaultMetaFactory, watchScheme, watchScheme, true), + StreamSerializer: &runtime.StreamSerializerInfo{ + EncodesAsText: true, + Serializer: json.NewSerializer(json.DefaultMetaFactory, watchScheme, watchScheme, false), + Framer: json.Framer, + }, +} + +// watchNegotiatedSerializer is used to read the wrapper of the watch stream +type watchNegotiatedSerializer struct{} + +var watchNegotiatedSerializerInstance = watchNegotiatedSerializer{} + +func (s watchNegotiatedSerializer) SupportedMediaTypes() []runtime.SerializerInfo { + return []runtime.SerializerInfo{watchJsonSerializerInfo} +} + +func (s watchNegotiatedSerializer) EncoderForVersion(encoder runtime.Encoder, gv runtime.GroupVersioner) runtime.Encoder { + return versioning.NewDefaultingCodecForScheme(watchScheme, encoder, nil, gv, nil) +} + +func (s watchNegotiatedSerializer) 
DecoderToVersion(decoder runtime.Decoder, gv runtime.GroupVersioner) runtime.Decoder { + return versioning.NewDefaultingCodecForScheme(watchScheme, nil, decoder, nil, gv) +} + +// basicNegotiatedSerializer is used to handle discovery and error handling serialization +type basicNegotiatedSerializer struct{} + +func (s basicNegotiatedSerializer) SupportedMediaTypes() []runtime.SerializerInfo { + return []runtime.SerializerInfo{ + { + MediaType: "application/json", + EncodesAsText: true, + Serializer: json.NewSerializer(json.DefaultMetaFactory, basicScheme, basicScheme, false), + PrettySerializer: json.NewSerializer(json.DefaultMetaFactory, basicScheme, basicScheme, true), + StreamSerializer: &runtime.StreamSerializerInfo{ + EncodesAsText: true, + Serializer: json.NewSerializer(json.DefaultMetaFactory, basicScheme, basicScheme, false), + Framer: json.Framer, + }, + }, + } +} + +func (s basicNegotiatedSerializer) EncoderForVersion(encoder runtime.Encoder, gv runtime.GroupVersioner) runtime.Encoder { + return versioning.NewDefaultingCodecForScheme(watchScheme, encoder, nil, gv, nil) +} + +func (s basicNegotiatedSerializer) DecoderToVersion(decoder runtime.Decoder, gv runtime.GroupVersioner) runtime.Decoder { + return versioning.NewDefaultingCodecForScheme(watchScheme, nil, decoder, nil, gv) +} diff --git a/vendor/k8s.io/client-go/dynamic/simple.go b/vendor/k8s.io/client-go/dynamic/simple.go new file mode 100644 index 00000000000..9e21cda6e37 --- /dev/null +++ b/vendor/k8s.io/client-go/dynamic/simple.go @@ -0,0 +1,326 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package dynamic + +import ( + "io" + + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/runtime/serializer/streaming" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/watch" + "k8s.io/client-go/rest" +) + +type dynamicClient struct { + client *rest.RESTClient +} + +var _ Interface = &dynamicClient{} + +// NewForConfigOrDie creates a new Interface for the given config and +// panics if there is an error in the config. 
// NewForConfigOrDie creates a dynamic client for the given config and
// panics if the client cannot be created. Intended for callers (tests,
// hard-wired setup) that cannot reasonably recover from a bad config.
func NewForConfigOrDie(c *rest.Config) Interface {
	ret, err := NewForConfig(c)
	if err != nil {
		panic(err)
	}
	return ret
}

// NewForConfig returns a dynamic client that talks to the API server using
// plain JSON for every resource, regardless of group/version. The input
// config is copied and never mutated.
func NewForConfig(inConfig *rest.Config) (Interface, error) {
	config := rest.CopyConfig(inConfig)
	// for serializing the options
	config.GroupVersion = &schema.GroupVersion{}
	// Sentinel path: every request sets an absolute path explicitly via
	// AbsPath, so this value should never appear on the wire. If it does,
	// a request was built without makeURLSegments.
	config.APIPath = "/if-you-see-this-search-for-the-break"
	config.AcceptContentTypes = "application/json"
	config.ContentType = "application/json"
	config.NegotiatedSerializer = basicNegotiatedSerializer{} // this gets used for discovery and error handling types
	if config.UserAgent == "" {
		config.UserAgent = rest.DefaultKubernetesUserAgent()
	}

	restClient, err := rest.RESTClientFor(config)
	if err != nil {
		return nil, err
	}

	return &dynamicClient{client: restClient}, nil
}

// dynamicResourceClient serves a single GroupVersionResource, optionally
// scoped to a namespace. It is the value returned by Resource/Namespace.
type dynamicResourceClient struct {
	client    *dynamicClient
	namespace string
	resource  schema.GroupVersionResource
}

// Resource returns a client scoped to the given GroupVersionResource.
func (c *dynamicClient) Resource(resource schema.GroupVersionResource) NamespaceableResourceInterface {
	return &dynamicResourceClient{client: c, resource: resource}
}

// Namespace returns a shallow copy of the client scoped to ns; the
// receiver itself is left unmodified.
func (c *dynamicResourceClient) Namespace(ns string) ResourceInterface {
	ret := *c
	ret.namespace = ns
	return &ret
}

// Create POSTs the JSON-encoded object. When subresources are given, the
// object's own name is placed in the URL so the POST targets the parent
// object's subresource path.
func (c *dynamicResourceClient) Create(obj *unstructured.Unstructured, opts metav1.CreateOptions, subresources ...string) (*unstructured.Unstructured, error) {
	outBytes, err := runtime.Encode(unstructured.UnstructuredJSONScheme, obj)
	if err != nil {
		return nil, err
	}
	name := ""
	if len(subresources) > 0 {
		accessor, err := meta.Accessor(obj)
		if err != nil {
			return nil, err
		}
		name = accessor.GetName()
	}

	result := c.client.client.
		Post().
		AbsPath(append(c.makeURLSegments(name), subresources...)...).
		Body(outBytes).
		SpecificallyVersionedParams(&opts, dynamicParameterCodec, versionV1).
		Do()
	if err := result.Error(); err != nil {
		return nil, err
	}

	retBytes, err := result.Raw()
	if err != nil {
		return nil, err
	}
	uncastObj, err := runtime.Decode(unstructured.UnstructuredJSONScheme, retBytes)
	if err != nil {
		return nil, err
	}
	return uncastObj.(*unstructured.Unstructured), nil
}

// Update PUTs the JSON-encoded object at its own name (taken from the
// object's metadata), optionally targeting a subresource.
func (c *dynamicResourceClient) Update(obj *unstructured.Unstructured, opts metav1.UpdateOptions, subresources ...string) (*unstructured.Unstructured, error) {
	accessor, err := meta.Accessor(obj)
	if err != nil {
		return nil, err
	}
	outBytes, err := runtime.Encode(unstructured.UnstructuredJSONScheme, obj)
	if err != nil {
		return nil, err
	}

	result := c.client.client.
		Put().
		AbsPath(append(c.makeURLSegments(accessor.GetName()), subresources...)...).
		Body(outBytes).
		SpecificallyVersionedParams(&opts, dynamicParameterCodec, versionV1).
		Do()
	if err := result.Error(); err != nil {
		return nil, err
	}

	retBytes, err := result.Raw()
	if err != nil {
		return nil, err
	}
	uncastObj, err := runtime.Decode(unstructured.UnstructuredJSONScheme, retBytes)
	if err != nil {
		return nil, err
	}
	return uncastObj.(*unstructured.Unstructured), nil
}

// UpdateStatus is Update hard-wired to the "status" subresource.
func (c *dynamicResourceClient) UpdateStatus(obj *unstructured.Unstructured, opts metav1.UpdateOptions) (*unstructured.Unstructured, error) {
	accessor, err := meta.Accessor(obj)
	if err != nil {
		return nil, err
	}

	outBytes, err := runtime.Encode(unstructured.UnstructuredJSONScheme, obj)
	if err != nil {
		return nil, err
	}

	result := c.client.client.
		Put().
		AbsPath(append(c.makeURLSegments(accessor.GetName()), "status")...).
		Body(outBytes).
		SpecificallyVersionedParams(&opts, dynamicParameterCodec, versionV1).
		Do()
	if err := result.Error(); err != nil {
		return nil, err
	}

	retBytes, err := result.Raw()
	if err != nil {
		return nil, err
	}
	uncastObj, err := runtime.Decode(unstructured.UnstructuredJSONScheme, retBytes)
	if err != nil {
		return nil, err
	}
	return uncastObj.(*unstructured.Unstructured), nil
}

// Delete removes the named object. A nil opts is treated as the empty
// DeleteOptions; the options travel in the request body (encoded with the
// legacy v1 codec), not as query parameters.
func (c *dynamicResourceClient) Delete(name string, opts *metav1.DeleteOptions, subresources ...string) error {
	if opts == nil {
		opts = &metav1.DeleteOptions{}
	}
	deleteOptionsByte, err := runtime.Encode(deleteOptionsCodec.LegacyCodec(schema.GroupVersion{Version: "v1"}), opts)
	if err != nil {
		return err
	}

	result := c.client.client.
		Delete().
		AbsPath(append(c.makeURLSegments(name), subresources...)...).
		Body(deleteOptionsByte).
		Do()
	return result.Error()
}

// DeleteCollection deletes all objects selected by listOptions (sent as
// query parameters); the DeleteOptions go in the body as with Delete.
func (c *dynamicResourceClient) DeleteCollection(opts *metav1.DeleteOptions, listOptions metav1.ListOptions) error {
	if opts == nil {
		opts = &metav1.DeleteOptions{}
	}
	deleteOptionsByte, err := runtime.Encode(deleteOptionsCodec.LegacyCodec(schema.GroupVersion{Version: "v1"}), opts)
	if err != nil {
		return err
	}

	result := c.client.client.
		Delete().
		AbsPath(c.makeURLSegments("")...).
		Body(deleteOptionsByte).
		SpecificallyVersionedParams(&listOptions, dynamicParameterCodec, versionV1).
		Do()
	return result.Error()
}

// Get fetches the named object (or one of its subresources) and decodes
// the JSON response into an Unstructured.
func (c *dynamicResourceClient) Get(name string, opts metav1.GetOptions, subresources ...string) (*unstructured.Unstructured, error) {
	result := c.client.client.Get().AbsPath(append(c.makeURLSegments(name), subresources...)...).SpecificallyVersionedParams(&opts, dynamicParameterCodec, versionV1).Do()
	if err := result.Error(); err != nil {
		return nil, err
	}
	retBytes, err := result.Raw()
	if err != nil {
		return nil, err
	}
	uncastObj, err := runtime.Decode(unstructured.UnstructuredJSONScheme, retBytes)
	if err != nil {
		return nil, err
	}
	return uncastObj.(*unstructured.Unstructured), nil
}

// List fetches the collection. If the decoder yields a single Unstructured
// rather than an UnstructuredList, it is converted via ToList.
func (c *dynamicResourceClient) List(opts metav1.ListOptions) (*unstructured.UnstructuredList, error) {
	result := c.client.client.Get().AbsPath(c.makeURLSegments("")...).SpecificallyVersionedParams(&opts, dynamicParameterCodec, versionV1).Do()
	if err := result.Error(); err != nil {
		return nil, err
	}
	retBytes, err := result.Raw()
	if err != nil {
		return nil, err
	}
	uncastObj, err := runtime.Decode(unstructured.UnstructuredJSONScheme, retBytes)
	if err != nil {
		return nil, err
	}
	if list, ok := uncastObj.(*unstructured.UnstructuredList); ok {
		return list, nil
	}

	list, err := uncastObj.(*unstructured.Unstructured).ToList()
	if err != nil {
		return nil, err
	}
	return list, nil
}

// Watch opens a watch on the collection, forcing opts.Watch=true and
// wiring a custom JSON streaming decoder so events decode into
// Unstructured objects.
func (c *dynamicResourceClient) Watch(opts metav1.ListOptions) (watch.Interface, error) {
	internalGV := schema.GroupVersions{
		{Group: c.resource.Group, Version: runtime.APIVersionInternal},
		// always include the legacy group as a decoding target to handle non-error `Status` return types
		{Group: "", Version: runtime.APIVersionInternal},
	}
	s := &rest.Serializers{
		Encoder: watchNegotiatedSerializerInstance.EncoderForVersion(watchJsonSerializerInfo.Serializer, c.resource.GroupVersion()),
		Decoder: watchNegotiatedSerializerInstance.DecoderToVersion(watchJsonSerializerInfo.Serializer, internalGV),

		RenegotiatedDecoder: func(contentType string, params map[string]string) (runtime.Decoder, error) {
			return watchNegotiatedSerializerInstance.DecoderToVersion(watchJsonSerializerInfo.Serializer, internalGV), nil
		},
		StreamingSerializer: watchJsonSerializerInfo.StreamSerializer.Serializer,
		Framer:              watchJsonSerializerInfo.StreamSerializer.Framer,
	}

	wrappedDecoderFn := func(body io.ReadCloser) streaming.Decoder {
		framer := s.Framer.NewFrameReader(body)
		return streaming.NewDecoder(framer, s.StreamingSerializer)
	}

	opts.Watch = true
	return c.client.client.Get().AbsPath(c.makeURLSegments("")...).
		SpecificallyVersionedParams(&opts, dynamicParameterCodec, versionV1).
		WatchWithSpecificDecoders(wrappedDecoderFn, unstructured.UnstructuredJSONScheme)
}

// Patch sends the caller-supplied patch bytes verbatim with the given
// patch content type and decodes the patched object from the response.
func (c *dynamicResourceClient) Patch(name string, pt types.PatchType, data []byte, opts metav1.UpdateOptions, subresources ...string) (*unstructured.Unstructured, error) {
	result := c.client.client.
		Patch(pt).
		AbsPath(append(c.makeURLSegments(name), subresources...)...).
		Body(data).
		SpecificallyVersionedParams(&opts, dynamicParameterCodec, versionV1).
		Do()
	if err := result.Error(); err != nil {
		return nil, err
	}
	retBytes, err := result.Raw()
	if err != nil {
		return nil, err
	}
	uncastObj, err := runtime.Decode(unstructured.UnstructuredJSONScheme, retBytes)
	if err != nil {
		return nil, err
	}
	return uncastObj.(*unstructured.Unstructured), nil
}

// makeURLSegments builds the absolute URL path for this resource:
// /api/<version> for the legacy core group, /apis/<group>/<version>
// otherwise, then namespaces/<ns> when namespaced, the resource, and
// finally the object name when non-empty.
func (c *dynamicResourceClient) makeURLSegments(name string) []string {
	url := []string{}
	if len(c.resource.Group) == 0 {
		url = append(url, "api")
	} else {
		url = append(url, "apis", c.resource.Group)
	}
	url = append(url, c.resource.Version)

	if len(c.namespace) > 0 {
		url = append(url, "namespaces", c.namespace)
	}
	url = append(url, c.resource.Resource)

	if len(name) > 0 {
		url = append(url, name)
	}

	return url
}
+type CategoryExpander interface { + Expand(category string) ([]schema.GroupResource, bool) +} + +// SimpleCategoryExpander implements CategoryExpander interface +// using a static mapping of categories to GroupResource mapping. +type SimpleCategoryExpander struct { + Expansions map[string][]schema.GroupResource +} + +// Expand fulfills CategoryExpander +func (e SimpleCategoryExpander) Expand(category string) ([]schema.GroupResource, bool) { + ret, ok := e.Expansions[category] + return ret, ok +} + +// discoveryCategoryExpander struct lets a REST Client wrapper (discoveryClient) to retrieve list of APIResourceList, +// and then convert to fallbackExpander +type discoveryCategoryExpander struct { + discoveryClient discovery.DiscoveryInterface +} + +// NewDiscoveryCategoryExpander returns a category expander that makes use of the "categories" fields from +// the API, found through the discovery client. In case of any error or no category found (which likely +// means we're at a cluster prior to categories support, fallback to the expander provided. +func NewDiscoveryCategoryExpander(client discovery.DiscoveryInterface) CategoryExpander { + if client == nil { + panic("Please provide discovery client to shortcut expander") + } + return discoveryCategoryExpander{discoveryClient: client} +} + +// Expand fulfills CategoryExpander +func (e discoveryCategoryExpander) Expand(category string) ([]schema.GroupResource, bool) { + // Get all supported resources for groups and versions from server, if no resource found, fallback anyway. 
+ apiResourceLists, _ := e.discoveryClient.ServerResources() + if len(apiResourceLists) == 0 { + return nil, false + } + + discoveredExpansions := map[string][]schema.GroupResource{} + for _, apiResourceList := range apiResourceLists { + gv, err := schema.ParseGroupVersion(apiResourceList.GroupVersion) + if err != nil { + continue + } + // Collect GroupVersions by categories + for _, apiResource := range apiResourceList.APIResources { + if categories := apiResource.Categories; len(categories) > 0 { + for _, category := range categories { + groupResource := schema.GroupResource{ + Group: gv.Group, + Resource: apiResource.Name, + } + discoveredExpansions[category] = append(discoveredExpansions[category], groupResource) + } + } + } + } + + ret, ok := discoveredExpansions[category] + return ret, ok +} + +// UnionCategoryExpander implements CategoryExpander interface. +// It maps given category string to union of expansions returned by all the CategoryExpanders in the list. +type UnionCategoryExpander []CategoryExpander + +// Expand fulfills CategoryExpander +func (u UnionCategoryExpander) Expand(category string) ([]schema.GroupResource, bool) { + ret := []schema.GroupResource{} + ok := false + + // Expand the category for each CategoryExpander in the list and merge/combine the results. + for _, expansion := range u { + curr, currOk := expansion.Expand(category) + + for _, currGR := range curr { + found := false + for _, existing := range ret { + if existing == currGR { + found = true + break + } + } + if !found { + ret = append(ret, currGR) + } + } + ok = ok || currOk + } + + return ret, ok +} diff --git a/vendor/k8s.io/client-go/restmapper/discovery.go b/vendor/k8s.io/client-go/restmapper/discovery.go new file mode 100644 index 00000000000..aa158626af4 --- /dev/null +++ b/vendor/k8s.io/client-go/restmapper/discovery.go @@ -0,0 +1,339 @@ +/* +Copyright 2016 The Kubernetes Authors. 
package restmapper

import (
	"fmt"
	"strings"
	"sync"

	"k8s.io/apimachinery/pkg/api/meta"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/client-go/discovery"

	"github.com/golang/glog"
)

// APIGroupResources is an API group with a mapping of versions to
// resources.
type APIGroupResources struct {
	Group metav1.APIGroup
	// A mapping of version string to a slice of APIResources for
	// that version.
	VersionedResources map[string][]metav1.APIResource
}

// NewDiscoveryRESTMapper returns a PriorityRESTMapper based on the discovered
// groups and resources passed in.
func NewDiscoveryRESTMapper(groupResources []*APIGroupResources) meta.RESTMapper {
	unionMapper := meta.MultiRESTMapper{}

	var groupPriority []string
	// /v1 is special. It should always come first
	resourcePriority := []schema.GroupVersionResource{{Group: "", Version: "v1", Resource: meta.AnyResource}}
	kindPriority := []schema.GroupVersionKind{{Group: "", Version: "v1", Kind: meta.AnyKind}}

	for _, group := range groupResources {
		groupPriority = append(groupPriority, group.Group.Name)

		// Make sure the preferred version comes first
		if len(group.Group.PreferredVersion.Version) != 0 {
			preferred := group.Group.PreferredVersion.Version
			if _, ok := group.VersionedResources[preferred]; ok {
				resourcePriority = append(resourcePriority, schema.GroupVersionResource{
					Group:    group.Group.Name,
					Version:  group.Group.PreferredVersion.Version,
					Resource: meta.AnyResource,
				})

				kindPriority = append(kindPriority, schema.GroupVersionKind{
					Group:   group.Group.Name,
					Version: group.Group.PreferredVersion.Version,
					Kind:    meta.AnyKind,
				})
			}
		}

		for _, discoveryVersion := range group.Group.Versions {
			resources, ok := group.VersionedResources[discoveryVersion.Version]
			if !ok {
				continue
			}

			// Add non-preferred versions after the preferred version, in case there are resources that only exist in those versions
			if discoveryVersion.Version != group.Group.PreferredVersion.Version {
				resourcePriority = append(resourcePriority, schema.GroupVersionResource{
					Group:    group.Group.Name,
					Version:  discoveryVersion.Version,
					Resource: meta.AnyResource,
				})

				kindPriority = append(kindPriority, schema.GroupVersionKind{
					Group:   group.Group.Name,
					Version: discoveryVersion.Version,
					Kind:    meta.AnyKind,
				})
			}

			gv := schema.GroupVersion{Group: group.Group.Name, Version: discoveryVersion.Version}
			versionMapper := meta.NewDefaultRESTMapper([]schema.GroupVersion{gv})

			for _, resource := range resources {
				scope := meta.RESTScopeNamespace
				if !resource.Namespaced {
					scope = meta.RESTScopeRoot
				}

				// if we have a slash, then this is a subresource and we shouldn't create mappings for those.
				if strings.Contains(resource.Name, "/") {
					continue
				}

				plural := gv.WithResource(resource.Name)
				singular := gv.WithResource(resource.SingularName)
				// this is for legacy resources and servers which don't list singular forms. For those we must still guess.
				if len(resource.SingularName) == 0 {
					_, singular = meta.UnsafeGuessKindToResource(gv.WithKind(resource.Kind))
				}

				// Register both the lowercased and the exact kind so
				// case-insensitive lookups resolve.
				versionMapper.AddSpecific(gv.WithKind(strings.ToLower(resource.Kind)), plural, singular, scope)
				versionMapper.AddSpecific(gv.WithKind(resource.Kind), plural, singular, scope)
				// TODO this is producing unsafe guesses that don't actually work, but it matches previous behavior
				versionMapper.Add(gv.WithKind(resource.Kind+"List"), scope)
			}
			// TODO why is this type not in discovery (at least for "v1")
			versionMapper.Add(gv.WithKind("List"), meta.RESTScopeRoot)
			unionMapper = append(unionMapper, versionMapper)
		}
	}

	// Group-level wildcards come last so specific version entries above
	// always win during priority resolution.
	for _, group := range groupPriority {
		resourcePriority = append(resourcePriority, schema.GroupVersionResource{
			Group:    group,
			Version:  meta.AnyVersion,
			Resource: meta.AnyResource,
		})
		kindPriority = append(kindPriority, schema.GroupVersionKind{
			Group:   group,
			Version: meta.AnyVersion,
			Kind:    meta.AnyKind,
		})
	}

	return meta.PriorityRESTMapper{
		Delegate:         unionMapper,
		ResourcePriority: resourcePriority,
		KindPriority:     kindPriority,
	}
}

// GetAPIGroupResources uses the provided discovery client to gather
// discovery information and populate a slice of APIGroupResources.
func GetAPIGroupResources(cl discovery.DiscoveryInterface) ([]*APIGroupResources, error) {
	apiGroups, err := cl.ServerGroups()
	if err != nil {
		if apiGroups == nil || len(apiGroups.Groups) == 0 {
			return nil, err
		}
		// TODO track the errors and update callers to handle partial errors.
	}
	var result []*APIGroupResources
	for _, group := range apiGroups.Groups {
		groupResources := &APIGroupResources{
			Group:              group,
			VersionedResources: make(map[string][]metav1.APIResource),
		}
		for _, version := range group.Versions {
			resources, err := cl.ServerResourcesForGroupVersion(version.GroupVersion)
			if err != nil {
				// continue as best we can
				// TODO track the errors and update callers to handle partial errors.
				if resources == nil || len(resources.APIResources) == 0 {
					continue
				}
			}
			groupResources.VersionedResources[version.Version] = resources.APIResources
		}
		result = append(result, groupResources)
	}
	return result, nil
}

// DeferredDiscoveryRESTMapper is a RESTMapper that will defer
// initialization of the RESTMapper until the first mapping is
// requested.
type DeferredDiscoveryRESTMapper struct {
	initMu   sync.Mutex // guards delegate construction and Reset
	delegate meta.RESTMapper
	cl       discovery.CachedDiscoveryInterface
}

// NewDeferredDiscoveryRESTMapper returns a
// DeferredDiscoveryRESTMapper that will lazily query the provided
// client for discovery information to do REST mappings.
func NewDeferredDiscoveryRESTMapper(cl discovery.CachedDiscoveryInterface) *DeferredDiscoveryRESTMapper {
	return &DeferredDiscoveryRESTMapper{
		cl: cl,
	}
}

// getDelegate lazily builds (and caches) the underlying mapper from
// discovery data; safe for concurrent use via initMu.
func (d *DeferredDiscoveryRESTMapper) getDelegate() (meta.RESTMapper, error) {
	d.initMu.Lock()
	defer d.initMu.Unlock()

	if d.delegate != nil {
		return d.delegate, nil
	}

	groupResources, err := GetAPIGroupResources(d.cl)
	if err != nil {
		return nil, err
	}

	d.delegate = NewDiscoveryRESTMapper(groupResources)
	return d.delegate, err
}

// Reset resets the internally cached Discovery information and will
// cause the next mapping request to re-discover.
func (d *DeferredDiscoveryRESTMapper) Reset() {
	glog.V(5).Info("Invalidating discovery information")

	d.initMu.Lock()
	defer d.initMu.Unlock()

	d.cl.Invalidate()
	d.delegate = nil
}

// KindFor takes a partial resource and returns back the single match.
// It returns an error if there are multiple matches.
// On a miss with stale discovery data, it resets and retries once.
func (d *DeferredDiscoveryRESTMapper) KindFor(resource schema.GroupVersionResource) (gvk schema.GroupVersionKind, err error) {
	del, err := d.getDelegate()
	if err != nil {
		return schema.GroupVersionKind{}, err
	}
	gvk, err = del.KindFor(resource)
	if err != nil && !d.cl.Fresh() {
		d.Reset()
		gvk, err = d.KindFor(resource)
	}
	return
}

// KindsFor takes a partial resource and returns back the list of
// potential kinds in priority order.
func (d *DeferredDiscoveryRESTMapper) KindsFor(resource schema.GroupVersionResource) (gvks []schema.GroupVersionKind, err error) {
	del, err := d.getDelegate()
	if err != nil {
		return nil, err
	}
	gvks, err = del.KindsFor(resource)
	if len(gvks) == 0 && !d.cl.Fresh() {
		d.Reset()
		gvks, err = d.KindsFor(resource)
	}
	return
}

// ResourceFor takes a partial resource and returns back the single
// match. It returns an error if there are multiple matches.
func (d *DeferredDiscoveryRESTMapper) ResourceFor(input schema.GroupVersionResource) (gvr schema.GroupVersionResource, err error) {
	del, err := d.getDelegate()
	if err != nil {
		return schema.GroupVersionResource{}, err
	}
	gvr, err = del.ResourceFor(input)
	if err != nil && !d.cl.Fresh() {
		d.Reset()
		gvr, err = d.ResourceFor(input)
	}
	return
}

// ResourcesFor takes a partial resource and returns back the list of
// potential resource in priority order.
func (d *DeferredDiscoveryRESTMapper) ResourcesFor(input schema.GroupVersionResource) (gvrs []schema.GroupVersionResource, err error) {
	del, err := d.getDelegate()
	if err != nil {
		return nil, err
	}
	gvrs, err = del.ResourcesFor(input)
	if len(gvrs) == 0 && !d.cl.Fresh() {
		d.Reset()
		gvrs, err = d.ResourcesFor(input)
	}
	return
}

// RESTMapping identifies a preferred resource mapping for the
// provided group kind.
func (d *DeferredDiscoveryRESTMapper) RESTMapping(gk schema.GroupKind, versions ...string) (m *meta.RESTMapping, err error) {
	del, err := d.getDelegate()
	if err != nil {
		return nil, err
	}
	m, err = del.RESTMapping(gk, versions...)
	if err != nil && !d.cl.Fresh() {
		d.Reset()
		m, err = d.RESTMapping(gk, versions...)
	}
	return
}

// RESTMappings returns the RESTMappings for the provided group kind
// in a rough internal preferred order. If no kind is found, it will
// return a NoResourceMatchError.
func (d *DeferredDiscoveryRESTMapper) RESTMappings(gk schema.GroupKind, versions ...string) (ms []*meta.RESTMapping, err error) {
	del, err := d.getDelegate()
	if err != nil {
		return nil, err
	}
	ms, err = del.RESTMappings(gk, versions...)
	if len(ms) == 0 && !d.cl.Fresh() {
		d.Reset()
		ms, err = d.RESTMappings(gk, versions...)
	}
	return
}

// ResourceSingularizer converts a resource name from plural to
// singular (e.g., from pods to pod).
func (d *DeferredDiscoveryRESTMapper) ResourceSingularizer(resource string) (singular string, err error) {
	del, err := d.getDelegate()
	if err != nil {
		return resource, err
	}
	singular, err = del.ResourceSingularizer(resource)
	if err != nil && !d.cl.Fresh() {
		d.Reset()
		singular, err = d.ResourceSingularizer(resource)
	}
	return
}

// String renders the mapper (or the initialization error) for debugging.
func (d *DeferredDiscoveryRESTMapper) String() string {
	del, err := d.getDelegate()
	if err != nil {
		return fmt.Sprintf("DeferredDiscoveryRESTMapper{%v}", err)
	}
	return fmt.Sprintf("DeferredDiscoveryRESTMapper{\n\t%v\n}", del)
}

// Make sure it satisfies the interface
var _ meta.RESTMapper = &DeferredDiscoveryRESTMapper{}
// shortcutExpander wraps a RESTMapper and expands resource shortcuts
// (e.g. short names learned from discovery) before delegating.
type shortcutExpander struct {
	RESTMapper meta.RESTMapper

	discoveryClient discovery.DiscoveryInterface
}

var _ meta.RESTMapper = &shortcutExpander{}

// NewShortcutExpander wraps a restmapper in a layer that expands shortcuts found via discovery
func NewShortcutExpander(delegate meta.RESTMapper, client discovery.DiscoveryInterface) meta.RESTMapper {
	return shortcutExpander{RESTMapper: delegate, discoveryClient: client}
}

// KindFor fulfills meta.RESTMapper
func (e shortcutExpander) KindFor(resource schema.GroupVersionResource) (schema.GroupVersionKind, error) {
	return e.RESTMapper.KindFor(e.expandResourceShortcut(resource))
}

// KindsFor fulfills meta.RESTMapper
func (e shortcutExpander) KindsFor(resource schema.GroupVersionResource) ([]schema.GroupVersionKind, error) {
	return e.RESTMapper.KindsFor(e.expandResourceShortcut(resource))
}

// ResourcesFor fulfills meta.RESTMapper
func (e shortcutExpander) ResourcesFor(resource schema.GroupVersionResource) ([]schema.GroupVersionResource, error) {
	return e.RESTMapper.ResourcesFor(e.expandResourceShortcut(resource))
}

// ResourceFor fulfills meta.RESTMapper
func (e shortcutExpander) ResourceFor(resource schema.GroupVersionResource) (schema.GroupVersionResource, error) {
	return e.RESTMapper.ResourceFor(e.expandResourceShortcut(resource))
}

// ResourceSingularizer fulfills meta.RESTMapper
func (e shortcutExpander) ResourceSingularizer(resource string) (string, error) {
	return e.RESTMapper.ResourceSingularizer(e.expandResourceShortcut(schema.GroupVersionResource{Resource: resource}).Resource)
}

// RESTMapping fulfills meta.RESTMapper. Note: kinds are delegated as-is,
// without shortcut expansion (shortcuts apply to resource names only).
func (e shortcutExpander) RESTMapping(gk schema.GroupKind, versions ...string) (*meta.RESTMapping, error) {
	return e.RESTMapper.RESTMapping(gk, versions...)
}

// RESTMappings fulfills meta.RESTMapper
func (e shortcutExpander) RESTMappings(gk schema.GroupKind, versions ...string) ([]*meta.RESTMapping, error) {
	return e.RESTMapper.RESTMappings(gk, versions...)
}

// getShortcutMappings returns a set of tuples which holds short names for resources.
// First the list of potential resources will be taken from the API server.
// Next we will append the hardcoded list of resources - to be backward compatible with old servers.
// NOTE that the list is ordered by group priority.
func (e shortcutExpander) getShortcutMappings() ([]*metav1.APIResourceList, []resourceShortcuts, error) {
	res := []resourceShortcuts{}
	// get server resources
	// This can return an error *and* the results it was able to find. We don't need to fail on the error.
	apiResList, err := e.discoveryClient.ServerResources()
	if err != nil {
		glog.V(1).Infof("Error loading discovery information: %v", err)
	}
	for _, apiResources := range apiResList {
		gv, err := schema.ParseGroupVersion(apiResources.GroupVersion)
		if err != nil {
			glog.V(1).Infof("Unable to parse groupversion = %s due to = %s", apiResources.GroupVersion, err.Error())
			continue
		}
		for _, apiRes := range apiResources.APIResources {
			for _, shortName := range apiRes.ShortNames {
				rs := resourceShortcuts{
					ShortForm: schema.GroupResource{Group: gv.Group, Resource: shortName},
					LongForm:  schema.GroupResource{Group: gv.Group, Resource: apiRes.Name},
				}
				res = append(res, rs)
			}
		}
	}

	return apiResList, res, nil
}

// expandResourceShortcut will return the expanded version of resource
// (something that a pkg/api/meta.RESTMapper can understand), if it is
// indeed a shortcut. If no match has been found, we will match on group prefixing.
// Lastly we will return resource unmodified.
func (e shortcutExpander) expandResourceShortcut(resource schema.GroupVersionResource) schema.GroupVersionResource {
	// get the shortcut mappings and return on first match.
	if allResources, shortcutResources, err := e.getShortcutMappings(); err == nil {
		// avoid expanding if there's an exact match to a full resource name
		for _, apiResources := range allResources {
			gv, err := schema.ParseGroupVersion(apiResources.GroupVersion)
			if err != nil {
				continue
			}
			if len(resource.Group) != 0 && resource.Group != gv.Group {
				continue
			}
			for _, apiRes := range apiResources.APIResources {
				if resource.Resource == apiRes.Name {
					return resource
				}
				if resource.Resource == apiRes.SingularName {
					return resource
				}
			}
		}

		// exact shortcut match (respecting an explicit group if given)
		for _, item := range shortcutResources {
			if len(resource.Group) != 0 && resource.Group != item.ShortForm.Group {
				continue
			}
			if resource.Resource == item.ShortForm.Resource {
				resource.Resource = item.LongForm.Resource
				resource.Group = item.LongForm.Group
				return resource
			}
		}

		// we didn't find exact match so match on group prefixing. This allows autoscal to match autoscaling
		if len(resource.Group) == 0 {
			return resource
		}
		for _, item := range shortcutResources {
			if !strings.HasPrefix(item.ShortForm.Group, resource.Group) {
				continue
			}
			if resource.Resource == item.ShortForm.Resource {
				resource.Resource = item.LongForm.Resource
				resource.Group = item.LongForm.Group
				return resource
			}
		}
	}

	// no expansion applied (or discovery failed): hand back the input.
	return resource
}

// resourceShortcuts represents a structure that holds the information how to
// transition from resource's shortcut to its full name.
type resourceShortcuts struct {
	ShortForm schema.GroupResource
	LongForm  schema.GroupResource
}
+func NewDiscoveryRESTMapper(c *rest.Config) (meta.RESTMapper, error) { + // Get a mapper + dc := discovery.NewDiscoveryClientForConfigOrDie(c) + gr, err := restmapper.GetAPIGroupResources(dc) + if err != nil { + return nil, err + } + return restmapper.NewDiscoveryRESTMapper(gr), nil +} + +// GVKForObject finds the GroupVersionKind associated with the given object, if there is only a single such GVK. +func GVKForObject(obj runtime.Object, scheme *runtime.Scheme) (schema.GroupVersionKind, error) { + gvks, isUnversioned, err := scheme.ObjectKinds(obj) + if err != nil { + return schema.GroupVersionKind{}, err + } + if isUnversioned { + return schema.GroupVersionKind{}, fmt.Errorf("cannot create a new informer for the unversioned type %T", obj) + } + + if len(gvks) < 1 { + return schema.GroupVersionKind{}, fmt.Errorf("no group-version-kinds associated with type %T", obj) + } + if len(gvks) > 1 { + // this should only trigger for things like metav1.XYZ -- + // normal versioned types should be fine + return schema.GroupVersionKind{}, fmt.Errorf( + "multiple group-version-kinds associated with type %T, refusing to guess at one", obj) + } + return gvks[0], nil +} + +// RESTClientForGVK constructs a new rest.Interface capable of accessing the resource associated +// with the given GroupVersionKind. The REST client will be configured to use the negotiated serializer from +// baseConfig, if set, otherwise a default serializer will be set. 
+func RESTClientForGVK(gvk schema.GroupVersionKind, baseConfig *rest.Config, codecs serializer.CodecFactory) (rest.Interface, error) { + cfg := createRestConfig(gvk, baseConfig) + if cfg.NegotiatedSerializer == nil { + cfg.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: codecs} + } + return rest.RESTClientFor(cfg) +} + +//createRestConfig copies the base config and updates needed fields for a new rest config +func createRestConfig(gvk schema.GroupVersionKind, baseConfig *rest.Config) *rest.Config { + gv := gvk.GroupVersion() + + cfg := rest.CopyConfig(baseConfig) + cfg.GroupVersion = &gv + if gvk.Group == "" { + cfg.APIPath = "/api" + } else { + cfg.APIPath = "/apis" + } + if cfg.UserAgent == "" { + cfg.UserAgent = rest.DefaultKubernetesUserAgent() + } + return cfg +} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/client.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/client.go new file mode 100644 index 00000000000..43facbfba66 --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/client.go @@ -0,0 +1,189 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
package client

import (
	"context"
	"fmt"
	"reflect"

	"k8s.io/apimachinery/pkg/api/meta"
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/serializer"
	"k8s.io/client-go/dynamic"
	"k8s.io/client-go/kubernetes/scheme"
	"k8s.io/client-go/rest"
	"sigs.k8s.io/controller-runtime/pkg/client/apiutil"
)

// Options are creation options for a Client
type Options struct {
	// Scheme, if provided, will be used to map go structs to GroupVersionKinds
	Scheme *runtime.Scheme

	// Mapper, if provided, will be used to map GroupVersionKinds to Resources
	Mapper meta.RESTMapper
}

// New returns a new Client using the provided config and Options.
// The returned client reads *and* writes directly from the server
// (it doesn't use object caches). It understands how to work with
// normal types (both custom resources and aggregated/built-in resources),
// as well as unstructured types.
//
// In the case of normal types, the scheme will be used to look up the
// corresponding group, version, and kind for the given type. In the
// case of unstructured types, the group, version, and kind will be extracted
// from the corresponding fields on the object.
func New(config *rest.Config, options Options) (Client, error) {
	if config == nil {
		return nil, fmt.Errorf("must provide non-nil rest.Config to client.New")
	}

	// Init a scheme if none provided
	if options.Scheme == nil {
		options.Scheme = scheme.Scheme
	}

	// Init a Mapper if none provided
	if options.Mapper == nil {
		var err error
		options.Mapper, err = apiutil.NewDiscoveryRESTMapper(config)
		if err != nil {
			return nil, err
		}
	}

	// The unstructured half of the client uses the dynamic client.
	dynamicClient, err := dynamic.NewForConfig(config)
	if err != nil {
		return nil, err
	}

	c := &client{
		typedClient: typedClient{
			cache: clientCache{
				config:         config,
				scheme:         options.Scheme,
				mapper:         options.Mapper,
				codecs:         serializer.NewCodecFactory(options.Scheme),
				resourceByType: make(map[reflect.Type]*resourceMeta),
			},
			paramCodec: runtime.NewParameterCodec(options.Scheme),
		},
		unstructuredClient: unstructuredClient{
			client:     dynamicClient,
			restMapper: options.Mapper,
		},
	}

	return c, nil
}

var _ Client = &client{}

// client is a client.Client that reads and writes directly from/to an API server. It lazily initializes
// new clients at the time they are used, and caches the client.
// Each call dispatches to the unstructured client when the object is an
// *unstructured.Unstructured (or UnstructuredList), otherwise to the typed client.
type client struct {
	typedClient        typedClient
	unstructuredClient unstructuredClient
}

// Create implements client.Client
func (c *client) Create(ctx context.Context, obj runtime.Object, opts ...CreateOptionFunc) error {
	_, ok := obj.(*unstructured.Unstructured)
	if ok {
		return c.unstructuredClient.Create(ctx, obj, opts...)
	}
	return c.typedClient.Create(ctx, obj, opts...)
}

// Update implements client.Client
func (c *client) Update(ctx context.Context, obj runtime.Object, opts ...UpdateOptionFunc) error {
	_, ok := obj.(*unstructured.Unstructured)
	if ok {
		return c.unstructuredClient.Update(ctx, obj, opts...)
	}
	return c.typedClient.Update(ctx, obj, opts...)
}

// Delete implements client.Client
func (c *client) Delete(ctx context.Context, obj runtime.Object, opts ...DeleteOptionFunc) error {
	_, ok := obj.(*unstructured.Unstructured)
	if ok {
		return c.unstructuredClient.Delete(ctx, obj, opts...)
	}
	return c.typedClient.Delete(ctx, obj, opts...)
}

// Patch implements client.Client
func (c *client) Patch(ctx context.Context, obj runtime.Object, patch Patch, opts ...PatchOptionFunc) error {
	_, ok := obj.(*unstructured.Unstructured)
	if ok {
		return c.unstructuredClient.Patch(ctx, obj, patch, opts...)
	}
	return c.typedClient.Patch(ctx, obj, patch, opts...)
}

// Get implements client.Client
func (c *client) Get(ctx context.Context, key ObjectKey, obj runtime.Object) error {
	_, ok := obj.(*unstructured.Unstructured)
	if ok {
		return c.unstructuredClient.Get(ctx, key, obj)
	}
	return c.typedClient.Get(ctx, key, obj)
}

// List implements client.Client
func (c *client) List(ctx context.Context, obj runtime.Object, opts ...ListOptionFunc) error {
	// Note: list dispatch keys on *UnstructuredList, not *Unstructured.
	_, ok := obj.(*unstructured.UnstructuredList)
	if ok {
		return c.unstructuredClient.List(ctx, obj, opts...)
	}
	return c.typedClient.List(ctx, obj, opts...)
}

// Status implements client.StatusClient
func (c *client) Status() StatusWriter {
	return &statusWriter{client: c}
}

// statusWriter is client.StatusWriter that writes status subresource
type statusWriter struct {
	client *client
}

// ensure statusWriter implements client.StatusWriter
var _ StatusWriter = &statusWriter{}

// Update implements client.StatusWriter
func (sw *statusWriter) Update(ctx context.Context, obj runtime.Object, opts ...UpdateOptionFunc) error {
	_, ok := obj.(*unstructured.Unstructured)
	if ok {
		return sw.client.unstructuredClient.UpdateStatus(ctx, obj, opts...)
	}
	return sw.client.typedClient.UpdateStatus(ctx, obj, opts...)
}
+} + +// Patch implements client.Client +func (sw *statusWriter) Patch(ctx context.Context, obj runtime.Object, patch Patch, opts ...PatchOptionFunc) error { + _, ok := obj.(*unstructured.Unstructured) + if ok { + return sw.client.unstructuredClient.PatchStatus(ctx, obj, patch, opts...) + } + return sw.client.typedClient.PatchStatus(ctx, obj, patch, opts...) +} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/client_cache.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/client_cache.go new file mode 100644 index 00000000000..2a1ff05d503 --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/client_cache.go @@ -0,0 +1,145 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package client + +import ( + "reflect" + "strings" + "sync" + + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/runtime/serializer" + "k8s.io/client-go/rest" + "sigs.k8s.io/controller-runtime/pkg/client/apiutil" +) + +// clientCache creates and caches rest clients and metadata for Kubernetes types +type clientCache struct { + // config is the rest.Config to talk to an apiserver + config *rest.Config + + // scheme maps go structs to GroupVersionKinds + scheme *runtime.Scheme + + // mapper maps GroupVersionKinds to Resources + mapper meta.RESTMapper + + // codecs are used to create a REST client for a gvk + codecs serializer.CodecFactory + + // resourceByType caches type metadata + resourceByType map[reflect.Type]*resourceMeta + mu sync.RWMutex +} + +// newResource maps obj to a Kubernetes Resource and constructs a client for that Resource. +// If the object is a list, the resource represents the item's type instead. +func (c *clientCache) newResource(obj runtime.Object) (*resourceMeta, error) { + gvk, err := apiutil.GVKForObject(obj, c.scheme) + if err != nil { + return nil, err + } + + if strings.HasSuffix(gvk.Kind, "List") && meta.IsListType(obj) { + // if this was a list, treat it as a request for the item's resource + gvk.Kind = gvk.Kind[:len(gvk.Kind)-4] + } + + client, err := apiutil.RESTClientForGVK(gvk, c.config, c.codecs) + if err != nil { + return nil, err + } + mapping, err := c.mapper.RESTMapping(gvk.GroupKind(), gvk.Version) + if err != nil { + return nil, err + } + return &resourceMeta{Interface: client, mapping: mapping, gvk: gvk}, nil +} + +// getResource returns the resource meta information for the given type of object. +// If the object is a list, the resource represents the item's type instead. 
+func (c *clientCache) getResource(obj runtime.Object) (*resourceMeta, error) { + typ := reflect.TypeOf(obj) + + // It's better to do creation work twice than to not let multiple + // people make requests at once + c.mu.RLock() + r, known := c.resourceByType[typ] + c.mu.RUnlock() + + if known { + return r, nil + } + + // Initialize a new Client + c.mu.Lock() + defer c.mu.Unlock() + r, err := c.newResource(obj) + if err != nil { + return nil, err + } + c.resourceByType[typ] = r + return r, err +} + +// getObjMeta returns objMeta containing both type and object metadata and state +func (c *clientCache) getObjMeta(obj runtime.Object) (*objMeta, error) { + r, err := c.getResource(obj) + if err != nil { + return nil, err + } + m, err := meta.Accessor(obj) + if err != nil { + return nil, err + } + return &objMeta{resourceMeta: r, Object: m}, err +} + +// resourceMeta caches state for a Kubernetes type. +type resourceMeta struct { + // client is the rest client used to talk to the apiserver + rest.Interface + // gvk is the GroupVersionKind of the resourceMeta + gvk schema.GroupVersionKind + // mapping is the rest mapping + mapping *meta.RESTMapping +} + +// isNamespaced returns true if the type is namespaced +func (r *resourceMeta) isNamespaced() bool { + if r.mapping.Scope.Name() == meta.RESTScopeNameRoot { + return false + } + return true +} + +// resource returns the resource name of the type +func (r *resourceMeta) resource() string { + return r.mapping.Resource.Resource +} + +// objMeta stores type and object information about a Kubernetes type +type objMeta struct { + // resourceMeta contains type information for the object + *resourceMeta + + // Object contains meta data for the object instance + metav1.Object +} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/doc.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/doc.go new file mode 100644 index 00000000000..6c13af211f0 --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/doc.go 
@@ -0,0 +1,49 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package client contains functionality for interacting with Kubernetes API +// servers. +// +// Clients +// +// Clients are split into two interfaces -- Readers and Writers. Readers +// get and list, while writers create, update, and delete. +// +// The New function can be used to create a new client that talks directly +// to the API server. +// +// A common pattern in Kubernetes to read from a cache and write to the API +// server. This pattern is covered by the DelegatingClient type, which can +// be used to have a client whose Reader is different from the Writer. +// +// Options +// +// Many client operations in Kubernetes support options. These options are +// represented as variadic arguments at the end of a given method call. +// For instance, to use a label selector on list, you can call +// err := someReader.List(context.Background(), &podList, client.MatchingLabels(someLabelMap)) +// +// Indexing +// +// Indexes may be added to caches using a FieldIndexer. This allows you to easily +// and efficiently look up objects with certain properties. You can then make +// use of the index by specifying a field selector on calls to List on the Reader +// corresponding to the given Cache. 
+// +// For instance, a Secret controller might have an index on the +// `.spec.volumes.secret.secretName` field in Pod objects, so that it could +// easily look up all pods that reference a given secret. +package client diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/interfaces.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/interfaces.go new file mode 100644 index 00000000000..e4ca8be85cf --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/interfaces.go @@ -0,0 +1,132 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package client + +import ( + "context" + + apierrors "k8s.io/apimachinery/pkg/api/errors" + + "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" +) + +// ObjectKey identifies a Kubernetes Object. +type ObjectKey = types.NamespacedName + +// ObjectKeyFromObject returns the ObjectKey given a runtime.Object +func ObjectKeyFromObject(obj runtime.Object) (ObjectKey, error) { + accessor, err := meta.Accessor(obj) + if err != nil { + return ObjectKey{}, err + } + return ObjectKey{Namespace: accessor.GetNamespace(), Name: accessor.GetName()}, nil +} + +// Patch is a patch that can be applied to a Kubernetes object. +type Patch interface { + // Type is the PatchType of the patch. + Type() types.PatchType + // Data is the raw data representing the patch. 
+ Data(obj runtime.Object) ([]byte, error) +} + +// TODO(directxman12): is there a sane way to deal with get/delete options? + +// Reader knows how to read and list Kubernetes objects. +type Reader interface { + // Get retrieves an obj for the given object key from the Kubernetes Cluster. + // obj must be a struct pointer so that obj can be updated with the response + // returned by the Server. + Get(ctx context.Context, key ObjectKey, obj runtime.Object) error + + // List retrieves list of objects for a given namespace and list options. On a + // successful call, Items field in the list will be populated with the + // result returned from the server. + List(ctx context.Context, list runtime.Object, opts ...ListOptionFunc) error +} + +// Writer knows how to create, delete, and update Kubernetes objects. +type Writer interface { + // Create saves the object obj in the Kubernetes cluster. + Create(ctx context.Context, obj runtime.Object, opts ...CreateOptionFunc) error + + // Delete deletes the given obj from Kubernetes cluster. + Delete(ctx context.Context, obj runtime.Object, opts ...DeleteOptionFunc) error + + // Update updates the given obj in the Kubernetes cluster. obj must be a + // struct pointer so that obj can be updated with the content returned by the Server. + Update(ctx context.Context, obj runtime.Object, opts ...UpdateOptionFunc) error + + // Patch patches the given obj in the Kubernetes cluster. obj must be a + // struct pointer so that obj can be updated with the content returned by the Server. + Patch(ctx context.Context, obj runtime.Object, patch Patch, opts ...PatchOptionFunc) error +} + +// StatusClient knows how to create a client which can update status subresource +// for kubernetes objects. +type StatusClient interface { + Status() StatusWriter +} + +// StatusWriter knows how to update status subresource of a Kubernetes object. 
+type StatusWriter interface { + // Update updates the fields corresponding to the status subresource for the + // given obj. obj must be a struct pointer so that obj can be updated + // with the content returned by the Server. + Update(ctx context.Context, obj runtime.Object, opts ...UpdateOptionFunc) error + + // Patch patches the given object's subresource. obj must be a struct + // pointer so that obj can be updated with the content returned by the + // Server. + Patch(ctx context.Context, obj runtime.Object, patch Patch, opts ...PatchOptionFunc) error +} + +// Client knows how to perform CRUD operations on Kubernetes objects. +type Client interface { + Reader + Writer + StatusClient +} + +// IndexerFunc knows how to take an object and turn it into a series +// of non-namespaced keys. Namespaced objects are automatically given +// namespaced and non-spaced variants, so keys do not need to include namespace. +type IndexerFunc func(runtime.Object) []string + +// FieldIndexer knows how to index over a particular "field" such that it +// can later be used by a field selector. +type FieldIndexer interface { + // IndexFields adds an index with the given field name on the given object type + // by using the given function to extract the value for that field. If you want + // compatibility with the Kubernetes API server, only return one key, and only use + // fields that the API server supports. Otherwise, you can return multiple keys, + // and "equality" in the field selector means that at least one key matches the value. + // The FieldIndexer will automatically take care of indexing over namespace + // and supporting efficient all-namespace queries. + IndexField(obj runtime.Object, field string, extractValue IndexerFunc) error +} + +// IgnoreNotFound returns nil on NotFound errors. +// All other values that are not NotFound errors or nil are returned unmodified. 
+func IgnoreNotFound(err error) error { + if apierrors.IsNotFound(err) { + return nil + } + return err +} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/options.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/options.go new file mode 100644 index 00000000000..46ddf66f978 --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/options.go @@ -0,0 +1,417 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package client + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/fields" + "k8s.io/apimachinery/pkg/labels" +) + +// CreateOptions contains options for create requests. It's generally a subset +// of metav1.CreateOptions. +type CreateOptions struct { + // When present, indicates that modifications should not be + // persisted. An invalid or unrecognized dryRun directive will + // result in an error response and no further processing of the + // request. Valid values are: + // - All: all dry run stages will be processed + DryRun []string + + // Raw represents raw CreateOptions, as passed to the API server. + Raw *metav1.CreateOptions +} + +// AsCreateOptions returns these options as a metav1.CreateOptions. +// This may mutate the Raw field. 
+func (o *CreateOptions) AsCreateOptions() *metav1.CreateOptions { + + if o == nil { + return &metav1.CreateOptions{} + } + if o.Raw == nil { + o.Raw = &metav1.CreateOptions{} + } + + o.Raw.DryRun = o.DryRun + return o.Raw +} + +// ApplyOptions executes the given CreateOptionFuncs and returns the mutated +// CreateOptions. +func (o *CreateOptions) ApplyOptions(optFuncs []CreateOptionFunc) *CreateOptions { + for _, optFunc := range optFuncs { + optFunc(o) + } + return o +} + +// CreateOptionFunc is a function that mutates a CreateOptions struct. It implements +// the functional options pattern. See +// https://github.com/tmrts/go-patterns/blob/master/idiom/functional-options.md. +type CreateOptionFunc func(*CreateOptions) + +// CreateDryRunAll is a functional option that sets the DryRun +// field of a CreateOptions struct to metav1.DryRunAll. +var CreateDryRunAll CreateOptionFunc = func(opts *CreateOptions) { + opts.DryRun = []string{metav1.DryRunAll} +} + +// DeleteOptions contains options for delete requests. It's generally a subset +// of metav1.DeleteOptions. +type DeleteOptions struct { + // GracePeriodSeconds is the duration in seconds before the object should be + // deleted. Value must be non-negative integer. The value zero indicates + // delete immediately. If this value is nil, the default grace period for the + // specified type will be used. + GracePeriodSeconds *int64 + + // Preconditions must be fulfilled before a deletion is carried out. If not + // possible, a 409 Conflict status will be returned. + Preconditions *metav1.Preconditions + + // PropagationPolicy determined whether and how garbage collection will be + // performed. Either this field or OrphanDependents may be set, but not both. + // The default policy is decided by the existing finalizer set in the + // metadata.finalizers and the resource-specific default policy. 
+ // Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - + // allow the garbage collector to delete the dependents in the background; + // 'Foreground' - a cascading policy that deletes all dependents in the + // foreground. + PropagationPolicy *metav1.DeletionPropagation + + // Raw represents raw DeleteOptions, as passed to the API server. + Raw *metav1.DeleteOptions +} + +// AsDeleteOptions returns these options as a metav1.DeleteOptions. +// This may mutate the Raw field. +func (o *DeleteOptions) AsDeleteOptions() *metav1.DeleteOptions { + + if o == nil { + return &metav1.DeleteOptions{} + } + if o.Raw == nil { + o.Raw = &metav1.DeleteOptions{} + } + + o.Raw.GracePeriodSeconds = o.GracePeriodSeconds + o.Raw.Preconditions = o.Preconditions + o.Raw.PropagationPolicy = o.PropagationPolicy + return o.Raw +} + +// ApplyOptions executes the given DeleteOptionFuncs and returns the mutated +// DeleteOptions. +func (o *DeleteOptions) ApplyOptions(optFuncs []DeleteOptionFunc) *DeleteOptions { + for _, optFunc := range optFuncs { + optFunc(o) + } + return o +} + +// DeleteOptionFunc is a function that mutates a DeleteOptions struct. It implements +// the functional options pattern. See +// https://github.com/tmrts/go-patterns/blob/master/idiom/functional-options.md. +type DeleteOptionFunc func(*DeleteOptions) + +// GracePeriodSeconds is a functional option that sets the GracePeriodSeconds +// field of a DeleteOptions struct. +func GracePeriodSeconds(gp int64) DeleteOptionFunc { + return func(opts *DeleteOptions) { + opts.GracePeriodSeconds = &gp + } +} + +// Preconditions is a functional option that sets the Preconditions field of a +// DeleteOptions struct. +func Preconditions(p *metav1.Preconditions) DeleteOptionFunc { + return func(opts *DeleteOptions) { + opts.Preconditions = p + } +} + +// PropagationPolicy is a functional option that sets the PropagationPolicy +// field of a DeleteOptions struct. 
+func PropagationPolicy(p metav1.DeletionPropagation) DeleteOptionFunc { + return func(opts *DeleteOptions) { + opts.PropagationPolicy = &p + } +} + +// ListOptions contains options for limiting or filtering results. +// It's generally a subset of metav1.ListOptions, with support for +// pre-parsed selectors (since generally, selectors will be executed +// against the cache). +type ListOptions struct { + // LabelSelector filters results by label. Use SetLabelSelector to + // set from raw string form. + LabelSelector labels.Selector + // FieldSelector filters results by a particular field. In order + // to use this with cache-based implementations, restrict usage to + // a single field-value pair that's been added to the indexers. + FieldSelector fields.Selector + + // Namespace represents the namespace to list for, or empty for + // non-namespaced objects, or to list across all namespaces. + Namespace string + + // Raw represents raw ListOptions, as passed to the API server. Note + // that these may not be respected by all implementations of interface, + // and the LabelSelector and FieldSelector fields are ignored. + Raw *metav1.ListOptions +} + +// SetLabelSelector sets this the label selector of these options +// from a string form of the selector. +func (o *ListOptions) SetLabelSelector(selRaw string) error { + sel, err := labels.Parse(selRaw) + if err != nil { + return err + } + o.LabelSelector = sel + return nil +} + +// SetFieldSelector sets this the label selector of these options +// from a string form of the selector. +func (o *ListOptions) SetFieldSelector(selRaw string) error { + sel, err := fields.ParseSelector(selRaw) + if err != nil { + return err + } + o.FieldSelector = sel + return nil +} + +// AsListOptions returns these options as a flattened metav1.ListOptions. +// This may mutate the Raw field. 
+func (o *ListOptions) AsListOptions() *metav1.ListOptions { + if o == nil { + return &metav1.ListOptions{} + } + if o.Raw == nil { + o.Raw = &metav1.ListOptions{} + } + if o.LabelSelector != nil { + o.Raw.LabelSelector = o.LabelSelector.String() + } + if o.FieldSelector != nil { + o.Raw.FieldSelector = o.FieldSelector.String() + } + return o.Raw +} + +// ApplyOptions executes the given ListOptionFuncs and returns the mutated +// ListOptions. +func (o *ListOptions) ApplyOptions(optFuncs []ListOptionFunc) *ListOptions { + for _, optFunc := range optFuncs { + optFunc(o) + } + return o +} + +// ListOptionFunc is a function that mutates a ListOptions struct. It implements +// the functional options pattern. See +// https://github.com/tmrts/go-patterns/blob/master/idiom/functional-options.md. +type ListOptionFunc func(*ListOptions) + +// MatchingLabels is a convenience function that sets the label selector +// to match the given labels, and then returns the options. +// It mutates the list options. +func (o *ListOptions) MatchingLabels(lbls map[string]string) *ListOptions { + sel := labels.SelectorFromSet(lbls) + o.LabelSelector = sel + return o +} + +// MatchingField is a convenience function that sets the field selector +// to match the given field, and then returns the options. +// It mutates the list options. +func (o *ListOptions) MatchingField(name, val string) *ListOptions { + sel := fields.SelectorFromSet(fields.Set{name: val}) + o.FieldSelector = sel + return o +} + +// InNamespace is a convenience function that sets the namespace, +// and then returns the options. It mutates the list options. +func (o *ListOptions) InNamespace(ns string) *ListOptions { + o.Namespace = ns + return o +} + +// MatchingLabels is a functional option that sets the LabelSelector field of +// a ListOptions struct. 
+func MatchingLabels(lbls map[string]string) ListOptionFunc { + sel := labels.SelectorFromSet(lbls) + return func(opts *ListOptions) { + opts.LabelSelector = sel + } +} + +// MatchingField is a functional option that sets the FieldSelector field of +// a ListOptions struct. +func MatchingField(name, val string) ListOptionFunc { + sel := fields.SelectorFromSet(fields.Set{name: val}) + return func(opts *ListOptions) { + opts.FieldSelector = sel + } +} + +// InNamespace is a functional option that sets the Namespace field of +// a ListOptions struct. +func InNamespace(ns string) ListOptionFunc { + return func(opts *ListOptions) { + opts.Namespace = ns + } +} + +// UseListOptions is a functional option that replaces the fields of a +// ListOptions struct with those of a different ListOptions struct. +// +// Example: +// cl.List(ctx, list, client.UseListOptions(lo.InNamespace(ns).MatchingLabels(labels))) +func UseListOptions(newOpts *ListOptions) ListOptionFunc { + return func(opts *ListOptions) { + *opts = *newOpts + } +} + +// UpdateOptions contains options for create requests. It's generally a subset +// of metav1.UpdateOptions. +type UpdateOptions struct { + // When present, indicates that modifications should not be + // persisted. An invalid or unrecognized dryRun directive will + // result in an error response and no further processing of the + // request. Valid values are: + // - All: all dry run stages will be processed + DryRun []string + + // Raw represents raw UpdateOptions, as passed to the API server. + Raw *metav1.UpdateOptions +} + +// AsUpdateOptions returns these options as a metav1.UpdateOptions. +// This may mutate the Raw field. +func (o *UpdateOptions) AsUpdateOptions() *metav1.UpdateOptions { + + if o == nil { + return &metav1.UpdateOptions{} + } + if o.Raw == nil { + o.Raw = &metav1.UpdateOptions{} + } + + o.Raw.DryRun = o.DryRun + return o.Raw +} + +// ApplyOptions executes the given UpdateOptionFuncs and returns the mutated +// UpdateOptions. 
+func (o *UpdateOptions) ApplyOptions(optFuncs []UpdateOptionFunc) *UpdateOptions { + for _, optFunc := range optFuncs { + optFunc(o) + } + return o +} + +// UpdateOptionFunc is a function that mutates a UpdateOptions struct. It implements +// the functional options pattern. See +// https://github.com/tmrts/go-patterns/blob/master/idiom/functional-options.md. +type UpdateOptionFunc func(*UpdateOptions) + +// UpdateDryRunAll is a functional option that sets the DryRun +// field of a UpdateOptions struct to metav1.DryRunAll. +var UpdateDryRunAll UpdateOptionFunc = func(opts *UpdateOptions) { + opts.DryRun = []string{metav1.DryRunAll} +} + +// PatchOptions contains options for patch requests. +type PatchOptions struct { + // When present, indicates that modifications should not be + // persisted. An invalid or unrecognized dryRun directive will + // result in an error response and no further processing of the + // request. Valid values are: + // - All: all dry run stages will be processed + DryRun []string + + // Force is going to "force" Apply requests. It means user will + // re-acquire conflicting fields owned by other people. Force + // flag must be unset for non-apply patch requests. + // +optional + Force *bool + + // FieldManager is the name of the user or component submitting + // this request. It must be set with server-side apply. + FieldManager string + + // Raw represents raw PatchOptions, as passed to the API server. + Raw *metav1.PatchOptions +} + +// ApplyOptions executes the given PatchOptionFuncs, mutating these PatchOptions. +// It returns the mutated PatchOptions for convenience. +func (o *PatchOptions) ApplyOptions(optFuncs []PatchOptionFunc) *PatchOptions { + for _, optFunc := range optFuncs { + optFunc(o) + } + return o +} + +// AsPatchOptions returns these options as a metav1.PatchOptions. +// This may mutate the Raw field. 
+func (o *PatchOptions) AsPatchOptions() *metav1.PatchOptions { + if o == nil { + return &metav1.PatchOptions{} + } + if o.Raw == nil { + o.Raw = &metav1.PatchOptions{} + } + + o.Raw.DryRun = o.DryRun + o.Raw.Force = o.Force + o.Raw.FieldManager = o.FieldManager + return o.Raw +} + +// PatchOptionFunc is a function that mutates a PatchOptions struct. It implements +// the functional options pattern. See +// https://github.com/tmrts/go-patterns/blob/master/idiom/functional-options.md. +type PatchOptionFunc func(*PatchOptions) + +// ForceOwnership sets the Force option, indicating that +// in case of conflicts with server-side apply, the client should +// acquire ownership of the conflicting field. Most controllers +// should use this. +var ForceOwnership PatchOptionFunc = func(opts *PatchOptions) { + definitelyTrue := true + opts.Force = &definitelyTrue +} + +// PatchDryRunAll is a functional option that sets the DryRun +// field of a PatchOptions struct to metav1.DryRunAll. +var PatchDryRunAll PatchOptionFunc = func(opts *PatchOptions) { + opts.DryRun = []string{metav1.DryRunAll} +} + +// FieldOwner set the field manager name for the given server-side apply patch. +func FieldOwner(name string) PatchOptionFunc { + return func(opts *PatchOptions) { + opts.FieldManager = name + } +} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/patch.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/patch.go new file mode 100644 index 00000000000..ab7efc287b0 --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/patch.go @@ -0,0 +1,95 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package client + +import ( + jsonpatch "github.com/evanphx/json-patch" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/json" +) + +var ( + // Apply uses server-side apply to patch the given object. + Apply = applyPatch{} +) + +type patch struct { + patchType types.PatchType + data []byte +} + +// Type implements Patch. +func (s *patch) Type() types.PatchType { + return s.patchType +} + +// Data implements Patch. +func (s *patch) Data(obj runtime.Object) ([]byte, error) { + return s.data, nil +} + +// ConstantPatch constructs a new Patch with the given PatchType and data. +func ConstantPatch(patchType types.PatchType, data []byte) Patch { + return &patch{patchType, data} +} + +type mergeFromPatch struct { + from runtime.Object +} + +// Type implements patch. +func (s *mergeFromPatch) Type() types.PatchType { + return types.MergePatchType +} + +// Data implements Patch. +func (s *mergeFromPatch) Data(obj runtime.Object) ([]byte, error) { + originalJSON, err := json.Marshal(s.from) + if err != nil { + return nil, err + } + + modifiedJSON, err := json.Marshal(obj) + if err != nil { + return nil, err + } + + return jsonpatch.CreateMergePatch(originalJSON, modifiedJSON) +} + +// MergeFrom creates a Patch that patches using the merge-patch strategy with the given object as base. +func MergeFrom(obj runtime.Object) Patch { + return &mergeFromPatch{obj} +} + +// applyPatch uses server-side apply to patch the object. +type applyPatch struct{} + +// Type implements Patch. 
+func (p applyPatch) Type() types.PatchType { + return types.ApplyPatchType +} + +// Data implements Patch. +func (p applyPatch) Data(obj runtime.Object) ([]byte, error) { + // NB(directxman12): we might techically want to be using an actual encoder + // here (in case some more performant encoder is introduced) but this is + // correct and sufficient for our uses (it's what the JSON serializer in + // client-go does, more-or-less). + return json.Marshal(obj) +} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/split.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/split.go new file mode 100644 index 00000000000..db7f16a717b --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/split.go @@ -0,0 +1,61 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package client + +import ( + "context" + + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" +) + +// DelegatingClient forms a Client by composing separate reader, writer and +// statusclient interfaces. This way, you can have an Client that reads from a +// cache and writes to the API server. +type DelegatingClient struct { + Reader + Writer + StatusClient +} + +// DelegatingReader forms a Reader that will cause Get and List requests for +// unstructured types to use the ClientReader while requests for any other type +// of object with use the CacheReader. 
This avoids accidentally caching the +// entire cluster in the common case of loading arbitrary unstructured objects +// (e.g. from OwnerReferences). +type DelegatingReader struct { + CacheReader Reader + ClientReader Reader +} + +// Get retrieves an obj for a given object key from the Kubernetes Cluster. +func (d *DelegatingReader) Get(ctx context.Context, key ObjectKey, obj runtime.Object) error { + _, isUnstructured := obj.(*unstructured.Unstructured) + if isUnstructured { + return d.ClientReader.Get(ctx, key, obj) + } + return d.CacheReader.Get(ctx, key, obj) +} + +// List retrieves list of objects for a given namespace and list options. +func (d *DelegatingReader) List(ctx context.Context, list runtime.Object, opts ...ListOptionFunc) error { + _, isUnstructured := list.(*unstructured.UnstructuredList) + if isUnstructured { + return d.ClientReader.List(ctx, list, opts...) + } + return d.CacheReader.List(ctx, list, opts...) +} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/typed_client.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/typed_client.go new file mode 100644 index 00000000000..76f429b6505 --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/typed_client.go @@ -0,0 +1,188 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package client + +import ( + "context" + + "k8s.io/apimachinery/pkg/runtime" +) + +// client is a client.Client that reads and writes directly from/to an API server. 
It lazily initializes +// new clients at the time they are used, and caches the client. +type typedClient struct { + cache clientCache + paramCodec runtime.ParameterCodec +} + +// Create implements client.Client +func (c *typedClient) Create(ctx context.Context, obj runtime.Object, opts ...CreateOptionFunc) error { + o, err := c.cache.getObjMeta(obj) + if err != nil { + return err + } + + createOpts := &CreateOptions{} + createOpts.ApplyOptions(opts) + return o.Post(). + NamespaceIfScoped(o.GetNamespace(), o.isNamespaced()). + Resource(o.resource()). + Body(obj). + VersionedParams(createOpts.AsCreateOptions(), c.paramCodec). + Context(ctx). + Do(). + Into(obj) +} + +// Update implements client.Client +func (c *typedClient) Update(ctx context.Context, obj runtime.Object, opts ...UpdateOptionFunc) error { + o, err := c.cache.getObjMeta(obj) + if err != nil { + return err + } + + updateOpts := &UpdateOptions{} + updateOpts.ApplyOptions(opts) + return o.Put(). + NamespaceIfScoped(o.GetNamespace(), o.isNamespaced()). + Resource(o.resource()). + Name(o.GetName()). + Body(obj). + VersionedParams(updateOpts.AsUpdateOptions(), c.paramCodec). + Context(ctx). + Do(). + Into(obj) +} + +// Delete implements client.Client +func (c *typedClient) Delete(ctx context.Context, obj runtime.Object, opts ...DeleteOptionFunc) error { + o, err := c.cache.getObjMeta(obj) + if err != nil { + return err + } + + deleteOpts := DeleteOptions{} + return o.Delete(). + NamespaceIfScoped(o.GetNamespace(), o.isNamespaced()). + Resource(o.resource()). + Name(o.GetName()). + Body(deleteOpts.ApplyOptions(opts).AsDeleteOptions()). + Context(ctx). + Do(). 
+ Error() +} + +// Patch implements client.Client +func (c *typedClient) Patch(ctx context.Context, obj runtime.Object, patch Patch, opts ...PatchOptionFunc) error { + o, err := c.cache.getObjMeta(obj) + if err != nil { + return err + } + + data, err := patch.Data(obj) + if err != nil { + return err + } + + patchOpts := &PatchOptions{} + return o.Patch(patch.Type()). + NamespaceIfScoped(o.GetNamespace(), o.isNamespaced()). + Resource(o.resource()). + Name(o.GetName()). + VersionedParams(patchOpts.ApplyOptions(opts).AsPatchOptions(), c.paramCodec). + Body(data). + Context(ctx). + Do(). + Into(obj) +} + +// Get implements client.Client +func (c *typedClient) Get(ctx context.Context, key ObjectKey, obj runtime.Object) error { + r, err := c.cache.getResource(obj) + if err != nil { + return err + } + return r.Get(). + NamespaceIfScoped(key.Namespace, r.isNamespaced()). + Resource(r.resource()). + Context(ctx). + Name(key.Name).Do().Into(obj) +} + +// List implements client.Client +func (c *typedClient) List(ctx context.Context, obj runtime.Object, opts ...ListOptionFunc) error { + r, err := c.cache.getResource(obj) + if err != nil { + return err + } + listOpts := ListOptions{} + listOpts.ApplyOptions(opts) + return r.Get(). + NamespaceIfScoped(listOpts.Namespace, r.isNamespaced()). + Resource(r.resource()). + VersionedParams(listOpts.AsListOptions(), c.paramCodec). + Context(ctx). + Do(). + Into(obj) +} + +// UpdateStatus used by StatusWriter to write status. +func (c *typedClient) UpdateStatus(ctx context.Context, obj runtime.Object, opts ...UpdateOptionFunc) error { + o, err := c.cache.getObjMeta(obj) + if err != nil { + return err + } + // TODO(droot): examine the returned error and check if it error needs to be + // wrapped to improve the UX ? + // It will be nice to receive an error saying the object doesn't implement + // status subresource and check CRD definition + return o.Put(). + NamespaceIfScoped(o.GetNamespace(), o.isNamespaced()). + Resource(o.resource()). 
+ Name(o.GetName()). + SubResource("status"). + Body(obj). + VersionedParams((&UpdateOptions{}).ApplyOptions(opts).AsUpdateOptions(), c.paramCodec). + Context(ctx). + Do(). + Into(obj) +} + +// PatchStatus used by StatusWriter to write status. +func (c *typedClient) PatchStatus(ctx context.Context, obj runtime.Object, patch Patch, opts ...PatchOptionFunc) error { + o, err := c.cache.getObjMeta(obj) + if err != nil { + return err + } + + data, err := patch.Data(obj) + if err != nil { + return err + } + + patchOpts := &PatchOptions{} + return o.Patch(patch.Type()). + NamespaceIfScoped(o.GetNamespace(), o.isNamespaced()). + Resource(o.resource()). + Name(o.GetName()). + SubResource("status"). + Body(data). + VersionedParams(patchOpts.ApplyOptions(opts).AsPatchOptions(), c.paramCodec). + Context(ctx). + Do(). + Into(obj) +} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/unstructured_client.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/unstructured_client.go new file mode 100644 index 00000000000..f13dd18854d --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/unstructured_client.go @@ -0,0 +1,212 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package client + +import ( + "context" + "fmt" + "strings" + + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/client-go/dynamic" +) + +// client is a client.Client that reads and writes directly from/to an API server. It lazily initializes +// new clients at the time they are used, and caches the client. +type unstructuredClient struct { + client dynamic.Interface + restMapper meta.RESTMapper +} + +// Create implements client.Client +func (uc *unstructuredClient) Create(_ context.Context, obj runtime.Object, opts ...CreateOptionFunc) error { + u, ok := obj.(*unstructured.Unstructured) + if !ok { + return fmt.Errorf("unstructured client did not understand object: %T", obj) + } + createOpts := CreateOptions{} + createOpts.ApplyOptions(opts) + r, err := uc.getResourceInterface(u.GroupVersionKind(), u.GetNamespace()) + if err != nil { + return err + } + i, err := r.Create(u, *createOpts.AsCreateOptions()) + if err != nil { + return err + } + u.Object = i.Object + return nil +} + +// Update implements client.Client +func (uc *unstructuredClient) Update(_ context.Context, obj runtime.Object, opts ...UpdateOptionFunc) error { + u, ok := obj.(*unstructured.Unstructured) + if !ok { + return fmt.Errorf("unstructured client did not understand object: %T", obj) + } + updateOpts := UpdateOptions{} + updateOpts.ApplyOptions(opts) + r, err := uc.getResourceInterface(u.GroupVersionKind(), u.GetNamespace()) + if err != nil { + return err + } + i, err := r.Update(u, *updateOpts.AsUpdateOptions()) + if err != nil { + return err + } + u.Object = i.Object + return nil +} + +// Delete implements client.Client +func (uc *unstructuredClient) Delete(_ context.Context, obj runtime.Object, opts ...DeleteOptionFunc) error { + u, ok := obj.(*unstructured.Unstructured) + if !ok { + return 
fmt.Errorf("unstructured client did not understand object: %T", obj) + } + r, err := uc.getResourceInterface(u.GroupVersionKind(), u.GetNamespace()) + if err != nil { + return err + } + deleteOpts := DeleteOptions{} + err = r.Delete(u.GetName(), deleteOpts.ApplyOptions(opts).AsDeleteOptions()) + return err +} + +// Patch implements client.Client +func (uc *unstructuredClient) Patch(_ context.Context, obj runtime.Object, patch Patch, opts ...PatchOptionFunc) error { + u, ok := obj.(*unstructured.Unstructured) + if !ok { + return fmt.Errorf("unstructured client did not understand object: %T", obj) + } + r, err := uc.getResourceInterface(u.GroupVersionKind(), u.GetNamespace()) + if err != nil { + return err + } + + data, err := patch.Data(obj) + if err != nil { + return err + } + + patchOpts := &PatchOptions{} + i, err := r.Patch(u.GetName(), patch.Type(), data, *patchOpts.ApplyOptions(opts).AsPatchOptions()) + if err != nil { + return err + } + u.Object = i.Object + return nil +} + +// Get implements client.Client +func (uc *unstructuredClient) Get(_ context.Context, key ObjectKey, obj runtime.Object) error { + u, ok := obj.(*unstructured.Unstructured) + if !ok { + return fmt.Errorf("unstructured client did not understand object: %T", obj) + } + r, err := uc.getResourceInterface(u.GroupVersionKind(), key.Namespace) + if err != nil { + return err + } + i, err := r.Get(key.Name, metav1.GetOptions{}) + if err != nil { + return err + } + u.Object = i.Object + return nil +} + +// List implements client.Client +func (uc *unstructuredClient) List(_ context.Context, obj runtime.Object, opts ...ListOptionFunc) error { + u, ok := obj.(*unstructured.UnstructuredList) + if !ok { + return fmt.Errorf("unstructured client did not understand object: %T", obj) + } + gvk := u.GroupVersionKind() + if strings.HasSuffix(gvk.Kind, "List") { + gvk.Kind = gvk.Kind[:len(gvk.Kind)-4] + } + listOpts := ListOptions{} + listOpts.ApplyOptions(opts) + r, err := uc.getResourceInterface(gvk, 
listOpts.Namespace) + if err != nil { + return err + } + + i, err := r.List(*listOpts.AsListOptions()) + if err != nil { + return err + } + u.Items = i.Items + u.Object = i.Object + return nil +} + +func (uc *unstructuredClient) UpdateStatus(_ context.Context, obj runtime.Object, opts ...UpdateOptionFunc) error { + u, ok := obj.(*unstructured.Unstructured) + if !ok { + return fmt.Errorf("unstructured client did not understand object: %T", obj) + } + r, err := uc.getResourceInterface(u.GroupVersionKind(), u.GetNamespace()) + if err != nil { + return err + } + i, err := r.UpdateStatus(u, *(&UpdateOptions{}).ApplyOptions(opts).AsUpdateOptions()) + if err != nil { + return err + } + u.Object = i.Object + return nil +} + +func (uc *unstructuredClient) PatchStatus(_ context.Context, obj runtime.Object, patch Patch, opts ...PatchOptionFunc) error { + u, ok := obj.(*unstructured.Unstructured) + if !ok { + return fmt.Errorf("unstructured client did not understand object: %T", obj) + } + r, err := uc.getResourceInterface(u.GroupVersionKind(), u.GetNamespace()) + if err != nil { + return err + } + + data, err := patch.Data(obj) + if err != nil { + return err + } + + i, err := r.Patch(u.GetName(), patch.Type(), data, *(&PatchOptions{}).ApplyOptions(opts).AsPatchOptions(), "status") + if err != nil { + return err + } + u.Object = i.Object + return nil +} + +func (uc *unstructuredClient) getResourceInterface(gvk schema.GroupVersionKind, ns string) (dynamic.ResourceInterface, error) { + mapping, err := uc.restMapper.RESTMapping(gvk.GroupKind(), gvk.Version) + if err != nil { + return nil, err + } + if mapping.Scope.Name() == meta.RESTScopeNameRoot { + return uc.client.Resource(mapping.Resource), nil + } + return uc.client.Resource(mapping.Resource).Namespace(ns), nil +}