diff --git a/.github/workflows/e2e.yaml b/.github/workflows/e2e.yaml new file mode 100644 index 000000000..c34471d7f --- /dev/null +++ b/.github/workflows/e2e.yaml @@ -0,0 +1,25 @@ +on: + pull_request_target: + types: [labeled] +name: e2e +jobs: + integration: + runs-on: ubuntu-latest + if: contains(github.event.pull_request.labels.*.name, 'ok-to-test') + steps: + - name: Checkout code + uses: actions/checkout@v2 + with: + fetch-depth: 0 + repository: kubevirt/project-infra + path: project-infra + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v1 + - name: kubeconfig + run: 'echo -e "$KUBECONFIG" > $GITHUB_WORKSPACE/project-infra/.kubeconfig' + shell: bash + env: + KUBECONFIG: ${{secrets.KUBECONFIG}} + - name: Test + run: | + $GITHUB_WORKSPACE/project-infra/hack/mkpj.sh --job pull-kubernetes-sigs-cluster-api-provider-kubevirt-e2e --pull-number ${{github.event.number}} --kubeconfig $GITHUB_WORKSPACE/project-infra/.kubeconfig --trigger-job diff --git a/.github/workflows/test.yaml b/.github/workflows/test.yaml new file mode 100644 index 000000000..20adfd23d --- /dev/null +++ b/.github/workflows/test.yaml @@ -0,0 +1,20 @@ +on: [push, pull_request] +name: build +jobs: + unit_test: + strategy: + matrix: + go-version: [1.16.x, 1.17.x] + os: [ubuntu-latest, macos-latest, windows-latest] + runs-on: ${{ matrix.os }} + steps: + - name: Install Go + uses: actions/setup-go@v2 + with: + go-version: ${{ matrix.go-version }} + - name: Checkout code + uses: actions/checkout@v2 + - name: Test + run: make test + - name: Build + run: make manager diff --git a/Dockerfile b/Dockerfile index ed105e23e..053515965 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,5 +1,61 @@ -FROM gcr.io/distroless/static +# syntax=docker/dockerfile:1.1-experimental -COPY bin/manager /usr/bin/ +# Copyright 2021 The Kubernetes Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. -ENTRYPOINT /usr/bin/manager +# Build the manager binary +# Run this with docker build --build-arg builder_image= +ARG builder_image=golang:1.16.2 +FROM ${builder_image} as builder +WORKDIR /workspace + +# Run this with docker build --build-arg goproxy=$(go env GOPROXY) to override the goproxy +ARG goproxy=https://proxy.golang.org +ENV GOPROXY=$goproxy + +# Copy the Go Modules manifests +COPY go.mod go.mod +COPY go.sum go.sum + +# Cache deps before building and copying source so that we don't need to re-download as much +# and so that source changes don't invalidate our downloaded layer +RUN --mount=type=cache,target=/go/pkg/mod \ + go mod download + +# Copy the sources +COPY ./ ./ + +# Cache the go build into the Go’s compiler cache folder so we take benefits of compiler caching across docker build calls +RUN --mount=type=cache,target=/root/.cache/go-build \ + --mount=type=cache,target=/go/pkg/mod \ + go build . + +# Build +ARG ARCH=amd64 +ARG ldflags + +# Do not force rebuild of up-to-date packages (do not use -a) and use the compiler cache folder +RUN --mount=type=cache,target=/root/.cache/go-build \ + --mount=type=cache,target=/go/pkg/mod \ + CGO_ENABLED=0 GOOS=linux GOARCH=${ARCH} \ + go build -ldflags "${ldflags} -extldflags '-static'" \ + -o manager . + +# Production image +FROM gcr.io/distroless/static:nonroot +WORKDIR / +COPY --from=builder /workspace/manager . 
+# Use uid of nonroot user (65532) because kubernetes expects numeric user when applying pod security policies +USER 65532 +ENTRYPOINT ["/manager"] diff --git a/OWNERS b/OWNERS index 02943433f..41e687abd 100644 --- a/OWNERS +++ b/OWNERS @@ -1,6 +1,14 @@ -# See the OWNERS docs at https://go.k8s.io/owners +# See the OWNERS docs: https://git.k8s.io/community/contributors/guide/owners.md approvers: - - agradouski - - cchengleo - - sig-cluster-lifecycle-leads +- rmohr +- davidvossel +- nunnatsa +- nirarg +options: {} +reviewers: +- rmohr +- davidvossel +- nunnatsa +- nirarg + diff --git a/README.md b/README.md index 7a39820a9..e67e410b6 100644 --- a/README.md +++ b/README.md @@ -25,6 +25,10 @@ We also encourage ALL active community participants to act as if they are mainta - Join the [SIG Cluster Lifecycle](https://groups.google.com/g/kubernetes-sig-cluster-lifecycle) Google Group to documents and calendar. - Participate in the conversations on [Kubernetes Discuss](https://discuss.kubernetes.io/c/contributors/cluster-api/23) +- **Meetings:** + - Cluster API Provider KubeVirt Syncup Meetings: [Tuesdays at 8:00 PT (Pacific Time)](https://zoom.us/j/94685513559?pwd=cnI3RUQyZ3RrckpOc1BQNDA1Q1BrZz09) (weekly starting Tuesday December 7th, 2021). [Convert to your timezone](http://www.thetimezoneconverter.com/?t=8:00&tz=PT%20%28Pacific%20Time%29). + - [Meeting notes and Agenda](https://docs.google.com/document/d/1ZAnRLCKOVbDqrsrYis2OR0aZIAlqp576gCJVCkMNiHM/edit?usp=sharing). + ### Other ways to communicate with the maintainers Please check in with us in the [#cluster-api-kubevirt](https://sigs.k8s.io/cluster-api-provider-kubevirt). You can also join our [Mailing List](https://groups.google.com/forum/#!forum/kubernetes-dev) @@ -45,7 +49,7 @@ If you think you have found a bug please follow the instruction below. We also have a issue tracker to track features. 
If you think you have a feature idea, that could make Cluster API provider Kubevirt become even more awesome, then follow these steps. -- Opem a feature request +- Open a feature request - Remember users might be searching for the issue in future, so please make sure to give it a meaningful title to help others. - Clearly define the use case with concrete examples. Example: type `this` and cluster-api-provider-kubevirt does `that`. - Some of our larger features will require some design. If you would like to include a techincal design to your feature, please go ahead. diff --git a/api/v1alpha4/kubevirtcluster_types.go b/api/v1alpha4/kubevirtcluster_types.go index 4d8214ef6..2bc8b53c7 100644 --- a/api/v1alpha4/kubevirtcluster_types.go +++ b/api/v1alpha4/kubevirtcluster_types.go @@ -35,10 +35,13 @@ type KubevirtClusterSpec struct { // ControlPlaneEndpoint represents the endpoint used to communicate with the control plane. // +optional - ControlPlaneEndpoint APIEndpoint `json:"controlPlaneEndpoint"` + ControlPlaneEndpoint APIEndpoint `json:"controlPlaneEndpoint,omitempty"` // SSHKeys is a reference to a local struct for SSH keys persistence. - SshKeys SSHKeys `json:"sshKeys"` + SshKeys SSHKeys `json:"sshKeys,omitempty"` + + // InfraClusterSecretRef is a reference to a secret with a kubeconfig for external cluster used for infra. + InfraClusterSecretRef *corev1.ObjectReference `json:"infraClusterSecretRef,omitempty"` } // KubevirtClusterStatus defines the observed state of KubevirtCluster. 
diff --git a/api/v1alpha4/kubevirtmachine_types.go b/api/v1alpha4/kubevirtmachine_types.go index 0d6e78f07..eaa732dc4 100644 --- a/api/v1alpha4/kubevirtmachine_types.go +++ b/api/v1alpha4/kubevirtmachine_types.go @@ -35,11 +35,6 @@ type KubevirtMachineSpec struct { // ProviderID TBD what to use for Kubevirt // +optional ProviderID *string `json:"providerID,omitempty"` - - // Bootstrapped is true when the kubeadm bootstrapping has been run - // against this machine - // +optional - Bootstrapped bool `json:"bootstrapped,omitempty"` } // KubevirtMachineStatus defines the observed state of KubevirtMachine. diff --git a/api/v1alpha4/kubevirtmachinetemplate_webhook_test.go b/api/v1alpha4/kubevirtmachinetemplate_webhook_test.go index dc1f94df6..589662d7a 100644 --- a/api/v1alpha4/kubevirtmachinetemplate_webhook_test.go +++ b/api/v1alpha4/kubevirtmachinetemplate_webhook_test.go @@ -18,6 +18,7 @@ package v1alpha4 import ( "errors" + . "github.com/onsi/ginkgo" . "github.com/onsi/gomega" ) @@ -38,18 +39,14 @@ var _ = Describe("Template Validation", func() { oldTemplate: &KubevirtMachineTemplate{ Spec: KubevirtMachineTemplateSpec{ Template: KubevirtMachineTemplateResource{ - Spec: KubevirtMachineSpec{ - Bootstrapped: true, - }, + Spec: KubevirtMachineSpec{}, }, }, }, newTemplate: &KubevirtMachineTemplate{ Spec: KubevirtMachineTemplateSpec{ Template: KubevirtMachineTemplateResource{ - Spec: KubevirtMachineSpec{ - Bootstrapped: true, - }, + Spec: KubevirtMachineSpec{}, }, }, }, @@ -64,13 +61,14 @@ var _ = Describe("Template Validation", func() { }) Context("Template comparison with errors", func() { BeforeEach(func() { + providerID := "test" tests = test{ name: "return no error if no modification", oldTemplate: &KubevirtMachineTemplate{ Spec: KubevirtMachineTemplateSpec{ Template: KubevirtMachineTemplateResource{ Spec: KubevirtMachineSpec{ - Bootstrapped: true, + ProviderID: nil, }, }, }, @@ -79,7 +77,7 @@ var _ = Describe("Template Validation", func() { Spec: 
KubevirtMachineTemplateSpec{ Template: KubevirtMachineTemplateResource{ Spec: KubevirtMachineSpec{ - Bootstrapped: false, + ProviderID: &providerID, }, }, }, diff --git a/config/crd/bases/infrastructure.cluster.x-k8s.io_kubevirtclusters.yaml b/config/crd/bases/infrastructure.cluster.x-k8s.io_kubevirtclusters.yaml index 97b89746c..97076968e 100644 --- a/config/crd/bases/infrastructure.cluster.x-k8s.io_kubevirtclusters.yaml +++ b/config/crd/bases/infrastructure.cluster.x-k8s.io_kubevirtclusters.yaml @@ -52,6 +52,43 @@ spec: - host - port type: object + infraClusterSecretRef: + description: InfraClusterSecretRef is a reference to a secret with + a kubeconfig for external cluster used for infra. + properties: + apiVersion: + description: API version of the referent. + type: string + fieldPath: + description: 'If referring to a piece of an object instead of + an entire object, this string should contain a valid JSON/Go + field access statement, such as desiredState.manifest.containers[2]. + For example, if the object reference is to a container within + a pod, this would take on a value like: "spec.containers{name}" + (where "name" refers to the name of the container that triggered + the event) or if no container name is specified "spec.containers[2]" + (container with index 2 in this pod). This syntax is chosen + only to have some well-defined way of referencing a part of + an object. TODO: this design is not final and this field is + subject to change in the future.' + type: string + kind: + description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + namespace: + description: 'Namespace of the referent. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/' + type: string + resourceVersion: + description: 'Specific resourceVersion to which this reference + is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency' + type: string + uid: + description: 'UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids' + type: string + type: object sshKeys: description: SSHKeys is a reference to a local struct for SSH keys persistence. @@ -99,8 +136,6 @@ spec: ssh keys. type: string type: object - required: - - sshKeys type: object status: description: KubevirtClusterStatus defines the observed state of KubevirtCluster. diff --git a/config/crd/bases/infrastructure.cluster.x-k8s.io_kubevirtmachines.yaml b/config/crd/bases/infrastructure.cluster.x-k8s.io_kubevirtmachines.yaml index b1e5dae2e..184716862 100644 --- a/config/crd/bases/infrastructure.cluster.x-k8s.io_kubevirtmachines.yaml +++ b/config/crd/bases/infrastructure.cluster.x-k8s.io_kubevirtmachines.yaml @@ -38,10 +38,6 @@ spec: spec: description: KubevirtMachineSpec defines the desired state of KubevirtMachine. properties: - bootstrapped: - description: Bootstrapped is true when the kubeadm bootstrapping has - been run against this machine - type: boolean providerID: description: ProviderID TBD what to use for Kubevirt type: string diff --git a/config/crd/bases/infrastructure.cluster.x-k8s.io_kubevirtmachinetemplates.yaml b/config/crd/bases/infrastructure.cluster.x-k8s.io_kubevirtmachinetemplates.yaml index 90597341d..57d96c662 100644 --- a/config/crd/bases/infrastructure.cluster.x-k8s.io_kubevirtmachinetemplates.yaml +++ b/config/crd/bases/infrastructure.cluster.x-k8s.io_kubevirtmachinetemplates.yaml @@ -48,10 +48,6 @@ spec: description: Spec is the specification of the desired behavior of the machine. 
properties: - bootstrapped: - description: Bootstrapped is true when the kubeadm bootstrapping - has been run against this machine - type: boolean providerID: description: ProviderID TBD what to use for Kubevirt type: string diff --git a/config/manager/manager.yaml b/config/manager/manager.yaml index 624b3a5ce..79f686a96 100644 --- a/config/manager/manager.yaml +++ b/config/manager/manager.yaml @@ -16,11 +16,12 @@ spec: control-plane: controller-manager spec: containers: - - args: + - command: + - /manager + args: - "--leader-elect" - "--metrics-bind-addr=127.0.0.1:8080" - "--feature-gates=MachinePool=false" - command: ["/usr/bin/manager"] image: controller:latest name: manager env: diff --git a/controllers/kubevirtcluster_controller.go b/controllers/kubevirtcluster_controller.go index 547b4b102..99bcc7577 100644 --- a/controllers/kubevirtcluster_controller.go +++ b/controllers/kubevirtcluster_controller.go @@ -24,6 +24,7 @@ import ( apierrors "k8s.io/apimachinery/pkg/api/errors" infrav1 "sigs.k8s.io/cluster-api-provider-kubevirt/api/v1alpha4" "sigs.k8s.io/cluster-api-provider-kubevirt/pkg/context" + "sigs.k8s.io/cluster-api-provider-kubevirt/pkg/infracluster" "sigs.k8s.io/cluster-api-provider-kubevirt/pkg/loadbalancer" "sigs.k8s.io/cluster-api-provider-kubevirt/pkg/ssh" clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha4" @@ -36,12 +37,14 @@ import ( "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" "sigs.k8s.io/controller-runtime/pkg/handler" "sigs.k8s.io/controller-runtime/pkg/source" + "time" ) // KubevirtClusterReconciler reconciles a KubevirtCluster object. type KubevirtClusterReconciler struct { client.Client - Log logr.Logger + InfraCluster infracluster.InfraCluster + Log logr.Logger } // +kubebuilder:rbac:groups=infrastructure.cluster.x-k8s.io,resources=kubevirtclusters,verbs=get;list;watch;create;update;patch;delete @@ -82,8 +85,17 @@ func (r *KubevirtClusterReconciler) Reconcile(goctx gocontext.Context, req ctrl. 
Logger: ctrl.LoggerFrom(goctx).WithName(req.Namespace).WithName(req.Name), } + infraClusterClient, infraClusterNamespace, err := r.InfraCluster.GenerateInfraClusterClient(clusterContext) + if err != nil { + return ctrl.Result{RequeueAfter: 10 * time.Second}, errors.Wrap(err, "failed to generate infra cluster client") + } + if infraClusterClient == nil { + clusterContext.Logger.Info("Waiting for infra cluster client...") + return ctrl.Result{RequeueAfter: 10 * time.Second}, nil + } + // Create a helper for managing a service hosting the load-balancer. - externalLoadBalancer, err := loadbalancer.NewLoadBalancer(clusterContext, r.Client) + externalLoadBalancer, err := loadbalancer.NewLoadBalancer(clusterContext, infraClusterClient, infraClusterNamespace) if err != nil { return ctrl.Result{}, errors.Wrapf(err, "failed to create helper for managing the externalLoadBalancer") } @@ -111,7 +123,7 @@ func (r *KubevirtClusterReconciler) Reconcile(goctx gocontext.Context, req ctrl. // Handle deleted clusters if !kubevirtCluster.DeletionTimestamp.IsZero() { - return r.reconcileDelete(clusterContext) + return r.reconcileDelete(clusterContext, externalLoadBalancer) } // Handle non-deleted clusters @@ -169,7 +181,12 @@ func (r *KubevirtClusterReconciler) reconcileNormal(ctx *context.ClusterContext, return ctrl.Result{}, nil } -func (r *KubevirtClusterReconciler) reconcileDelete(ctx *context.ClusterContext) (ctrl.Result, error) { +func (r *KubevirtClusterReconciler) reconcileDelete(ctx *context.ClusterContext, externalLoadBalancer *loadbalancer.LoadBalancer) (ctrl.Result, error) { + ctx.Logger.Info("Deleting load balancer service...") + if err := externalLoadBalancer.Delete(ctx); err != nil { + ctx.Logger.Error(err, "Failed to delete load balancer service.") + } + // Set the LoadBalancerAvailableCondition reporting delete is started, and issue a patch in order to make // this visible to the users. 
patchHelper, err := patch.NewHelper(ctx.KubevirtCluster, r.Client) diff --git a/controllers/kubevirtmachine_controller.go b/controllers/kubevirtmachine_controller.go index abc60c970..71fdb03b6 100644 --- a/controllers/kubevirtmachine_controller.go +++ b/controllers/kubevirtmachine_controller.go @@ -20,13 +20,14 @@ import ( gocontext "context" "encoding/base64" "fmt" + "regexp" "time" + "sigs.k8s.io/cluster-api-provider-kubevirt/pkg/context" "sigs.k8s.io/cluster-api-provider-kubevirt/pkg/ssh" + "sigs.k8s.io/cluster-api-provider-kubevirt/pkg/infracluster" "sigs.k8s.io/cluster-api-provider-kubevirt/pkg/workloadcluster" - infrav1 "sigs.k8s.io/cluster-api-provider-kubevirt/api/v1alpha4" - "sigs.k8s.io/cluster-api-provider-kubevirt/pkg/context" kubevirthandler "sigs.k8s.io/cluster-api-provider-kubevirt/pkg/kubevirt" "github.com/pkg/errors" @@ -36,7 +37,6 @@ import ( "k8s.io/apimachinery/pkg/types" clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha4" "sigs.k8s.io/cluster-api/util" - clusterutil "sigs.k8s.io/cluster-api/util" "sigs.k8s.io/cluster-api/util/conditions" "sigs.k8s.io/cluster-api/util/patch" "sigs.k8s.io/cluster-api/util/predicates" @@ -51,6 +51,7 @@ import ( // KubevirtMachineReconciler reconciles a KubevirtMachine object. type KubevirtMachineReconciler struct { client.Client + InfraCluster infracluster.InfraCluster WorkloadCluster workloadcluster.WorkloadCluster } @@ -170,31 +171,25 @@ func (r *KubevirtMachineReconciler) Reconcile(goctx gocontext.Context, req ctrl. func (r *KubevirtMachineReconciler) reconcileNormal(ctx *context.MachineContext) (res ctrl.Result, retErr error) { // If the machine is already provisioned, return if ctx.KubevirtMachine.Status.Ready { + ctx.Logger.Info("KubevirtMachine.Status.Ready is set -- nothing to do!") return ctrl.Result{}, nil } // Make sure bootstrap data is available and populated. 
if ctx.Machine.Spec.Bootstrap.DataSecretName == nil { if !util.IsControlPlaneMachine(ctx.Machine) && !conditions.IsTrue(ctx.Cluster, clusterv1.ControlPlaneInitializedCondition) { - ctx.Logger.Info("Waiting for the control plane to be initialized") + ctx.Logger.Info("Waiting for the control plane to be initialized...") conditions.MarkFalse(ctx.KubevirtMachine, infrav1.VMProvisionedCondition, clusterv1.WaitingForControlPlaneAvailableReason, clusterv1.ConditionSeverityInfo, "") return ctrl.Result{}, nil } - ctx.Logger.Info("Waiting for the Bootstrap provider controller to set bootstrap data") + ctx.Logger.Info("Waiting for Machine.Spec.Bootstrap.DataSecretName...") conditions.MarkFalse(ctx.KubevirtMachine, infrav1.VMProvisionedCondition, infrav1.WaitingForBootstrapDataReason, clusterv1.ConditionSeverityInfo, "") return ctrl.Result{}, nil } - clusterContext := &context.ClusterContext{ - Context: ctx.Context, - Cluster: ctx.Cluster, - KubevirtCluster: ctx.KubevirtCluster, - Logger: ctx.Logger, - } - // Fetch SSH keys to be used for cluster nodes, and update bootstrap script cloud-init with public key - clusterNodeSshKeys := ssh.NewClusterNodeSshKeys(clusterContext, r.Client) + clusterNodeSshKeys := ssh.NewClusterNodeSshKeys(ctx.ClusterContext(), r.Client) if persisted := clusterNodeSshKeys.IsPersistedToSecret(); !persisted { ctx.Logger.Info("Waiting for ssh keys data secret to be created by KubevirtCluster controller...") return ctrl.Result{RequeueAfter: 10 * time.Second}, nil @@ -203,78 +198,58 @@ func (r *KubevirtMachineReconciler) reconcileNormal(ctx *context.MachineContext) return ctrl.Result{}, errors.Wrap(err, "failed to fetch ssh keys for cluster nodes") } - if err := r.reconcileKubevirtBootstrapSecret(ctx, clusterNodeSshKeys); err != nil { - ctx.Logger.Info("Waiting for the Bootstrap provider controller to set bootstrap data") + infraClusterClient, infraClusterNamespace, err := r.InfraCluster.GenerateInfraClusterClient(ctx.ClusterContext()) + if err != nil { + 
return ctrl.Result{RequeueAfter: 10 * time.Second}, errors.Wrap(err, "failed to generate infra cluster client") + } + if infraClusterClient == nil { + ctx.Logger.Info("Waiting for infra cluster client...") + return ctrl.Result{RequeueAfter: 10 * time.Second}, nil + } + + if err := r.reconcileKubevirtBootstrapSecret(ctx, infraClusterClient, infraClusterNamespace, clusterNodeSshKeys); err != nil { conditions.MarkFalse(ctx.KubevirtMachine, infrav1.VMProvisionedCondition, infrav1.WaitingForBootstrapDataReason, clusterv1.ConditionSeverityInfo, "") - return ctrl.Result{}, nil + return ctrl.Result{RequeueAfter: 10 * time.Second}, errors.Wrap(err, "failed to fetch kubevirt bootstrap secret") } // Create a helper for managing the KubeVirt VM hosting the machine. - externalMachine, err := kubevirthandler.NewMachine(ctx, r.Client) + externalMachine, err := kubevirthandler.NewMachine(ctx, infraClusterClient, infraClusterNamespace, clusterNodeSshKeys) if err != nil { return ctrl.Result{}, errors.Wrapf(err, "failed to create helper for managing the externalMachine") } // Provision the underlying VM if not existing if !externalMachine.Exists() { - ctx.Logger.Info("Creating underlying VM instance...") if err := externalMachine.Create(); err != nil { return ctrl.Result{}, errors.Wrap(err, "failed to create VM instance") } } - vmCommandExecutor := ssh.VMCommandExecutor{ - IPAddress: externalMachine.Address(), - PublicKey: clusterNodeSshKeys.PublicKey, - PrivateKey: clusterNodeSshKeys.PrivateKey, - } - // Wait for VM to boot - if !externalMachine.IsBooted(vmCommandExecutor) { - ctx.Logger.Info("Waiting for underlying VM instance to boot...") + if !externalMachine.IsReady() { + ctx.Logger.Info("KubeVirt VM is not ready...") return ctrl.Result{RequeueAfter: 20 * time.Second}, nil } - // Update the VMProvisionedCondition condition - // NOTE: it is required to create the patch helper at this point, otherwise it won't surface if we issue a patch down in the code - // (because if we create 
patch helper after this point the VMProvisionedCondition=True exists both on before and after). - patchHelper, err := patch.NewHelper(ctx.KubevirtMachine, r.Client) - if err != nil { - return ctrl.Result{}, err - } conditions.MarkTrue(ctx.KubevirtMachine, infrav1.VMProvisionedCondition) - // At, this stage, we are ready for bootstrap. However, if the BootstrapExecSucceededCondition is missing we add it and we - // issue an patch so the user can see the change of state before the bootstrap actually starts. - // NOTE: usually controller should not rely on status they are setting, but on the observed state; however - // in this case we are doing this because we explicitly want to give a feedback to users. - if !conditions.Has(ctx.KubevirtMachine, infrav1.BootstrapExecSucceededCondition) { - conditions.MarkFalse(ctx.KubevirtMachine, infrav1.BootstrapExecSucceededCondition, infrav1.BootstrappingReason, clusterv1.ConditionSeverityInfo, "") - if err := ctx.PatchKubevirtMachine(patchHelper); err != nil { - return ctrl.Result{}, errors.Wrap(err, "failed to patch KubevirtMachine") - } + ipAddress := externalMachine.Address() + if ipAddress == "" { + ctx.Logger.Info(fmt.Sprintf("KubevirtMachine %s: Got empty ipAddress, requeue", ctx.KubevirtMachine.Name)) + return ctrl.Result{RequeueAfter: 20 * time.Second}, nil } - // Wait for VM to bootstrap with Kubernetes - if !ctx.KubevirtMachine.Spec.Bootstrapped { - if !externalMachine.IsBootstrapped(vmCommandExecutor) { + if externalMachine.SupportsCheckingIsBootstrapped() && !conditions.IsTrue(ctx.KubevirtMachine, infrav1.BootstrapExecSucceededCondition) { + if !externalMachine.IsBootstrapped() { ctx.Logger.Info("Waiting for underlying VM to bootstrap...") conditions.MarkFalse(ctx.KubevirtMachine, infrav1.BootstrapExecSucceededCondition, infrav1.BootstrapFailedReason, clusterv1.ConditionSeverityWarning, "VM not bootstrapped yet") return ctrl.Result{RequeueAfter: 10 * time.Second}, nil } - - ctx.KubevirtMachine.Spec.Bootstrapped = 
true + // Update the condition BootstrapExecSucceededCondition + conditions.MarkTrue(ctx.KubevirtMachine, infrav1.BootstrapExecSucceededCondition) + ctx.Logger.Info("Underlying VM has bootstrapped.") } - // Update the condition BootstrapExecSucceededCondition - conditions.MarkTrue(ctx.KubevirtMachine, infrav1.BootstrapExecSucceededCondition) - - ipAddress := externalMachine.Address() - ctx.Logger.Info(fmt.Sprintf("KubevirtMachine %s: Got ipAddress <%s>", ctx.KubevirtMachine.Name, ipAddress)) - if ipAddress == "" { - ctx.Logger.Info(fmt.Sprintf("KubevirtMachine %s: Got empty ipAddress, requeue", ctx.KubevirtMachine.Name)) - return ctrl.Result{RequeueAfter: 20 * time.Second}, nil - } ctx.KubevirtMachine.Status.Addresses = []clusterv1.MachineAddress{ { Type: clusterv1.MachineHostName, @@ -296,7 +271,7 @@ func (r *KubevirtMachineReconciler) reconcileNormal(ctx *context.MachineContext) providerID, err := externalMachine.GenerateProviderID() if err != nil { - ctx.Logger.Error(err, "Failed to patch node with provider id...") + ctx.Logger.Error(err, "Failed to patch node with provider id.") return ctrl.Result{RequeueAfter: 5 * time.Second}, nil } @@ -323,7 +298,6 @@ func (r *KubevirtMachineReconciler) updateNodeProviderID(ctx *context.MachineCon if workloadClusterClient == nil { ctx.Logger.Info("Waiting for workload cluster client...") return ctrl.Result{RequeueAfter: 10 * time.Second}, nil - } // using workload cluster client, get the corresponding cluster node @@ -354,6 +328,41 @@ } func (r *KubevirtMachineReconciler) reconcileDelete(ctx *context.MachineContext) (ctrl.Result, error) { + infraClusterClient, infraClusterNamespace, err := r.InfraCluster.GenerateInfraClusterClient(ctx.ClusterContext()) + if err != nil { + return ctrl.Result{RequeueAfter: 10 * time.Second}, errors.Wrap(err, "failed to generate infra cluster client") + } + if infraClusterClient == nil { + ctx.Logger.Info("Waiting 
for infra cluster client...") + return ctrl.Result{RequeueAfter: 10 * time.Second}, nil + } + + // Fetch SSH keys to be used for cluster nodes, and update bootstrap script cloud-init with public key + clusterNodeSshKeys := ssh.NewClusterNodeSshKeys(ctx.ClusterContext(), r.Client) + if persisted := clusterNodeSshKeys.IsPersistedToSecret(); !persisted { + ctx.Logger.Info("Waiting for ssh keys data secret to be created by KubevirtCluster controller...") + return ctrl.Result{RequeueAfter: 10 * time.Second}, nil + } + if err := clusterNodeSshKeys.FetchPersistedKeysFromSecret(); err != nil { + return ctrl.Result{}, errors.Wrap(err, "failed to fetch ssh keys for cluster nodes") + } + + ctx.Logger.Info("Deleting VM bootstrap secret...") + if err := r.deleteKubevirtBootstrapSecret(ctx, infraClusterClient, infraClusterNamespace); err != nil { + return ctrl.Result{RequeueAfter: 10 * time.Second}, errors.Wrap(err, "failed to delete bootstrap secret") + } + + ctx.Logger.Info("Deleting VM...") + externalMachine, err := kubevirthandler.NewMachine(ctx, infraClusterClient, infraClusterNamespace, clusterNodeSshKeys) + if err != nil { + return ctrl.Result{RequeueAfter: 10 * time.Second}, errors.Wrap(err, "failed to create helper for externalMachine access") + } + if externalMachine.Exists() { + if err := externalMachine.Delete(); err != nil { + return ctrl.Result{RequeueAfter: 10 * time.Second}, errors.Wrap(err, "failed to delete VM") + } + } + // Set the VMProvisionedCondition reporting delete is started, and issue a patch in order to make // this visible to the users. 
patchHelper, err := patch.NewHelper(ctx.KubevirtMachine, r.Client) @@ -435,15 +444,16 @@ func (r *KubevirtMachineReconciler) KubevirtClusterToKubevirtMachines(o client.O } // reconcileKubevirtBootstrapSecret creates bootstrap cloud-init secret for KubeVirt virtual machines -func (r *KubevirtMachineReconciler) reconcileKubevirtBootstrapSecret(ctx *context.MachineContext, sshKeys *ssh.ClusterNodeSshKeys) error { +func (r *KubevirtMachineReconciler) reconcileKubevirtBootstrapSecret(ctx *context.MachineContext, infraClusterClient client.Client, infraClusterNamespace string, sshKeys *ssh.ClusterNodeSshKeys) error { if ctx.Machine.Spec.Bootstrap.DataSecretName == nil { return errors.New("error retrieving bootstrap data: linked Machine's bootstrap.dataSecretName is nil") } // Exit early if exists. bootstrapDataSecret := &corev1.Secret{} - bootstrapDataSecretKey := client.ObjectKey{Namespace: ctx.Machine.GetNamespace(), Name: *ctx.Machine.Spec.Bootstrap.DataSecretName + "-userdata"} - if err := r.Client.Get(ctx, bootstrapDataSecretKey, bootstrapDataSecret); err == nil { + bootstrapDataSecretKey := client.ObjectKey{Namespace: infraClusterNamespace, Name: *ctx.Machine.Spec.Bootstrap.DataSecretName + "-userdata"} + if err := infraClusterClient.Get(ctx, bootstrapDataSecretKey, bootstrapDataSecret); err == nil { + ctx.BootstrapDataSecret = bootstrapDataSecret return nil } @@ -458,36 +468,23 @@ func (r *KubevirtMachineReconciler) reconcileKubevirtBootstrapSecret(ctx *contex return errors.New("error retrieving bootstrap data: secret value key is missing") } - ctx.Logger.Info("Adding users config to bootstrap data...") - updatedValue := []byte(string(value) + usersCloudConfig(sshKeys.PublicKey)) + if isCloudConfigUserData(value) { + ctx.Logger.Info("Adding users and ssh config to bootstrap userdata...") + value = []byte(string(value) + usersCloudConfig(sshKeys.PublicKey)) + } newBootstrapDataSecret := &corev1.Secret{ ObjectMeta: metav1.ObjectMeta{ Name: s.Name + "-userdata", - 
Namespace: ctx.Machine.GetNamespace(), + Namespace: infraClusterNamespace, }, } + ctx.BootstrapDataSecret = newBootstrapDataSecret - _, err := controllerutil.CreateOrUpdate(ctx, r.Client, newBootstrapDataSecret, func() error { + _, err := controllerutil.CreateOrUpdate(ctx, infraClusterClient, newBootstrapDataSecret, func() error { newBootstrapDataSecret.Type = clusterv1.ClusterSecretType newBootstrapDataSecret.Data = map[string][]byte{ - "userdata": updatedValue, - } - - // set owner reference for secret - mutateFn := func() (err error) { - newBootstrapDataSecret.SetOwnerReferences(clusterutil.EnsureOwnerRef( - newBootstrapDataSecret.OwnerReferences, - metav1.OwnerReference{ - APIVersion: ctx.KubevirtMachine.APIVersion, - Kind: ctx.KubevirtMachine.Kind, - Name: ctx.KubevirtMachine.Name, - UID: ctx.KubevirtMachine.UID, - })) - return nil - } - if _, err := controllerutil.CreateOrUpdate(ctx, r.Client, newBootstrapDataSecret, mutateFn); err != nil { - return errors.Wrapf(err, "failed to set owner reference for secret") + "userdata": value, } return nil @@ -500,6 +497,26 @@ func (r *KubevirtMachineReconciler) reconcileKubevirtBootstrapSecret(ctx *contex return nil } +// deleteKubevirtBootstrapSecret deletes bootstrap cloud-init secret for KubeVirt virtual machines +func (r *KubevirtMachineReconciler) deleteKubevirtBootstrapSecret(ctx *context.MachineContext, infraClusterClient client.Client, infraClusterNamespace string) error { + bootstrapDataSecret := &corev1.Secret{} + bootstrapDataSecretKey := client.ObjectKey{Namespace: infraClusterNamespace, Name: *ctx.Machine.Spec.Bootstrap.DataSecretName + "-userdata"} + if err := infraClusterClient.Get(ctx, bootstrapDataSecretKey, bootstrapDataSecret); err != nil { + // the secret does not exist, exit without error + return nil + } + + if err := infraClusterClient.Delete(ctx, bootstrapDataSecret); err != nil { + return errors.Wrapf(err, "failed to delete kubevirt bootstrap secret for cluster") + } + + return nil +} + +func 
isCloudConfigUserData(userData []byte) bool { + return regexp.MustCompile(`(?m)^#cloud-config`).MatchString(string(userData)) +} + // usersCloudConfig generates 'users' cloud config for capk user with a given ssh public key func usersCloudConfig(sshPublicKey []byte) string { sshPublicKeyString := base64.StdEncoding.EncodeToString(sshPublicKey) diff --git a/controllers/kubevirtmachine_controller_test.go b/controllers/kubevirtmachine_controller_test.go index e93bd34b7..569e0b687 100644 --- a/controllers/kubevirtmachine_controller_test.go +++ b/controllers/kubevirtmachine_controller_test.go @@ -17,15 +17,18 @@ limitations under the License. package controllers import ( + gocontext "context" "time" "github.com/golang/mock/gomock" . "github.com/onsi/ginkgo" + "github.com/onsi/ginkgo/extensions/table" . "github.com/onsi/gomega" "github.com/pkg/errors" "sigs.k8s.io/cluster-api-provider-kubevirt/pkg/context" "sigs.k8s.io/cluster-api-provider-kubevirt/pkg/testing" - "sigs.k8s.io/cluster-api-provider-kubevirt/pkg/workloadcluster/mock" + infraclustermock "sigs.k8s.io/cluster-api-provider-kubevirt/pkg/infracluster/mock" + workloadclustermock "sigs.k8s.io/cluster-api-provider-kubevirt/pkg/workloadcluster/mock" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/client/fake" @@ -57,6 +60,16 @@ var ( anotherKubevirtMachine *infrav1.KubevirtMachine anotherMachine *clusterv1.Machine + vm *kubevirtv1.VirtualMachine + vmi *kubevirtv1.VirtualMachineInstance + + sshKeySecretName string + bootstrapSecretName string + + sshKeySecret *corev1.Secret + bootstrapSecret *corev1.Secret + bootstrapUserDataSecret *corev1.Secret + fakeClient client.Client kubevirtMachineReconciler KubevirtMachineReconciler fakeWorkloadClusterClient client.Client @@ -107,9 +120,203 @@ var _ = Describe("KubevirtClusterToKubevirtMachines", func() { }) }) +var _ = Describe("utility functions", func() { + table.DescribeTable("should detect userdata is cloud-config", func(userData []byte, 
expected bool) { + Expect(isCloudConfigUserData(userData)).To(Equal(expected)) + }, + table.Entry("should detect cloud-config", []byte("#something\n\n#something else\n#cloud-config\nthe end"), true), + table.Entry("should not detect cloud-config", []byte("#something\n\n#something else\n#not-cloud-config\nthe end"), false), + table.Entry("should not detect cloud-config", []byte("#something\n\n#something else\n #cloud-config\nthe end"), false), + ) +}) + +var _ = Describe("reconcile a kubevirt machine", func() { + mockCtrl = gomock.NewController(GinkgoT()) + workloadClusterMock := workloadclustermock.NewMockWorkloadCluster(mockCtrl) + infraClusterMock := infraclustermock.NewMockInfraCluster(mockCtrl) + testLogger := ctrl.Log.WithName("test") + var machineContext *context.MachineContext + + BeforeEach(func() { + + bootstrapSecretName = "bootstrap-secret" + sshKeySecretName = "ssh-keys" + clusterName = "kvcluster" + machineName = "test-machine" + kubevirtMachineName = "test-kubevirt-machine" + kubevirtMachine = testing.NewKubevirtMachine(kubevirtMachineName, machineName) + kubevirtCluster = testing.NewKubevirtCluster(clusterName, machineName) + + cluster = testing.NewCluster(clusterName, kubevirtCluster) + machine = testing.NewMachine(clusterName, machineName, kubevirtMachine) + + machine.Spec = clusterv1.MachineSpec{ + Bootstrap: clusterv1.Bootstrap{ + DataSecretName: &bootstrapSecretName, + }, + } + + kubevirtCluster.Spec.SshKeys = infrav1.SSHKeys{DataSecretName: &sshKeySecretName} + + sshKeySecret = &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: sshKeySecretName, + }, + Data: map[string][]byte{ + "pub": []byte("sha-rsa 1234"), + "key": []byte("sha-rsa 5678"), + }, + } + + bootstrapSecret = &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: bootstrapSecretName, + }, + Data: map[string][]byte{ + "value": []byte("shell-script"), + }, + } + + bootstrapUserDataSecret = &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: bootstrapSecretName + 
"-userdata", + }, + Data: map[string][]byte{ + "userdata": []byte("shell-script"), + }, + } + + vm = &kubevirtv1.VirtualMachine{ + TypeMeta: metav1.TypeMeta{ + Kind: "VirtualMachine", + APIVersion: "kubevirt.io", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: kubevirtMachineName, + }, + } + + vmi = &kubevirtv1.VirtualMachineInstance{ + TypeMeta: metav1.TypeMeta{ + Kind: "VirtualMachineInstance", + APIVersion: "kubevirt.io", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: kubevirtMachineName, + }, + } + + }) + + setupClient := func(objects []client.Object) { + machineContext = &context.MachineContext{ + Context: gocontext.Background(), + Cluster: cluster, + KubevirtCluster: kubevirtCluster, + Machine: machine, + KubevirtMachine: kubevirtMachine, + Logger: testLogger, + } + + fakeClient = fake.NewClientBuilder().WithScheme(setupScheme()).WithObjects(objects...).Build() + kubevirtMachineReconciler = KubevirtMachineReconciler{ + Client: fakeClient, + WorkloadCluster: workloadClusterMock, + InfraCluster: infraClusterMock, + } + + } + AfterEach(func() {}) + + It("should create KubeVirt VM", func() { + objects := []client.Object{ + cluster, + kubevirtCluster, + machine, + kubevirtMachine, + sshKeySecret, + bootstrapSecret, + bootstrapUserDataSecret, + } + + setupClient(objects) + + clusterContext := &context.ClusterContext{Context: machineContext.Context, Cluster: machineContext.Cluster, KubevirtCluster: machineContext.KubevirtCluster, Logger: machineContext.Logger} + infraClusterMock.EXPECT().GenerateInfraClusterClient(clusterContext).Return(fakeClient, cluster.Namespace, nil) + + out, err := kubevirtMachineReconciler.reconcileNormal(machineContext) + + Expect(err).ShouldNot(HaveOccurred()) + + // should expect to re-enqueue while waiting for VMI to come online + Expect(out).To(Equal(ctrl.Result{RequeueAfter: 20 * time.Second})) + + // should expect VM to be created with expected name + vm := &kubevirtv1.VirtualMachine{} + vmKey := client.ObjectKey{Namespace: 
kubevirtMachine.Namespace, Name: kubevirtMachine.Name} + err = fakeClient.Get(gocontext.Background(), vmKey, vm) + Expect(err).NotTo(HaveOccurred()) + + // Should expect kubevirt machine is still not ready + Expect(machineContext.KubevirtMachine.Status.Ready).To(BeFalse()) + Expect(machineContext.KubevirtMachine.Spec.ProviderID).To(BeNil()) + }) + + It("should detect when VMI is ready and mark KubevirtMachine ready", func() { + vmi.Status.Conditions = []kubevirtv1.VirtualMachineInstanceCondition{ + { + Type: kubevirtv1.VirtualMachineInstanceReady, + Status: corev1.ConditionTrue, + }, + } + vmi.Status.Interfaces = []kubevirtv1.VirtualMachineInstanceNetworkInterface{ + + { + IP: "1.1.1.1", + }, + } + + objects := []client.Object{ + cluster, + kubevirtCluster, + machine, + kubevirtMachine, + sshKeySecret, + bootstrapSecret, + bootstrapUserDataSecret, + vm, + vmi, + } + + setupClient(objects) + + clusterContext := &context.ClusterContext{Context: machineContext.Context, Cluster: machineContext.Cluster, KubevirtCluster: machineContext.KubevirtCluster, Logger: machineContext.Logger} + infraClusterMock.EXPECT().GenerateInfraClusterClient(clusterContext).Return(fakeClient, cluster.Namespace, nil) + + Expect(machineContext.KubevirtMachine.Status.Ready).To(BeFalse()) + out, err := kubevirtMachineReconciler.reconcileNormal(machineContext) + + Expect(err).ShouldNot(HaveOccurred()) + + // should expect to re-enqueue while waiting for VMI to come online + Expect(out).To(Equal(ctrl.Result{})) + + // should expect VM to be created with expected name + vm := &kubevirtv1.VirtualMachine{} + vmKey := client.ObjectKey{Namespace: kubevirtMachine.Namespace, Name: kubevirtMachine.Name} + err = fakeClient.Get(gocontext.Background(), vmKey, vm) + Expect(err).NotTo(HaveOccurred()) + + Expect(machineContext.KubevirtMachine.Status.Ready).To(BeTrue()) + Expect(*machineContext.KubevirtMachine.Spec.ProviderID).To(Equal("kubevirt://" + kubevirtMachineName)) + }) + +}) + var _ = 
Describe("updateNodeProviderID", func() { mockCtrl = gomock.NewController(GinkgoT()) - workloadClusterMock := mock.NewMockWorkloadCluster(mockCtrl) + workloadClusterMock := workloadclustermock.NewMockWorkloadCluster(mockCtrl) + infraClusterMock := infraclustermock.NewMockInfraCluster(mockCtrl) expectedProviderId := "aa-66@test" testLogger := ctrl.Log.WithName("test") @@ -126,6 +333,7 @@ var _ = Describe("updateNodeProviderID", func() { kubevirtMachineReconciler = KubevirtMachineReconciler{ Client: fakeClient, WorkloadCluster: workloadClusterMock, + InfraCluster: infraClusterMock, } workloadClusterObjects := []client.Object{ @@ -140,7 +348,6 @@ var _ = Describe("updateNodeProviderID", func() { }, } fakeWorkloadClusterClient = fake.NewClientBuilder().WithScheme(setupScheme()).WithObjects(workloadClusterObjects...).Build() - }) AfterEach(func() {}) diff --git a/hack/functest.sh b/hack/functest.sh index e4db8a63b..3591084ea 100755 --- a/hack/functest.sh +++ b/hack/functest.sh @@ -7,3 +7,7 @@ set -e -o pipefail ./kubevirtci create-cluster ./kubevirtci kubectl wait --for=condition=ControlPlaneInitialized=true cluster/kvcluster --timeout=10m ./kubevirtci kubectl wait --for=condition=ControlPlaneReady=true cluster/kvcluster --timeout=10m + +CONTROL_PLANE_MACHINE=$(./kubevirtci kubectl get kubevirtmachine | grep kvcluster-control-plane | awk '{ print $1 }') +./kubevirtci kubectl wait --for=condition=BootstrapExecSucceeded=true kubevirtmachine/${CONTROL_PLANE_MACHINE} --timeout=1m + diff --git a/kubevirtci b/kubevirtci index a8fe19de7..757a7f6e3 100755 --- a/kubevirtci +++ b/kubevirtci @@ -65,7 +65,6 @@ function kubevirtci::down() { function kubevirtci::build() { export REGISTRY="localhost:$(cluster-up/cluster-up/cli.sh ports registry)" - make manager make docker-build make docker-push } diff --git a/main.go b/main.go index e69d5102f..3c998d980 100644 --- a/main.go +++ b/main.go @@ -24,6 +24,7 @@ import ( "time" "sigs.k8s.io/cluster-api-provider-kubevirt/controllers" + 
"sigs.k8s.io/cluster-api-provider-kubevirt/pkg/infracluster" "sigs.k8s.io/cluster-api-provider-kubevirt/pkg/workloadcluster" "sigs.k8s.io/controller-runtime/pkg/controller" @@ -150,6 +151,7 @@ func setupChecks(mgr ctrl.Manager) { func setupReconcilers(ctx context.Context, mgr ctrl.Manager) { if err := (&controllers.KubevirtMachineReconciler{ Client: mgr.GetClient(), + InfraCluster: infracluster.New(mgr.GetClient()), WorkloadCluster: workloadcluster.New(mgr.GetClient()), }).SetupWithManager(ctx, mgr, controller.Options{ MaxConcurrentReconciles: concurrency, @@ -158,8 +160,9 @@ func setupReconcilers(ctx context.Context, mgr ctrl.Manager) { os.Exit(1) } if err := (&controllers.KubevirtClusterReconciler{ - Client: mgr.GetClient(), - Log: ctrl.Log.WithName("controllers").WithName("KubevirtCluster"), + Client: mgr.GetClient(), + InfraCluster: infracluster.New(mgr.GetClient()), + Log: ctrl.Log.WithName("controllers").WithName("KubevirtCluster"), }).SetupWithManager(mgr); err != nil { setupLog.Error(err, "unable to create controller", "controller", "KubevirtCluster") os.Exit(1) diff --git a/pkg/context/machine_context.go b/pkg/context/machine_context.go index 9cdb8b89f..279178687 100644 --- a/pkg/context/machine_context.go +++ b/pkg/context/machine_context.go @@ -18,12 +18,15 @@ package context import ( "context" + "encoding/base64" "fmt" + "strings" "sigs.k8s.io/cluster-api/util/conditions" "sigs.k8s.io/cluster-api/util/patch" "github.com/go-logr/logr" + corev1 "k8s.io/api/core/v1" infrav1 "sigs.k8s.io/cluster-api-provider-kubevirt/api/v1alpha4" clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha4" ) @@ -31,11 +34,22 @@ import ( // MachineContext is a Go context used with a KubeVirt machine. 
type MachineContext struct { context.Context - Cluster *clusterv1.Cluster - Machine *clusterv1.Machine - KubevirtCluster *infrav1.KubevirtCluster - KubevirtMachine *infrav1.KubevirtMachine - Logger logr.Logger + Cluster *clusterv1.Cluster + Machine *clusterv1.Machine + KubevirtCluster *infrav1.KubevirtCluster + KubevirtMachine *infrav1.KubevirtMachine + BootstrapDataSecret *corev1.Secret + Logger logr.Logger +} + +// ClusterContext returns cluster context from this machine context +func (c *MachineContext) ClusterContext() *ClusterContext { + return &ClusterContext{ + Context: c.Context, + Cluster: c.Cluster, + KubevirtCluster: c.KubevirtCluster, + Logger: c.Logger, + } } // String returns KubeVirt machine GroupVersionKind @@ -66,3 +80,22 @@ func (c *MachineContext) PatchKubevirtMachine(patchHelper *patch.Helper) error { }}, ) } + +func (c *MachineContext) HasInjectedCapkSSHKeys(sshPublicKey []byte) bool { + if c.BootstrapDataSecret == nil || len(sshPublicKey) == 0 { + return false + } + value, ok := c.BootstrapDataSecret.Data["userdata"] + if !ok { + return false + } + + sshPublicKeyString := base64.StdEncoding.EncodeToString(sshPublicKey) + sshPublicKeyDecoded, err := base64.StdEncoding.DecodeString(sshPublicKeyString) + + if err != nil { + return false + } + + return strings.Contains(string(value), string(sshPublicKeyDecoded)) +} diff --git a/pkg/infracluster/infracluster.go b/pkg/infracluster/infracluster.go new file mode 100644 index 000000000..32fb89e9a --- /dev/null +++ b/pkg/infracluster/infracluster.go @@ -0,0 +1,67 @@ +package infracluster + +import ( + "github.com/pkg/errors" + corev1 "k8s.io/api/core/v1" + "k8s.io/client-go/tools/clientcmd" + "sigs.k8s.io/cluster-api-provider-kubevirt/pkg/context" + "sigs.k8s.io/controller-runtime/pkg/client" + "strings" +) + +//go:generate mockgen -source=./infracluster.go -destination=./mock/infracluster_generated.go -package=mock +type InfraCluster interface { + GenerateInfraClusterClient(ctx *context.ClusterContext) 
(client.Client, string, error) +} + +// New creates new InfraCluster instance +func New(client client.Client) InfraCluster { + return &infraCluster{ + Client: client, + } +} + +type infraCluster struct { + client.Client +} + +// GenerateInfraClusterClient creates a client for infra cluster. +func (w *infraCluster) GenerateInfraClusterClient(ctx *context.ClusterContext) (client.Client, string, error) { + infraClusterSecretRef := ctx.KubevirtCluster.Spec.InfraClusterSecretRef + + if infraClusterSecretRef == nil { + return w.Client, ctx.Cluster.Namespace, nil + } + + infraKubeconfigSecret := &corev1.Secret{} + infraKubeconfigSecretKey := client.ObjectKey{Namespace: infraClusterSecretRef.Namespace, Name: infraClusterSecretRef.Name} + if err := w.Client.Get(ctx.Context, infraKubeconfigSecretKey, infraKubeconfigSecret); err != nil { + return nil, "", errors.Wrapf(err, "failed to fetch infra kubeconfig secret %s/%s", infraClusterSecretRef.Namespace, infraClusterSecretRef.Name) + } + + kubeConfig, ok := infraKubeconfigSecret.Data["kubeconfig"] + if !ok { + return nil, "", errors.New("Failed to retrieve infra kubeconfig from secret: 'kubeconfig' key is missing.") + } + + namespace := "default" + namespaceBytes, ok := infraKubeconfigSecret.Data["namespace"] + if ok { + namespace = string(namespaceBytes) + namespace = strings.TrimSpace(namespace) + } + + // generate REST config + restConfig, err := clientcmd.RESTConfigFromKubeConfig(kubeConfig) + if err != nil { + return nil, "", errors.Wrap(err, "failed to create REST config") + } + + // create the client + infraClusterClient, err := client.New(restConfig, client.Options{Scheme: w.Client.Scheme()}) + if err != nil { + return nil, "", errors.Wrap(err, "failed to create infra cluster client") + } + + return infraClusterClient, namespace, nil +} diff --git a/pkg/infracluster/mock/infracluster_generated.go b/pkg/infracluster/mock/infracluster_generated.go new file mode 100644 index 000000000..17af2a14b --- /dev/null +++ 
b/pkg/infracluster/mock/infracluster_generated.go @@ -0,0 +1,52 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: ./infracluster.go + +// Package mock is a generated GoMock package. +package mock + +import ( + reflect "reflect" + + gomock "github.com/golang/mock/gomock" + context "sigs.k8s.io/cluster-api-provider-kubevirt/pkg/context" + client "sigs.k8s.io/controller-runtime/pkg/client" +) + +// MockInfraCluster is a mock of InfraCluster interface. +type MockInfraCluster struct { + ctrl *gomock.Controller + recorder *MockInfraClusterMockRecorder +} + +// MockInfraClusterMockRecorder is the mock recorder for MockInfraCluster. +type MockInfraClusterMockRecorder struct { + mock *MockInfraCluster +} + +// NewMockInfraCluster creates a new mock instance. +func NewMockInfraCluster(ctrl *gomock.Controller) *MockInfraCluster { + mock := &MockInfraCluster{ctrl: ctrl} + mock.recorder = &MockInfraClusterMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockInfraCluster) EXPECT() *MockInfraClusterMockRecorder { + return m.recorder +} + +// GenerateInfraClusterClient mocks base method. +func (m *MockInfraCluster) GenerateInfraClusterClient(ctx *context.ClusterContext) (client.Client, string, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GenerateInfraClusterClient", ctx) + ret0, _ := ret[0].(client.Client) + ret1, _ := ret[1].(string) + ret2, _ := ret[2].(error) + return ret0, ret1, ret2 +} + +// GenerateInfraClusterClient indicates an expected call of GenerateInfraClusterClient. 
+func (mr *MockInfraClusterMockRecorder) GenerateInfraClusterClient(ctx interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GenerateInfraClusterClient", reflect.TypeOf((*MockInfraCluster)(nil).GenerateInfraClusterClient), ctx) +} diff --git a/pkg/kubevirt/machine.go b/pkg/kubevirt/machine.go index 5b167dbc1..adeb6904c 100644 --- a/pkg/kubevirt/machine.go +++ b/pkg/kubevirt/machine.go @@ -21,13 +21,13 @@ import ( "fmt" "github.com/pkg/errors" + corev1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" kubevirtv1 "kubevirt.io/api/core/v1" "sigs.k8s.io/cluster-api-provider-kubevirt/pkg/context" + "sigs.k8s.io/cluster-api-provider-kubevirt/pkg/ssh" clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha4" - "sigs.k8s.io/cluster-api/util" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" ) @@ -35,15 +35,26 @@ import ( // Machine implement a service for managing the KubeVirt VM hosting a kubernetes node. type Machine struct { client client.Client + namespace string machineContext *context.MachineContext vmInstance *kubevirtv1.VirtualMachineInstance + + sshKeys *ssh.ClusterNodeSshKeys + getCommandExecutor func(string, *ssh.ClusterNodeSshKeys) ssh.VMCommandExecutor } // NewMachine returns a new Machine service for the given context. 
-func NewMachine(ctx *context.MachineContext, client client.Client) (*Machine, error) { - machine := &Machine{client, ctx, nil} +func NewMachine(ctx *context.MachineContext, client client.Client, namespace string, sshKeys *ssh.ClusterNodeSshKeys) (*Machine, error) { + machine := &Machine{ + client: client, + namespace: namespace, + machineContext: ctx, + vmInstance: nil, + sshKeys: sshKeys, + getCommandExecutor: ssh.NewVMCommandExecutor, + } - namespacedName := types.NamespacedName{Namespace: ctx.KubevirtMachine.Namespace, Name: ctx.KubevirtMachine.Name} + namespacedName := types.NamespacedName{Namespace: namespace, Name: ctx.KubevirtMachine.Name} vmi := &kubevirtv1.VirtualMachineInstance{} err := client.Get(ctx.Context, namespacedName, vmi) @@ -69,24 +80,13 @@ func (m *Machine) Exists() bool { func (m *Machine) Create() error { m.machineContext.Logger.Info(fmt.Sprintf("Creating VM with role '%s'...", nodeRole(m.machineContext))) - virtualMachine := newVirtualMachineFromKubevirtMachine(m.machineContext) + virtualMachine := newVirtualMachineFromKubevirtMachine(m.machineContext, m.namespace) mutateFn := func() (err error) { - // Ensure the KubevirtMachine is marked as an owner of the VirtualMachine. 
- virtualMachine.SetOwnerReferences(util.EnsureOwnerRef( - virtualMachine.OwnerReferences, - metav1.OwnerReference{ - APIVersion: m.machineContext.KubevirtMachine.APIVersion, - Kind: m.machineContext.KubevirtMachine.Kind, - Name: m.machineContext.KubevirtMachine.Name, - UID: m.machineContext.KubevirtMachine.UID, - })) - - // TODO: to remove those labels if virtualMachine.Labels == nil { virtualMachine.Labels = map[string]string{} } - virtualMachine.Labels[clusterv1.ClusterLabelName] = "capk" + virtualMachine.Labels[clusterv1.ClusterLabelName] = m.machineContext.Cluster.Name return nil } @@ -94,15 +94,24 @@ func (m *Machine) Create() error { return err } - namespacedName := types.NamespacedName{Namespace: m.machineContext.KubevirtMachine.Namespace, Name: m.machineContext.KubevirtMachine.Name} - vmi := &kubevirtv1.VirtualMachineInstance{} - if err := m.client.Get(m.machineContext.Context, namespacedName, vmi); err != nil { - if apierrors.IsNotFound(err) { - return errors.New("failed to create VM instance") + return nil +} + +// Returns if VMI has ready condition or not. +func (m *Machine) hasReadyCondition() bool { + + if m.vmInstance == nil { + return false + } + + for _, cond := range m.vmInstance.Status.Conditions { + if cond.Type == kubevirtv1.VirtualMachineInstanceReady && + cond.Status == corev1.ConditionTrue { + return true } } - return nil + return false } // Address returns the IP address of the VM. @@ -114,31 +123,40 @@ func (m *Machine) Address() string { return "" } -// IsBooted checks if the VM is booted. 
-func (m *Machine) IsBooted(executor CommandExecutor) bool { - if m.Address() == "" { - return false - } - - output, err := executor.ExecuteCommand("hostname") - if err != nil || output != m.machineContext.KubevirtMachine.Name { +// IsReady checks if the VM is ready +func (m *Machine) IsReady() bool { + if !m.hasReadyCondition() { return false } return true } +// SupportsCheckingIsBootstrapped checks if we have a method of checking +// that this bootstrapper has completed. +func (m *Machine) SupportsCheckingIsBootstrapped() bool { + // Right now, we can only check if bootstrapping has + // completed if we are using a bootstrapper that allows + // for us to inject ssh keys into the guest. + + if m.sshKeys != nil { + return m.machineContext.HasInjectedCapkSSHKeys(m.sshKeys.PublicKey) + } + return false +} + // IsBootstrapped checks if the VM is bootstrapped with Kubernetes. -func (m *Machine) IsBootstrapped(executor CommandExecutor) bool { - if !m.IsBooted(executor) { +func (m *Machine) IsBootstrapped() bool { + if !m.IsReady() { return false } + executor := m.getCommandExecutor(m.Address(), m.sshKeys) + output, err := executor.ExecuteCommand("cat /run/cluster-api/bootstrap-success.complete") if err != nil || output != "success" { return false } - return true } @@ -152,3 +170,21 @@ func (m *Machine) GenerateProviderID() (string, error) { return providerID, nil } + +// Delete deletes VM for this machine. 
+func (m *Machine) Delete() error { + namespacedName := types.NamespacedName{Namespace: m.machineContext.KubevirtMachine.Namespace, Name: m.machineContext.KubevirtMachine.Name} + vm := &kubevirtv1.VirtualMachine{} + if err := m.client.Get(m.machineContext.Context, namespacedName, vm); err != nil { + if apierrors.IsNotFound(err) { + m.machineContext.Logger.Info(fmt.Sprintf("VM does not exist, nothing to do.")) + return nil + } + } + + if err := m.client.Delete(gocontext.Background(), vm); err != nil { + return errors.Wrapf(err, "failed to delete VM") + } + + return nil +} diff --git a/pkg/kubevirt/machine_test.go b/pkg/kubevirt/machine_test.go index 3fa897908..cb4c0ab73 100644 --- a/pkg/kubevirt/machine_test.go +++ b/pkg/kubevirt/machine_test.go @@ -20,6 +20,7 @@ import ( gocontext "context" "fmt" + "sigs.k8s.io/cluster-api-provider-kubevirt/pkg/ssh" "sigs.k8s.io/cluster-api-provider-kubevirt/pkg/testing" . "github.com/onsi/ginkgo" @@ -41,6 +42,8 @@ var ( kubevirtCluster = testing.NewKubevirtCluster(clusterName, kubevirtClusterName) cluster = testing.NewCluster(clusterName, kubevirtCluster) + sshKey = "ssh-rsa 1234" + machineName = "test-machine" kubevirtMachineName = "test-kubevirt-machine" kubevirtMachine = testing.NewKubevirtMachine(kubevirtMachineName, machineName) @@ -48,12 +51,15 @@ var ( virtualMachineInstance = testing.NewVirtualMachineInstance(kubevirtMachine) + bootstrapDataSecret = testing.NewBootstrapDataSecret([]byte(fmt.Sprintf("#cloud-config\n\n%s\n", sshKey))) + machineContext = &context.MachineContext{ - Context: gocontext.TODO(), - Cluster: cluster, - KubevirtCluster: kubevirtCluster, - Machine: machine, - KubevirtMachine: kubevirtMachine, + Context: gocontext.TODO(), + Cluster: cluster, + KubevirtCluster: kubevirtCluster, + Machine: machine, + KubevirtMachine: kubevirtMachine, + BootstrapDataSecret: bootstrapDataSecret, } fakeClient client.Client @@ -61,6 +67,7 @@ var ( ) var _ = Describe("Without KubeVirt VM running", func() { + BeforeEach(func() 
{ objects := []client.Object{ cluster, @@ -77,7 +84,7 @@ var _ = Describe("Without KubeVirt VM running", func() { AfterEach(func() {}) It("NewMachine should have client and machineContext set, but vmInstance equal nil", func() { - externalMachine, err := NewMachine(machineContext, fakeClient) + externalMachine, err := defaultTestMachine(machineContext, fakeClient, fakeVMCommandExecutor, []byte{}) Expect(err).NotTo(HaveOccurred()) Expect(externalMachine.client).To(Equal(fakeClient)) Expect(externalMachine.machineContext).To(Equal(machineContext)) @@ -85,31 +92,37 @@ var _ = Describe("Without KubeVirt VM running", func() { }) It("Exists should return false", func() { - externalMachine, err := NewMachine(machineContext, fakeClient) + externalMachine, err := defaultTestMachine(machineContext, fakeClient, fakeVMCommandExecutor, []byte{}) Expect(err).NotTo(HaveOccurred()) Expect(externalMachine.Exists()).To(BeFalse()) }) It("Address should return ''", func() { - externalMachine, err := NewMachine(machineContext, fakeClient) + externalMachine, err := defaultTestMachine(machineContext, fakeClient, fakeVMCommandExecutor, []byte{}) Expect(err).NotTo(HaveOccurred()) Expect(externalMachine.Address()).To(Equal("")) }) - It("IsBooted should return false", func() { - externalMachine, err := NewMachine(machineContext, fakeClient) + It("IsReady should return false", func() { + externalMachine, err := defaultTestMachine(machineContext, fakeClient, fakeVMCommandExecutor, []byte{}) Expect(err).NotTo(HaveOccurred()) - Expect(externalMachine.IsBooted(fakeVMCommandExecutor)).To(BeFalse()) + Expect(externalMachine.IsReady()).To(BeFalse()) }) It("IsBootstrapped should return false", func() { - externalMachine, err := NewMachine(machineContext, fakeClient) + externalMachine, err := defaultTestMachine(machineContext, fakeClient, fakeVMCommandExecutor, []byte{}) + Expect(err).NotTo(HaveOccurred()) + Expect(externalMachine.IsBootstrapped()).To(BeFalse()) + }) + + 
It("SupportsCheckingIsBootstrapped should return false", func() { + externalMachine, err := defaultTestMachine(machineContext, fakeClient, fakeVMCommandExecutor, []byte{}) Expect(err).NotTo(HaveOccurred()) - Expect(externalMachine.IsBootstrapped(fakeVMCommandExecutor)).To(BeFalse()) + Expect(externalMachine.SupportsCheckingIsBootstrapped()).To(BeFalse()) }) It("GenerateProviderID should fail", func() { - externalMachine, err := NewMachine(machineContext, fakeClient) + externalMachine, err := defaultTestMachine(machineContext, fakeClient, fakeVMCommandExecutor, []byte{}) Expect(err).NotTo(HaveOccurred()) providerId, err := externalMachine.GenerateProviderID() Expect(err).To(HaveOccurred()) @@ -119,6 +132,12 @@ var _ = Describe("Without KubeVirt VM running", func() { var _ = Describe("With KubeVirt VM running", func() { BeforeEach(func() { + virtualMachineInstance.Status.Conditions = []kubevirtv1.VirtualMachineInstanceCondition{ + { + Type: kubevirtv1.VirtualMachineInstanceReady, + Status: corev1.ConditionTrue, + }, + } objects := []client.Object{ cluster, kubevirtCluster, @@ -135,7 +154,7 @@ var _ = Describe("With KubeVirt VM running", func() { AfterEach(func() {}) It("NewMachine should have all client, machineContext and vmInstance NOT nil", func() { - externalMachine, err := NewMachine(machineContext, fakeClient) + externalMachine, err := defaultTestMachine(machineContext, fakeClient, fakeVMCommandExecutor, []byte(sshKey)) Expect(err).NotTo(HaveOccurred()) Expect(externalMachine.client).ToNot(BeNil()) Expect(externalMachine.machineContext).To(Equal(machineContext)) @@ -143,33 +162,39 @@ var _ = Describe("With KubeVirt VM running", func() { }) It("Exists should return true", func() { - externalMachine, err := NewMachine(machineContext, fakeClient) + externalMachine, err := defaultTestMachine(machineContext, fakeClient, fakeVMCommandExecutor, []byte(sshKey)) Expect(err).NotTo(HaveOccurred()) Expect(externalMachine.Exists()).To(BeTrue()) }) It("Address should return 
non-empty IP", func() { - externalMachine, err := NewMachine(machineContext, fakeClient) + externalMachine, err := defaultTestMachine(machineContext, fakeClient, fakeVMCommandExecutor, []byte(sshKey)) Expect(err).NotTo(HaveOccurred()) Expect(externalMachine.Address()).To(Equal(virtualMachineInstance.Status.Interfaces[0].IP)) }) - It("IsBooted should return true", func() { - externalMachine, err := NewMachine(machineContext, fakeClient) + It("IsReady should return true", func() { + externalMachine, err := defaultTestMachine(machineContext, fakeClient, fakeVMCommandExecutor, []byte(sshKey)) Expect(err).NotTo(HaveOccurred()) - Expect(externalMachine.IsBooted(fakeVMCommandExecutor)).To(BeTrue()) + Expect(externalMachine.IsReady()).To(BeTrue()) }) It("IsBootstrapped should return true", func() { - externalMachine, err := NewMachine(machineContext, fakeClient) + externalMachine, err := defaultTestMachine(machineContext, fakeClient, fakeVMCommandExecutor, []byte(sshKey)) + Expect(err).NotTo(HaveOccurred()) + Expect(externalMachine.IsBootstrapped()).To(BeTrue()) + }) + + It("SupportsCheckingIsBootstrapped should return true", func() { + externalMachine, err := defaultTestMachine(machineContext, fakeClient, fakeVMCommandExecutor, []byte(sshKey)) Expect(err).NotTo(HaveOccurred()) - Expect(externalMachine.IsBootstrapped(fakeVMCommandExecutor)).To(BeTrue()) + Expect(externalMachine.SupportsCheckingIsBootstrapped()).To(BeTrue()) }) It("GenerateProviderID should succeed", func() { expectedProviderId := fmt.Sprintf("kubevirt://%s", kubevirtMachineName) - externalMachine, err := NewMachine(machineContext, fakeClient) + externalMachine, err := defaultTestMachine(machineContext, fakeClient, fakeVMCommandExecutor, []byte(sshKey)) Expect(err).NotTo(HaveOccurred()) providerId, err := externalMachine.GenerateProviderID() Expect(providerId).To(Equal(expectedProviderId)) @@ -212,3 +237,14 @@ func (e FakeVMCommandExecutor) ExecuteCommand(command string) (string, error) { return "", 
errors.New("unexpected input argument") } } + +func defaultTestMachine(ctx *context.MachineContext, client client.Client, vmExecutor FakeVMCommandExecutor, sshPubKey []byte) (*Machine, error) { + + machine, err := NewMachine(ctx, client, ctx.Cluster.Namespace, &ssh.ClusterNodeSshKeys{PublicKey: sshPubKey}) + + machine.getCommandExecutor = func(fake string, fakeKeys *ssh.ClusterNodeSshKeys) ssh.VMCommandExecutor { + return vmExecutor + } + + return machine, err +} diff --git a/pkg/kubevirt/utils.go b/pkg/kubevirt/utils.go index 9974c5ddb..ff3e929e7 100644 --- a/pkg/kubevirt/utils.go +++ b/pkg/kubevirt/utils.go @@ -35,7 +35,7 @@ type CommandExecutor interface { } // newVirtualMachineFromKubevirtMachine creates VirtualMachine instance. -func newVirtualMachineFromKubevirtMachine(ctx *context.MachineContext) *kubevirtv1.VirtualMachine { +func newVirtualMachineFromKubevirtMachine(ctx *context.MachineContext, namespace string) *kubevirtv1.VirtualMachine { runAlways := kubevirtv1.RunStrategyAlways vmiTemplate := buildVirtualMachineInstanceTemplate(ctx) @@ -51,7 +51,7 @@ func newVirtualMachineFromKubevirtMachine(ctx *context.MachineContext) *kubevirt virtualMachine.ObjectMeta = metav1.ObjectMeta{ Name: ctx.KubevirtMachine.Name, - Namespace: ctx.KubevirtMachine.Namespace, + Namespace: namespace, Labels: map[string]string{ "kubevirt.io/vm": ctx.KubevirtMachine.Name, clusterLabelKey: ctx.KubevirtCluster.Name, diff --git a/pkg/loadbalancer/loadbalancer.go b/pkg/loadbalancer/loadbalancer.go index 567756877..080a408e7 100644 --- a/pkg/loadbalancer/loadbalancer.go +++ b/pkg/loadbalancer/loadbalancer.go @@ -18,6 +18,8 @@ package loadbalancer import ( "fmt" + "github.com/pkg/errors" + clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha4" corev1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" @@ -25,38 +27,33 @@ import ( "k8s.io/apimachinery/pkg/util/intstr" infrav1 "sigs.k8s.io/cluster-api-provider-kubevirt/api/v1alpha4" 
"sigs.k8s.io/cluster-api-provider-kubevirt/pkg/context" - clusterutil "sigs.k8s.io/cluster-api/util" runtimeclient "sigs.k8s.io/controller-runtime/pkg/client" ctrlutil "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" "sigs.k8s.io/kind/pkg/cluster/constants" ) -//type lbCreator interface { -// CreateExternalLoadBalancerNode(name, image, clusterLabel, listenAddress string, port int32) (*types.Node, error) -//} - // LoadBalancer manages the load balancer for a specific KubeVirt cluster. type LoadBalancer struct { name string service *corev1.Service - client runtimeclient.Client kubevirtCluster *infrav1.KubevirtCluster + infraClient runtimeclient.Client + infraNamespace string } // NewLoadBalancer returns a new helper for managing a mock load-balancer (using service). -func NewLoadBalancer(ctx *context.ClusterContext, client runtimeclient.Client) (*LoadBalancer, error) { +func NewLoadBalancer(ctx *context.ClusterContext, client runtimeclient.Client, namespace string) (*LoadBalancer, error) { name := ctx.KubevirtCluster.Name + "-lb" // Look for the service that is mocking the load-balancer for the cluster. // Filter based on the label and the roles regardless of whether or not it is running. 
loadBalancer := &corev1.Service{} loadBalancerKey := runtimeclient.ObjectKey{ - Namespace: ctx.KubevirtCluster.Namespace, + Namespace: namespace, Name: name, } if err := client.Get(ctx.Context, loadBalancerKey, loadBalancer); err != nil { if apierrors.IsNotFound(err) { loadBalancer = nil - ctx.Logger.Info("No load balancer found") } else { return nil, err } @@ -65,21 +62,27 @@ func NewLoadBalancer(ctx *context.ClusterContext, client runtimeclient.Client) ( return &LoadBalancer{ name: name, service: loadBalancer, - client: client, kubevirtCluster: ctx.KubevirtCluster, + infraClient: client, + infraNamespace: namespace, }, nil } +// IsFound checks if load balancer already exists +func (l *LoadBalancer) IsFound() bool { + return l.service != nil +} + // Create creates a service of ClusterIP type to serve as a load-balancer for the cluster. func (l *LoadBalancer) Create(ctx *context.ClusterContext) error { // Skip creation if exists. - if l.service != nil { + if l.IsFound() { return fmt.Errorf("the load balancer service already exists") } lbService := &corev1.Service{ ObjectMeta: metav1.ObjectMeta{ - Namespace: l.kubevirtCluster.Namespace, + Namespace: l.infraNamespace, Name: l.name, }, Spec: corev1.ServiceSpec{ @@ -97,34 +100,28 @@ func (l *LoadBalancer) Create(ctx *context.ClusterContext) error { }, } mutateFn := func() (err error) { - lbService.SetOwnerReferences(clusterutil.EnsureOwnerRef( - lbService.OwnerReferences, - metav1.OwnerReference{ - APIVersion: l.kubevirtCluster.APIVersion, - Kind: l.kubevirtCluster.Kind, - Name: l.kubevirtCluster.Name, - UID: l.kubevirtCluster.UID, - })) + if lbService.Labels == nil { + lbService.Labels = map[string]string{} + } + lbService.Labels[clusterv1.ClusterLabelName] = ctx.Cluster.Name + return nil } - if _, err := ctrlutil.CreateOrUpdate(ctx, l.client, lbService, mutateFn); err != nil { + if _, err := ctrlutil.CreateOrUpdate(ctx.Context, l.infraClient, lbService, mutateFn); err != nil { return errors.Wrapf(err, "failed to create load balancer service") }
return nil } -func (l *LoadBalancer) IsFound() bool { - return l.service != nil -} - +// IP returns ip address of the load balancer func (l *LoadBalancer) IP(ctx *context.ClusterContext) (string, error) { loadBalancer := &corev1.Service{} loadBalancerKey := runtimeclient.ObjectKey{ - Namespace: l.kubevirtCluster.Namespace, + Namespace: l.infraNamespace, Name: l.name, } - if err := l.client.Get(ctx.Context, loadBalancerKey, loadBalancer); err != nil { + if err := l.infraClient.Get(ctx.Context, loadBalancerKey, loadBalancer); err != nil { return "", err } @@ -134,3 +131,16 @@ func (l *LoadBalancer) IP(ctx *context.ClusterContext) (string, error) { return loadBalancer.Spec.ClusterIP, nil } + +// Delete deletes load-balancer service. +func (l *LoadBalancer) Delete(ctx *context.ClusterContext) error { + if !l.IsFound() { + return nil + } + + if err := l.infraClient.Delete(ctx, l.service); err != nil { + return errors.Wrapf(err, "failed to delete load balancer service") + } + + return nil +} diff --git a/pkg/loadbalancer/loadbalancer_test.go b/pkg/loadbalancer/loadbalancer_test.go index b4505634f..2f8715bd8 100644 --- a/pkg/loadbalancer/loadbalancer_test.go +++ b/pkg/loadbalancer/loadbalancer_test.go @@ -67,7 +67,7 @@ var _ = Describe("Load Balancer", func() { }) It("should initialize load balancer without error", func() { - lb, err = loadbalancer.NewLoadBalancer(clusterContext, fakeClient) + lb, err = loadbalancer.NewLoadBalancer(clusterContext, fakeClient, "") Expect(err).NotTo(HaveOccurred()) }) @@ -97,7 +97,7 @@ var _ = Describe("Load Balancer", func() { }) It("should initialize load balancer without error", func() { - lb, err = loadbalancer.NewLoadBalancer(clusterContext, fakeClient) + lb, err = loadbalancer.NewLoadBalancer(clusterContext, fakeClient, "") Expect(err).NotTo(HaveOccurred()) }) diff --git a/pkg/ssh/cluster_node_ssh_keys.go b/pkg/ssh/cluster_node_ssh_keys.go index 2dbe6ca8d..c3ee80298 100644 --- a/pkg/ssh/cluster_node_ssh_keys.go +++ 
b/pkg/ssh/cluster_node_ssh_keys.go @@ -94,6 +94,12 @@ func (c *ClusterNodeSshKeys) PersistKeysToSecret() (*corev1.Secret, error) { Name: c.ClusterContext.KubevirtCluster.Name, UID: c.ClusterContext.KubevirtCluster.UID, })) + + if newSecret.Labels == nil { + newSecret.Labels = map[string]string{} + } + newSecret.Labels[clusterv1.ClusterLabelName] = c.ClusterContext.Cluster.Name + return nil } if _, err := controllerutil.CreateOrUpdate(c.ClusterContext.Context, c.Client, newSecret, mutateFn); err != nil { diff --git a/pkg/ssh/ssh_command_executor.go b/pkg/ssh/ssh_command_executor.go index 554de4d58..7505664b7 100644 --- a/pkg/ssh/ssh_command_executor.go +++ b/pkg/ssh/ssh_command_executor.go @@ -25,14 +25,26 @@ import ( "golang.org/x/crypto/ssh" ) -type VMCommandExecutor struct { +type VMCommandExecutor interface { + ExecuteCommand(string) (string, error) +} + +type vmCommandExecutor struct { IPAddress string PublicKey []byte PrivateKey []byte } +func NewVMCommandExecutor(address string, keys *ClusterNodeSshKeys) VMCommandExecutor { + return vmCommandExecutor{ + IPAddress: address, + PublicKey: keys.PublicKey, + PrivateKey: keys.PrivateKey, + } +} + // ExecuteCommand runs command inside a VM, via SSH, and returns the command output. 
-func (e VMCommandExecutor) ExecuteCommand(command string) (string, error) { +func (e vmCommandExecutor) ExecuteCommand(command string) (string, error) { // create signer signer, err := signerFromPem(e.PrivateKey, []byte("")) @@ -50,24 +62,22 @@ func (e VMCommandExecutor) ExecuteCommand(command string) (string, error) { connection, err := ssh.Dial("tcp", hostAddress, sshConfig) if err != nil { - return "", fmt.Errorf("failed to dial IP %s: %s", hostAddress, err) + return "", fmt.Errorf("ssh: failed to dial IP %s, error: %s", hostAddress, err.Error()) } session, err := connection.NewSession() if err != nil { - return "", fmt.Errorf("failed to create session: %s", err) + return "", fmt.Errorf("ssh: failed to create session, error: %s", err.Error()) } defer session.Close() - // ctx.Logger.Info(fmt.Sprintf("ssh: running command inside VM `%s`...", command)) var b bytes.Buffer session.Stdout = &b if err := session.Run(command); err != nil { - return "", fmt.Errorf("failed to run the command: " + err.Error()) + return "", fmt.Errorf("ssh: failed to run command `%s`, error: %s", command, err.Error()) } output := strings.Trim(b.String(), "\n") - // ctx.Logger.Info(fmt.Sprintf("ssh: command `%s` output is `%s`", command, output)) return output, nil } diff --git a/pkg/testing/common.go b/pkg/testing/common.go index fe08adb40..eb63e1ec2 100644 --- a/pkg/testing/common.go +++ b/pkg/testing/common.go @@ -99,3 +99,10 @@ func NewVirtualMachineInstance(kubevirtMachine *infrav1.KubevirtMachine) *kubevi }, } } + +func NewBootstrapDataSecret(userData []byte) *corev1.Secret { + s := &corev1.Secret{} + s.Data = make(map[string][]byte) + s.Data["userdata"] = userData + return s +} diff --git a/templates/cluster-template-ext-infra.yaml b/templates/cluster-template-ext-infra.yaml new file mode 100644 index 000000000..2cc134a1d --- /dev/null +++ b/templates/cluster-template-ext-infra.yaml @@ -0,0 +1,143 @@ +--- +apiVersion: cluster.x-k8s.io/v1alpha4 +kind: Cluster +metadata: + name: 
"${CLUSTER_NAME}" + namespace: "${NAMESPACE}" +spec: + clusterNetwork: + pods: + cidrBlocks: + - 10.244.0.0/16 + infrastructureRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1alpha4 + kind: KubevirtCluster + name: '${CLUSTER_NAME}' + namespace: "${NAMESPACE}" + controlPlaneRef: + apiVersion: controlplane.cluster.x-k8s.io/v1alpha4 + kind: KubeadmControlPlane + name: '${CLUSTER_NAME}-control-plane' + namespace: "${NAMESPACE}" +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1alpha4 +kind: KubevirtCluster +metadata: + name: "${CLUSTER_NAME}" + namespace: "${NAMESPACE}" +spec: + infraClusterSecretRef: + apiVersion: v1 + kind: Secret + name: external-infra-kubeconfig + namespace: capk-system +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1alpha4 +kind: KubevirtMachineTemplate +metadata: + name: "${CLUSTER_NAME}-control-plane" + namespace: "${NAMESPACE}" +spec: + template: + spec: + vmSpec: + domain: + cpu: + cores: 2 + memory: + guest: "4Gi" + devices: + disks: + - disk: + bus: virtio + name: containervolume + volumes: + - containerDisk: + image: "${NODE_VM_IMAGE_TEMPLATE}" + name: containervolume +--- +kind: KubeadmControlPlane +apiVersion: controlplane.cluster.x-k8s.io/v1alpha4 +metadata: + name: "${CLUSTER_NAME}-control-plane" + namespace: "${NAMESPACE}" +spec: + replicas: ${CONTROL_PLANE_MACHINE_COUNT} + machineTemplate: + infrastructureRef: + kind: KubevirtMachineTemplate + apiVersion: infrastructure.cluster.x-k8s.io/v1alpha4 + name: "${CLUSTER_NAME}-control-plane" + namespace: "${NAMESPACE}" + kubeadmConfigSpec: + clusterConfiguration: + imageRepository: ${IMAGE_REPO} + initConfiguration: + nodeRegistration: + criSocket: "${CRI_PATH}" + joinConfiguration: + nodeRegistration: + criSocket: "${CRI_PATH}" + version: "${KUBERNETES_VERSION}" +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1alpha4 +kind: KubevirtMachineTemplate +metadata: + name: "${CLUSTER_NAME}-md-0" + namespace: "${NAMESPACE}" +spec: + template: + spec: + vmSpec: + domain: + cpu: + cores:
2 + memory: + guest: "4Gi" + devices: + disks: + - disk: + bus: virtio + name: containervolume + volumes: + - containerDisk: + image: "${NODE_VM_IMAGE_TEMPLATE}" + name: containervolume +--- +apiVersion: bootstrap.cluster.x-k8s.io/v1alpha4 +kind: KubeadmConfigTemplate +metadata: + name: "${CLUSTER_NAME}-md-0" + namespace: "${NAMESPACE}" +spec: + template: + spec: + joinConfiguration: + nodeRegistration: + kubeletExtraArgs: {} +--- +apiVersion: cluster.x-k8s.io/v1alpha4 +kind: MachineDeployment +metadata: + name: "${CLUSTER_NAME}-md-0" + namespace: "${NAMESPACE}" +spec: + clusterName: "${CLUSTER_NAME}" + replicas: ${WORKER_MACHINE_COUNT} + selector: + matchLabels: + template: + spec: + clusterName: "${CLUSTER_NAME}" + version: "${KUBERNETES_VERSION}" + bootstrap: + configRef: + name: "${CLUSTER_NAME}-md-0" + namespace: "${NAMESPACE}" + apiVersion: bootstrap.cluster.x-k8s.io/v1alpha4 + kind: KubeadmConfigTemplate + infrastructureRef: + name: "${CLUSTER_NAME}-md-0" + namespace: "${NAMESPACE}" + apiVersion: infrastructure.cluster.x-k8s.io/v1alpha4 + kind: KubevirtMachineTemplate