7 changes: 6 additions & 1 deletion Makefile
@@ -95,7 +95,7 @@ token-minter:
# Run this when updating any of the types in the api package to regenerate the
# deepcopy code and CRD manifest files.
.PHONY: api
api: hypershift-api cluster-api cluster-api-provider-aws cluster-api-provider-ibmcloud api-docs
api: hypershift-api cluster-api cluster-api-provider-aws cluster-api-provider-ibmcloud cluster-api-provider-kubevirt api-docs

.PHONY: hypershift-api
hypershift-api: $(CONTROLLER_GEN)
@@ -121,6 +121,11 @@ cluster-api-provider-ibmcloud: $(CONTROLLER_GEN)
rm -rf cmd/install/assets/cluster-api-provider-ibmcloud/*.yaml
$(CONTROLLER_GEN) $(CRD_OPTIONS) paths="./vendor/sigs.k8s.io/cluster-api-provider-ibmcloud/api/v1beta1" output:crd:artifacts:config=cmd/install/assets/cluster-api-provider-ibmcloud

.PHONY: cluster-api-provider-kubevirt
cluster-api-provider-kubevirt: $(CONTROLLER_GEN)
rm -rf cmd/install/assets/cluster-api-provider-kubevirt/*.yaml
$(CONTROLLER_GEN) $(CRD_OPTIONS) paths="./vendor/sigs.k8s.io/cluster-api-provider-kubevirt/api/v1alpha1" output:crd:artifacts:config=cmd/install/assets/cluster-api-provider-kubevirt

.PHONY: api-docs
api-docs: $(GENAPIDOCS)
hack/gen-api-docs.sh $(GENAPIDOCS) $(DIR)
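As a usage note, the new target is wired into the aggregate api target above, so after bumping the vendored KubeVirt provider API the CRD manifests can be regenerated with either the aggregate or the provider-specific target:

    make api
    make cluster-api-provider-kubevirt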
56 changes: 56 additions & 0 deletions api/fixtures/example.go
@@ -9,6 +9,9 @@ import (

hyperv1 "github.com/openshift/hypershift/api/v1alpha1"

apiresource "k8s.io/apimachinery/pkg/api/resource"
kubevirtv1 "kubevirt.io/api/core/v1"
capikubevirt "sigs.k8s.io/cluster-api-provider-kubevirt/api/v1alpha1"
crclient "sigs.k8s.io/controller-runtime/pkg/client"
)

@@ -67,6 +70,7 @@ type ExampleOptions struct {
AWS *ExampleAWSOptions
None *ExampleNoneOptions
Agent *ExampleAgentOptions
Kubevirt *ExampleKubevirtOptions
NetworkType hyperv1.NetworkType
ControlPlaneAvailabilityPolicy hyperv1.AvailabilityPolicy
InfrastructureAvailabilityPolicy hyperv1.AvailabilityPolicy
@@ -80,6 +84,13 @@ type ExampleAgentOptions struct {
APIServerAddress string
}

type ExampleKubevirtOptions struct {
APIServerAddress string
Memory string
Cores uint32
Image string
}

type ExampleAWSOptions struct {
Region string
Zone string
@@ -236,6 +247,11 @@ web_identity_token_file = /var/run/secrets/openshift/serviceaccount/token
Type: hyperv1.AgentPlatform,
}
services = o.getServicePublishingStrategyMappingByAPIServerAddress(o.Agent.APIServerAddress)
case o.Kubevirt != nil:
platformSpec = hyperv1.PlatformSpec{
Type: hyperv1.KubevirtPlatform,
}
services = o.getServicePublishingStrategyMappingByAPIServerAddress(o.Kubevirt.APIServerAddress)

default:
panic("no platform specified")
@@ -340,6 +356,46 @@ web_identity_token_file = /var/run/secrets/openshift/serviceaccount/token
IOPS: o.AWS.RootVolumeIOPS,
},
}
case hyperv1.KubevirtPlatform:
runAlways := kubevirtv1.RunStrategyAlways
guestQuantity := apiresource.MustParse(o.Kubevirt.Memory)
nodePool.Spec.Platform.Kubevirt = &hyperv1.KubevirtNodePoolPlatform{
NodeTemplate: &capikubevirt.VirtualMachineTemplateSpec{
Spec: kubevirtv1.VirtualMachineSpec{
RunStrategy: &runAlways,
Contributor: FYI, in the future we might need to reconsider RunStrategy. With runStrategy: Always, the KubeVirt VM could crash and recover with a new IP. If we need to avoid that, we'll have to consider a way to support runStrategy: Manual. This is fine for now; just be aware that there are likely some follow-up issues for us here. (A hedged sketch of selecting the Manual strategy appears after this file's diff.)

Contributor: In general I would even consider changing this in KubeVirt itself, so that we restart the VMI in place. Rescheduling has a lot of overhead; all other Kubernetes controllers avoid it and restart in place. It potentially affects how thorough your monitoring has to be, though.

Template: &kubevirtv1.VirtualMachineInstanceTemplateSpec{
Spec: kubevirtv1.VirtualMachineInstanceSpec{
Domain: kubevirtv1.DomainSpec{
CPU: &kubevirtv1.CPU{Cores: o.Kubevirt.Cores},
Memory: &kubevirtv1.Memory{Guest: &guestQuantity},
Devices: kubevirtv1.Devices{
Disks: []kubevirtv1.Disk{
{
Name: "containervolume",
DiskDevice: kubevirtv1.DiskDevice{
Disk: &kubevirtv1.DiskTarget{
Bus: "virtio",
},
},
},
},
},
},
Volumes: []kubevirtv1.Volume{
{
Name: "containervolume",
VolumeSource: kubevirtv1.VolumeSource{
ContainerDisk: &kubevirtv1.ContainerDiskSource{
Image: o.Kubevirt.Image,
},
},
},
},
},
},
},
},
}
}
}

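To make the RunStrategy discussion above concrete, here is a minimal sketch (not part of this PR) of how the fixture could choose a different strategy. The runStrategyName parameter is hypothetical; kubevirtv1.RunStrategyAlways and kubevirtv1.RunStrategyManual are existing constants in kubevirt.io/api/core/v1.

    package fixtures

    import kubevirtv1 "kubevirt.io/api/core/v1"

    // runStrategyFor is a sketch only: it maps a hypothetical option string onto a
    // KubeVirt run strategy, defaulting to Always (the behaviour this PR uses).
    func runStrategyFor(runStrategyName string) *kubevirtv1.VirtualMachineRunStrategy {
        rs := kubevirtv1.RunStrategyAlways
        if runStrategyName == "Manual" {
            // Manual avoids the automatic restart (and possible new IP) noted in the
            // review comment; restarts would then have to be triggered explicitly.
            rs = kubevirtv1.RunStrategyManual
        }
        return &rs
    }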
2 changes: 2 additions & 0 deletions api/scheme.go
@@ -17,6 +17,7 @@ import (
clientgoscheme "k8s.io/client-go/kubernetes/scheme"
capiaws "sigs.k8s.io/cluster-api-provider-aws/api/v1beta1"
capiibm "sigs.k8s.io/cluster-api-provider-ibmcloud/api/v1beta1"
capikubevirt "sigs.k8s.io/cluster-api-provider-kubevirt/api/v1alpha1"
capiv1 "sigs.k8s.io/cluster-api/api/v1beta1"
)

@@ -50,4 +51,5 @@ func init() {
kasv1beta1.AddToScheme(Scheme)
prometheusoperatorv1.AddToScheme(Scheme)
agentv1.AddToScheme(Scheme)
capikubevirt.AddToScheme(Scheme)
}
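For context (not part of this PR), registering the capikubevirt types in the shared Scheme is what lets a controller-runtime client built from it work with the provider's objects. A minimal sketch, assuming an existing *rest.Config:

    package example

    import (
        "context"

        hyperapi "github.com/openshift/hypershift/api"
        "k8s.io/client-go/rest"
        capikubevirt "sigs.k8s.io/cluster-api-provider-kubevirt/api/v1alpha1"
        crclient "sigs.k8s.io/controller-runtime/pkg/client"
    )

    // listKubevirtMachineTemplates is a sketch only: once capikubevirt.AddToScheme has
    // run, a client built from the shared Scheme can list KubevirtMachineTemplate objects.
    func listKubevirtMachineTemplates(ctx context.Context, cfg *rest.Config) (*capikubevirt.KubevirtMachineTemplateList, error) {
        c, err := crclient.New(cfg, crclient.Options{Scheme: hyperapi.Scheme})
        if err != nil {
            return nil, err
        }
        list := &capikubevirt.KubevirtMachineTemplateList{}
        if err := c.List(ctx, list); err != nil {
            return nil, err
        }
        return list, nil
    }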
5 changes: 4 additions & 1 deletion api/v1alpha1/hostedcluster_types.go
@@ -384,7 +384,7 @@ const (

// PlatformType is a specific supported infrastructure provider.
//
// +kubebuilder:validation:Enum=AWS;None;IBMCloud;Agent
// +kubebuilder:validation:Enum=AWS;None;IBMCloud;Agent;KubeVirt
type PlatformType string

const (
@@ -399,6 +399,9 @@ const (

// AgentPlatform represents user supplied insfrastructure booted with agents.
AgentPlatform PlatformType = "Agent"

// KubevirtPlatform represents Kubevirt infrastructure.
KubevirtPlatform PlatformType = "KubeVirt"
)

// PlatformSpec specifies the underlying infrastructure provider for the cluster
14 changes: 14 additions & 0 deletions api/v1alpha1/nodepool_types.go
@@ -4,6 +4,7 @@ import (
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/intstr"
capikubevirt "sigs.k8s.io/cluster-api-provider-kubevirt/api/v1alpha1"
)

const (
@@ -286,6 +287,19 @@ type NodePoolPlatform struct {

// IBMCloud defines IBMCloud specific settings for components
IBMCloud *IBMCloudPlatformSpec `json:"ibmcloud,omitempty"`

// Kubevirt specifies the configuration used when operating on KubeVirt platform.
//
// +optional
// +immutable
Kubevirt *KubevirtNodePoolPlatform `json:"kubevirt,omitempty"`
}

// KubevirtNodePoolPlatform specifies the configuration of a NodePool when operating
// on KubeVirt platform.
type KubevirtNodePoolPlatform struct {
// NodeTemplate Spec contains the VirtualMachineInstance specification.
Member (@enxebre, Dec 23, 2021): I don't have strong opinions here, but curious: have you considered passing kubevirtv1.VirtualMachineSpec instead (do you really need to expose the ObjectMeta)? Have you considered embedding the type? If not embedded, have you considered the naming NodeTemplate vs. VirtualMachineTemplate vs. Template?

Member (@enxebre): Also, should NodeTemplate be required?

Member (@enxebre): Ignore the required comment; it's already required unless indicated otherwise.

Contributor (@rmohr, Dec 23, 2021): I think being able to set annotations and labels is pretty important, but it is also KubeVirt-platform specific, so it makes sense to also have the ObjectMeta.

Contributor: Yes, we need ObjectMeta on the KubeVirt VM itself, independent of the NodePool. Even the Namespace is important to us. Here's one reason why: right now the KubeVirt provider is assumed to be starting KubeVirt VMs within the management cluster, but that's not guaranteed to be the case in the future. We're open to the possibility that the NodePool will create KubeVirt machines in an external cluster, in which case the namespace in the ObjectMeta on the VM actually represents the namespace on the external cluster in which to launch the KubeVirt VM. (An illustrative sketch of a NodeTemplate with ObjectMeta populated appears after this file's diff.)

NodeTemplate *capikubevirt.VirtualMachineTemplateSpec `json:"nodeTemplate,omitempty"`
Member: I don't think we should omitempty here. Not a blocker.

}

// AWSNodePoolPlatform specifies the configuration of a NodePool when operating
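To illustrate the ObjectMeta discussion above, here is a hedged sketch (not part of this PR; all field values are made up) of a NodeTemplate that carries labels, annotations, and a namespace, which a bare kubevirtv1.VirtualMachineSpec could not express:

    package example

    import (
        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        kubevirtv1 "kubevirt.io/api/core/v1"
        capikubevirt "sigs.k8s.io/cluster-api-provider-kubevirt/api/v1alpha1"
    )

    // exampleNodeTemplate is illustrative only; the label, annotation, and namespace
    // values are invented for this sketch.
    func exampleNodeTemplate() *capikubevirt.VirtualMachineTemplateSpec {
        return &capikubevirt.VirtualMachineTemplateSpec{
            ObjectMeta: metav1.ObjectMeta{
                // In a future external-cluster scenario, this would be the namespace
                // on the external cluster where the VM is launched.
                Namespace:   "guest-vms",
                Labels:      map[string]string{"example.com/nodepool": "my-nodepool"},
                Annotations: map[string]string{"example.com/owner": "hypershift"},
            },
            Spec: kubevirtv1.VirtualMachineSpec{},
        }
    }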
26 changes: 26 additions & 0 deletions api/v1alpha1/zz_generated.deepcopy.go

Some generated files are not rendered by default.

3 changes: 3 additions & 0 deletions cmd/cluster/cluster.go
@@ -7,6 +7,7 @@ import (
"github.com/openshift/hypershift/cmd/cluster/agent"
"github.com/openshift/hypershift/cmd/cluster/aws"
"github.com/openshift/hypershift/cmd/cluster/core"
"github.com/openshift/hypershift/cmd/cluster/kubevirt"
"github.com/openshift/hypershift/cmd/cluster/none"
"github.com/spf13/cobra"
)
@@ -58,6 +59,7 @@ func NewCreateCommands() *cobra.Command {
cmd.AddCommand(aws.NewCreateCommand(opts))
cmd.AddCommand(none.NewCreateCommand(opts))
cmd.AddCommand(agent.NewCreateCommand(opts))
cmd.AddCommand(kubevirt.NewCreateCommand(opts))

return cmd
}
@@ -85,6 +87,7 @@ func NewDestroyCommands() *cobra.Command {
cmd.AddCommand(aws.NewDestroyCommand(opts))
cmd.AddCommand(none.NewDestroyCommand(opts))
cmd.AddCommand(agent.NewDestroyCommand(opts))
cmd.AddCommand(kubevirt.NewDestroyCommand(opts))

return cmd
}
8 changes: 8 additions & 0 deletions cmd/cluster/core/create.go
@@ -52,6 +52,7 @@ type CreateOptions struct {
ServiceCIDR string
PodCIDR string
NonePlatform NonePlatformCreateOptions
KubevirtPlatform KubevirtPlatformCreateOptions
AWSPlatform AWSPlatformOptions
AgentPlatform AgentPlatformCreateOptions
Wait bool
@@ -65,6 +66,13 @@ type NonePlatformCreateOptions struct {
APIServerAddress string
}

type KubevirtPlatformCreateOptions struct {
APIServerAddress string
Memory string
Cores uint32
ContainerDiskImage string
}

type AWSPlatformOptions struct {
AWSCredentialsFile string
AdditionalTags []string
91 changes: 91 additions & 0 deletions cmd/cluster/kubevirt/create.go
@@ -0,0 +1,91 @@
package kubevirt

import (
"context"
"errors"
"fmt"
"os"
"os/signal"
"syscall"

apifixtures "github.com/openshift/hypershift/api/fixtures"
"github.com/openshift/hypershift/cmd/cluster/core"
"github.com/spf13/cobra"
utilrand "k8s.io/apimachinery/pkg/util/rand"
)

func NewCreateCommand(opts *core.CreateOptions) *cobra.Command {
cmd := &cobra.Command{
Use: "kubevirt",
Short: "Creates basic functional HostedCluster resources for KubeVirt platform",
SilenceUsage: true,
}

opts.KubevirtPlatform = core.KubevirtPlatformCreateOptions{
Memory: "4Gi",
Cores: 2,
ContainerDiskImage: "",
}

cmd.Flags().StringVar(&opts.KubevirtPlatform.Memory, "memory", opts.KubevirtPlatform.Memory, "The amount of memory which is visible inside the Guest OS (type BinarySI, e.g. 5Gi, 100Mi)")
cmd.Flags().Uint32Var(&opts.KubevirtPlatform.Cores, "cores", opts.KubevirtPlatform.Cores, "The number of cores inside the vmi, Must be a value greater or equal 1")
Member: > "The number of cores inside the vmi, Must be a value greater or equal 1"
Should we validate this below and fail otherwise? The way it is now, zero would pass through, right?

Contributor (author): Added validation in the applyPlatformSpecificsValues function.

cmd.Flags().StringVar(&opts.KubevirtPlatform.ContainerDiskImage, "containerdisk", opts.KubevirtPlatform.ContainerDiskImage, "A reference to docker image with the embedded disk to be used to create the machines")

cmd.Run = func(cmd *cobra.Command, args []string) {
ctx, cancel := context.WithCancel(context.Background())
sigs := make(chan os.Signal, 1)
signal.Notify(sigs, syscall.SIGINT)
go func() {
<-sigs
cancel()
}()

if err := CreateCluster(ctx, opts); err != nil {
log.Error(err, "Failed to create cluster")
os.Exit(1)
}
}

return cmd
}

func CreateCluster(ctx context.Context, opts *core.CreateOptions) error {
return core.CreateCluster(ctx, opts, applyPlatformSpecificsValues)
}

func applyPlatformSpecificsValues(ctx context.Context, exampleOptions *apifixtures.ExampleOptions, opts *core.CreateOptions) (err error) {
if opts.KubevirtPlatform.APIServerAddress == "" {
if opts.KubevirtPlatform.APIServerAddress, err = core.GetAPIServerAddressByNode(ctx); err != nil {
return err
}
}

if opts.NodePoolReplicas > -1 {
// TODO (nargaman): replace with official container image, after RFE-2501 is completed
// As long as there is no official container image
// The image must be provided by user
// Otherwise it must fail
if opts.KubevirtPlatform.ContainerDiskImage == "" {
return errors.New("the container disk image for the Kubevirt machine must be provided by user (\"--containerdisk\" flag)")
}
}

if opts.KubevirtPlatform.Cores < 1 {
return errors.New("the number of cores inside the machine must be a value greater or equal 1")
}

infraID := opts.InfraID
if len(infraID) == 0 {
infraID = fmt.Sprintf("%s-%s", opts.Name, utilrand.String(5))
}
exampleOptions.InfraID = infraID
exampleOptions.BaseDomain = "example.com"

exampleOptions.Kubevirt = &apifixtures.ExampleKubevirtOptions{
APIServerAddress: opts.KubevirtPlatform.APIServerAddress,
Memory: opts.KubevirtPlatform.Memory,
Cores: opts.KubevirtPlatform.Cores,
Image: opts.KubevirtPlatform.ContainerDiskImage,
}
return nil
}
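A hedged usage sketch tying the new flags together: the kubevirt subcommand and the --memory, --cores, and --containerdisk flags are defined in this file, while the invocation path (hypershift create cluster kubevirt) and the shared flags (--name, --pull-secret, --node-pool-replicas) are assumed from the common create options, which are not shown in this diff.

    hypershift create cluster kubevirt \
      --name example \
      --pull-secret /path/to/pull-secret.json \
      --node-pool-replicas 2 \
      --memory 8Gi \
      --cores 4 \
      --containerdisk quay.io/example/rhcos-containerdisk:latest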