Fixes kubevirt image cacher
Signed-off-by: David Vossel <[email protected]>
davidvossel committed Oct 3, 2024
1 parent cde9ea4 commit 5167a2b
Showing 4 changed files with 48 additions and 28 deletions.
16 changes: 16 additions & 0 deletions hypershift-operator/controllers/nodepool/kubevirt.go
@@ -10,6 +10,22 @@ import (
 	corev1 "k8s.io/api/core/v1"
 )
 
+func (r *NodePoolReconciler) addKubeVirtCacheNameToStatus(kubevirtBootImage kubevirt.BootImage, nodePool *hyperv1.NodePool) {
+	if namer, ok := kubevirtBootImage.(kubevirt.BootImageNamer); ok {
+		if cacheName := namer.GetCacheName(); len(cacheName) > 0 {
+			if nodePool.Status.Platform == nil {
+				nodePool.Status.Platform = &hyperv1.NodePoolPlatformStatus{}
+			}
+
+			if nodePool.Status.Platform.KubeVirt == nil {
+				nodePool.Status.Platform.KubeVirt = &hyperv1.KubeVirtNodePoolStatus{}
+			}
+
+			nodePool.Status.Platform.KubeVirt.CacheName = cacheName
+		}
+	}
+}
+
 func (r *NodePoolReconciler) setKubevirtConditions(ctx context.Context, nodePool *hyperv1.NodePool, hcluster *hyperv1.HostedCluster, controlPlaneNamespace string, releaseImage *releaseinfo.ReleaseImage) error {
 	// moved KubeVirt specific handling up here, so the caching of the boot image will start as early as possible
 	// in order to actually save time. Caching from the original location will take more time, because the VMs can't
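Note on the helper moved into this file: the chain of nil checks allocates each optional level of NodePool status before writing the leaf field, the usual pattern for nested optional status structs in Kubernetes APIs. A minimal, self-contained sketch of the same pattern using stand-in types (not the real HyperShift API):

package main

import "fmt"

// Stand-ins for hyperv1.NodePoolPlatformStatus and hyperv1.KubeVirtNodePoolStatus.
type KubeVirtNodePoolStatus struct{ CacheName string }
type NodePoolPlatformStatus struct{ KubeVirt *KubeVirtNodePoolStatus }
type NodePoolStatus struct{ Platform *NodePoolPlatformStatus }

// setCacheName mirrors addKubeVirtCacheNameToStatus: allocate every nil
// intermediate struct, then record the cache PVC name in status.
func setCacheName(status *NodePoolStatus, cacheName string) {
	if len(cacheName) == 0 {
		return
	}
	if status.Platform == nil {
		status.Platform = &NodePoolPlatformStatus{}
	}
	if status.Platform.KubeVirt == nil {
		status.Platform.KubeVirt = &KubeVirtNodePoolStatus{}
	}
	status.Platform.KubeVirt.CacheName = cacheName
}

func main() {
	var status NodePoolStatus
	setCacheName(&status, "rhcos-cache-abc123") // hypothetical PVC name
	fmt.Println(status.Platform.KubeVirt.CacheName)
}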
28 changes: 21 additions & 7 deletions hypershift-operator/controllers/nodepool/kubevirt/imagecacher.go
@@ -38,7 +38,7 @@ const (
 type BootImage interface {
 	// CacheImage creates a PVC to cache the node image.
 	CacheImage(context.Context, client.Client, *hyperv1.NodePool, string) error
-	getDVSourceForVMTemplate() *v1beta1.DataVolumeSource
+	getDVSourceForVMTemplate() (*v1beta1.DataVolumeSource, error)
 	String() string
 }

@@ -71,13 +71,13 @@ func (bootImage) CacheImage(_ context.Context, _ client.Client, _ *hyperv1.NodeP
 	return nil // no implementation
 }
 
-func (bi bootImage) getDVSourceForVMTemplate() *v1beta1.DataVolumeSource {
+func (bi bootImage) getDVSourceForVMTemplate() (*v1beta1.DataVolumeSource, error) {
 	if bi.isHTTP {
 		return &v1beta1.DataVolumeSource{
 			HTTP: &v1beta1.DataVolumeSourceHTTP{
 				URL: bi.name,
 			},
-		}
+		}, nil
 	}
 
 	pullMethod := v1beta1.RegistryPullNode
@@ -86,7 +86,7 @@ func (bi bootImage) getDVSourceForVMTemplate() *v1beta1.DataVolumeSource {
 			URL:        &bi.name,
 			PullMethod: &pullMethod,
 		},
-	}
+	}, nil
 }
 
 // cachedBootImage is the implementation of the BootImage interface for QCOW images
@@ -98,7 +98,7 @@ type cachedBootImage struct {
 	isHTTP bool
 }
 
-func newCachedBootImage(name, hash, namespace string, isHTTP bool) *cachedBootImage {
+func newCachedBootImage(name, hash, namespace string, isHTTP bool, nodePool *hyperv1.NodePool) *cachedBootImage {
 	cbi := &cachedBootImage{
 		hash:      hash,
 		namespace: namespace,
@@ -111,6 +111,15 @@ func newCachedBootImage(name, hash, namespace string, isHTTP bool) *cachedBootIm
 		cbi.name = containerImagePrefix + name
 	}
 
+	// default to the current cached image. This will be updated
+	// later on if it is out of date.
+	if nodePool.Status.Platform != nil &&
+		nodePool.Status.Platform.KubeVirt != nil &&
+		len(nodePool.Status.Platform.KubeVirt.CacheName) > 0 {
+
+		cbi.dvName = nodePool.Status.Platform.KubeVirt.CacheName
+	}
+
 	return cbi
 }

@@ -202,13 +211,18 @@ func (qi *cachedBootImage) createDVForCache(ctx context.Context, cl client.Clien
 	return dv, nil
 }
 
-func (qi *cachedBootImage) getDVSourceForVMTemplate() *v1beta1.DataVolumeSource {
+func (qi *cachedBootImage) getDVSourceForVMTemplate() (*v1beta1.DataVolumeSource, error) {
+
+	if qi.dvName == "" {
+		return nil, fmt.Errorf("no boot image source found for kubevirt machine")
+	}
+
 	return &v1beta1.DataVolumeSource{
 		PVC: &v1beta1.DataVolumeSourcePVC{
 			Namespace: qi.namespace,
 			Name:      qi.dvName,
 		},
-	}
+	}, nil
 }
 
 func (qi *cachedBootImage) GetCacheName() string {
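The two imagecacher.go changes work together: newCachedBootImage now seeds dvName from NodePool.Status.Platform.KubeVirt.CacheName, and getDVSourceForVMTemplate returns an error instead of silently emitting a DataVolumeSource that references an empty PVC name. A self-contained sketch of that contract with stand-in types (hypothetical names, not the real KubeVirt v1beta1 API):

package main

import (
	"errors"
	"fmt"
)

// Stand-ins for v1beta1.DataVolumeSource and v1beta1.DataVolumeSourcePVC.
type DataVolumeSourcePVC struct{ Namespace, Name string }
type DataVolumeSource struct{ PVC *DataVolumeSourcePVC }

type cachedBootImage struct{ namespace, dvName string }

// Mirrors the fixed getDVSourceForVMTemplate: an unresolved cache is a
// reportable error rather than a VM template pointing at a blank PVC.
func (qi cachedBootImage) getDVSource() (*DataVolumeSource, error) {
	if qi.dvName == "" {
		return nil, errors.New("no boot image source found for kubevirt machine")
	}
	return &DataVolumeSource{
		PVC: &DataVolumeSourcePVC{Namespace: qi.namespace, Name: qi.dvName},
	}, nil
}

func main() {
	// Before the cache PVC name is known (or restored from status):
	if _, err := (cachedBootImage{namespace: "clusters-guest"}).getDVSource(); err != nil {
		fmt.Println("unresolved cache:", err)
	}

	// After dvName is seeded from NodePool status (hypothetical values):
	src, _ := cachedBootImage{namespace: "clusters-guest", dvName: "rhcos-cache-abc123"}.getDVSource()
	fmt.Println("cached source PVC:", src.PVC.Name)
}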
16 changes: 11 additions & 5 deletions hypershift-operator/controllers/nodepool/kubevirt/kubevirt.go
@@ -112,7 +112,7 @@ func GetImage(nodePool *hyperv1.NodePool, releaseImage *releaseinfo.ReleaseImage
 
 	// KubeVirt Caching is disabled by default
 	if rootVolume != nil && rootVolume.CacheStrategy != nil && rootVolume.CacheStrategy.Type == hyperv1.KubevirtCachingStrategyPVC {
-		return newCachedBootImage(imageName, imageHash, hostedNamespace, isHTTP), nil
+		return newCachedBootImage(imageName, imageHash, hostedNamespace, isHTTP, nodePool), nil
 	}
 
 	return newBootImage(imageName, isHTTP), nil
@@ -147,7 +147,7 @@ func PlatformValidation(nodePool *hyperv1.NodePool) error {
 	return nil
 }
 
-func virtualMachineTemplateBase(nodePool *hyperv1.NodePool, bootImage BootImage) *capikubevirt.VirtualMachineTemplateSpec {
+func virtualMachineTemplateBase(nodePool *hyperv1.NodePool, bootImage BootImage) (*capikubevirt.VirtualMachineTemplateSpec, error) {
 	const rootVolumeName = "rhcos"
 
 	var (
@@ -156,7 +156,10 @@ func virtualMachineTemplateBase(nodePool *hyperv1.NodePool, bootImage BootImage)
 		guaranteedResources = false
 	)
 
-	dvSource := bootImage.getDVSourceForVMTemplate()
+	dvSource, err := bootImage.getDVSourceForVMTemplate()
+	if err != nil {
+		return nil, err
+	}
 
 	kvPlatform := nodePool.Spec.Platform.Kubevirt
 
@@ -299,7 +302,7 @@ func virtualMachineTemplateBase(nodePool *hyperv1.NodePool, bootImage BootImage)
 		template.Spec.Template.Spec.Domain.Devices.HostDevices = hostDevices
 	}
 
-	return template
+	return template, nil
 }
 
 func virtualMachineInterfaceName(idx int, name string) string {
Expand Down Expand Up @@ -369,7 +372,10 @@ func MachineTemplateSpec(nodePool *hyperv1.NodePool, hcluster *hyperv1.HostedClu
}
}

vmTemplate := virtualMachineTemplateBase(nodePool, bootImage)
vmTemplate, err := virtualMachineTemplateBase(nodePool, bootImage)
if err != nil {
return nil, fmt.Errorf("unable to generate kubevirt machine template: %w", err)
}

// Newer versions of the NodePool controller transitioned to spreading VMs across the cluster
// using TopologySpreadConstraints instead of Pod Anti-Affinity. When the new controller interacts
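kubevirt.go threads the new error through both template builders, wrapping it with %w in MachineTemplateSpec. A minimal sketch of why that wrapping style matters, using hypothetical function names in place of the real builders:

package main

import (
	"errors"
	"fmt"
)

var errNoBootImage = errors.New("no boot image source found for kubevirt machine")

// Stand-in for virtualMachineTemplateBase failing on an unresolved cache.
func templateBase() (string, error) {
	return "", errNoBootImage
}

// Stand-in for MachineTemplateSpec: wrap with %w so callers can still
// match the underlying cause with errors.Is.
func machineTemplateSpec() (string, error) {
	tmpl, err := templateBase()
	if err != nil {
		return "", fmt.Errorf("unable to generate kubevirt machine template: %w", err)
	}
	return tmpl, nil
}

func main() {
	_, err := machineTemplateSpec()
	fmt.Println(err)                            // wrapped message with added context
	fmt.Println(errors.Is(err, errNoBootImage)) // true: the cause survives wrapping
}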
16 changes: 0 additions & 16 deletions hypershift-operator/controllers/nodepool/nodepool_controller.go
@@ -899,22 +899,6 @@ func (r *NodePoolReconciler) setCIDRConflictCondition(nodePool *hyperv1.NodePool
 	return nil
 }
 
-func (r *NodePoolReconciler) addKubeVirtCacheNameToStatus(kubevirtBootImage kubevirt.BootImage, nodePool *hyperv1.NodePool) {
-	if namer, ok := kubevirtBootImage.(kubevirt.BootImageNamer); ok {
-		if cacheName := namer.GetCacheName(); len(cacheName) > 0 {
-			if nodePool.Status.Platform == nil {
-				nodePool.Status.Platform = &hyperv1.NodePoolPlatformStatus{}
-			}
-
-			if nodePool.Status.Platform.KubeVirt == nil {
-				nodePool.Status.Platform.KubeVirt = &hyperv1.KubeVirtNodePoolStatus{}
-			}
-
-			nodePool.Status.Platform.KubeVirt.CacheName = cacheName
-		}
-	}
-}
-
 // createReachedIgnitionEndpointCondition creates a condition for the NodePool based on the tokenSecret data.
 func (r NodePoolReconciler) createReachedIgnitionEndpointCondition(ctx context.Context, tokenSecret *corev1.Secret, generation int64) (*hyperv1.NodePoolCondition, error) {
 	var condition *hyperv1.NodePoolCondition