Merge pull request openshift#4847 from davidvossel/cache_fix
NO-JIRA: Fixes kubevirt image cacher
openshift-merge-bot[bot] authored Oct 7, 2024
2 parents 605ee5a + cf44a03 commit 73f7964
Showing 5 changed files with 62 additions and 32 deletions.
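The net effect of the diff below: the cache PVC name that the controller records in NodePool status is fed back into the image cacher on later reconciles, and a cached boot image with no DataVolume name now surfaces an error instead of rendering a VM template that points at an empty PVC. A minimal sketch of that round trip — the types here are simplified stand-ins for the hyperv1 API, not the real definitions:

// A minimal sketch of the cache-name round trip this commit introduces.
// kubeVirtNodePoolStatus and nodePoolStatus are simplified stand-ins for
// the hyperv1 API types; only the fields exercised by the fix are modeled.
package main

import (
    "errors"
    "fmt"
)

type kubeVirtNodePoolStatus struct{ CacheName string }

type nodePoolStatus struct{ KubeVirt *kubeVirtNodePoolStatus }

type cachedBootImage struct{ dvName string }

// newCachedBootImage mirrors the changed constructor: default the
// DataVolume name to whatever cache name is already recorded in status.
func newCachedBootImage(status *nodePoolStatus) *cachedBootImage {
    cbi := &cachedBootImage{}
    if status != nil && status.KubeVirt != nil && status.KubeVirt.CacheName != "" {
        cbi.dvName = status.KubeVirt.CacheName
    }
    return cbi
}

// dvSource mirrors the new error path: with no cache name, building a
// VM template now fails fast instead of referencing an empty PVC.
func (c *cachedBootImage) dvSource() (string, error) {
    if c.dvName == "" {
        return "", errors.New("no boot image source found for kubevirt machine")
    }
    return c.dvName, nil
}

func main() {
    seeded := &nodePoolStatus{KubeVirt: &kubeVirtNodePoolStatus{CacheName: "rhcos-abc12"}}
    if name, err := newCachedBootImage(seeded).dvSource(); err == nil {
        fmt.Println("template boots from cached PVC:", name)
    }

    if _, err := newCachedBootImage(nil).dvSource(); err != nil {
        fmt.Println("first reconcile, nothing cached yet:", err)
    }
}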
16 changes: 16 additions & 0 deletions hypershift-operator/controllers/nodepool/kubevirt.go
@@ -10,6 +10,22 @@ import (
     corev1 "k8s.io/api/core/v1"
 )
 
+func (r *NodePoolReconciler) addKubeVirtCacheNameToStatus(kubevirtBootImage kubevirt.BootImage, nodePool *hyperv1.NodePool) {
+    if namer, ok := kubevirtBootImage.(kubevirt.BootImageNamer); ok {
+        if cacheName := namer.GetCacheName(); len(cacheName) > 0 {
+            if nodePool.Status.Platform == nil {
+                nodePool.Status.Platform = &hyperv1.NodePoolPlatformStatus{}
+            }
+
+            if nodePool.Status.Platform.KubeVirt == nil {
+                nodePool.Status.Platform.KubeVirt = &hyperv1.KubeVirtNodePoolStatus{}
+            }
+
+            nodePool.Status.Platform.KubeVirt.CacheName = cacheName
+        }
+    }
+}
+
 func (r *NodePoolReconciler) setKubevirtConditions(ctx context.Context, nodePool *hyperv1.NodePool, hcluster *hyperv1.HostedCluster, controlPlaneNamespace string, releaseImage *releaseinfo.ReleaseImage) error {
     // moved KubeVirt specific handling up here, so the caching of the boot image will start as early as possible
     // in order to actually save time. Caching form the original location will take more time, because the VMs can't
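The helper added above relies on Go's optional-interface pattern: only the caching implementation satisfies kubevirt.BootImageNamer, so the plain boot image is skipped by the type assertion. A self-contained sketch of the pattern — recordCacheName stands in for the reconciler's status update and just prints instead of mutating NodePool status:

// Sketch of the optional-interface pattern behind addKubeVirtCacheNameToStatus;
// BootImage and BootImageNamer are trimmed to one method each.
package main

import "fmt"

type BootImage interface {
    String() string
}

// BootImageNamer is the optional extension that only the caching
// implementation satisfies.
type BootImageNamer interface {
    GetCacheName() string
}

type plainImage struct{}

func (plainImage) String() string { return "plain boot image" }

type cachedImage struct{ dvName string }

func (cachedImage) String() string         { return "cached boot image" }
func (c cachedImage) GetCacheName() string { return c.dvName }

func recordCacheName(img BootImage) {
    // Non-caching images simply fail the assertion and are skipped,
    // exactly like the no-op path in the real helper.
    if namer, ok := img.(BootImageNamer); ok {
        if name := namer.GetCacheName(); len(name) > 0 {
            fmt.Println("persist to status:", name)
        }
    }
}

func main() {
    recordCacheName(plainImage{})                   // skipped
    recordCacheName(cachedImage{dvName: "rhcos-1"}) // persisted
}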
29 changes: 22 additions & 7 deletions hypershift-operator/controllers/nodepool/kubevirt/imagecacher.go
@@ -38,7 +38,7 @@ const (
 type BootImage interface {
     // CacheImage creates a PVC to cache the node image.
     CacheImage(context.Context, client.Client, *hyperv1.NodePool, string) error
-    getDVSourceForVMTemplate() *v1beta1.DataVolumeSource
+    getDVSourceForVMTemplate() (*v1beta1.DataVolumeSource, error)
     String() string
 }
 
@@ -71,13 +71,13 @@ func (bootImage) CacheImage(_ context.Context, _ client.Client, _ *hyperv1.NodePool, _ string) error {
     return nil // no implementation
 }
 
-func (bi bootImage) getDVSourceForVMTemplate() *v1beta1.DataVolumeSource {
+func (bi bootImage) getDVSourceForVMTemplate() (*v1beta1.DataVolumeSource, error) {
     if bi.isHTTP {
         return &v1beta1.DataVolumeSource{
             HTTP: &v1beta1.DataVolumeSourceHTTP{
                 URL: bi.name,
             },
-        }
+        }, nil
     }
 
     pullMethod := v1beta1.RegistryPullNode
@@ -86,7 +86,7 @@ func (bi bootImage) getDVSourceForVMTemplate() *v1beta1.DataVolumeSource {
             URL:        &bi.name,
             PullMethod: &pullMethod,
         },
-    }
+    }, nil
 }
 
 // cachedBootImage is the implementation of the BootImage interface for QCOW images
@@ -98,7 +98,7 @@ type cachedBootImage struct {
     isHTTP bool
 }
 
-func newCachedBootImage(name, hash, namespace string, isHTTP bool) *cachedBootImage {
+func newCachedBootImage(name, hash, namespace string, isHTTP bool, nodePool *hyperv1.NodePool) *cachedBootImage {
     cbi := &cachedBootImage{
         hash:      hash,
         namespace: namespace,
@@ -111,6 +111,16 @@ func newCachedBootImage(name, hash, namespace string, isHTTP bool) *cachedBootImage {
         cbi.name = containerImagePrefix + name
     }
 
+    // default to the current cached image. This will be updated
+    // later on if it is out of date.
+    if nodePool != nil &&
+        nodePool.Status.Platform != nil &&
+        nodePool.Status.Platform.KubeVirt != nil &&
+        len(nodePool.Status.Platform.KubeVirt.CacheName) > 0 {
+
+        cbi.dvName = nodePool.Status.Platform.KubeVirt.CacheName
+    }
+
     return cbi
 }
 
@@ -202,13 +212,18 @@ func (qi *cachedBootImage) createDVForCache(ctx context.Context, cl client.Client
     return dv, nil
 }
 
-func (qi *cachedBootImage) getDVSourceForVMTemplate() *v1beta1.DataVolumeSource {
+func (qi *cachedBootImage) getDVSourceForVMTemplate() (*v1beta1.DataVolumeSource, error) {
+
+    if qi.dvName == "" {
+        return nil, fmt.Errorf("no boot image source found for kubevirt machine")
+    }
+
     return &v1beta1.DataVolumeSource{
         PVC: &v1beta1.DataVolumeSourcePVC{
             Namespace: qi.namespace,
             Name:      qi.dvName,
         },
-    }
+    }, nil
 }
 
 func (qi *cachedBootImage) GetCacheName() string {
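With this change both implementations share the (*v1beta1.DataVolumeSource, error) shape. A standalone sketch of the three source variants in play, using the CDI v1beta1 types that imagecacher.go already imports — the helper names, sample URLs, and the import path (as used by recent CDI releases) are assumptions for illustration:

// Sketch of the three DataVolume source shapes the BootImage
// implementations can now return.
package main

import (
    "fmt"

    v1beta1 "kubevirt.io/containerized-data-importer-api/pkg/apis/core/v1beta1"
)

// plainSource mirrors bootImage.getDVSourceForVMTemplate: HTTP URL or
// container registry, neither of which can fail.
func plainSource(name string, isHTTP bool) (*v1beta1.DataVolumeSource, error) {
    if isHTTP {
        return &v1beta1.DataVolumeSource{
            HTTP: &v1beta1.DataVolumeSourceHTTP{URL: name},
        }, nil
    }
    pullMethod := v1beta1.RegistryPullNode
    return &v1beta1.DataVolumeSource{
        Registry: &v1beta1.DataVolumeSourceRegistry{URL: &name, PullMethod: &pullMethod},
    }, nil
}

// cachedSource mirrors cachedBootImage.getDVSourceForVMTemplate: the
// cache PVC must already be known, otherwise the template cannot be built.
func cachedSource(namespace, dvName string) (*v1beta1.DataVolumeSource, error) {
    if dvName == "" {
        return nil, fmt.Errorf("no boot image source found for kubevirt machine")
    }
    return &v1beta1.DataVolumeSource{
        PVC: &v1beta1.DataVolumeSourcePVC{Namespace: namespace, Name: dvName},
    }, nil
}

func main() {
    src, _ := plainSource("https://example.com/rhcos.qcow2", true)
    fmt.Printf("http: %+v\n", src.HTTP)

    if _, err := cachedSource("clusters-example", ""); err != nil {
        fmt.Println("uncached:", err) // the new failure mode
    }
}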
16 changes: 11 additions & 5 deletions hypershift-operator/controllers/nodepool/kubevirt/kubevirt.go
@@ -112,7 +112,7 @@ func GetImage(nodePool *hyperv1.NodePool, releaseImage *releaseinfo.ReleaseImage
 
     // KubeVirt Caching is disabled by default
     if rootVolume != nil && rootVolume.CacheStrategy != nil && rootVolume.CacheStrategy.Type == hyperv1.KubevirtCachingStrategyPVC {
-        return newCachedBootImage(imageName, imageHash, hostedNamespace, isHTTP), nil
+        return newCachedBootImage(imageName, imageHash, hostedNamespace, isHTTP, nodePool), nil
     }
 
     return newBootImage(imageName, isHTTP), nil
@@ -147,7 +147,7 @@ func PlatformValidation(nodePool *hyperv1.NodePool) error {
     return nil
 }
 
-func virtualMachineTemplateBase(nodePool *hyperv1.NodePool, bootImage BootImage) *capikubevirt.VirtualMachineTemplateSpec {
+func virtualMachineTemplateBase(nodePool *hyperv1.NodePool, bootImage BootImage) (*capikubevirt.VirtualMachineTemplateSpec, error) {
     const rootVolumeName = "rhcos"
 
     var (
@@ -156,7 +156,10 @@ func virtualMachineTemplateBase(nodePool *hyperv1.NodePool, bootImage BootImage)
         guaranteedResources = false
     )
 
-    dvSource := bootImage.getDVSourceForVMTemplate()
+    dvSource, err := bootImage.getDVSourceForVMTemplate()
+    if err != nil {
+        return nil, err
+    }
 
     kvPlatform := nodePool.Spec.Platform.Kubevirt
 
@@ -299,7 +302,7 @@ func virtualMachineTemplateBase(nodePool *hyperv1.NodePool, bootImage BootImage)
         template.Spec.Template.Spec.Domain.Devices.HostDevices = hostDevices
     }
 
-    return template
+    return template, nil
 }
 
 func virtualMachineInterfaceName(idx int, name string) string {
@@ -369,7 +372,10 @@ func MachineTemplateSpec(nodePool *hyperv1.NodePool, hcluster *hyperv1.HostedCluster
         }
     }
 
-    vmTemplate := virtualMachineTemplateBase(nodePool, bootImage)
+    vmTemplate, err := virtualMachineTemplateBase(nodePool, bootImage)
+    if err != nil {
+        return nil, fmt.Errorf("unable to generate kubevirt machine template: %w", err)
+    }
 
     // Newer versions of the NodePool controller transitioned to spreading VMs across the cluster
     // using TopologySpreadConstraints instead of Pod Anti-Affinity. When the new controller interacts
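The plumbing above follows the usual Go fail-fast convention: return on the first error and wrap it with %w at the outer boundary so callers can still match the underlying cause. A generic illustration, with all names hypothetical:

// Illustration of the fail-fast, wrap-with-%w convention used by the
// kubevirt.go changes.
package main

import (
    "errors"
    "fmt"
)

var errNoBootSource = errors.New("no boot image source found for kubevirt machine")

func templateBase() (string, error) {
    // the inner layer (like virtualMachineTemplateBase) propagates the
    // boot image error unchanged
    return "", errNoBootSource
}

func machineTemplateSpec() (string, error) {
    tmpl, err := templateBase()
    if err != nil {
        // the outer layer (like MachineTemplateSpec) adds context with %w
        // so the underlying cause is still matchable
        return "", fmt.Errorf("unable to generate kubevirt machine template: %w", err)
    }
    return tmpl, nil
}

func main() {
    _, err := machineTemplateSpec()
    fmt.Println(err)
    fmt.Println(errors.Is(err, errNoBootSource)) // true: cause survives wrapping
}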
17 changes: 13 additions & 4 deletions hypershift-operator/controllers/nodepool/kubevirt/kubevirt_test.go
@@ -583,8 +583,17 @@ func TestKubevirtMachineTemplate(t *testing.T) {
             }
             g.Expect(PlatformValidation(tc.nodePool)).To(Succeed())
 
-            bootImage := newCachedBootImage(bootImageName, imageHash, hostedClusterNamespace, false)
-            bootImage.dvName = bootImageNamePrefix + "12345"
+            np := &hyperv1.NodePool{
+                Status: hyperv1.NodePoolStatus{
+                    Platform: &hyperv1.NodePoolPlatformStatus{
+                        KubeVirt: &hyperv1.KubeVirtNodePoolStatus{
+                            CacheName: bootImageNamePrefix + "12345",
+                        },
+                    },
+                },
+            }
+
+            bootImage := newCachedBootImage(bootImageName, imageHash, hostedClusterNamespace, false, np)
             result, err := MachineTemplateSpec(tc.nodePool, tc.hcluster, &releaseinfo.ReleaseImage{}, bootImage)
             g.Expect(err).ToNot(HaveOccurred())
             g.Expect(result).To(Equal(tc.expected), "Comparison failed\n%v", cmp.Diff(tc.expected, result))
@@ -721,7 +730,7 @@ func TestCacheImage(t *testing.T) {
             _ = v1beta1.AddToScheme(scheme)
             cl := fake.NewClientBuilder().WithScheme(scheme).WithObjects(tc.existingResources...).Build()
 
-            bootImage := newCachedBootImage(bootImageName, imageHash, hostedClusterNamespace, false)
+            bootImage := newCachedBootImage(bootImageName, imageHash, hostedClusterNamespace, false, nil)
             err := bootImage.CacheImage(ctx, cl, tc.nodePool, infraId)
 
             if tc.errExpected != (err != nil) {
@@ -1117,7 +1126,7 @@ func TestJsonPatch(t *testing.T) {
 
             g.Expect(PlatformValidation(tc.nodePool)).To(Succeed())
 
-            bootImage := newCachedBootImage(bootImageName, imageHash, hostedClusterNamespace, false)
+            bootImage := newCachedBootImage(bootImageName, imageHash, hostedClusterNamespace, false, nil)
             bootImage.dvName = bootImageNamePrefix + "12345"
             result, err := MachineTemplateSpec(tc.nodePool, tc.hcluster, &releaseinfo.ReleaseImage{}, bootImage)
             g.Expect(err).ToNot(HaveOccurred())
16 changes: 0 additions & 16 deletions hypershift-operator/controllers/nodepool/nodepool_controller.go
@@ -899,22 +899,6 @@ func (r *NodePoolReconciler) setCIDRConflictCondition(nodePool *hyperv1.NodePool
     return nil
 }
 
-func (r *NodePoolReconciler) addKubeVirtCacheNameToStatus(kubevirtBootImage kubevirt.BootImage, nodePool *hyperv1.NodePool) {
-    if namer, ok := kubevirtBootImage.(kubevirt.BootImageNamer); ok {
-        if cacheName := namer.GetCacheName(); len(cacheName) > 0 {
-            if nodePool.Status.Platform == nil {
-                nodePool.Status.Platform = &hyperv1.NodePoolPlatformStatus{}
-            }
-
-            if nodePool.Status.Platform.KubeVirt == nil {
-                nodePool.Status.Platform.KubeVirt = &hyperv1.KubeVirtNodePoolStatus{}
-            }
-
-            nodePool.Status.Platform.KubeVirt.CacheName = cacheName
-        }
-    }
-}
-
 // createReachedIgnitionEndpointCondition creates a condition for the NodePool based on the tokenSecret data.
 func (r NodePoolReconciler) createReachedIgnitionEndpointCondition(ctx context.Context, tokenSecret *corev1.Secret, generation int64) (*hyperv1.NodePoolCondition, error) {
     var condition *hyperv1.NodePoolCondition
