diff --git a/cmd/manager/main.go b/cmd/manager/main.go index e5fc2a39722..0ae7744e157 100644 --- a/cmd/manager/main.go +++ b/cmd/manager/main.go @@ -4,7 +4,6 @@ import ( "context" "flag" golog "log" - "math/rand" "net/http" _ "net/http/pprof" "os" @@ -258,7 +257,6 @@ func main() { log.WithField("pprof_host_port", pprofHostPort).Info("Enabling pprof") log.Println(http.ListenAndServe(pprofHostPort, nil)) }() - rand.Seed(time.Now().UnixNano()) cmd := newRootCommand() err := cmd.Execute() if err != nil { diff --git a/contrib/pkg/adm/managedns/enable.go b/contrib/pkg/adm/managedns/enable.go index 6ad17d876f1..1e44e64de70 100644 --- a/contrib/pkg/adm/managedns/enable.go +++ b/contrib/pkg/adm/managedns/enable.go @@ -27,11 +27,11 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client/config" hivev1 "github.com/openshift/hive/apis/hive/v1" - hiveutils "github.com/openshift/hive/contrib/pkg/utils" - awsutils "github.com/openshift/hive/contrib/pkg/utils/aws" - azureutils "github.com/openshift/hive/contrib/pkg/utils/azure" - gcputils "github.com/openshift/hive/contrib/pkg/utils/gcp" + "github.com/openshift/hive/contrib/pkg/utils" "github.com/openshift/hive/pkg/constants" + awscreds "github.com/openshift/hive/pkg/creds/aws" + azurecreds "github.com/openshift/hive/pkg/creds/azure" + gcpcreds "github.com/openshift/hive/pkg/creds/gcp" "github.com/openshift/hive/pkg/resource" "github.com/openshift/hive/pkg/util/scheme" ) @@ -46,9 +46,6 @@ managed domains, create a credentials secret for your cloud provider, and link i the ExternalDNS section of HiveConfig. ` const ( - cloudAWS = "aws" - cloudGCP = "gcp" - cloudAzure = "azure" hiveAdmissionDeployment = "hiveadmission" hiveConfigName = "hive" waitTime = time.Minute * 2 @@ -95,7 +92,7 @@ func NewEnableManageDNSCommand() *cobra.Command { } flags := cmd.Flags() - flags.StringVar(&opt.Cloud, "cloud", cloudAWS, "Cloud provider: aws(default)|gcp|azure)") + flags.StringVar(&opt.Cloud, "cloud", constants.PlatformAWS, "Cloud provider: aws(default)|gcp|azure)") flags.StringVar(&opt.CredsFile, "creds-file", "", "Cloud credentials file (defaults vary depending on cloud)") flags.StringVar(&opt.AzureResourceGroup, "azure-resource-group-name", "os4-common", "Azure Resource Group (Only applicable if --cloud azure)") return cmd @@ -127,8 +124,7 @@ func (o *Options) Run(args []string) error { // Update the current HiveConfig, which should always exist as the operator will // create a default one once run. 
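(Aside on the `math/rand` change at the top of this patch: since Go 1.20 the global `math/rand` source is seeded automatically with a random value and `rand.Seed` is deprecated, which is what makes deleting the `rand.Seed(time.Now().UnixNano())` call in cmd/manager/main.go safe. A minimal standalone illustration:

```go
package main

import (
	"fmt"
	"math/rand"
)

func main() {
	// Go >= 1.20: the global source is pre-seeded with a random value,
	// so this varies across runs without any rand.Seed call.
	fmt.Println(rand.Intn(100))

	// Code that needs a reproducible sequence should use a private
	// generator rather than seeding the deprecated global one.
	r := rand.New(rand.NewSource(42))
	fmt.Println(r.Intn(100)) // same value on every run
}
```
)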
hc := &hivev1.HiveConfig{} - o.hiveClient.Get(context.TODO(), types.NamespacedName{Name: hiveConfigName}, hc) - if err != nil { + if err := o.hiveClient.Get(context.TODO(), types.NamespacedName{Name: hiveConfigName}, hc); err != nil { log.WithError(err).Fatal("error looking up HiveConfig 'hive'") } @@ -139,7 +135,7 @@ func (o *Options) Run(args []string) error { var credsSecret *corev1.Secret switch o.Cloud { - case cloudAWS: + case constants.PlatformAWS: // Apply a secret for credentials to manage the root domain: credsSecret, err = o.generateAWSCredentialsSecret() if err != nil { @@ -148,7 +144,7 @@ func (o *Options) Run(args []string) error { dnsConf.AWS = &hivev1.ManageDNSAWSConfig{ CredentialsSecretRef: corev1.LocalObjectReference{Name: credsSecret.Name}, } - case cloudGCP: + case constants.PlatformGCP: // Apply a secret for credentials to manage the root domain: credsSecret, err = o.generateGCPCredentialsSecret() if err != nil { @@ -157,7 +153,7 @@ func (o *Options) Run(args []string) error { dnsConf.GCP = &hivev1.ManageDNSGCPConfig{ CredentialsSecretRef: corev1.LocalObjectReference{Name: credsSecret.Name}, } - case cloudAzure: + case constants.PlatformAzure: credsSecret, err = o.generateAzureCredentialsSecret() if err != nil { log.WithError(err).Fatal("error generating manageDNS credentials secret") @@ -313,7 +309,7 @@ func (o *Options) waitForHiveConfigToBeProcessed() error { func (o *Options) generateAWSCredentialsSecret() (*corev1.Secret, error) { defaultCredsFilePath := filepath.Join(o.homeDir, ".aws", "credentials") - accessKeyID, secretAccessKey, err := awsutils.GetAWSCreds(o.CredsFile, defaultCredsFilePath) + accessKeyID, secretAccessKey, err := awscreds.GetAWSCreds(o.CredsFile, defaultCredsFilePath) if err != nil { return nil, err } @@ -334,7 +330,7 @@ func (o *Options) generateAWSCredentialsSecret() (*corev1.Secret, error) { } func (o *Options) generateGCPCredentialsSecret() (*corev1.Secret, error) { - saFileContents, err := gcputils.GetCreds(o.CredsFile) + saFileContents, err := gcpcreds.GetCreds(o.CredsFile) if err != nil { return nil, err } @@ -354,7 +350,7 @@ func (o *Options) generateGCPCredentialsSecret() (*corev1.Secret, error) { } func (o *Options) generateAzureCredentialsSecret() (*corev1.Secret, error) { - spFileContents, err := azureutils.GetCreds(o.CredsFile) + spFileContents, err := azurecreds.GetCreds(o.CredsFile) if err != nil { return nil, err } @@ -379,12 +375,15 @@ func (o *Options) getResourceHelper() (resource.Helper, error) { log.WithError(err).Error("Cannot get client config") return nil, err } - return resource.NewHelperFromRESTConfig(cfg, "util-managedns-enable", log.WithField("command", "adm manage-dns enable")) + return resource.NewHelper( + log.WithField("command", "adm manage-dns enable"), + resource.FromRESTConfig(cfg), + resource.WithControllerName("util-managedns-enable")) } func (o *Options) setupLocalClients() error { log.Debug("creating cluster client config") - hiveClient, err := hiveutils.GetClient("hiveutil-managedns-enable") + hiveClient, err := utils.GetClient("hiveutil-managedns-enable") if err != nil { log.WithError(err).Error("failed to create a hive config client") return err diff --git a/contrib/pkg/awsprivatelink/awsprivatelink.go b/contrib/pkg/awsprivatelink/awsprivatelink.go index 4ad5d7996e4..08d369376dd 100644 --- a/contrib/pkg/awsprivatelink/awsprivatelink.go +++ b/contrib/pkg/awsprivatelink/awsprivatelink.go @@ -17,6 +17,17 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" ) +const ( + // privateLinkHubAcctCredsName 
is the name of the AWS PrivateLink Hub account credentials Secret + // created by the "hiveutil awsprivatelink enable" command + privateLinkHubAcctCredsName = "awsprivatelink-hub-acct-creds" + + // privateLinkHubAcctCredsLabel is added to the AWS PrivateLink Hub account credentials Secret + // created by the "hiveutil awsprivatelink enable" command and + // referenced by HiveConfig.spec.awsPrivateLink.credentialsSecretRef. + privateLinkHubAcctCredsLabel = "hive.openshift.io/awsprivatelink-hub-acct-credentials" +) + var ( logLevelDebug bool credsSecretRef string diff --git a/contrib/pkg/awsprivatelink/disable.go b/contrib/pkg/awsprivatelink/disable.go index af9b3dfb7fa..8f6e533a692 100644 --- a/contrib/pkg/awsprivatelink/disable.go +++ b/contrib/pkg/awsprivatelink/disable.go @@ -5,7 +5,6 @@ import ( hivev1 "github.com/openshift/hive/apis/hive/v1" "github.com/openshift/hive/contrib/pkg/awsprivatelink/common" - awsutils "github.com/openshift/hive/contrib/pkg/utils/aws" operatorutils "github.com/openshift/hive/pkg/operator/hive" corev1 "k8s.io/api/core/v1" @@ -78,8 +77,8 @@ func (o *disableOptions) Run(cmd *cobra.Command, args []string) error { if err := common.DynamicClient.List( context.Background(), hubAcctSecrets, - client.MatchingFields{"metadata.name": awsutils.PrivateLinkHubAcctCredsName}, - client.MatchingLabels{awsutils.PrivateLinkHubAcctCredsLabel: "true"}, + client.MatchingFields{"metadata.name": privateLinkHubAcctCredsName}, + client.MatchingLabels{privateLinkHubAcctCredsLabel: "true"}, client.InNamespace(hiveNS), ); err != nil { log.WithError(err).Error("Failed to list Hub account credentials Secrets") diff --git a/contrib/pkg/awsprivatelink/enable.go b/contrib/pkg/awsprivatelink/enable.go index ea532f33015..2b1849fa30e 100644 --- a/contrib/pkg/awsprivatelink/enable.go +++ b/contrib/pkg/awsprivatelink/enable.go @@ -13,8 +13,8 @@ import ( configv1 "github.com/openshift/api/config/v1" hivev1 "github.com/openshift/hive/apis/hive/v1" "github.com/openshift/hive/contrib/pkg/awsprivatelink/common" - awsutils "github.com/openshift/hive/contrib/pkg/utils/aws" "github.com/openshift/hive/pkg/awsclient" + awscreds "github.com/openshift/hive/pkg/creds/aws" operatorutils "github.com/openshift/hive/pkg/operator/hive" log "github.com/sirupsen/logrus" @@ -147,11 +147,11 @@ func (o *enableOptions) Run(cmd *cobra.Command, args []string) error { switch err = common.DynamicClient.Create(context.Background(), credsSecretInHiveNS); { case err == nil: - log.Infof("Secret/%s created in namespace %s", awsutils.PrivateLinkHubAcctCredsName, hiveNS) + log.Infof("Secret/%s created in namespace %s", privateLinkHubAcctCredsName, hiveNS) case apierrors.IsAlreadyExists(err): - log.Warnf("Secret/%s already exists in namespace %s", awsutils.PrivateLinkHubAcctCredsName, hiveNS) + log.Warnf("Secret/%s already exists in namespace %s", privateLinkHubAcctCredsName, hiveNS) default: - log.WithError(err).Fatalf("Failed to create Secret/%s in namespace %s", awsutils.PrivateLinkHubAcctCredsName, hiveNS) + log.WithError(err).Fatalf("Failed to create Secret/%s in namespace %s", privateLinkHubAcctCredsName, hiveNS) } // Update HiveConfig @@ -194,11 +194,11 @@ func (o *enableOptions) getOrCopyCredsSecret(source *corev1.Secret, namespace st APIVersion: corev1.SchemeGroupVersion.String(), }, ObjectMeta: metav1.ObjectMeta{ - Name: awsutils.PrivateLinkHubAcctCredsName, + Name: privateLinkHubAcctCredsName, Namespace: namespace, // Secrets without this label (e.g., the ones created and configured manually) won't be deleted // when 
calling "hiveutil awsprivatelink disable". - Labels: map[string]string{awsutils.PrivateLinkHubAcctCredsLabel: "true"}, + Labels: map[string]string{privateLinkHubAcctCredsLabel: "true"}, }, Type: corev1.SecretTypeOpaque, } @@ -210,7 +210,7 @@ func (o *enableOptions) getOrCopyCredsSecret(source *corev1.Secret, namespace st // Get creds from environment default: defaultCredsFilePath := filepath.Join(o.homeDir, ".aws", "credentials") - accessKeyID, secretAccessKey, err := awsutils.GetAWSCreds("", defaultCredsFilePath) + accessKeyID, secretAccessKey, err := awscreds.GetAWSCreds("", defaultCredsFilePath) if err != nil { return nil, err } diff --git a/contrib/pkg/awsprivatelink/endpointvpc/add.go b/contrib/pkg/awsprivatelink/endpointvpc/add.go index 8cd7655f58f..e738f79cc9c 100644 --- a/contrib/pkg/awsprivatelink/endpointvpc/add.go +++ b/contrib/pkg/awsprivatelink/endpointvpc/add.go @@ -12,7 +12,6 @@ import ( hivev1 "github.com/openshift/hive/apis/hive/v1" "github.com/openshift/hive/contrib/pkg/awsprivatelink/common" - awsutils "github.com/openshift/hive/contrib/pkg/utils/aws" "github.com/openshift/hive/pkg/awsclient" log "github.com/sirupsen/logrus" @@ -94,7 +93,7 @@ func (o *endpointVPCAddOptions) Complete(cmd *cobra.Command, args []string) erro regions.Insert(associatedVpc.AWSPrivateLinkVPC.Region) } // Use the passed-in credsSecret if possible - awsClientsByRegion, err := awsutils.GetAWSClientsByRegion(common.CredsSecret, regions) + awsClientsByRegion, err := getAWSClientsByRegion(common.CredsSecret, regions) if err != nil { log.WithError(err).Fatal("Failed to get AWS clients") } @@ -136,7 +135,7 @@ func (o *endpointVPCAddOptions) Validate(cmd *cobra.Command, args []string) erro func (o *endpointVPCAddOptions) Run(cmd *cobra.Command, args []string) error { // Get default SG of the endpoint VPC - endpointVPCDefaultSG, err := awsutils.GetDefaultSGOfVpc(o.endpointVpcClients, o.endpointVpcId) + endpointVPCDefaultSG, err := getDefaultSGOfVpc(o.endpointVpcClients, o.endpointVpcId) if err != nil { log.WithError(err).Fatal("Failed to get default SG of the endpoint VPC") } @@ -192,7 +191,7 @@ func (o *endpointVPCAddOptions) Run(cmd *cobra.Command, args []string) error { } // Update SGs - associatedVpcWorkerSG, err := awsutils.GetWorkerSGFromVpcId(associatedVpcClients, associatedVpcId) + associatedVpcWorkerSG, err := getWorkerSGFromVpcId(associatedVpcClients, associatedVpcId) if err != nil { log.WithError(err).Fatal("Failed to get worker SG of the associated VPC") } @@ -203,7 +202,7 @@ func (o *endpointVPCAddOptions) Run(cmd *cobra.Command, args []string) error { // Associated VPC & endpoint VPC in the same region => allow ingress from SG of the peer case associatedVpcRegion == o.endpointVpcRegion: log.Info("Authorizing traffic from the associated VPC's worker SG to the endpoint VPC's default SG") - if _, err = awsutils.AuthorizeAllIngressFromSG( + if _, err = authorizeAllIngressFromSG( o.endpointVpcClients, aws.String(endpointVPCDefaultSG), aws.String(associatedVpcWorkerSG), @@ -218,7 +217,7 @@ func (o *endpointVPCAddOptions) Run(cmd *cobra.Command, args []string) error { } log.Info("Authorizing traffic from the endpoint VPC's default SG to the associated VPC's worker SG") - if _, err = awsutils.AuthorizeAllIngressFromSG( + if _, err = authorizeAllIngressFromSG( associatedVpcClients, aws.String(associatedVpcWorkerSG), aws.String(endpointVPCDefaultSG), @@ -235,7 +234,7 @@ func (o *endpointVPCAddOptions) Run(cmd *cobra.Command, args []string) error { // Associated VPC & endpoint VPC in different regions 
=> allow ingress from CIDR of the peer default: log.Info("Authorizing traffic from the associated VPC's CIDR block to the endpoint VPC's default SG") - if _, err = awsutils.AuthorizeAllIngressFromCIDR( + if _, err = authorizeAllIngressFromCIDR( o.endpointVpcClients, aws.String(endpointVPCDefaultSG), associatedVpcCIDR, @@ -250,7 +249,7 @@ func (o *endpointVPCAddOptions) Run(cmd *cobra.Command, args []string) error { } log.Info("Authorizing traffic from the endpoint VPC's CIDR block to the associated VPC's worker SG") - if _, err = awsutils.AuthorizeAllIngressFromCIDR( + if _, err = authorizeAllIngressFromCIDR( associatedVpcClients, aws.String(associatedVpcWorkerSG), endpointVpcCIDR, @@ -309,7 +308,7 @@ func (o *endpointVPCAddOptions) addEndpointVpcToHiveConfig() { }, Subnets: endpointSubnets, } - if idx, ok := awsutils.FindVpcInInventory(o.endpointVpcId, o.hiveConfig.Spec.AWSPrivateLink.EndpointVPCInventory); ok { + if idx, ok := findVpcInInventory(o.endpointVpcId, o.hiveConfig.Spec.AWSPrivateLink.EndpointVPCInventory); ok { if reflect.DeepEqual(o.hiveConfig.Spec.AWSPrivateLink.EndpointVPCInventory[idx], endpointVpcToAdd) { log.Warn("Endpoint VPC found in HiveConfig. HiveConfig unchanged.") return diff --git a/contrib/pkg/awsprivatelink/endpointvpc/helpers.go b/contrib/pkg/awsprivatelink/endpointvpc/helpers.go new file mode 100644 index 00000000000..bb3c35b9b5a --- /dev/null +++ b/contrib/pkg/awsprivatelink/endpointvpc/helpers.go @@ -0,0 +1,192 @@ +package endpointvpc + +import ( + "fmt" + "strings" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/util/sets" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/ec2" + ec2types "github.com/aws/aws-sdk-go-v2/service/ec2/types" + + hivev1 "github.com/openshift/hive/apis/hive/v1" + "github.com/openshift/hive/pkg/awsclient" +) + +func getAWSClientsByRegion(secret *corev1.Secret, regions sets.Set[string]) (map[string]awsclient.Client, error) { + awsClientsByRegion := make(map[string]awsclient.Client) + for region := range regions { + awsClients, err := awsclient.NewClientFromSecret(secret, region) + if err != nil { + return awsClientsByRegion, err + } + awsClientsByRegion[region] = awsClients + } + + return awsClientsByRegion, nil +} + +func findVpcInInventory(vpcId string, inventory []hivev1.AWSPrivateLinkInventory) (int, bool) { + for i, endpointVpc := range inventory { + if vpcId == endpointVpc.AWSPrivateLinkVPC.VPCID { + return i, true + } + } + + return -1, false +} + +// getDefaultSGOfVpc gets the default SG of a VPC. +func getDefaultSGOfVpc(awsClients awsclient.Client, vpcId string) (string, error) { + describeSecurityGroupsOutput, err := awsClients.DescribeSecurityGroups(&ec2.DescribeSecurityGroupsInput{ + Filters: []ec2types.Filter{ + { + Name: aws.String("vpc-id"), + Values: []string{vpcId}, + }, + { + Name: aws.String("group-name"), + Values: []string{"default"}, + }, + }, + }) + if err != nil { + return "", err + } + if len(describeSecurityGroupsOutput.SecurityGroups) == 0 { + return "", fmt.Errorf("default SG not found for VPC %v", vpcId) + } + + return *describeSecurityGroupsOutput.SecurityGroups[0].GroupId, nil +} + +// revokeAllIngressFromCIDR removes an SG inbound rule which allows all ingress originating from a CIDR block. 
+func revokeAllIngressFromCIDR(awsClients awsclient.Client, SG, cidr *string) (*ec2.RevokeSecurityGroupIngressOutput, error) {
+	return awsClients.RevokeSecurityGroupIngress(&ec2.RevokeSecurityGroupIngressInput{
+		GroupId: SG,
+		IpPermissions: []ec2types.IpPermission{
+			{
+				IpRanges: []ec2types.IpRange{
+					{
+						CidrIp: cidr,
+					},
+				},
+				IpProtocol: aws.String("-1"),
+			},
+		},
+	})
+}
+
+// revokeAllIngressFromSG removes an SG inbound rule which allows all ingress originating from another SG.
+func revokeAllIngressFromSG(awsClients awsclient.Client, SG, sourceSG *string) (*ec2.RevokeSecurityGroupIngressOutput, error) {
+	return awsClients.RevokeSecurityGroupIngress(&ec2.RevokeSecurityGroupIngressInput{
+		GroupId: SG,
+		IpPermissions: []ec2types.IpPermission{
+			{
+				IpProtocol: aws.String("-1"),
+				UserIdGroupPairs: []ec2types.UserIdGroupPair{
+					{
+						GroupId: sourceSG,
+					},
+				},
+			},
+		},
+	})
+}
+
+// authorizeAllIngressFromCIDR adds an SG inbound rule which allows all ingress originating from a CIDR block.
+func authorizeAllIngressFromCIDR(awsClients awsclient.Client, SG, cidr, description *string) (*ec2.AuthorizeSecurityGroupIngressOutput, error) {
+	return awsClients.AuthorizeSecurityGroupIngress(&ec2.AuthorizeSecurityGroupIngressInput{
+		GroupId: SG,
+		IpPermissions: []ec2types.IpPermission{
+			{
+				IpRanges: []ec2types.IpRange{
+					{
+						CidrIp:      cidr,
+						Description: description,
+					},
+				},
+				IpProtocol: aws.String("-1"),
+			},
+		},
+	})
+}
+
+// authorizeAllIngressFromSG adds an SG inbound rule which allows all ingress originating from another SG.
+func authorizeAllIngressFromSG(awsClients awsclient.Client, SG, sourceSG, description *string) (*ec2.AuthorizeSecurityGroupIngressOutput, error) {
+	return awsClients.AuthorizeSecurityGroupIngress(&ec2.AuthorizeSecurityGroupIngressInput{
+		GroupId: SG,
+		IpPermissions: []ec2types.IpPermission{
+			{
+				IpProtocol: aws.String("-1"),
+				UserIdGroupPairs: []ec2types.UserIdGroupPair{
+					{
+						Description: description,
+						GroupId:     sourceSG,
+					},
+				},
+			},
+		},
+	})
+}
+
+// getInfraIdFromVpcId gets the infraID of an OCP cluster using the ID of the VPC it resides in.
+func getInfraIdFromVpcId(awsClients awsclient.Client, vpcId string) (string, error) {
+	// When we specify the resource IDs explicitly instead of using filtering,
+	// AWS functions will return a non-nil error if nothing is found.
+	describeVpcsOutput, err := awsClients.DescribeVpcs(&ec2.DescribeVpcsInput{
+		VpcIds: []string{vpcId},
+	})
+	if err != nil {
+		return "", err
+	}
+
+	targetPrefix := "kubernetes.io/cluster/"
+	for _, tag := range describeVpcsOutput.Vpcs[0].Tags {
+		if k := aws.ToString(tag.Key); strings.HasPrefix(k, targetPrefix) {
+			return strings.Replace(k, targetPrefix, "", 1), nil
+		}
+	}
+	return "", fmt.Errorf("no tag with prefix %v found on VPC %v", targetPrefix, vpcId)
+}
+
+// getWorkerSGFromVpcId gets the worker SG ID of an OCP cluster using the ID of the VPC it resides in.
+func getWorkerSGFromVpcId(awsClients awsclient.Client, vpcId string) (string, error) {
+	infraID, err := getInfraIdFromVpcId(awsClients, vpcId)
+	if err != nil {
+		return "", err
+	}
+
+	describeSecurityGroupsOutput, err := awsClients.DescribeSecurityGroups(&ec2.DescribeSecurityGroupsInput{
+		Filters: []ec2types.Filter{
+			{
+				Name:   aws.String("tag:Name"),
+				Values: []string{infraID + "-worker-sg", infraID + "-node"},
+			},
+		},
+	})
+	if err != nil {
+		return "", err
+	}
+	if len(describeSecurityGroupsOutput.SecurityGroups) == 0 {
+		return "", fmt.Errorf("worker SG not found for VPC %v", vpcId)
+	}
+
+	return aws.ToString(describeSecurityGroupsOutput.SecurityGroups[0].GroupId), err
+}
+
+// getCIDRFromVpcId gets the CIDR block of a VPC given its ID.
+func getCIDRFromVpcId(awsClients awsclient.Client, vpcId string) (string, error) {
+	// When we specify the resource IDs explicitly instead of using filtering,
+	// AWS functions will return a non-nil error if nothing is found.
+	describeVpcOutput, err := awsClients.DescribeVpcs(&ec2.DescribeVpcsInput{
+		VpcIds: []string{vpcId},
+	})
+	if err != nil {
+		return "", err
+	}
+
+	return aws.ToString(describeVpcOutput.Vpcs[0].CidrBlock), nil
+}
diff --git a/contrib/pkg/awsprivatelink/endpointvpc/remove.go b/contrib/pkg/awsprivatelink/endpointvpc/remove.go
index cf7c0823ca8..2fe34c526b0 100644
--- a/contrib/pkg/awsprivatelink/endpointvpc/remove.go
+++ b/contrib/pkg/awsprivatelink/endpointvpc/remove.go
@@ -9,7 +9,6 @@ import (
 	hivev1 "github.com/openshift/hive/apis/hive/v1"
 	"github.com/openshift/hive/contrib/pkg/awsprivatelink/common"
-	awsutils "github.com/openshift/hive/contrib/pkg/utils/aws"
 	"github.com/openshift/hive/pkg/awsclient"
 
 	log "github.com/sirupsen/logrus"
@@ -78,7 +77,7 @@ func (o *endpointVPCRemoveOptions) Complete(cmd *cobra.Command, args []string) e
 	}
 
 	// Get endpoint VPC and AWS clients for it
-	endpointVpcIdx, ok := awsutils.FindVpcInInventory(o.endpointVpcId, o.hiveConfig.Spec.AWSPrivateLink.EndpointVPCInventory)
+	endpointVpcIdx, ok := findVpcInInventory(o.endpointVpcId, o.hiveConfig.Spec.AWSPrivateLink.EndpointVPCInventory)
 	if !ok {
 		log.Fatalf("Endpoint VPC %v not found in HiveConfig.spec.awsPrivateLink.endpointVPCInventory. 
"+ "Please call `hiveutil privatelink endpointvpc add ...` to add it first", o.endpointVpcId) @@ -94,7 +93,7 @@ func (o *endpointVPCRemoveOptions) Complete(cmd *cobra.Command, args []string) e for _, associatedVpc := range o.associatedVpcs { regions.Insert(associatedVpc.AWSPrivateLinkVPC.Region) } - awsClientsByRegion, err := awsutils.GetAWSClientsByRegion(common.CredsSecret, regions) + awsClientsByRegion, err := getAWSClientsByRegion(common.CredsSecret, regions) if err != nil { log.WithError(err).Fatal("Failed to get AWS clients") } @@ -111,7 +110,7 @@ func (o *endpointVPCRemoveOptions) Validate(cmd *cobra.Command, args []string) e func (o *endpointVPCRemoveOptions) Run(cmd *cobra.Command, args []string) error { // Get default SG of the endpoint VPC - endpointVPCDefaultSG, err := awsutils.GetDefaultSGOfVpc(o.endpointVpcClients, o.endpointVpcId) + endpointVPCDefaultSG, err := getDefaultSGOfVpc(o.endpointVpcClients, o.endpointVpcId) if err != nil { log.WithError(err).Fatal("Failed to get default SG of the endpoint VPC") } @@ -124,12 +123,12 @@ func (o *endpointVPCRemoveOptions) Run(cmd *cobra.Command, args []string) error associatedVpcId := associatedVpc.AWSPrivateLinkVPC.VPCID log.Infof("Removing networking elements between associated VPC %v and endpoint VPC %v", associatedVpcId, o.endpointVpcId) - associatedVpcCIDR, err := awsutils.GetCIDRFromVpcId(associatedVpcClients, associatedVpcId) + associatedVpcCIDR, err := getCIDRFromVpcId(associatedVpcClients, associatedVpcId) if err != nil { log.Fatal("Failed to get CIDR of associated VPC") } log.Debugf("Found associated VPC CIDR = %v", associatedVpcCIDR) - endpointVpcCIDR, err := awsutils.GetCIDRFromVpcId(o.endpointVpcClients, o.endpointVpcId) + endpointVpcCIDR, err := getCIDRFromVpcId(o.endpointVpcClients, o.endpointVpcId) if err != nil { log.Fatal("Failed to get CIDR of endpoint VPC") } @@ -166,7 +165,7 @@ func (o *endpointVPCRemoveOptions) Run(cmd *cobra.Command, args []string) error } // Update SGs - associatedVpcWorkerSG, err := awsutils.GetWorkerSGFromVpcId(associatedVpcClients, associatedVpcId) + associatedVpcWorkerSG, err := getWorkerSGFromVpcId(associatedVpcClients, associatedVpcId) if err != nil { log.WithError(err).Fatal("Failed to get worker SG of the associated Hive cluster") } @@ -177,7 +176,7 @@ func (o *endpointVPCRemoveOptions) Run(cmd *cobra.Command, args []string) error // Associated VPC & endpoint VPC in the same region => revoke ingress from SG of the peer case associatedVpcRegion == o.endpointVpcRegion: log.Info("Revoking access from the endpoint VPC's default SG to the associated VPC's worker SG") - if _, err = awsutils.RevokeAllIngressFromSG( + if _, err = revokeAllIngressFromSG( associatedVpcClients, aws.String(associatedVpcWorkerSG), aws.String(endpointVPCDefaultSG), @@ -191,7 +190,7 @@ func (o *endpointVPCRemoveOptions) Run(cmd *cobra.Command, args []string) error } log.Info("Revoking access from the associated VPC's worker SG to the endpoint VPC's default SG") - if _, err = awsutils.RevokeAllIngressFromSG( + if _, err = revokeAllIngressFromSG( o.endpointVpcClients, aws.String(endpointVPCDefaultSG), aws.String(associatedVpcWorkerSG), @@ -207,7 +206,7 @@ func (o *endpointVPCRemoveOptions) Run(cmd *cobra.Command, args []string) error // Associated VPC & endpoint VPC in different regions => revoke ingress from CIDR of the peer default: log.Info("Revoking access from the endpoint VPC's CIDR block to the associated VPC's worker SG") - if _, err = awsutils.RevokeAllIngressFromCIDR( + if _, err = revokeAllIngressFromCIDR( 
associatedVpcClients, aws.String(associatedVpcWorkerSG), aws.String(endpointVpcCIDR), @@ -221,7 +220,7 @@ func (o *endpointVPCRemoveOptions) Run(cmd *cobra.Command, args []string) error } log.Info("Revoking access from the associated VPC's CIDR block to the endpoint VPC's default SG") - if _, err = awsutils.RevokeAllIngressFromCIDR( + if _, err = revokeAllIngressFromCIDR( o.endpointVpcClients, aws.String(endpointVPCDefaultSG), aws.String(associatedVpcCIDR), diff --git a/contrib/pkg/clusterpool/clusterpool.go b/contrib/pkg/clusterpool/clusterpool.go index 8ec8305ab5b..9f2828befcd 100644 --- a/contrib/pkg/clusterpool/clusterpool.go +++ b/contrib/pkg/clusterpool/clusterpool.go @@ -15,23 +15,21 @@ import ( "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/sets" "k8s.io/cli-runtime/pkg/printers" "k8s.io/client-go/util/homedir" hivev1 "github.com/openshift/hive/apis/hive/v1" "github.com/openshift/hive/contrib/pkg/utils" - awsutils "github.com/openshift/hive/contrib/pkg/utils/aws" - azureutils "github.com/openshift/hive/contrib/pkg/utils/azure" - gcputils "github.com/openshift/hive/contrib/pkg/utils/gcp" "github.com/openshift/hive/pkg/clusterresource" + "github.com/openshift/hive/pkg/constants" + awscreds "github.com/openshift/hive/pkg/creds/aws" + azurecreds "github.com/openshift/hive/pkg/creds/azure" + gcpcreds "github.com/openshift/hive/pkg/creds/gcp" "github.com/openshift/hive/pkg/util/scheme" ) const ( - cloudAWS = "aws" - cloudAzure = "azure" - cloudGCP = "gcp" - longDesc = ` OVERVIEW The hiveutil clusterpool create-pool command generates and applies the @@ -56,11 +54,11 @@ used. ) var ( - validClouds = map[string]bool{ - cloudAWS: true, - cloudAzure: true, - cloudGCP: true, - } + validClouds = sets.New( + constants.PlatformAWS, + constants.PlatformAzure, + constants.PlatformGCP, + ) ) type ClusterPoolOptions struct { @@ -124,7 +122,7 @@ create-pool CLUSTER_POOL_NAME --cloud=gcp`, } flags := cmd.Flags() - flags.StringVar(&opt.Cloud, "cloud", cloudAWS, "Cloud provider: aws(default)|azure|gcp)") + flags.StringVar(&opt.Cloud, "cloud", constants.PlatformAWS, "Cloud provider: aws(default)|azure|gcp)") flags.StringVarP(&opt.Namespace, "namespace", "n", "", "Namespace to create cluster pool in") flags.StringVar(&opt.BaseDomain, "base-domain", "new-installer.openshift.com", "Base domain for the cluster pool") flags.StringVar(&opt.PullSecret, "pull-secret", "", "Pull secret for cluster pool. 
Takes precedence over pull-secret-file.") @@ -149,11 +147,11 @@ func (o *ClusterPoolOptions) complete(args []string) error { if o.Region == "" { switch o.Cloud { - case cloudAWS: + case constants.PlatformAWS: o.Region = "us-east-1" - case cloudAzure: + case constants.PlatformAzure: o.Region = "centralus" - case cloudGCP: + case constants.PlatformGCP: o.Region = "us-east1" } } @@ -203,7 +201,7 @@ func (o *ClusterPoolOptions) validate(cmd *cobra.Command) error { return fmt.Errorf("must specify only one of image set, release image or release image source") } - if !validClouds[o.Cloud] { + if !validClouds.Has(o.Cloud) { cmd.Usage() return fmt.Errorf("unsupported cloud: %s", o.Cloud) } @@ -269,7 +267,7 @@ func (o *ClusterPoolOptions) generateObjects() ([]runtime.Object, error) { } var awsBuilder *clusterresource.AWSCloudBuilder - if o.Cloud == cloudAWS { + if o.Cloud == constants.PlatformAWS { awsBuilder = &clusterresource.AWSCloudBuilder{ Region: o.Region, // TODO: CLI option for this @@ -280,9 +278,9 @@ func (o *ClusterPoolOptions) generateObjects() ([]runtime.Object, error) { if o.createCloudSecret { switch o.Cloud { - case cloudAWS: + case constants.PlatformAWS: defaultCredsFilePath := filepath.Join(o.homeDir, ".aws", "credentials") - accessKeyID, secretAccessKey, err := awsutils.GetAWSCreds(o.CredsFile, defaultCredsFilePath) + accessKeyID, secretAccessKey, err := awscreds.GetAWSCreds(o.CredsFile, defaultCredsFilePath) if err != nil { o.log.WithError(err).Error("Failed to get AWS credentials") return nil, err @@ -290,8 +288,8 @@ func (o *ClusterPoolOptions) generateObjects() ([]runtime.Object, error) { // Update AWS cloud builder with creds awsBuilder.AccessKeyID = accessKeyID awsBuilder.SecretAccessKey = secretAccessKey - case cloudAzure: - creds, err := azureutils.GetCreds(o.CredsFile) + case constants.PlatformAzure: + creds, err := azurecreds.GetCreds(o.CredsFile) if err != nil { o.log.WithError(err).Error("Failed to read in Azure credentials") return nil, err @@ -301,8 +299,8 @@ func (o *ClusterPoolOptions) generateObjects() ([]runtime.Object, error) { BaseDomainResourceGroupName: o.AzureBaseDomainResourceGroupName, Region: o.Region, } - case cloudGCP: - creds, err := gcputils.GetCreds(o.CredsFile) + case constants.PlatformGCP: + creds, err := gcpcreds.GetCreds(o.CredsFile) if err != nil { o.log.WithError(err).Error("Failed to get GCP credentials") return nil, err diff --git a/contrib/pkg/createcluster/create.go b/contrib/pkg/createcluster/create.go index 2e4b5ca657a..aff1a6af4f7 100644 --- a/contrib/pkg/createcluster/create.go +++ b/contrib/pkg/createcluster/create.go @@ -7,7 +7,6 @@ import ( "os" "os/user" "path/filepath" - "sort" "strings" "time" @@ -18,17 +17,18 @@ import ( "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/sets" "k8s.io/cli-runtime/pkg/printers" hivev1 "github.com/openshift/hive/apis/hive/v1" hivev1azure "github.com/openshift/hive/apis/hive/v1/azure" "github.com/openshift/hive/contrib/pkg/utils" - awsutils "github.com/openshift/hive/contrib/pkg/utils/aws" - azurecredutil "github.com/openshift/hive/contrib/pkg/utils/azure" - gcputils "github.com/openshift/hive/contrib/pkg/utils/gcp" - openstackutils "github.com/openshift/hive/contrib/pkg/utils/openstack" "github.com/openshift/hive/pkg/clusterresource" "github.com/openshift/hive/pkg/constants" + awscreds "github.com/openshift/hive/pkg/creds/aws" + azurecreds "github.com/openshift/hive/pkg/creds/azure" + gcpcreds 
"github.com/openshift/hive/pkg/creds/gcp" + openstackcreds "github.com/openshift/hive/pkg/creds/openstack" "github.com/openshift/hive/pkg/gcpclient" "github.com/openshift/hive/pkg/util/scheme" installertypes "github.com/openshift/installer/pkg/types" @@ -90,13 +90,6 @@ https://amd64.ocp.releases.ci.openshift.org/api/v1/releasestream/4-stable/latest ` const ( hiveutilCreatedLabel = "hive.openshift.io/hiveutil-created" - cloudAWS = "aws" - cloudAzure = "azure" - cloudGCP = "gcp" - cloudIBM = "ibmcloud" - cloudOpenStack = "openstack" - cloudVSphere = "vsphere" - cloudNutanix = "nutanix" testFailureManifest = `apiVersion: v1 kind: NotARealSecret @@ -108,18 +101,18 @@ type: TestFailResource ) var ( - validClouds = map[string]bool{ - cloudAWS: true, - cloudAzure: true, - cloudGCP: true, - cloudIBM: true, - cloudOpenStack: true, - cloudVSphere: true, - cloudNutanix: true, - } - manualCCOModeClouds = map[string]bool{ - cloudIBM: true, - } + validClouds = sets.New( + constants.PlatformAWS, + constants.PlatformAzure, + constants.PlatformGCP, + constants.PlatformIBMCloud, + constants.PlatformOpenStack, + constants.PlatformVSphere, + constants.PlatformNutanix, + ) + manualCCOModeClouds = sets.New( + constants.PlatformIBMCloud, + ) ) // Options is the set of options to generate and apply a new cluster deployment @@ -277,23 +270,8 @@ create-cluster CLUSTER_DEPLOYMENT_NAME --cloud=vsphere --vsphere-vcenter=vmware. }, } - clouds := []string{} - for cloud, valid := range validClouds { - if valid { - clouds = append(clouds, cloud) - } - } - sort.Strings(clouds) - - manualModeClouds := []string{} - for cloud, manual := range manualCCOModeClouds { - if manual { - manualModeClouds = append(manualModeClouds, cloud) - } - } - flags := cmd.Flags() - flags.StringVar(&opt.Cloud, "cloud", cloudAWS, fmt.Sprintf("Cloud provider: %s", strings.Join(clouds, "|"))) + flags.StringVar(&opt.Cloud, "cloud", constants.PlatformAWS, fmt.Sprintf("Cloud provider: %s", strings.Join(sets.List(validClouds), "|"))) flags.StringVarP(&opt.Namespace, "namespace", "n", "", "Namespace to create cluster deployment in") flags.StringVar(&opt.SSHPrivateKeyFile, "ssh-private-key-file", "", "file name containing private key contents") flags.StringVar(&opt.SSHPublicKeyFile, "ssh-public-key-file", defaultSSHPublicKeyFile, "file name of SSH public key for cluster") @@ -306,7 +284,7 @@ create-cluster CLUSTER_DEPLOYMENT_NAME --cloud=vsphere --vsphere-vcenter=vmware. flags.StringVar(&opt.BoundServiceAccountSigningKeyFile, "bound-service-account-signing-key-file", "", "Private service account signing key (often created with ccoutil create key-pair)") flags.BoolVar(&opt.CredentialsModeManual, "credentials-mode-manual", false, fmt.Sprintf(`Configure the Cloud Credential Operator in the target cluster to Manual mode. Implies the use of --manifests to inject custom Secrets for all CredentialsRequests in the cluster. 
-This option is redundant (but permitted) for following clouds, which always use manual mode: %s`, strings.Join(manualModeClouds, "|"))) +This option is redundant (but permitted) for following clouds, which always use manual mode: %s`, strings.Join(sets.List(manualCCOModeClouds), "|"))) flags.StringVar(&opt.CredsFile, "creds-file", "", "Cloud credentials file (defaults vary depending on cloud)") flags.StringVar(&opt.ClusterImageSet, "image-set", "", "Cluster image set to use for this cluster deployment") flags.StringVar(&opt.ReleaseImage, "release-image", "", "Release image to use for installing this cluster deployment") @@ -404,13 +382,13 @@ func (o *Options) Complete(cmd *cobra.Command, args []string) error { if o.Region == "" { switch o.Cloud { - case cloudAWS: + case constants.PlatformAWS: o.Region = "us-east-1" - case cloudAzure: + case constants.PlatformAzure: o.Region = "centralus" - case cloudGCP: + case constants.PlatformGCP: o.Region = "us-east1" - case cloudIBM: + case constants.PlatformIBMCloud: o.Region = "us-east" } } @@ -423,7 +401,7 @@ func (o *Options) Complete(cmd *cobra.Command, args []string) error { o.HibernateAfterDur = &dur } - if manualCloud := manualCCOModeClouds[o.Cloud]; manualCloud && !o.CredentialsModeManual { + if manualCCOModeClouds.Has(o.Cloud) && !o.CredentialsModeManual { o.CredentialsModeManual = true o.log.Infof("Using Manual credentials mode for cloud=%s", o.Cloud) } @@ -448,20 +426,20 @@ func (o *Options) Validate(cmd *cobra.Command) error { o.log.Info("If specifying a serving certificate, specify a valid serving certificate key") return fmt.Errorf("invalid serving cert") } - if !validClouds[o.Cloud] { + if !validClouds.Has(o.Cloud) { cmd.Usage() o.log.Infof("Unsupported cloud: %s", o.Cloud) return fmt.Errorf("unsupported cloud: %s", o.Cloud) } - if o.Cloud == cloudOpenStack { + if o.Cloud == constants.PlatformOpenStack { if o.OpenStackAPIFloatingIP == "" { - msg := fmt.Sprintf("--openstack-api-floating-ip must be set when using --cloud=%q", cloudOpenStack) + msg := fmt.Sprintf("--openstack-api-floating-ip must be set when using --cloud=%q", constants.PlatformOpenStack) o.log.Info(msg) return errors.New(msg) } if o.OpenStackCloud == "" { - msg := fmt.Sprintf("--openstack-cloud must be set when using --cloud=%q", cloudOpenStack) + msg := fmt.Sprintf("--openstack-cloud must be set when using --cloud=%q", constants.PlatformOpenStack) o.log.Info(msg) return errors.New(msg) } @@ -471,12 +449,12 @@ func (o *Options) Validate(cmd *cobra.Command) error { return fmt.Errorf("Manual credentials mode requires --manifests containing custom Secrets with manually provisioned credentials") } - if o.AWSPrivateLink && o.Cloud != cloudAWS { - return fmt.Errorf("--aws-private-link can only be enabled when using --cloud=%q", cloudAWS) + if o.AWSPrivateLink && o.Cloud != constants.PlatformAWS { + return fmt.Errorf("--aws-private-link can only be enabled when using --cloud=%q", constants.PlatformAWS) } - if o.PrivateLink && o.Cloud != cloudAWS && o.Cloud != cloudGCP { - return fmt.Errorf("--private-link can only be enabled when using --cloud={%q,%q}", cloudAWS, cloudGCP) + if o.PrivateLink && o.Cloud != constants.PlatformAWS && o.Cloud != constants.PlatformGCP { + return fmt.Errorf("--private-link can only be enabled when using --cloud={%q,%q}", constants.PlatformAWS, constants.PlatformGCP) } if o.Adopt { @@ -506,7 +484,7 @@ func (o *Options) Validate(cmd *cobra.Command) error { if o.Region != "" { switch c := o.Cloud; c { - case cloudAWS, cloudAzure, cloudGCP, cloudIBM: + 
case constants.PlatformAWS, constants.PlatformAzure, constants.PlatformGCP, constants.PlatformIBMCloud: default: return fmt.Errorf("cannot specify --region when using --cloud=%q", c) } @@ -672,9 +650,9 @@ func (o *Options) GenerateObjects() ([]runtime.Object, error) { } switch o.Cloud { - case cloudAWS: + case constants.PlatformAWS: defaultCredsFilePath := filepath.Join(o.homeDir, ".aws", "credentials") - accessKeyID, secretAccessKey, err := awsutils.GetAWSCreds(o.CredsFile, defaultCredsFilePath) + accessKeyID, secretAccessKey, err := awscreds.GetAWSCreds(o.CredsFile, defaultCredsFilePath) if err != nil { return nil, err } @@ -700,8 +678,8 @@ func (o *Options) GenerateObjects() ([]runtime.Object, error) { PrivateLink: o.PrivateLink || o.AWSPrivateLink, } builder.CloudBuilder = awsProvider - case cloudAzure: - creds, err := azurecredutil.GetCreds(o.CredsFile) + case constants.PlatformAzure: + creds, err := azurecreds.GetCreds(o.CredsFile) if err != nil { o.log.WithError(err).Error("Failed to read in Azure credentials") return nil, err @@ -715,8 +693,8 @@ func (o *Options) GenerateObjects() ([]runtime.Object, error) { ResourceGroupName: o.AzureResourceGroupName, } builder.CloudBuilder = azureProvider - case cloudGCP: - creds, err := gcputils.GetCreds(o.CredsFile) + case constants.PlatformGCP: + creds, err := gcpcreds.GetCreds(o.CredsFile) if err != nil { return nil, err } @@ -734,8 +712,8 @@ func (o *Options) GenerateObjects() ([]runtime.Object, error) { DiscardLocalSsdOnHibernate: o.discardLocalSsdOnHibernate, } builder.CloudBuilder = gcpProvider - case cloudOpenStack: - cloudsYAMLContent, err := openstackutils.GetCreds(o.CredsFile) + case constants.PlatformOpenStack: + cloudsYAMLContent, err := openstackcreds.GetCreds(o.CredsFile) if err != nil { return nil, err } @@ -749,7 +727,7 @@ func (o *Options) GenerateObjects() ([]runtime.Object, error) { IngressFloatingIP: o.OpenStackIngressFloatingIP, } builder.CloudBuilder = openStackProvider - case cloudVSphere: + case constants.PlatformVSphere: vsphereUsername := os.Getenv(constants.VSphereUsernameEnvVar) if vsphereUsername == "" { return nil, fmt.Errorf("no %s env var set, cannot proceed", constants.VSphereUsernameEnvVar) @@ -849,10 +827,10 @@ func (o *Options) GenerateObjects() ([]runtime.Object, error) { CACert: bytes.Join(caCerts, []byte("\n")), } builder.CloudBuilder = vsphereProvider - case cloudIBM: + case constants.PlatformIBMCloud: ibmCloudAPIKey := os.Getenv(constants.IBMCloudAPIKeyEnvVar) if ibmCloudAPIKey == "" { - return nil, fmt.Errorf("%s env var is required when using --cloud=%q", constants.IBMCloudAPIKeyEnvVar, cloudIBM) + return nil, fmt.Errorf("%s env var is required when using --cloud=%q", constants.IBMCloudAPIKeyEnvVar, constants.PlatformIBMCloud) } ibmCloudProvider := &clusterresource.IBMCloudBuilder{ APIKey: ibmCloudAPIKey, @@ -860,7 +838,7 @@ func (o *Options) GenerateObjects() ([]runtime.Object, error) { InstanceType: o.IBMInstanceType, } builder.CloudBuilder = ibmCloudProvider - case cloudNutanix: + case constants.PlatformNutanix: builder.CloudBuilder, err = o.getNutanixCloudBuilder() if err != nil { return nil, err @@ -1028,7 +1006,7 @@ func (o *Options) generateSampleSyncSets() []runtime.Object { var syncsets []runtime.Object for i := range [10]int{} { syncsets = append(syncsets, sampleSyncSet(fmt.Sprintf("%s-sample-syncset%d", o.Name, i), o.Namespace, o.Name)) - syncsets = append(syncsets, sampleSelectorSyncSet(fmt.Sprintf("sample-selector-syncset%d", i), o.Name)) + syncsets = append(syncsets, 
sampleSelectorSyncSet(fmt.Sprintf("sample-selector-syncset%d", i))) } return syncsets } @@ -1061,7 +1039,7 @@ func sampleSyncSet(name, namespace, cdName string) *hivev1.SyncSet { } } -func sampleSelectorSyncSet(name, cdName string) *hivev1.SelectorSyncSet { +func sampleSelectorSyncSet(name string) *hivev1.SelectorSyncSet { return &hivev1.SelectorSyncSet{ TypeMeta: metav1.TypeMeta{ Kind: "SelectorSyncSet", diff --git a/contrib/pkg/deprovision/awstagdeprovision.go b/contrib/pkg/deprovision/awstagdeprovision.go index 506aeb4aa68..082d6af4b99 100644 --- a/contrib/pkg/deprovision/awstagdeprovision.go +++ b/contrib/pkg/deprovision/awstagdeprovision.go @@ -8,7 +8,7 @@ import ( "github.com/spf13/cobra" "github.com/openshift/hive/contrib/pkg/utils" - awsutils "github.com/openshift/hive/contrib/pkg/utils/aws" + awscreds "github.com/openshift/hive/pkg/creds/aws" "github.com/openshift/installer/pkg/destroy/aws" ) @@ -65,7 +65,7 @@ func completeAWSUninstaller(o *aws.ClusterUninstaller, logLevel string, args []s "This is expected when in standalone mode. "+ "We expect to find your AWS credentials in one of the usual places.", err) } - awsutils.ConfigureCreds(client, nil) + awscreds.ConfigureCreds(client, nil) return nil } diff --git a/contrib/pkg/deprovision/azure.go b/contrib/pkg/deprovision/azure.go index 55745e24523..93bfc1a94c3 100644 --- a/contrib/pkg/deprovision/azure.go +++ b/contrib/pkg/deprovision/azure.go @@ -11,7 +11,7 @@ import ( installertypesazure "github.com/openshift/installer/pkg/types/azure" "github.com/openshift/hive/contrib/pkg/utils" - azureutils "github.com/openshift/hive/contrib/pkg/utils/azure" + azurecreds "github.com/openshift/hive/pkg/creds/azure" ) // AzureOptions is the set of options to deprovision an Azure cluster @@ -55,7 +55,7 @@ func NewDeprovisionAzureCommand(logLevel string) *cobra.Command { } func validate() error { - _, err := azureutils.GetCreds("") + _, err := azurecreds.GetCreds("") if err != nil { return errors.Wrap(err, "failed to get Azure credentials") } @@ -74,7 +74,7 @@ func (opt *AzureOptions) completeAzureUninstaller(args []string) (providers.Dest if err != nil { return nil, errors.Wrap(err, "failed to get client") } - azureutils.ConfigureCreds(client, nil) + azurecreds.ConfigureCreds(client, nil) metadata := &types.ClusterMetadata{ InfraID: args[0], diff --git a/contrib/pkg/deprovision/deprovision.go b/contrib/pkg/deprovision/deprovision.go index 72381b41f6d..c0aaf70fcda 100644 --- a/contrib/pkg/deprovision/deprovision.go +++ b/contrib/pkg/deprovision/deprovision.go @@ -5,27 +5,12 @@ import ( "log" "os" - "sigs.k8s.io/controller-runtime/pkg/client" - "github.com/openshift/installer/pkg/destroy/providers" "github.com/openshift/installer/pkg/types" - "github.com/openshift/installer/pkg/types/aws" - "github.com/openshift/installer/pkg/types/azure" - "github.com/openshift/installer/pkg/types/gcp" - "github.com/openshift/installer/pkg/types/ibmcloud" - "github.com/openshift/installer/pkg/types/nutanix" - "github.com/openshift/installer/pkg/types/openstack" - "github.com/openshift/installer/pkg/types/vsphere" "github.com/openshift/hive/contrib/pkg/utils" - awsutil "github.com/openshift/hive/contrib/pkg/utils/aws" - azureutil "github.com/openshift/hive/contrib/pkg/utils/azure" - gcputil "github.com/openshift/hive/contrib/pkg/utils/gcp" - ibmcloudutil "github.com/openshift/hive/contrib/pkg/utils/ibmcloud" - nutanixutil "github.com/openshift/hive/contrib/pkg/utils/nutanix" - openstackutil "github.com/openshift/hive/contrib/pkg/utils/openstack" - vsphereutil 
"github.com/openshift/hive/contrib/pkg/utils/vsphere" "github.com/openshift/hive/pkg/constants" + "github.com/openshift/hive/pkg/creds" "github.com/spf13/cobra" ) @@ -81,7 +66,7 @@ To run the generic destroyer, use the --metadata-json-secret-name parameter.`, logger.Fatal("no platform configured in metadata.json") } - ConfigureCreds[platform](c, metadata) + creds.ConfigureCreds[platform](c, metadata) destroyerBuilder, ok := providers.Registry[platform] if !ok { @@ -116,13 +101,3 @@ To run the generic destroyer, use the --metadata-json-secret-name parameter.`, cmd.AddCommand(NewDeprovisionNutanixCommand(logLevel)) return cmd } - -var ConfigureCreds = map[string]func(client.Client, *types.ClusterMetadata){ - aws.Name: awsutil.ConfigureCreds, - azure.Name: azureutil.ConfigureCreds, - gcp.Name: gcputil.ConfigureCreds, - ibmcloud.Name: ibmcloudutil.ConfigureCreds, - nutanix.Name: nutanixutil.ConfigureCreds, - openstack.Name: openstackutil.ConfigureCreds, - vsphere.Name: vsphereutil.ConfigureCreds, -} diff --git a/contrib/pkg/deprovision/gcp.go b/contrib/pkg/deprovision/gcp.go index 943169082f7..90de833a3ac 100644 --- a/contrib/pkg/deprovision/gcp.go +++ b/contrib/pkg/deprovision/gcp.go @@ -12,7 +12,7 @@ import ( typesgcp "github.com/openshift/installer/pkg/types/gcp" "github.com/openshift/hive/contrib/pkg/utils" - gcputils "github.com/openshift/hive/contrib/pkg/utils/gcp" + gcpcreds "github.com/openshift/hive/pkg/creds/gcp" "github.com/openshift/hive/pkg/gcpclient" ) @@ -60,7 +60,7 @@ func (o *gcpOptions) Complete(cmd *cobra.Command, args []string) error { if err != nil { return errors.Wrap(err, "failed to get client") } - gcputils.ConfigureCreds(client, nil) + gcpcreds.ConfigureCreds(client, nil) return nil } @@ -73,7 +73,7 @@ func (o *gcpOptions) Validate(cmd *cobra.Command) error { return fmt.Errorf("missing region") } - creds, err := gcputils.GetCreds("") + creds, err := gcpcreds.GetCreds("") if err != nil { return errors.Wrap(err, "failed to get GCP credentials") } diff --git a/contrib/pkg/deprovision/ibmcloud.go b/contrib/pkg/deprovision/ibmcloud.go index 380f6f79bbf..15a005e4e60 100644 --- a/contrib/pkg/deprovision/ibmcloud.go +++ b/contrib/pkg/deprovision/ibmcloud.go @@ -10,8 +10,8 @@ import ( "github.com/spf13/cobra" "github.com/openshift/hive/contrib/pkg/utils" - ibmutils "github.com/openshift/hive/contrib/pkg/utils/ibmcloud" "github.com/openshift/hive/pkg/constants" + ibmcreds "github.com/openshift/hive/pkg/creds/ibmcloud" "github.com/openshift/hive/pkg/ibmclient" "github.com/openshift/installer/pkg/destroy/ibmcloud" "github.com/openshift/installer/pkg/types" @@ -69,7 +69,7 @@ func (o *ibmCloudDeprovisionOptions) Complete(cmd *cobra.Command, args []string) if err != nil { return errors.Wrap(err, "failed to get client") } - ibmutils.ConfigureCreds(client, nil) + ibmcreds.ConfigureCreds(client, nil) // Create IBMCloud Client ibmCloudAPIKey := os.Getenv(constants.IBMCloudAPIKeyEnvVar) diff --git a/contrib/pkg/deprovision/nutanix.go b/contrib/pkg/deprovision/nutanix.go index 1975954089b..3dc9175e6d6 100644 --- a/contrib/pkg/deprovision/nutanix.go +++ b/contrib/pkg/deprovision/nutanix.go @@ -9,8 +9,8 @@ import ( "github.com/spf13/cobra" "github.com/openshift/hive/contrib/pkg/utils" - nutanixutils "github.com/openshift/hive/contrib/pkg/utils/nutanix" "github.com/openshift/hive/pkg/constants" + nutanixcreds "github.com/openshift/hive/pkg/creds/nutanix" "github.com/openshift/installer/pkg/destroy/nutanix" "github.com/openshift/installer/pkg/types" typesnutanix 
"github.com/openshift/installer/pkg/types/nutanix" @@ -61,7 +61,7 @@ func (o *nutanixOptions) Complete(cmd *cobra.Command, args []string) error { if err != nil { return errors.Wrap(err, "failed to get client") } - nutanixutils.ConfigureCreds(client, nil) + nutanixcreds.ConfigureCreds(client, nil) return nil } diff --git a/contrib/pkg/deprovision/openstack.go b/contrib/pkg/deprovision/openstack.go index f3d73737dee..15024ffb6af 100644 --- a/contrib/pkg/deprovision/openstack.go +++ b/contrib/pkg/deprovision/openstack.go @@ -9,7 +9,7 @@ import ( "github.com/spf13/cobra" "github.com/openshift/hive/contrib/pkg/utils" - openstackutils "github.com/openshift/hive/contrib/pkg/utils/openstack" + openstackcreds "github.com/openshift/hive/pkg/creds/openstack" "github.com/openshift/installer/pkg/destroy/openstack" "github.com/openshift/installer/pkg/types" typesopenstack "github.com/openshift/installer/pkg/types/openstack" @@ -56,7 +56,7 @@ func (o *openStackOptions) Complete(cmd *cobra.Command, args []string) error { if err != nil { return errors.Wrap(err, "failed to get client") } - openstackutils.ConfigureCreds(client, nil) + openstackcreds.ConfigureCreds(client, nil) return nil } diff --git a/contrib/pkg/deprovision/vsphere.go b/contrib/pkg/deprovision/vsphere.go index 301116bbbdd..93e4efa716a 100644 --- a/contrib/pkg/deprovision/vsphere.go +++ b/contrib/pkg/deprovision/vsphere.go @@ -13,8 +13,8 @@ import ( typesvsphere "github.com/openshift/installer/pkg/types/vsphere" "github.com/openshift/hive/contrib/pkg/utils" - vsphereutils "github.com/openshift/hive/contrib/pkg/utils/vsphere" "github.com/openshift/hive/pkg/constants" + vspherecreds "github.com/openshift/hive/pkg/creds/vsphere" ) // vSphereOptions is the set of options to deprovision an vSphere cluster @@ -60,7 +60,7 @@ func (o *vSphereOptions) Complete(cmd *cobra.Command, args []string) error { if err != nil { return errors.Wrap(err, "failed to get client") } - vsphereutils.ConfigureCreds(client, nil) + vspherecreds.ConfigureCreds(client, nil) return nil } diff --git a/contrib/pkg/testresource/command.go b/contrib/pkg/testresource/command.go index 77efea6b6ce..f912ffb60a5 100644 --- a/contrib/pkg/testresource/command.go +++ b/contrib/pkg/testresource/command.go @@ -54,8 +54,7 @@ func newApplyCommand() *cobra.Command { return } content := mustRead(args[0]) - kubeconfig := mustRead(kubeconfigPath) - helper, err := resource.NewHelper(kubeconfig, log.WithField("cmd", "apply")) + helper, err := resource.NewHelper(log.WithField("cmd", "apply"), resource.FromKubeconfig(mustRead(kubeconfigPath))) if err != nil { fmt.Printf("Error creating resource helper: %v\n", err) return @@ -126,8 +125,7 @@ func newPatchCommand() *cobra.Command { return } content := mustRead(args[0]) - kubeconfig := mustRead(kubeconfigPath) - helper, err := resource.NewHelper(kubeconfig, log.WithField("cmd", "patch")) + helper, err := resource.NewHelper(log.WithField("cmd", "patch"), resource.FromKubeconfig(mustRead(kubeconfigPath))) if err != nil { fmt.Printf("Error creating resource helper: %v\n", err) return diff --git a/contrib/pkg/utils/aws/aws.go b/contrib/pkg/utils/aws/aws.go deleted file mode 100644 index 7c7b6e7f4ec..00000000000 --- a/contrib/pkg/utils/aws/aws.go +++ /dev/null @@ -1,284 +0,0 @@ -package aws - -import ( - "errors" - "fmt" - "os" - "path/filepath" - "strings" - - log "github.com/sirupsen/logrus" - ini "gopkg.in/ini.v1" - - corev1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/util/sets" - "sigs.k8s.io/controller-runtime/pkg/client" - - 
"github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/service/ec2" - ec2types "github.com/aws/aws-sdk-go-v2/service/ec2/types" - - hivev1 "github.com/openshift/hive/apis/hive/v1" - "github.com/openshift/hive/contrib/pkg/utils" - "github.com/openshift/hive/pkg/awsclient" - "github.com/openshift/hive/pkg/constants" - - installertypes "github.com/openshift/installer/pkg/types" -) - -const ( - // PrivateLinkHubAcctCredsName is the name of the AWS PrivateLink Hub account credentials Secret - // created by the "hiveutil awsprivatelink enable" command - PrivateLinkHubAcctCredsName = "awsprivatelink-hub-acct-creds" - - // PrivateLinkHubAcctCredsLabel is added to the AWS PrivateLink Hub account credentials Secret - // created by the "hiveutil awsprivatelink enable" command and - // referenced by HiveConfig.spec.awsPrivateLink.credentialsSecretRef. - PrivateLinkHubAcctCredsLabel = "hive.openshift.io/awsprivatelink-hub-acct-credentials" -) - -func GetAWSClientsByRegion(secret *corev1.Secret, regions sets.Set[string]) (map[string]awsclient.Client, error) { - awsClientsByRegion := make(map[string]awsclient.Client) - for region := range regions { - awsClients, err := awsclient.NewClientFromSecret(secret, region) - if err != nil { - return awsClientsByRegion, err - } - awsClientsByRegion[region] = awsClients - } - - return awsClientsByRegion, nil -} - -func FindVpcInInventory(vpcId string, inventory []hivev1.AWSPrivateLinkInventory) (int, bool) { - for i, endpointVpc := range inventory { - if vpcId == endpointVpc.AWSPrivateLinkVPC.VPCID { - return i, true - } - } - - return -1, false -} - -// GetDefaultSGOfVpc gets the default SG of a VPC. -func GetDefaultSGOfVpc(awsClients awsclient.Client, vpcId string) (string, error) { - describeSecurityGroupsOutput, err := awsClients.DescribeSecurityGroups(&ec2.DescribeSecurityGroupsInput{ - Filters: []ec2types.Filter{ - { - Name: aws.String("vpc-id"), - Values: []string{vpcId}, - }, - { - Name: aws.String("group-name"), - Values: []string{"default"}, - }, - }, - }) - if err != nil { - return "", err - } - if len(describeSecurityGroupsOutput.SecurityGroups) == 0 { - return "", fmt.Errorf("default SG not found for VPC %v", vpcId) - } - - return *describeSecurityGroupsOutput.SecurityGroups[0].GroupId, nil -} - -// RevokeAllIngressFromCIDR removes an SG inbound rule which allows all ingress originating from a CIDR block. -func RevokeAllIngressFromCIDR(awsClients awsclient.Client, SG, cidr *string) (*ec2.RevokeSecurityGroupIngressOutput, error) { - return awsClients.RevokeSecurityGroupIngress(&ec2.RevokeSecurityGroupIngressInput{ - GroupId: SG, - IpPermissions: []ec2types.IpPermission{ - { - IpRanges: []ec2types.IpRange{ - { - CidrIp: cidr, - }, - }, - IpProtocol: aws.String("-1"), - }, - }, - }) -} - -// RevokeAllIngressFromSG removes an SG inbound rule which allows all ingress originating from another SG. -func RevokeAllIngressFromSG(awsClients awsclient.Client, SG, sourceSG *string) (*ec2.RevokeSecurityGroupIngressOutput, error) { - return awsClients.RevokeSecurityGroupIngress(&ec2.RevokeSecurityGroupIngressInput{ - GroupId: SG, - IpPermissions: []ec2types.IpPermission{ - { - IpProtocol: aws.String("-1"), - UserIdGroupPairs: []ec2types.UserIdGroupPair{ - { - GroupId: sourceSG, - }, - }, - }, - }, - }) -} - -// AuthorizeAllIngressFromCIDR adds an SG inbound rule which allows all ingress originating from a CIDR block. 
-func AuthorizeAllIngressFromCIDR(awsClients awsclient.Client, SG, cidr, description *string) (*ec2.AuthorizeSecurityGroupIngressOutput, error) { - return awsClients.AuthorizeSecurityGroupIngress(&ec2.AuthorizeSecurityGroupIngressInput{ - GroupId: SG, - IpPermissions: []ec2types.IpPermission{ - { - IpRanges: []ec2types.IpRange{ - { - CidrIp: cidr, - Description: description, - }, - }, - IpProtocol: aws.String("-1"), - }, - }, - }) -} - -// AuthorizeAllIngressFromSG adds an SG inbound rule which allows all ingress originating from another SG. -func AuthorizeAllIngressFromSG(awsClients awsclient.Client, SG, sourceSG, description *string) (*ec2.AuthorizeSecurityGroupIngressOutput, error) { - return awsClients.AuthorizeSecurityGroupIngress(&ec2.AuthorizeSecurityGroupIngressInput{ - GroupId: SG, - IpPermissions: []ec2types.IpPermission{ - { - IpProtocol: aws.String("-1"), - UserIdGroupPairs: []ec2types.UserIdGroupPair{ - { - Description: description, - GroupId: sourceSG, - }, - }, - }, - }, - }) -} - -// GetInfraIdFromVpcId gets the infraID of an OCP cluster using the ID of the VPC it resides. -func GetInfraIdFromVpcId(awsClients awsclient.Client, vpcId string) (string, error) { - // When we specify the resource IDs explicitly instead of using filtering, - // AWS functions will return a non-nil error if nothing is found. - describeVpcsOutput, err := awsClients.DescribeVpcs(&ec2.DescribeVpcsInput{ - VpcIds: []string{vpcId}, - }) - if err != nil { - return "", err - } - - targetPrefix := "kubernetes.io/cluster/" - for _, tag := range describeVpcsOutput.Vpcs[0].Tags { - if k := aws.ToString(tag.Key); strings.HasPrefix(k, targetPrefix) { - return strings.Replace(k, targetPrefix, "", 1), nil - } - } - return "", fmt.Errorf("no tag with prefix %v found on VPC %v", targetPrefix, vpcId) -} - -// GetWorkerSGFromVpcId gets the worker SG ID of an OCP cluster using the ID of the VPC it resides. -func GetWorkerSGFromVpcId(awsClients awsclient.Client, vpcId string) (string, error) { - infraID, err := GetInfraIdFromVpcId(awsClients, vpcId) - if err != nil { - return "", err - } - - describeSecurityGroupsOutput, err := awsClients.DescribeSecurityGroups(&ec2.DescribeSecurityGroupsInput{ - Filters: []ec2types.Filter{ - { - Name: aws.String("tag:Name"), - Values: []string{infraID + "-worker-sg", infraID + "-node"}, - }, - }, - }) - if err != nil { - return "", err - } - if len(describeSecurityGroupsOutput.SecurityGroups) == 0 { - return "", fmt.Errorf("worker SG not found for VPC %v", vpcId) - } - - return aws.ToString(describeSecurityGroupsOutput.SecurityGroups[0].GroupId), err -} - -// GetCIDRFromVpcId gets the CIDR block of a VPC using the ID of it. -func GetCIDRFromVpcId(awsClients awsclient.Client, vpcId string) (string, error) { - // When we specify the resource IDs explicitly instead of using filtering, - // AWS functions will return a non-nil error if nothing is found. - describeVpcOutput, err := awsClients.DescribeVpcs(&ec2.DescribeVpcsInput{ - VpcIds: []string{vpcId}, - }) - if err != nil { - return "", err - } - - return aws.ToString(describeVpcOutput.Vpcs[0].CidrBlock), nil -} - -// GetAWSCreds reads AWS credentials either from either the specified credentials file, -// the standard environment variables, or a default credentials file. (~/.aws/credentials) -// The defaultCredsFile will only be used if credsFile is empty and the environment variables -// are not set. 
-// GetAWSCreds reads AWS credentials from either the specified credentials file,
-// the standard environment variables, or a default credentials file (~/.aws/credentials).
-// The defaultCredsFile will only be used if credsFile is empty and the environment variables
-// are not set.
-func GetAWSCreds(credsFile, defaultCredsFile string) (string, string, error) {
-	credsFilePath := defaultCredsFile
-	switch {
-	case credsFile != "":
-		credsFilePath = credsFile
-	default:
-		secretAccessKey := os.Getenv("AWS_SECRET_ACCESS_KEY")
-		accessKeyID := os.Getenv("AWS_ACCESS_KEY_ID")
-		if len(secretAccessKey) > 0 && len(accessKeyID) > 0 {
-			return accessKeyID, secretAccessKey, nil
-		}
-	}
-	credFile, err := ini.Load(credsFilePath)
-	if err != nil {
-		log.Error("Cannot load AWS credentials")
-		return "", "", err
-	}
-	defaultSection, err := credFile.GetSection("default")
-	if err != nil {
-		log.Error("Cannot get default section from AWS credentials file")
-		return "", "", err
-	}
-	accessKeyIDValue := defaultSection.Key("aws_access_key_id")
-	secretAccessKeyValue := defaultSection.Key("aws_secret_access_key")
-	if accessKeyIDValue == nil || secretAccessKeyValue == nil {
-		log.Error("AWS credentials file missing keys in default section")
-	}
-	return accessKeyIDValue.String(), secretAccessKeyValue.String(), nil
-}
-
-var awsConfigForbidCredentialProcess utils.ProjectToDirFileFilter = func(key string, contents []byte) (basename string, newContents []byte, err error) {
-	// First, only process aws_config
-	bn, newContents, err := utils.ProjectOnlyTheseKeys(constants.AWSConfigSecretKey)(key, contents)
-	// If that passed, scrub for credential_process
-	if err == nil && bn != "" && awsclient.ContainsCredentialProcess(newContents) {
-		return "", nil, errors.New("credential_process is insecure and thus forbidden")
-	}
-	return bn, newContents, err
-}
-
-// ConfigureCreds loads a secret designated by the environment variables CLUSTERDEPLOYMENT_NAMESPACE
-// and CREDS_SECRET_NAME and configures AWS credential environment variables and config files
-// accordingly.
-func ConfigureCreds(c client.Client, metadata *installertypes.ClusterMetadata) {
-	credsSecret := utils.LoadSecretOrDie(c, "CREDS_SECRET_NAME")
-	if credsSecret == nil {
-		return
-	}
-	// Should we bounce if any of the following already exist?
-	if id := string(credsSecret.Data[constants.AWSAccessKeyIDSecretKey]); id != "" {
-		os.Setenv("AWS_ACCESS_KEY_ID", id)
-	}
-	if secret := string(credsSecret.Data[constants.AWSSecretAccessKeySecretKey]); secret != "" {
-		os.Setenv("AWS_SECRET_ACCESS_KEY", secret)
-	}
-	if config := credsSecret.Data[constants.AWSConfigSecretKey]; len(config) != 0 {
-		// Lay this down as a file, but forbid credential_process
-		utils.ProjectToDir(credsSecret, constants.AWSCredsMount, awsConfigForbidCredentialProcess)
-		os.Setenv("AWS_CONFIG_FILE", filepath.Join(constants.AWSCredsMount, constants.AWSConfigSecretKey))
-	}
-	// This would normally allow credential_process in the config file, but we checked for that above.
-	os.Setenv("AWS_SDK_LOAD_CONFIG", "true")
-	// Install cluster proxy trusted CA bundle
-	utils.InstallCerts(constants.TrustedCABundleDir)
-}
diff --git a/contrib/pkg/utils/client.go b/contrib/pkg/utils/client.go
index 27f6dcb7052..f2f567f2b46 100644
--- a/contrib/pkg/utils/client.go
+++ b/contrib/pkg/utils/client.go
@@ -4,7 +4,6 @@ import (
 	"context"
 
 	"k8s.io/apimachinery/pkg/watch"
-	restclient "k8s.io/client-go/rest"
 	"k8s.io/client-go/tools/clientcmd"
 	"sigs.k8s.io/controller-runtime/pkg/client"
@@ -24,7 +23,9 @@ func (w wwClient) Watch(ctx context.Context, obj client.ObjectList, opts ...clie
 // GetClient returns a new dynamic controller-runtime client.
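// Illustrative aside, not part of the patch: GetClient (whose hunk follows) now builds
// its rest.Config inline with clientcmd's deferred-loading path, the same construction
// this patch also inlines into DefaultNamespace. In isolation, under standard
// client-go imports (k8s.io/client-go/rest and k8s.io/client-go/tools/clientcmd):
func restConfigFromKubeconfig() (*rest.Config, error) {
	// Honors $KUBECONFIG and the default kubeconfig path lazily, at first use.
	return clientcmd.NewNonInteractiveDeferredLoadingClientConfig(
		clientcmd.NewDefaultClientConfigLoadingRules(), &clientcmd.ConfigOverrides{}).
		ClientConfig()
}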
func GetClient(fieldManager string) (client.WithWatch, error) { - cfg, err := GetClientConfig() + cfg, err := clientcmd.NewNonInteractiveDeferredLoadingClientConfig( + clientcmd.NewDefaultClientConfigLoadingRules(), &clientcmd.ConfigOverrides{}). + ClientConfig() if err != nil { return nil, err } @@ -39,10 +40,3 @@ func GetClient(fieldManager string) (client.WithWatch, error) { w: dynamicClient, }, nil } - -// GetClientConfig gets the config for the REST client. -func GetClientConfig() (*restclient.Config, error) { - rules := clientcmd.NewDefaultClientConfigLoadingRules() - kubeconfig := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(rules, &clientcmd.ConfigOverrides{}) - return kubeconfig.ClientConfig() -} diff --git a/contrib/pkg/utils/generic.go b/contrib/pkg/utils/generic.go index a12b4b2d68f..5fee5103467 100644 --- a/contrib/pkg/utils/generic.go +++ b/contrib/pkg/utils/generic.go @@ -59,13 +59,13 @@ func GetResourceHelper(controllerName hivev1.ControllerName, logger log.FieldLog logger.WithError(err).Error("Cannot get client config") return nil, err } - return resource.NewHelperFromRESTConfig(cfg, controllerName, logger) + return resource.NewHelper(logger, resource.FromRESTConfig(cfg), resource.WithControllerName(controllerName)) } func DefaultNamespace() (string, error) { - rules := clientcmd.NewDefaultClientConfigLoadingRules() - kubeconfig := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(rules, &clientcmd.ConfigOverrides{}) - ns, _, err := kubeconfig.Namespace() + ns, _, err := clientcmd.NewNonInteractiveDeferredLoadingClientConfig( + clientcmd.NewDefaultClientConfigLoadingRules(), &clientcmd.ConfigOverrides{}). + Namespace() return ns, err } diff --git a/pkg/clusterresource/builder.go b/pkg/clusterresource/builder.go index 055d41fa702..37894cce2fc 100644 --- a/pkg/clusterresource/builder.go +++ b/pkg/clusterresource/builder.go @@ -14,7 +14,7 @@ import ( corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" - "k8s.io/utils/pointer" + "k8s.io/utils/ptr" hivev1 "github.com/openshift/hive/apis/hive/v1" "github.com/openshift/hive/pkg/constants" @@ -411,7 +411,7 @@ func (o *Builder) generateInstallConfigSecret() (*corev1.Secret, error) { }, ControlPlane: &installertypes.MachinePool{ Name: "master", - Replicas: pointer.Int64Ptr(3), + Replicas: ptr.To(int64(3)), }, Compute: []installertypes.MachinePool{ { @@ -538,7 +538,7 @@ func (o *Builder) generateMachinePool() *hivev1.MachinePool { Name: o.Name, }, Name: "worker", - Replicas: pointer.Int64Ptr(o.WorkerNodesCount), + Replicas: ptr.To(o.WorkerNodesCount), }, } o.CloudBuilder.addMachinePoolPlatform(o, mp) diff --git a/pkg/constants/constants.go b/pkg/constants/constants.go index d57c263391d..1e0e7791e6d 100644 --- a/pkg/constants/constants.go +++ b/pkg/constants/constants.go @@ -378,7 +378,7 @@ const ( InstallLogsUploadProviderEnvVar = "HIVE_INSTALL_LOGS_UPLOAD_PROVIDER" // InstallLogsUploadProviderAWS is used to specify that AWS is the cloud provider to upload logs to. - InstallLogsUploadProviderAWS = "aws" + InstallLogsUploadProviderAWS = PlatformAWS // InstallLogsCredentialsSecretRefEnvVar is the environment variable specifying what secret to use for storing logs. 
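// Illustrative aside, not part of the patch: the NewHelperFromRESTConfig ->
// NewHelper(logger, opts...) conversions in this patch follow the functional-options
// pattern. A generic sketch of that shape, assuming k8s.io/client-go/rest and
// sirupsen/logrus imports; all names below are invented for illustration, and hive's
// real option types in pkg/resource may differ internally:
type helperOpts struct {
	restConfig     *rest.Config
	controllerName string
	metrics        bool
}

type helperOpt func(*helperOpts)

func fromRESTConfig(c *rest.Config) helperOpt { return func(o *helperOpts) { o.restConfig = c } }
func withControllerName(n string) helperOpt   { return func(o *helperOpts) { o.controllerName = n } }
func withMetrics() helperOpt                  { return func(o *helperOpts) { o.metrics = true } }

func newHelper(logger log.FieldLogger, opts ...helperOpt) *helperOpts {
	o := &helperOpts{}
	for _, opt := range opts {
		opt(o)
	}
	// New knobs can be added later without touching any existing call site.
	logger.WithField("controller", o.controllerName).Debug("helper configured")
	return o
}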
InstallLogsCredentialsSecretRefEnvVar = "HIVE_INSTALL_LOGS_CREDENTIALS_SECRET" diff --git a/pkg/controller/argocdregister/argocdregister_controller_test.go b/pkg/controller/argocdregister/argocdregister_controller_test.go index 3c4079b57dd..15777f3ed45 100644 --- a/pkg/controller/argocdregister/argocdregister_controller_test.go +++ b/pkg/controller/argocdregister/argocdregister_controller_test.go @@ -15,7 +15,7 @@ import ( "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/rest" "k8s.io/client-go/tools/clientcmd" - "k8s.io/utils/pointer" + "k8s.io/utils/ptr" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/reconcile" @@ -287,8 +287,8 @@ func testClusterDeployment() *hivev1.ClusterDeployment { cd.Status = hivev1.ClusterDeploymentStatus{ APIURL: "http://test-api.test.com", - InstallerImage: pointer.String("installer-image:latest"), - CLIImage: pointer.String("cli:latest"), + InstallerImage: ptr.To("installer-image:latest"), + CLIImage: ptr.To("cli:latest"), } return cd diff --git a/pkg/controller/awsprivatelink/awsprivatelink_controller_test.go b/pkg/controller/awsprivatelink/awsprivatelink_controller_test.go index e3e9c6e0f7b..e7462ca56d0 100644 --- a/pkg/controller/awsprivatelink/awsprivatelink_controller_test.go +++ b/pkg/controller/awsprivatelink/awsprivatelink_controller_test.go @@ -446,7 +446,7 @@ type createVpcEndpointInputMatcher struct { vpcId string } -func (m createVpcEndpointInputMatcher) Matches(o interface{}) bool { +func (m createVpcEndpointInputMatcher) Matches(o any) bool { i, ok := o.(*ec2.CreateVpcEndpointInput) if !ok { return false @@ -2173,7 +2173,7 @@ func (m createHostedZoneInputMatcher) String() string { return "matches CreateHostedZoneInput devoid of CallerReference" } -func (m createHostedZoneInputMatcher) Matches(x interface{}) bool { +func (m createHostedZoneInputMatcher) Matches(x any) bool { xT, ok := x.(*route53.CreateHostedZoneInput) if !ok { return false diff --git a/pkg/controller/clusterdeployment/clusterdeployment_controller.go b/pkg/controller/clusterdeployment/clusterdeployment_controller.go index 9f1518c62fd..58db6c7db7e 100644 --- a/pkg/controller/clusterdeployment/clusterdeployment_controller.go +++ b/pkg/controller/clusterdeployment/clusterdeployment_controller.go @@ -28,7 +28,7 @@ import ( "k8s.io/client-go/rest" "k8s.io/client-go/util/flowcontrol" "k8s.io/client-go/util/workqueue" - "k8s.io/utils/pointer" + "k8s.io/utils/ptr" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/controller" "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" @@ -1382,7 +1382,7 @@ func (r *ReconcileClusterDeployment) resolveInstallerImage(cd *hivev1.ClusterDep if !areImagesResolved { if skipImageSetJob { cdLog.Info("skipping imageset job due to ClusterInstall.Spec.SkipImageSetJob") - cd.Status.InstallerImage = pointer.String("") + cd.Status.InstallerImage = ptr.To("") } else { // "resolve" the images via installerImageOverride installerImage := cd.Spec.Provisioning.InstallerImageOverride @@ -1394,7 +1394,7 @@ func (r *ReconcileClusterDeployment) resolveInstallerImage(cd *hivev1.ClusterDep } cd.Status.InstallerImage = &installerImage } - cd.Status.CLIImage = pointer.String("") + cd.Status.CLIImage = ptr.To("") changed1 = true } cd.Status.Conditions, changed2 = controllerutils.SetClusterDeploymentConditionWithChangeCheck( @@ -2721,7 +2721,7 @@ func addOwnershipToSecret(secret *corev1.Secret, cd *hivev1.ClusterDeployment, c Kind: cd.Kind, Name: cd.Name, UID: cd.UID, - BlockOwnerDeletion: 
pointer.Bool(true), + BlockOwnerDeletion: ptr.To(true), } cdRefChanged := librarygocontroller.EnsureOwnerRef(secret, cdRef) diff --git a/pkg/controller/clusterdeployment/clusterdeployment_controller_test.go b/pkg/controller/clusterdeployment/clusterdeployment_controller_test.go index 6e1eb1127ae..80d595dd880 100644 --- a/pkg/controller/clusterdeployment/clusterdeployment_controller_test.go +++ b/pkg/controller/clusterdeployment/clusterdeployment_controller_test.go @@ -4073,14 +4073,14 @@ func testEmptyFakeClusterInstall(name string) *unstructured.Unstructured { }) fake.SetNamespace(testNamespace) fake.SetName(name) - unstructured.SetNestedField(fake.UnstructuredContent(), map[string]interface{}{}, "spec") - unstructured.SetNestedField(fake.UnstructuredContent(), map[string]interface{}{}, "status") + unstructured.SetNestedField(fake.UnstructuredContent(), map[string]any{}, "spec") + unstructured.SetNestedField(fake.UnstructuredContent(), map[string]any{}, "status") return fake } func testFakeClusterInstall(name string) *unstructured.Unstructured { fake := testEmptyFakeClusterInstall(name) - unstructured.SetNestedField(fake.UnstructuredContent(), map[string]interface{}{ + unstructured.SetNestedField(fake.UnstructuredContent(), map[string]any{ "name": testClusterImageSetName, }, "spec", "imageSetRef") return fake @@ -4089,9 +4089,9 @@ func testFakeClusterInstall(name string) *unstructured.Unstructured { func testFakeClusterInstallWithConditions(name string, conditions []hivev1.ClusterInstallCondition) *unstructured.Unstructured { fake := testFakeClusterInstall(name) - value := []interface{}{} + value := []any{} for _, c := range conditions { - value = append(value, map[string]interface{}{ + value = append(value, map[string]any{ "type": string(c.Type), "status": string(c.Status), "reason": c.Reason, @@ -4106,22 +4106,22 @@ func testFakeClusterInstallWithConditions(name string, conditions []hivev1.Clust func testFakeClusterInstallWithClusterMetadata(name string, metadata hivev1.ClusterMetadata) *unstructured.Unstructured { fake := testFakeClusterInstall(name) - value := map[string]interface{}{ + value := map[string]any{ "clusterID": metadata.ClusterID, "infraID": metadata.InfraID, - "adminKubeconfigSecretRef": map[string]interface{}{ + "adminKubeconfigSecretRef": map[string]any{ "name": metadata.AdminKubeconfigSecretRef.Name, }, } if metadata.AdminPasswordSecretRef != nil { - value["adminPasswordSecretRef"] = map[string]interface{}{ + value["adminPasswordSecretRef"] = map[string]any{ "name": metadata.AdminPasswordSecretRef.Name, } } if metadata.MetadataJSONSecretRef != nil { - value["metadataJSONSecretRef"] = map[string]interface{}{ + value["metadataJSONSecretRef"] = map[string]any{ "name": metadata.MetadataJSONSecretRef.Name, } } diff --git a/pkg/controller/clusterdeployment/clusterprovisions.go b/pkg/controller/clusterdeployment/clusterprovisions.go index 773d5a943a0..291cbb16847 100644 --- a/pkg/controller/clusterdeployment/clusterprovisions.go +++ b/pkg/controller/clusterdeployment/clusterprovisions.go @@ -853,7 +853,7 @@ func (r *ReconcileClusterDeployment) resolveControllerRef(namespace string, cont } // When a clusterprovision is created, update the expectations of the clusterdeployment that owns the clusterprovision. 
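// Illustrative aside, not part of the patch: trackClusterProvisionAdd below, like
// trackClusterDeploymentAdd and trackJobAdd later in this patch, implements the
// controller "expectations" idiom: when the informer reports a create for a child
// object this controller itself requested, mark the creation observed so the
// reconciler can stop waiting for it. Generic shape of such a handler (the
// expectations interface here is assumed; hive vendors a variant of
// kube-controller-manager's, and metav1 is k8s.io/apimachinery/pkg/apis/meta/v1):
type creationExpectations interface{ CreationObserved(controllerKey string) }

func trackAddSketch(exp creationExpectations, obj any) {
	child, ok := obj.(metav1.Object)
	if !ok || child.GetDeletionTimestamp() != nil {
		// On a controller restart, a "new" object may already be pending deletion;
		// hive's real handlers account for that case separately.
		return
	}
	if ref := metav1.GetControllerOf(child); ref != nil {
		exp.CreationObserved(child.GetNamespace() + "/" + ref.Name)
	}
}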
-func (r *ReconcileClusterDeployment) trackClusterProvisionAdd(obj interface{}) { +func (r *ReconcileClusterDeployment) trackClusterProvisionAdd(obj any) { provision := obj.(*hivev1.ClusterProvision) if provision.DeletionTimestamp != nil { // on a restart of the controller, it's possible a new object shows up in a state that diff --git a/pkg/controller/clusterpool/clusterdeploymentexpectations.go b/pkg/controller/clusterpool/clusterdeploymentexpectations.go index d879d1589c6..59d9a1c4144 100644 --- a/pkg/controller/clusterpool/clusterdeploymentexpectations.go +++ b/pkg/controller/clusterpool/clusterdeploymentexpectations.go @@ -47,7 +47,7 @@ func (h *clusterDeploymentEventHandler) Create(ctx context.Context, e event.Type } // When a ClusterDeployment is created, update the expectations of the ClusterPool that owns the ClusterDeployment. -func (h *clusterDeploymentEventHandler) trackClusterDeploymentAdd(obj interface{}) { +func (h *clusterDeploymentEventHandler) trackClusterDeploymentAdd(obj any) { cd := obj.(*hivev1.ClusterDeployment) if cd.DeletionTimestamp != nil { // on a restart of the controller, it's possible a new object shows up in a state that diff --git a/pkg/controller/clusterpool/clusterpool_controller.go b/pkg/controller/clusterpool/clusterpool_controller.go index f493118398d..9adb1ea3c33 100644 --- a/pkg/controller/clusterpool/clusterpool_controller.go +++ b/pkg/controller/clusterpool/clusterpool_controller.go @@ -870,7 +870,7 @@ func (r *ReconcileClusterPool) createCluster( return cd, nil } -func isInstallConfigSecret(obj interface{}) *corev1.Secret { +func isInstallConfigSecret(obj any) *corev1.Secret { if secret, ok := obj.(*corev1.Secret); ok { _, ok := secret.StringData["install-config.yaml"] if ok { diff --git a/pkg/controller/clusterprovision/installlogmonitor_test.go b/pkg/controller/clusterprovision/installlogmonitor_test.go index 95d4a2e5b4b..c5e7dcf6261 100644 --- a/pkg/controller/clusterprovision/installlogmonitor_test.go +++ b/pkg/controller/clusterprovision/installlogmonitor_test.go @@ -11,7 +11,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/serializer" - "k8s.io/utils/pointer" + "k8s.io/utils/ptr" "github.com/openshift/hive/pkg/constants" testfake "github.com/openshift/hive/pkg/test/fake" @@ -83,132 +83,132 @@ func TestParseInstallLog(t *testing.T) { }{ { name: "AWSSubnetInsufficientIPSpace", - log: pointer.String(awsSubnetInsufficientIPSpace), + log: ptr.To(awsSubnetInsufficientIPSpace), expectedReason: "AWSSubnetInsufficientIPSpace", }, { name: "AWSSubnetTagLimitExceeded", - log: pointer.String(awsSubnetTagLimitExceeded), + log: ptr.To(awsSubnetTagLimitExceeded), expectedReason: "AWSSubnetTagLimitExceeded", }, { name: "IngressOperatorDegraded", - log: pointer.String(ingressOperatorDegraded), + log: ptr.To(ingressOperatorDegraded), expectedReason: "IngressOperatorDegraded", }, { name: "S3AccessControlListNotSupported", - log: pointer.String(s3AccessControlListNotSupported), + log: ptr.To(s3AccessControlListNotSupported), expectedReason: "S3AccessControlListNotSupported", }, { name: "NoWorkerNodesReady", - log: pointer.String(noWorkerNodesReady), + log: ptr.To(noWorkerNodesReady), expectedReason: "NoWorkerNodesReady", }, { name: "Gp3VolumeLimitExceeded", - log: pointer.String(gp3VolumeLimitExceeded), + log: ptr.To(gp3VolumeLimitExceeded), expectedReason: "Gp3VolumeLimitExceeded", }, { name: "DefaultEbsKmsKeyInsufficientPermissions", - log: 
pointer.String(defaultEbsKmsKeyInsufficientPermissions), + log: ptr.To(defaultEbsKmsKeyInsufficientPermissions), expectedReason: "DefaultEbsKmsKeyInsufficientPermissions", }, { name: "AWSInsufficientCapacity", - log: pointer.String(awsInsufficientCapacity), + log: ptr.To(awsInsufficientCapacity), expectedReason: "AWSInsufficientCapacity", }, { name: "load balancer service linked role prereq", - log: pointer.String(accessDeniedSLR), + log: ptr.To(accessDeniedSLR), expectedReason: "AWSAccessDeniedSLR", }, { name: "DNS already exists", - log: pointer.String(dnsAlreadyExistsLog), + log: ptr.To(dnsAlreadyExistsLog), expectedReason: "DNSAlreadyExists", }, { name: "PendingVerification", - log: pointer.String(pendingVerificationLog), + log: ptr.To(pendingVerificationLog), expectedReason: "PendingVerification", }, { name: "Wildcard", - log: pointer.String(gcpInvalidProjectIDLog), + log: ptr.To(gcpInvalidProjectIDLog), expectedReason: "GCPInvalidProjectID", }, { name: "Escaped single quotes", - log: pointer.String(gcpSSDQuotaLog), + log: ptr.To(gcpSSDQuotaLog), expectedReason: "GCPQuotaSSDTotalGBExceeded", }, { name: "AWSNATGatewayLimitExceeded", - log: pointer.String(natGatewayLimitExceeded), + log: ptr.To(natGatewayLimitExceeded), expectedReason: "AWSNATGatewayLimitExceeded", }, { name: "AWSVPCLimitExceeded", - log: pointer.String(vpcLimitExceeded), + log: ptr.To(vpcLimitExceeded), expectedReason: "AWSVPCLimitExceeded", }, { name: "AWSRoute53LimitExceeded", - log: pointer.String(route53LimitExceeded), + log: ptr.To(route53LimitExceeded), expectedReason: "TooManyRoute53Zones", }, { name: "Generic ResourceLimitExceeded", - log: pointer.String(genericLimitExceeded), + log: ptr.To(genericLimitExceeded), expectedReason: "FallbackResourceLimitExceeded", }, { name: "Credentials are invalid", - log: pointer.String(invalidCredentials), + log: ptr.To(invalidCredentials), expectedReason: "InvalidCredentials", }, { name: "Failed waiting for Kubernetes API", - log: pointer.String(kubeAPIWaitFailedLog), + log: ptr.To(kubeAPIWaitFailedLog), expectedReason: "KubeAPIWaitFailed", }, { name: "ProxyTimeout", - log: pointer.String(proxyTimeoutLog), + log: ptr.To(proxyTimeoutLog), expectedReason: "ProxyTimeout", }, { name: "Proxy Connection Refused", - log: pointer.String(proxyConnectionRefused), + log: ptr.To(proxyConnectionRefused), expectedReason: "ProxyTimeout", }, { name: "Proxy No Route To Host", - log: pointer.String(proxyNoRouteToHost), + log: ptr.To(proxyNoRouteToHost), expectedReason: "ProxyTimeout", }, { name: "ProxyInvalidCABundle", - log: pointer.String(proxyInvalidCABundleLog), + log: ptr.To(proxyInvalidCABundleLog), expectedReason: "ProxyInvalidCABundle", }, { name: "AWSDeniedBySCP", - log: pointer.String(awsDeniedByScp), + log: ptr.To(awsDeniedByScp), expectedReason: "AWSDeniedBySCP", }, { name: "AWSUnauthorizedBySCP", - log: pointer.String(awsUnauthorizedByScp), + log: ptr.To(awsUnauthorizedByScp), expectedReason: "AWSDeniedBySCP", }, { name: "KubeAPIWaitTimeout from additional regex entries", - log: pointer.String(kubeAPIWaitTimeoutLog), + log: ptr.To(kubeAPIWaitTimeoutLog), existing: []runtime.Object{ &corev1.ConfigMap{ ObjectMeta: metav1.ObjectMeta{ @@ -245,7 +245,7 @@ func TestParseInstallLog(t *testing.T) { }, { name: "regexes take precedence over additionalRegexes", - log: pointer.String(kubeAPIWaitTimeoutLog), + log: ptr.To(kubeAPIWaitTimeoutLog), existing: []runtime.Object{ &corev1.ConfigMap{ ObjectMeta: metav1.ObjectMeta{ @@ -286,19 +286,19 @@ func TestParseInstallLog(t *testing.T) { }, { 
name: "no matching log", - log: pointer.String(noMatchLog), + log: ptr.To(noMatchLog), expectedReason: unknownReason, - expectedMessage: pointer.String(noMatchLog), + expectedMessage: ptr.To(noMatchLog), }, { name: "missing regex configmap", - log: pointer.String(dnsAlreadyExistsLog), + log: ptr.To(dnsAlreadyExistsLog), existing: []runtime.Object{}, expectedReason: unknownReason, }, { name: "missing regexes data entry", - log: pointer.String(dnsAlreadyExistsLog), + log: ptr.To(dnsAlreadyExistsLog), existing: []runtime.Object{&corev1.ConfigMap{ ObjectMeta: metav1.ObjectMeta{ Name: regexConfigMapName, @@ -309,7 +309,7 @@ func TestParseInstallLog(t *testing.T) { }, { name: "malformed regex", - log: pointer.String(dnsAlreadyExistsLog), + log: ptr.To(dnsAlreadyExistsLog), existing: []runtime.Object{&corev1.ConfigMap{ ObjectMeta: metav1.ObjectMeta{ Name: regexConfigMapName, @@ -323,7 +323,7 @@ func TestParseInstallLog(t *testing.T) { }, { name: "skip bad regex entry", - log: pointer.String(dnsAlreadyExistsLog), + log: ptr.To(dnsAlreadyExistsLog), existing: []runtime.Object{&corev1.ConfigMap{ ObjectMeta: metav1.ObjectMeta{ Name: regexConfigMapName, @@ -348,7 +348,7 @@ func TestParseInstallLog(t *testing.T) { }, { name: "skip bad search string", - log: pointer.String(dnsAlreadyExistsLog), + log: ptr.To(dnsAlreadyExistsLog), existing: []runtime.Object{&corev1.ConfigMap{ ObjectMeta: metav1.ObjectMeta{ Name: regexConfigMapName, @@ -369,112 +369,112 @@ func TestParseInstallLog(t *testing.T) { }, { name: "GCP compute quota", - log: pointer.String(gcpCPUQuotaLog), + log: ptr.To(gcpCPUQuotaLog), expectedReason: "GCPComputeQuotaExceeded", }, { name: "GCP service account quota", - log: pointer.String(gcpServiceAccountQuotaLog), + log: ptr.To(gcpServiceAccountQuotaLog), expectedReason: "GCPServiceAccountQuotaExceeded", }, { name: "Can't delete IAM role", - log: pointer.String(awsDeleteRoleFailed), + log: ptr.To(awsDeleteRoleFailed), expectedReason: "ErrorDeletingIAMRole", }, { name: "AWSSubnetDoesNotExist", - log: pointer.String(subnetDoesNotExist), + log: ptr.To(subnetDoesNotExist), expectedReason: "AWSSubnetDoesNotExist", }, { name: "NATGatewayFailed", - log: pointer.String(natGateWayFailed), + log: ptr.To(natGateWayFailed), expectedReason: "NATGatewayFailed", }, { name: "AWSInsufficientPermissions", - log: pointer.String(insufficientPermissions), + log: ptr.To(insufficientPermissions), expectedReason: "AWSInsufficientPermissions", }, { name: "LoadBalancerLimitExceeded", - log: pointer.String(loadBalancerLimitExceeded), + log: ptr.To(loadBalancerLimitExceeded), expectedReason: "LoadBalancerLimitExceeded", }, { name: "AWSEC2QuotaExceeded", - log: pointer.String(awsEC2QuotaExceeded), + log: ptr.To(awsEC2QuotaExceeded), expectedReason: "AWSEC2QuotaExceeded", }, { name: "BootstrapFailed", - log: pointer.String(bootstrapFailed), + log: ptr.To(bootstrapFailed), expectedReason: "BootstrapFailed", }, { name: "KubeAPIWaitFailed", - log: pointer.String(kubeAPIWaitFailedLog), + log: ptr.To(kubeAPIWaitFailedLog), expectedReason: "KubeAPIWaitFailed", }, { name: "GenericBootstrapFailed", - log: pointer.String(genericBootstrapFailed), + log: ptr.To(genericBootstrapFailed), expectedReason: "GenericBootstrapFailed", }, { name: "AWSRoute53Timeout", - log: pointer.String(route53Timeout), + log: ptr.To(route53Timeout), expectedReason: "AWSRoute53Timeout", }, { name: "InconsistentTerraformResult", - log: pointer.String(inconsistentTerraformResult), + log: ptr.To(inconsistentTerraformResult), expectedReason: 
"InconsistentTerraformResult", }, { name: "MultipleRoute53ZonesFound", - log: pointer.String(multipleRoute53ZonesFound), + log: ptr.To(multipleRoute53ZonesFound), expectedReason: "MultipleRoute53ZonesFound", }, { name: "AWSVPCDoesNotExist", - log: pointer.String(awsInvalidVpcId), + log: ptr.To(awsInvalidVpcId), expectedReason: "AWSVPCDoesNotExist", }, { name: "TargetGroupNotFound", - log: pointer.String(targetGroupNotFound), + log: ptr.To(targetGroupNotFound), expectedReason: "TargetGroupNotFound", }, { name: "ErrorCreatingNetworkLoadBalancer", - log: pointer.String(errorCreatingNLB), + log: ptr.To(errorCreatingNLB), expectedReason: "ErrorCreatingNetworkLoadBalancer", }, { name: "ErrorDestroyingBootstrapResources", - log: pointer.String(terraformFailedDelete), + log: ptr.To(terraformFailedDelete), expectedReason: "InstallerFailedToDestroyResources", }, { name: "AltErrorDestroyingBootstrapResources", - log: pointer.String(terraformFailedDestroy), + log: ptr.To(terraformFailedDestroy), expectedReason: "InstallerFailedToDestroyResources", }, { name: "AWSAccountBlocked", - log: pointer.String(awsAccountBlocked), + log: ptr.To(awsAccountBlocked), expectedReason: "AWSAccountIsBlocked", }, { name: "InstallConfigAuthFail", - log: pointer.String(installConfigAuthFail), + log: ptr.To(installConfigAuthFail), expectedReason: "InstallConfigNetworkAuthFail", }, { name: "InstallConfigCACert", - log: pointer.String(installConfigBadCACert), + log: ptr.To(installConfigBadCACert), expectedReason: "InstallConfigNetworkBadCACert", }, } diff --git a/pkg/controller/clusterprovision/jobexpectations.go b/pkg/controller/clusterprovision/jobexpectations.go index 7a0934ad989..74739e6fc02 100644 --- a/pkg/controller/clusterprovision/jobexpectations.go +++ b/pkg/controller/clusterprovision/jobexpectations.go @@ -67,7 +67,7 @@ func (r *ReconcileClusterProvision) resolveControllerRef(namespace string, contr } // When a job is created, update the expectations of the clusterprovision that owns the job. 
-func (r *ReconcileClusterProvision) trackJobAdd(obj interface{}) { +func (r *ReconcileClusterProvision) trackJobAdd(obj any) { job := obj.(*batchv1.Job) if job.DeletionTimestamp != nil { // on a restart of the controller, it's possible a new object shows up in a state that diff --git a/pkg/controller/clustersync/clustersync_controller.go b/pkg/controller/clustersync/clustersync_controller.go index 366c183afa1..71f4ea8cb22 100644 --- a/pkg/controller/clustersync/clustersync_controller.go +++ b/pkg/controller/clustersync/clustersync_controller.go @@ -172,7 +172,7 @@ func resourceHelperBuilderFunc( return nil, err } - return resource.NewHelperFromRESTConfig(restConfig, ControllerName, logger) + return resource.NewHelper(logger, resource.FromRESTConfig(restConfig), resource.WithControllerName(ControllerName)) } // AddToManager adds a new Controller to mgr with r as the reconcile.Reconciler diff --git a/pkg/controller/clustersync/clustersync_controller_test.go b/pkg/controller/clustersync/clustersync_controller_test.go index 938f13c791c..1cdec6efb66 100644 --- a/pkg/controller/clustersync/clustersync_controller_test.go +++ b/pkg/controller/clustersync/clustersync_controller_test.go @@ -13,7 +13,7 @@ import ( log "github.com/sirupsen/logrus" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "k8s.io/utils/pointer" + "k8s.io/utils/ptr" corev1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" @@ -184,7 +184,7 @@ func (rt *reconcileTest) run(t *testing.T) { Kind: "ClusterDeployment", Name: testCDName, UID: testCDUID, - BlockOwnerDeletion: pointer.BoolPtr(true), + BlockOwnerDeletion: ptr.To(true), } assert.Contains(t, clusterSync.OwnerReferences, expectedOwnerReferenceFromClusterSync, "expected owner reference from ClusterSync to ClusterDeployment") @@ -193,7 +193,7 @@ func (rt *reconcileTest) run(t *testing.T) { Kind: "ClusterSync", Name: testClusterSyncName, UID: testClusterSyncUID, - BlockOwnerDeletion: pointer.BoolPtr(true), + BlockOwnerDeletion: ptr.To(true), } assert.Contains(t, lease.OwnerReferences, expectedOwnerReferenceFromLease, "expected owner reference from ClusterSyncLease to ClusterSync") @@ -1161,16 +1161,16 @@ func TestReconcileClusterSync_ResourcesToDeleteAreOrdered(t *testing.T) { testSecretMapping("test-secret", "namespace-B", "name-A"), } permutation := 0 - roa := make([]interface{}, len(resourcesToApply)) + roa := make([]any, len(resourcesToApply)) for i, r := range resourcesToApply { roa[i] = r } - sm := make([]interface{}, len(secretMappings)) + sm := make([]any, len(secretMappings)) for i, m := range secretMappings { sm[i] = m } - permute(roa, func(roa []interface{}) { - permute(sm, func(sm []interface{}) { + permute(roa, func(roa []any) { + permute(sm, func(sm []any) { resourcesToApply = make([]hivev1.MetaRuntimeObject, len(roa)) for i, r := range roa { resourcesToApply[i] = r.(hivev1.MetaRuntimeObject) @@ -2295,7 +2295,7 @@ func buildSyncLease(t time.Time) *hiveintv1alpha1.ClusterSyncLease { Kind: "ClusterSync", Name: testClusterSyncName, UID: testClusterSyncUID, - BlockOwnerDeletion: pointer.BoolPtr(true), + BlockOwnerDeletion: ptr.To(true), }}, }, Spec: hiveintv1alpha1.ClusterSyncLeaseSpec{ @@ -2369,7 +2369,7 @@ func newApplyMatcher(resource hivev1.MetaRuntimeObject) gomock.Matcher { return &applyMatcher{resource: u} } -func (m *applyMatcher) Matches(x interface{}) bool { +func (m *applyMatcher) Matches(x any) bool { rawData, ok := x.([]byte) if !ok { return false @@ -2390,7 +2390,7 @@ func (m *applyMatcher) String() string { 
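// Illustrative aside, not part of the patch: applyMatcher and the other matchers in
// this file implement gomock's Matcher interface (Matches(any) bool plus
// String() string); the extra Got(any) string method satisfies gomock's optional
// GotFormatter, which controls how a mismatched argument is rendered in failure
// output. A minimal custom matcher of the same shape:
type hasPrefixMatcher struct{ prefix string }

func (m hasPrefixMatcher) Matches(x any) bool {
	s, ok := x.(string)
	return ok && strings.HasPrefix(s, m.prefix)
}

func (m hasPrefixMatcher) String() string { return "has prefix " + m.prefix }

// Got is optional; without it, gomock prints the raw %v of the argument.
func (m hasPrefixMatcher) Got(got any) string { return fmt.Sprintf("%q", got) }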
) } -func (m *applyMatcher) Got(got interface{}) string { +func (m *applyMatcher) Got(got any) string { switch t := got.(type) { case []byte: return string(t) @@ -2422,7 +2422,7 @@ func newYamlApplyMatcher(t *testing.T, yamlString string) gomock.Matcher { // Matches implements gomock.Matcher for yamlApplyMatcher such that, when a mismatch occurs, // the delta is emitted as a JSON patch string, allowing quick and easy visualization. -func (m *yamlApplyMatcher) Matches(x interface{}) bool { +func (m *yamlApplyMatcher) Matches(x any) bool { bytes, ok := x.([]byte) assert.True(m.t, ok, "Unexpectedly got %T instead of []byte", x) diff, err := jsonpatch.CreateMergePatch(m.want, bytes) @@ -2436,7 +2436,7 @@ func (m *yamlApplyMatcher) String() string { return string(m.want) } -func (m *yamlApplyMatcher) Got(got interface{}) string { +func (m *yamlApplyMatcher) Got(got any) string { switch t := got.(type) { case []byte: return string(t) @@ -2455,7 +2455,7 @@ func newByteMatcher(s string) gomock.Matcher { return &byteMatcher{want: s} } -func (m *byteMatcher) Matches(x interface{}) bool { +func (m *byteMatcher) Matches(x any) bool { return string(x.([]byte)) == m.want } @@ -2463,11 +2463,11 @@ func (m *byteMatcher) String() string { return m.want } -func (m *byteMatcher) Got(got interface{}) string { +func (m *byteMatcher) Got(got any) string { return string(got.([]byte)) } -func permute(x []interface{}, foo func([]interface{})) { +func permute(x []any, foo func([]any)) { switch l := len(x); l { case 0: case 1: @@ -2475,7 +2475,7 @@ func permute(x []interface{}, foo func([]interface{})) { default: for i := 0; i < l; i++ { x[0], x[i] = x[i], x[0] - permute(x[1:], func(y []interface{}) { + permute(x[1:], func(y []any) { foo(append(x[0:1], y...)) }) x[0], x[i] = x[i], x[0] diff --git a/pkg/controller/clustersync/templates.go b/pkg/controller/clustersync/templates.go index 1054f2f886e..d850759cd3b 100644 --- a/pkg/controller/clustersync/templates.go +++ b/pkg/controller/clustersync/templates.go @@ -60,9 +60,9 @@ func fromCDLabel(cd *hivev1.ClusterDeployment) func(string) string { // The template is executed with a `nil` data object -- i.e. templates referring to `.` ("dot") // will not work. This really exists only to support invoking functions in the template's FuncMap. // We expect `v` to be a descendant of an Unstructured.Object, and thus limited to types -// string, float, int, bool, []interface{}, or map[string]interface{} (where the list/map -// interface{} values are similarly limited, recursively). -func applyTemplate(t *template.Template, v interface{}) (interface{}, error) { +// string, float, int, bool, []any, or map[string]any (where the list/map +// any values are similarly limited, recursively). 
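// Illustrative aside, not part of the patch: the doc comment above (and
// applyTemplate, whose new signature opens the next hunk) deals with the fixed set of
// value kinds that can appear inside an Unstructured object. A stand-in showing the
// same shape of recursion, with a plain string transform in place of template
// execution:
func walkStrings(v any, f func(string) string) any {
	switch t := v.(type) {
	case string:
		return f(t)
	case []any:
		out := make([]any, len(t))
		for i, e := range t {
			out[i] = walkStrings(e, f)
		}
		return out
	case map[string]any:
		out := make(map[string]any, len(t))
		for k, e := range t {
			out[k] = walkStrings(e, f)
		}
		return out
	default:
		// bool, int64, float64, nil: nothing to template; pass through unchanged.
		return t
	}
}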
+func applyTemplate(t *template.Template, v any) (any, error) { ival := reflect.ValueOf(v) switch ival.Kind() { case reflect.String: diff --git a/pkg/controller/controlplanecerts/controlplanecerts_controller.go b/pkg/controller/controlplanecerts/controlplanecerts_controller.go index a1ae4c51b67..9fa4aafd23f 100644 --- a/pkg/controller/controlplanecerts/controlplanecerts_controller.go +++ b/pkg/controller/controlplanecerts/controlplanecerts_controller.go @@ -82,7 +82,11 @@ func Add(mgr manager.Manager) error { // NewReconciler returns a new reconcile.Reconciler func NewReconciler(mgr manager.Manager, rateLimiter flowcontrol.RateLimiter) reconcile.Reconciler { logger := log.WithField("controller", ControllerName) - helper, err := resource.NewHelperWithMetricsFromRESTConfig(mgr.GetConfig(), ControllerName, logger) + helper, err := resource.NewHelper( + logger, + resource.FromRESTConfig(mgr.GetConfig()), + resource.WithControllerName(ControllerName), + resource.WithMetrics()) if err != nil { // Hard exit if we can't create this controller logger.WithError(err).Fatal("unable to create resource helper") diff --git a/pkg/controller/dnsendpoint/dnsendpoint_controller_test.go b/pkg/controller/dnsendpoint/dnsendpoint_controller_test.go index 5b9b8098122..dfa1984a999 100644 --- a/pkg/controller/dnsendpoint/dnsendpoint_controller_test.go +++ b/pkg/controller/dnsendpoint/dnsendpoint_controller_test.go @@ -101,13 +101,13 @@ func TestDNSEndpointReconcile(t *testing.T) { endpointsBySubdomain: endpointsBySubdomain{ dnsName: endpointState{ dnsZone: testDNSZone(), - nsValues: sets.New[string]("test-value-1", "test-value-2"), + nsValues: sets.New("test-value-1", "test-value-2"), }, }, }, }, configureQuery: func(mockQuery *mock.MockQuery) { - mockQuery.EXPECT().CreateOrUpdate(rootDomain, dnsName, sets.New[string]("test-value-1", "test-value-2", "test-value-3")).Return(nil) + mockQuery.EXPECT().CreateOrUpdate(rootDomain, dnsName, sets.New("test-value-1", "test-value-2", "test-value-3")).Return(nil) }, expectedNameServers: rootDomainsMap{ rootDomain: &rootDomainsInfo{ @@ -115,7 +115,7 @@ func TestDNSEndpointReconcile(t *testing.T) { endpointsBySubdomain: endpointsBySubdomain{ dnsName: endpointState{ dnsZone: testDNSZone(), - nsValues: sets.New[string]("test-value-1", "test-value-2", "test-value-3"), + nsValues: sets.New("test-value-1", "test-value-2", "test-value-3"), }, }, }, @@ -137,7 +137,7 @@ func TestDNSEndpointReconcile(t *testing.T) { endpointsBySubdomain: endpointsBySubdomain{ dnsName: endpointState{ dnsZone: testDNSZone(), - nsValues: sets.New[string]("test-value-1", "test-value-2", "test-value-3"), + nsValues: sets.New("test-value-1", "test-value-2", "test-value-3"), }, }, }, @@ -148,7 +148,7 @@ func TestDNSEndpointReconcile(t *testing.T) { endpointsBySubdomain: endpointsBySubdomain{ dnsName: endpointState{ dnsZone: testDNSZone(), - nsValues: sets.New[string]("test-value-1", "test-value-2", "test-value-3"), + nsValues: sets.New("test-value-1", "test-value-2", "test-value-3"), }, }, }, @@ -170,13 +170,13 @@ func TestDNSEndpointReconcile(t *testing.T) { endpointsBySubdomain: endpointsBySubdomain{ dnsName: endpointState{ dnsZone: testDNSZone(), - nsValues: sets.New[string]("old-value"), + nsValues: sets.New("old-value"), }, }, }, }, configureQuery: func(mockQuery *mock.MockQuery) { - mockQuery.EXPECT().CreateOrUpdate(rootDomain, dnsName, sets.New[string]("test-value-1", "test-value-2", "test-value-3")).Return(nil) + mockQuery.EXPECT().CreateOrUpdate(rootDomain, dnsName, sets.New("test-value-1", 
"test-value-2", "test-value-3")).Return(nil) }, expectedNameServers: rootDomainsMap{ rootDomain: &rootDomainsInfo{ @@ -184,7 +184,7 @@ func TestDNSEndpointReconcile(t *testing.T) { endpointsBySubdomain: endpointsBySubdomain{ dnsName: endpointState{ dnsZone: testDNSZone(), - nsValues: sets.New[string]("test-value-1", "test-value-2", "test-value-3"), + nsValues: sets.New("test-value-1", "test-value-2", "test-value-3"), }, }, }, @@ -206,13 +206,13 @@ func TestDNSEndpointReconcile(t *testing.T) { endpointsBySubdomain: endpointsBySubdomain{ dnsName: endpointState{ dnsZone: testDNSZone(), - nsValues: sets.New[string]("test-value-1", "test-value-2", "test-value-3"), + nsValues: sets.New("test-value-1", "test-value-2", "test-value-3"), }, }, }, }, configureQuery: func(mockQuery *mock.MockQuery) { - mockQuery.EXPECT().Delete(rootDomain, dnsName, sets.New[string]("test-value-1", "test-value-2", "test-value-3")).Return(nil) + mockQuery.EXPECT().Delete(rootDomain, dnsName, sets.New("test-value-1", "test-value-2", "test-value-3")).Return(nil) }, expectedNameServers: rootDomainsMap{ rootDomain: &rootDomainsInfo{ @@ -231,13 +231,13 @@ func TestDNSEndpointReconcile(t *testing.T) { endpointsBySubdomain: endpointsBySubdomain{ dnsName: endpointState{ dnsZone: testDNSZone(), - nsValues: sets.New[string]("old-value"), + nsValues: sets.New("old-value"), }, }, }, }, configureQuery: func(mockQuery *mock.MockQuery) { - mockQuery.EXPECT().CreateOrUpdate(rootDomain, dnsName, sets.New[string]("test-value-1", "test-value-2", "test-value-3")). + mockQuery.EXPECT().CreateOrUpdate(rootDomain, dnsName, sets.New("test-value-1", "test-value-2", "test-value-3")). Return(errors.New("create error")) }, expectErr: true, @@ -247,7 +247,7 @@ func TestDNSEndpointReconcile(t *testing.T) { endpointsBySubdomain: endpointsBySubdomain{ dnsName: endpointState{ dnsZone: testDNSZone(), - nsValues: sets.New[string]("old-value"), + nsValues: sets.New("old-value"), }, }, }, @@ -268,13 +268,13 @@ func TestDNSEndpointReconcile(t *testing.T) { endpointsBySubdomain: endpointsBySubdomain{ dnsName: endpointState{ dnsZone: testDNSZone(), - nsValues: sets.New[string]("old-value"), + nsValues: sets.New("old-value"), }, }, }, }, configureQuery: func(mockQuery *mock.MockQuery) { - mockQuery.EXPECT().Delete(rootDomain, dnsName, sets.New[string]("old-value")). + mockQuery.EXPECT().Delete(rootDomain, dnsName, sets.New("old-value")). 
Return(errors.New("delete error")) }, expectErr: true, @@ -284,7 +284,7 @@ func TestDNSEndpointReconcile(t *testing.T) { endpointsBySubdomain: endpointsBySubdomain{ dnsName: endpointState{ dnsZone: testDNSZone(), - nsValues: sets.New[string]("old-value"), + nsValues: sets.New("old-value"), }, }, }, @@ -368,13 +368,13 @@ func TestDNSEndpointReconcile(t *testing.T) { endpointsBySubdomain: endpointsBySubdomain{ dnsName: endpointState{ dnsZone: testDNSZone(), - nsValues: sets.New[string]("test-value-1", "test-value-2", "test-value-3"), + nsValues: sets.New("test-value-1", "test-value-2", "test-value-3"), }, }, }, }, configureQuery: func(mockQuery *mock.MockQuery) { - mockQuery.EXPECT().Delete(rootDomain, dnsName, sets.New[string]("test-value-1", "test-value-2", "test-value-3")).Return(nil) + mockQuery.EXPECT().Delete(rootDomain, dnsName, sets.New("test-value-1", "test-value-2", "test-value-3")).Return(nil) }, expectedNameServers: rootDomainsMap{ rootDomain: &rootDomainsInfo{ @@ -480,7 +480,7 @@ func validateConditions(t *testing.T, dnsZone *hivev1.DNSZone, conditions []cond } type fakeManager struct { - watchedDomains map[string]bool + watchedDomains sets.Set[string] } func (fm *fakeManager) Add(mgr manager.Runnable) error { @@ -490,7 +490,7 @@ func (fm *fakeManager) Add(mgr manager.Runnable) error { if ok { // record which domains are being watched/scraped for domainKey := range scraper.rootDomainsMap { - fm.watchedDomains[domainKey] = true + fm.watchedDomains.Insert(domainKey) } } return nil @@ -499,7 +499,7 @@ func (fm *fakeManager) Add(mgr manager.Runnable) error { func (*fakeManager) Elected() <-chan struct{} { panic("not implemented") } -func (*fakeManager) SetFields(interface{}) error { +func (*fakeManager) SetFields(any) error { panic("not implemented") } func (*fakeManager) AddMetricsExtraHandler(path string, handler http.Handler) error { @@ -664,7 +664,7 @@ func TestMultiCloudDNSSetup(t *testing.T) { // Run/set up reconciler fakeClient := testfake.NewFakeClientBuilder().Build() fakeMgr := &fakeManager{ - watchedDomains: map[string]bool{}, + watchedDomains: sets.New[string](), } reconciler, _, err := newReconciler(fakeMgr, fakeClient) @@ -684,7 +684,7 @@ func TestMultiCloudDNSSetup(t *testing.T) { } } assert.True(t, found, "failed to find scraper for domain %s", domain) - assert.True(t, fakeMgr.watchedDomains[domain], "failed to record domain %s as being watched", domain) + assert.True(t, fakeMgr.watchedDomains.Has(domain), "failed to record domain %s as being watched", domain) } } } diff --git a/pkg/controller/dnsendpoint/nameserver/aws_live_test.go b/pkg/controller/dnsendpoint/nameserver/aws_live_test.go index d8b96d97284..5223b56ab1a 100644 --- a/pkg/controller/dnsendpoint/nameserver/aws_live_test.go +++ b/pkg/controller/dnsendpoint/nameserver/aws_live_test.go @@ -5,7 +5,6 @@ import ( "math/rand" "os" "testing" - "time" "github.com/stretchr/testify/suite" @@ -24,7 +23,6 @@ func TestLiveAWS(t *testing.T) { if rootDomain == "" { t.SkipNow() } - rand.Seed(time.Now().UnixNano()) suite.Run(t, &LiveAWSTestSuite{rootDomain: rootDomain}) } diff --git a/pkg/controller/dnsendpoint/nameserver/aws_test.go b/pkg/controller/dnsendpoint/nameserver/aws_test.go index bf365dece7e..c1079c8bf06 100644 --- a/pkg/controller/dnsendpoint/nameserver/aws_test.go +++ b/pkg/controller/dnsendpoint/nameserver/aws_test.go @@ -57,7 +57,7 @@ func TestAWSGet(t *testing.T) { )), }, expectedNameServers: map[string]sets.Set[string]{ - "test-subdomain": sets.New[string]("test-ns"), + "test-subdomain": 
sets.New("test-ns"), }, }, { @@ -78,7 +78,7 @@ func TestAWSGet(t *testing.T) { )), }, expectedNameServers: map[string]sets.Set[string]{ - "test-subdomain": sets.New[string]("test-ns"), + "test-subdomain": sets.New("test-ns"), }, }, { @@ -112,7 +112,7 @@ func TestAWSGet(t *testing.T) { )), }, expectedNameServers: map[string]sets.Set[string]{ - "test-subdomain": sets.New[string]("test-ns"), + "test-subdomain": sets.New("test-ns"), }, }, { @@ -126,7 +126,7 @@ func TestAWSGet(t *testing.T) { )), }, expectedNameServers: map[string]sets.Set[string]{ - "test-subdomain": sets.New[string]("test-ns-1", "test-ns-2", "test-ns-3"), + "test-subdomain": sets.New("test-ns-1", "test-ns-2", "test-ns-3"), }, }, { @@ -142,9 +142,9 @@ func TestAWSGet(t *testing.T) { )), }, expectedNameServers: map[string]sets.Set[string]{ - "test-subdomain-1": sets.New[string]("test-ns-1"), - "test-subdomain-2": sets.New[string]("test-ns-2"), - "test-subdomain-3": sets.New[string]("test-ns-3"), + "test-subdomain-1": sets.New("test-ns-1"), + "test-subdomain-2": sets.New("test-ns-2"), + "test-subdomain-3": sets.New("test-ns-3"), }, }, { @@ -167,8 +167,8 @@ func TestAWSGet(t *testing.T) { ), }, expectedNameServers: map[string]sets.Set[string]{ - "test-subdomain-1": sets.New[string]("test-ns-1"), - "test-subdomain-2": sets.New[string]("test-ns-2"), + "test-subdomain-1": sets.New("test-ns-1"), + "test-subdomain-2": sets.New("test-ns-2"), }, }, } diff --git a/pkg/controller/dnsendpoint/nameserver/azure_live_test.go b/pkg/controller/dnsendpoint/nameserver/azure_live_test.go index 163b32df7a1..7c4b77871fa 100644 --- a/pkg/controller/dnsendpoint/nameserver/azure_live_test.go +++ b/pkg/controller/dnsendpoint/nameserver/azure_live_test.go @@ -7,7 +7,6 @@ import ( "os/user" "path/filepath" "testing" - "time" "github.com/stretchr/testify/suite" "k8s.io/apimachinery/pkg/util/sets" @@ -29,7 +28,6 @@ func TestLiveAzure(t *testing.T) { if rootDomain == "" { t.SkipNow() } - rand.Seed(time.Now().UnixNano()) suite.Run(t, &LiveAzureTestSuite{ resourceGroupName: resourceGroupName, rootDomain: rootDomain, diff --git a/pkg/controller/dnsendpoint/nameserver/azure_test.go b/pkg/controller/dnsendpoint/nameserver/azure_test.go index a54440a48d1..7668f2ee6ad 100644 --- a/pkg/controller/dnsendpoint/nameserver/azure_test.go +++ b/pkg/controller/dnsendpoint/nameserver/azure_test.go @@ -30,7 +30,7 @@ func TestAzureGet(t *testing.T) { azure.recordSetPage(azure.withRecordSets(azure.recordSet("test-subdomain", "NS", "test-ns"))), }, expectedNameServers: map[string]sets.Set[string]{ - "test-subdomain.test-domain": sets.New[string]("test-ns"), + "test-subdomain.test-domain": sets.New("test-ns"), }, }, { @@ -51,7 +51,7 @@ func TestAzureGet(t *testing.T) { azure.recordSetPage(azure.withRecordSets(azure.recordSet("test-subdomain", "NS", "test-ns-1", "test-ns-2", "test-ns-3"))), }, expectedNameServers: map[string]sets.Set[string]{ - "test-subdomain.test-domain": sets.New[string]("test-ns-1", "test-ns-2", "test-ns-3"), + "test-subdomain.test-domain": sets.New("test-ns-1", "test-ns-2", "test-ns-3"), }, }, { @@ -64,9 +64,9 @@ func TestAzureGet(t *testing.T) { )), }, expectedNameServers: map[string]sets.Set[string]{ - "test-subdomain-1.test-domain": sets.New[string]("test-ns-1"), - "test-subdomain-2.test-domain": sets.New[string]("test-ns-2"), - "test-subdomain-3.test-domain": sets.New[string]("test-ns-3"), + "test-subdomain-1.test-domain": sets.New("test-ns-1"), + "test-subdomain-2.test-domain": sets.New("test-ns-2"), + "test-subdomain-3.test-domain": 
sets.New("test-ns-3"), }, }, } diff --git a/pkg/controller/dnsendpoint/nameserver/gcp_live_test.go b/pkg/controller/dnsendpoint/nameserver/gcp_live_test.go index 5ac3172d432..c2793cbfdaf 100644 --- a/pkg/controller/dnsendpoint/nameserver/gcp_live_test.go +++ b/pkg/controller/dnsendpoint/nameserver/gcp_live_test.go @@ -7,7 +7,6 @@ import ( "os/user" "path/filepath" "testing" - "time" "github.com/stretchr/testify/suite" @@ -27,7 +26,6 @@ func TestLiveGCP(t *testing.T) { if rootDomain == "" { t.SkipNow() } - rand.Seed(time.Now().UnixNano()) suite.Run(t, &LiveGCPTestSuite{rootDomain: rootDomain}) } diff --git a/pkg/controller/dnsendpoint/nameserver/gcp_test.go b/pkg/controller/dnsendpoint/nameserver/gcp_test.go index 75e58fca83c..a6ae83619e3 100644 --- a/pkg/controller/dnsendpoint/nameserver/gcp_test.go +++ b/pkg/controller/dnsendpoint/nameserver/gcp_test.go @@ -49,7 +49,7 @@ func TestGCPGet(t *testing.T) { )), }, expectedNameServers: map[string]sets.Set[string]{ - "test-subdomain": sets.New[string]("test-ns"), + "test-subdomain": sets.New("test-ns"), }, }, { @@ -70,7 +70,7 @@ func TestGCPGet(t *testing.T) { )), }, expectedNameServers: map[string]sets.Set[string]{ - "test-subdomain": sets.New[string]("test-ns"), + "test-subdomain": sets.New("test-ns"), }, }, { @@ -104,7 +104,7 @@ func TestGCPGet(t *testing.T) { )), }, expectedNameServers: map[string]sets.Set[string]{ - "test-subdomain": sets.New[string]("test-ns"), + "test-subdomain": sets.New("test-ns"), }, }, { @@ -118,7 +118,7 @@ func TestGCPGet(t *testing.T) { )), }, expectedNameServers: map[string]sets.Set[string]{ - "test-subdomain": sets.New[string]("test-ns-1", "test-ns-2", "test-ns-3"), + "test-subdomain": sets.New("test-ns-1", "test-ns-2", "test-ns-3"), }, }, { @@ -134,9 +134,9 @@ func TestGCPGet(t *testing.T) { )), }, expectedNameServers: map[string]sets.Set[string]{ - "test-subdomain-1": sets.New[string]("test-ns-1"), - "test-subdomain-2": sets.New[string]("test-ns-2"), - "test-subdomain-3": sets.New[string]("test-ns-3"), + "test-subdomain-1": sets.New("test-ns-1"), + "test-subdomain-2": sets.New("test-ns-2"), + "test-subdomain-3": sets.New("test-ns-3"), }, }, { @@ -159,8 +159,8 @@ func TestGCPGet(t *testing.T) { ), }, expectedNameServers: map[string]sets.Set[string]{ - "test-subdomain-1": sets.New[string]("test-ns-1"), - "test-subdomain-2": sets.New[string]("test-ns-2"), + "test-subdomain-1": sets.New("test-ns-1"), + "test-subdomain-2": sets.New("test-ns-2"), }, }, } diff --git a/pkg/controller/dnsendpoint/nameserverscraper_test.go b/pkg/controller/dnsendpoint/nameserverscraper_test.go index b5fac9325f2..922284fd250 100644 --- a/pkg/controller/dnsendpoint/nameserverscraper_test.go +++ b/pkg/controller/dnsendpoint/nameserverscraper_test.go @@ -67,13 +67,13 @@ func TestGetEndpoint(t *testing.T) { rootDomain: &rootDomainsInfo{ endpointsBySubdomain: endpointsBySubdomain{ domain: endpointState{ - nsValues: sets.New[string]("test-value"), + nsValues: sets.New("test-value"), }, }, }, }, expectRootDomain: true, - expectedValues: sets.New[string]("test-value"), + expectedValues: sets.New("test-value"), }, { name: "multiple namespace values", @@ -81,13 +81,13 @@ func TestGetEndpoint(t *testing.T) { rootDomain: &rootDomainsInfo{ endpointsBySubdomain: endpointsBySubdomain{ domain: endpointState{ - nsValues: sets.New[string]("test-value-1", "test-value-2", "test-value-3"), + nsValues: sets.New("test-value-1", "test-value-2", "test-value-3"), }, }, }, }, expectRootDomain: true, - expectedValues: sets.New[string]("test-value-1", "test-value-2", 
"test-value-3"), + expectedValues: sets.New("test-value-1", "test-value-2", "test-value-3"), }, { name: "many root domains and domains", @@ -95,23 +95,23 @@ func TestGetEndpoint(t *testing.T) { rootDomain: &rootDomainsInfo{ endpointsBySubdomain: endpointsBySubdomain{ domain: endpointState{ - nsValues: sets.New[string]("test-value"), + nsValues: sets.New("test-value"), }, "other.domain.com": endpointState{ - nsValues: sets.New[string]("other-value"), + nsValues: sets.New("other-value"), }, }, }, "other-domain": &rootDomainsInfo{ endpointsBySubdomain: endpointsBySubdomain{ "sub-domain.other-domain": endpointState{ - nsValues: sets.New[string]("another-value"), + nsValues: sets.New("another-value"), }, }, }, }, expectRootDomain: true, - expectedValues: sets.New[string]("test-value"), + expectedValues: sets.New("test-value"), }, } for _, tc := range cases { @@ -131,7 +131,7 @@ func TestGetEndpoint(t *testing.T) { func TestAddEndpoint(t *testing.T) { rootDomain := "domain.com" domain := "test.domain.com" - values := sets.New[string]("test-value-1", "test-value-2", "test-value-3") + values := sets.New("test-value-1", "test-value-2", "test-value-3") cases := []struct { name string nameServers rootDomainsMap @@ -204,7 +204,7 @@ func TestAddEndpoint(t *testing.T) { dz.Namespace = "other-namespace" return dz }(), - nsValues: sets.New[string]("other-value"), + nsValues: sets.New("other-value"), }, }, }, @@ -322,7 +322,7 @@ func TestRemoveEndpoint(t *testing.T) { endpointsBySubdomain: endpointsBySubdomain{ domain: endpointState{ dnsZone: testDNSZone(), - nsValues: sets.New[string]("test-value"), + nsValues: sets.New("test-value"), }, }, }, @@ -341,7 +341,7 @@ func TestRemoveEndpoint(t *testing.T) { "other.domain.com": endpointState{}, domain: endpointState{ dnsZone: testDNSZone(), - nsValues: sets.New[string]("test-value"), + nsValues: sets.New("test-value"), }, }, }, @@ -522,7 +522,7 @@ func TestScrape(t *testing.T) { mockQuery.EXPECT().Get("domain.com"). Return( map[string]sets.Set[string]{ - "test.domain.com": sets.New[string]("test-value"), + "test.domain.com": sets.New("test-value"), }, nil, ) @@ -533,7 +533,7 @@ func TestScrape(t *testing.T) { endpointsBySubdomain: endpointsBySubdomain{ "test.domain.com": endpointState{ dnsZone: nil, - nsValues: sets.New[string]("test-value"), + nsValues: sets.New("test-value"), }, }, }, @@ -547,7 +547,7 @@ func TestScrape(t *testing.T) { endpointsBySubdomain: endpointsBySubdomain{ "test.domain.com": endpointState{ dnsZone: testDNSZone(), - nsValues: sets.New[string]("old-value"), + nsValues: sets.New("old-value"), }, }, }, @@ -556,7 +556,7 @@ func TestScrape(t *testing.T) { mockQuery.EXPECT().Get("domain.com"). Return( map[string]sets.Set[string]{ - "test.domain.com": sets.New[string]("test-value"), + "test.domain.com": sets.New("test-value"), }, nil, ) @@ -567,7 +567,7 @@ func TestScrape(t *testing.T) { endpointsBySubdomain: endpointsBySubdomain{ "test.domain.com": endpointState{ dnsZone: testDNSZone(), - nsValues: sets.New[string]("test-value"), + nsValues: sets.New("test-value"), }, }, }, @@ -582,7 +582,7 @@ func TestScrape(t *testing.T) { endpointsBySubdomain: endpointsBySubdomain{ "test.domain.com": endpointState{ dnsZone: testDNSZone(), - nsValues: sets.New[string]("test-value"), + nsValues: sets.New("test-value"), }, }, }, @@ -591,7 +591,7 @@ func TestScrape(t *testing.T) { mockQuery.EXPECT().Get("domain.com"). 
Return( map[string]sets.Set[string]{ - "test.domain.com": sets.New[string]("test-value"), + "test.domain.com": sets.New("test-value"), }, nil, ) @@ -602,7 +602,7 @@ func TestScrape(t *testing.T) { endpointsBySubdomain: endpointsBySubdomain{ "test.domain.com": endpointState{ dnsZone: testDNSZone(), - nsValues: sets.New[string]("test-value"), + nsValues: sets.New("test-value"), }, }, }, @@ -616,19 +616,19 @@ func TestScrape(t *testing.T) { endpointsBySubdomain: endpointsBySubdomain{ "changed-1.domain.com": endpointState{ dnsZone: testDNSZoneWithNSName(testNamespace, "test-changed-1"), - nsValues: sets.New[string]("old-value-1"), + nsValues: sets.New("old-value-1"), }, "changed-2.domain.com": endpointState{ dnsZone: testDNSZoneWithNSName(testNamespace, "test-changed-2"), - nsValues: sets.New[string]("old-value-2"), + nsValues: sets.New("old-value-2"), }, "changed-3.domain.com": endpointState{ dnsZone: testDNSZoneWithNSName(testNamespace, "test-changed-3"), - nsValues: sets.New[string]("old-value-3a", "old-value-3b"), + nsValues: sets.New("old-value-3a", "old-value-3b"), }, "unchanged.domain.com": endpointState{ dnsZone: testDNSZoneWithNSName(testNamespace, "test-unchanged"), - nsValues: sets.New[string]("test-value-4"), + nsValues: sets.New("test-value-4"), }, }, }, @@ -637,11 +637,11 @@ func TestScrape(t *testing.T) { mockQuery.EXPECT().Get("domain.com"). Return( map[string]sets.Set[string]{ - "changed-1.domain.com": sets.New[string]("test-value-1"), - "changed-2.domain.com": sets.New[string]("test-value-2a", "test-value-2b"), - "changed-3.domain.com": sets.New[string]("test-value-3"), - "unchanged.domain.com": sets.New[string]("test-value-4"), - "untracked.domain.com": sets.New[string]("test-value-5"), + "changed-1.domain.com": sets.New("test-value-1"), + "changed-2.domain.com": sets.New("test-value-2a", "test-value-2b"), + "changed-3.domain.com": sets.New("test-value-3"), + "unchanged.domain.com": sets.New("test-value-4"), + "untracked.domain.com": sets.New("test-value-5"), }, nil, ) @@ -652,23 +652,23 @@ func TestScrape(t *testing.T) { endpointsBySubdomain: endpointsBySubdomain{ "changed-1.domain.com": endpointState{ dnsZone: testDNSZoneWithNSName(testNamespace, "test-changed-1"), - nsValues: sets.New[string]("test-value-1"), + nsValues: sets.New("test-value-1"), }, "changed-2.domain.com": endpointState{ dnsZone: testDNSZoneWithNSName(testNamespace, "test-changed-2"), - nsValues: sets.New[string]("test-value-2a", "test-value-2b"), + nsValues: sets.New("test-value-2a", "test-value-2b"), }, "changed-3.domain.com": endpointState{ dnsZone: testDNSZoneWithNSName(testNamespace, "test-changed-3"), - nsValues: sets.New[string]("test-value-3"), + nsValues: sets.New("test-value-3"), }, "unchanged.domain.com": endpointState{ dnsZone: testDNSZoneWithNSName(testNamespace, "test-unchanged"), - nsValues: sets.New[string]("test-value-4"), + nsValues: sets.New("test-value-4"), }, "untracked.domain.com": endpointState{ dnsZone: nil, - nsValues: sets.New[string]("test-value-5"), + nsValues: sets.New("test-value-5"), }, }, }, diff --git a/pkg/controller/fakeclusterinstall/fakeclusterinstall_controller.go b/pkg/controller/fakeclusterinstall/fakeclusterinstall_controller.go index bff316070c7..32dc678cb04 100644 --- a/pkg/controller/fakeclusterinstall/fakeclusterinstall_controller.go +++ b/pkg/controller/fakeclusterinstall/fakeclusterinstall_controller.go @@ -14,7 +14,7 @@ import ( "k8s.io/apimachinery/pkg/runtime" "k8s.io/client-go/util/flowcontrol" "k8s.io/client-go/util/workqueue" - "k8s.io/utils/pointer" + 
"k8s.io/utils/ptr" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/controller" @@ -167,7 +167,7 @@ func (r *ReconcileClusterInstall) Reconcile(ctx context.Context, request reconci Kind: cd.Kind, Name: cd.Name, UID: cd.UID, - BlockOwnerDeletion: pointer.BoolPtr(true), + BlockOwnerDeletion: ptr.To(true), } cdRefChanged := librarygocontroller.EnsureOwnerRef(fci, cdRef) if cdRefChanged { diff --git a/pkg/controller/hibernation/azure_actuator_test.go b/pkg/controller/hibernation/azure_actuator_test.go index f3595aab2bd..38cb6a0f618 100644 --- a/pkg/controller/hibernation/azure_actuator_test.go +++ b/pkg/controller/hibernation/azure_actuator_test.go @@ -10,7 +10,7 @@ import ( log "github.com/sirupsen/logrus" "github.com/stretchr/testify/assert" - "k8s.io/utils/pointer" + "k8s.io/utils/ptr" "sigs.k8s.io/controller-runtime/pkg/client" hivev1 "github.com/openshift/hive/apis/hive/v1" @@ -271,7 +271,7 @@ func withClusterMetadataResourceGroupName(rg string) Option { if cd.Spec.ClusterMetadata.Platform.Azure == nil { cd.Spec.ClusterMetadata.Platform.Azure = &hivev1azure.Metadata{} } - cd.Spec.ClusterMetadata.Platform.Azure.ResourceGroupName = pointer.String(rg) + cd.Spec.ClusterMetadata.Platform.Azure.ResourceGroupName = ptr.To(rg) } } @@ -290,12 +290,12 @@ func setupAzureClientInstances(client *mockazureclient.MockClient, instances map name := fmt.Sprintf("%s-%d", state, i) instanceViewStatus := []compute.InstanceViewStatus{ { - Code: pointer.String("PowerState/" + state), + Code: ptr.To("PowerState/" + state), }, } vms = append(vms, compute.VirtualMachine{ - Name: pointer.String(name), - ID: pointer.String(fmt.Sprintf("/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Compute/virtualMachines/%s", azureTestSubscription, rgname, name)), + Name: ptr.To(name), + ID: ptr.To(fmt.Sprintf("/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Compute/virtualMachines/%s", azureTestSubscription, rgname, name)), VirtualMachineProperties: &compute.VirtualMachineProperties{ InstanceView: &compute.VirtualMachineInstanceView{ Statuses: &instanceViewStatus, diff --git a/pkg/controller/hibernation/hibernation_controller_test.go b/pkg/controller/hibernation/hibernation_controller_test.go index cb86034bc38..095a2dbb68c 100644 --- a/pkg/controller/hibernation/hibernation_controller_test.go +++ b/pkg/controller/hibernation/hibernation_controller_test.go @@ -1329,9 +1329,10 @@ func getHibernatingAndRunningConditions(cd *hivev1.ClusterDeployment) (*hivev1.C var hibCond *hivev1.ClusterDeploymentCondition var runCond *hivev1.ClusterDeploymentCondition for i := range cd.Status.Conditions { - if cd.Status.Conditions[i].Type == hivev1.ClusterHibernatingCondition { + switch cd.Status.Conditions[i].Type { + case hivev1.ClusterHibernatingCondition: hibCond = &cd.Status.Conditions[i] - } else if cd.Status.Conditions[i].Type == hivev1.ClusterReadyCondition { + case hivev1.ClusterReadyCondition: runCond = &cd.Status.Conditions[i] } } diff --git a/pkg/controller/hibernation/ibmcloud_actuator_test.go b/pkg/controller/hibernation/ibmcloud_actuator_test.go index fdb5fb76820..4873bc0f158 100644 --- a/pkg/controller/hibernation/ibmcloud_actuator_test.go +++ b/pkg/controller/hibernation/ibmcloud_actuator_test.go @@ -12,8 +12,8 @@ import ( log "github.com/sirupsen/logrus" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "k8s.io/utils/pointer" + "k8s.io/utils/ptr" "sigs.k8s.io/controller-runtime/pkg/client" hivev1 "github.com/openshift/hive/apis/hive/v1" @@ -250,9 +250,9 @@ 
func setupIBMCloudClientInstances(ibmCloudClient *mockibmclient.MockAPI, statuse for status, count := range statuses { for i := 0; i < count; i++ { instances = append(instances, vpcv1.Instance{ - Name: pointer.String(fmt.Sprintf("%s-%d", status, i)), - ID: pointer.String(fmt.Sprintf("%s-%d", status, i)), - Status: pointer.String(status), + Name: ptr.To(fmt.Sprintf("%s-%d", status, i)), + ID: ptr.To(fmt.Sprintf("%s-%d", status, i)), + Status: ptr.To(status), }) } } diff --git a/pkg/controller/images/controller_images.go b/pkg/controller/images/controller_images.go index 1afbc1da05e..0ed9f3aa12b 100644 --- a/pkg/controller/images/controller_images.go +++ b/pkg/controller/images/controller_images.go @@ -40,13 +40,6 @@ func GetHiveImagePullPolicy() corev1.PullPolicy { return corev1.PullPolicy(envVarOrDefault(HiveImagePullPolicyEnvVar, string(corev1.PullAlways))) } -// GetHiveClusterProvisionImagePullPolicy returns the policy to use when pulling the hive image -// for the ClusterProvision pod. -// Either the one specified in the environment variable or the hardcoded default. -func GetHiveClusterProvisionImagePullPolicy() corev1.PullPolicy { - return corev1.PullPolicy(envVarOrDefault(HiveClusterProvisionImagePullPolicyEnvVar, string(corev1.PullIfNotPresent))) -} - func envVarOrDefault(envVar string, defaultValue string) string { value, ok := os.LookupEnv(envVar) if ok { diff --git a/pkg/controller/machinepool/actuator.go b/pkg/controller/machinepool/actuator.go index 791b2548379..bec5a38f4b8 100644 --- a/pkg/controller/machinepool/actuator.go +++ b/pkg/controller/machinepool/actuator.go @@ -20,3 +20,8 @@ type Actuator interface { // to wait before we can proceed with reconciling. (e.g. obtaining a pool name lease) GenerateMachineSets(*hivev1.ClusterDeployment, *hivev1.MachinePool, log.FieldLogger) (msets []*machineapi.MachineSet, proceed bool, genError error) } + +const ( + // workerUserDataName is the name of a secret in the cluster used for obtaining user data from MCO. 
+ workerUserDataName = "worker-user-data" +) diff --git a/pkg/controller/machinepool/awsactuator_test.go b/pkg/controller/machinepool/awsactuator_test.go index 2897d99bf29..fa90755a423 100644 --- a/pkg/controller/machinepool/awsactuator_test.go +++ b/pkg/controller/machinepool/awsactuator_test.go @@ -20,7 +20,7 @@ import ( "k8s.io/apimachinery/pkg/runtime" jsonserializer "k8s.io/apimachinery/pkg/runtime/serializer/json" "k8s.io/apimachinery/pkg/types" - "k8s.io/utils/pointer" + "k8s.io/utils/ptr" machineapi "github.com/openshift/api/machine/v1beta1" @@ -83,7 +83,7 @@ func TestAWSActuator(t *testing.T) { generateAWSMachineSetName("zone1"): 3, }, expectedAMI: &machineapi.AWSResourceReference{ - ID: pointer.String(testAMI), + ID: ptr.To(testAMI), }, expectedUserTags: baseUserTagsFromCD, }, @@ -101,7 +101,7 @@ func TestAWSActuator(t *testing.T) { generateAWSMachineSetName("zone3"): 1, }, expectedAMI: &machineapi.AWSResourceReference{ - ID: pointer.String(testAMI), + ID: ptr.To(testAMI), }, expectedUserTags: baseUserTagsFromCD, }, @@ -120,7 +120,7 @@ func TestAWSActuator(t *testing.T) { generateAWSMachineSetName("zone3"): 1, }, expectedAMI: &machineapi.AWSResourceReference{ - ID: pointer.String(testAMI), + ID: ptr.To(testAMI), }, expectedUserTags: baseUserTagsFromCD, }, @@ -155,7 +155,7 @@ func TestAWSActuator(t *testing.T) { }, expectedSubnetIDInMachineSet: true, expectedAMI: &machineapi.AWSResourceReference{ - ID: pointer.String(testAMI), + ID: ptr.To(testAMI), }, expectedUserTags: baseUserTagsFromCD, }, @@ -169,7 +169,7 @@ func TestAWSActuator(t *testing.T) { }, expectedErr: true, expectedAMI: &machineapi.AWSResourceReference{ - ID: pointer.String(testAMI), + ID: ptr.To(testAMI), }, expectedUserTags: baseUserTagsFromCD, }, @@ -193,7 +193,7 @@ func TestAWSActuator(t *testing.T) { Reason: "SubnetsNotFound", }, expectedAMI: &machineapi.AWSResourceReference{ - ID: pointer.String(testAMI), + ID: ptr.To(testAMI), }, expectedUserTags: baseUserTagsFromCD, }, @@ -218,7 +218,7 @@ func TestAWSActuator(t *testing.T) { Reason: "MoreThanOneSubnetForZone", }, expectedAMI: &machineapi.AWSResourceReference{ - ID: pointer.String(testAMI), + ID: ptr.To(testAMI), }, expectedUserTags: baseUserTagsFromCD, }, @@ -254,7 +254,7 @@ func TestAWSActuator(t *testing.T) { Reason: "NotEnoughSubnetsForZones", }, expectedAMI: &machineapi.AWSResourceReference{ - ID: pointer.String(testAMI), + ID: ptr.To(testAMI), }, expectedUserTags: baseUserTagsFromCD, }, @@ -275,7 +275,7 @@ func TestAWSActuator(t *testing.T) { Reason: "WrongNumberOfSubnets", }, expectedAMI: &machineapi.AWSResourceReference{ - ID: pointer.String(testAMI), + ID: ptr.To(testAMI), }, expectedUserTags: baseUserTagsFromCD, }, @@ -296,7 +296,7 @@ func TestAWSActuator(t *testing.T) { Reason: "WrongNumberOfSubnets", }, expectedAMI: &machineapi.AWSResourceReference{ - ID: pointer.String(testAMI), + ID: ptr.To(testAMI), }, expectedUserTags: baseUserTagsFromCD, }, @@ -331,7 +331,7 @@ func TestAWSActuator(t *testing.T) { }, expectedSubnetIDInMachineSet: true, expectedAMI: &machineapi.AWSResourceReference{ - ID: pointer.String(testAMI), + ID: ptr.To(testAMI), }, expectedUserTags: baseUserTagsFromCD, }, @@ -366,7 +366,7 @@ func TestAWSActuator(t *testing.T) { }, expectedSubnetIDInMachineSet: true, expectedAMI: &machineapi.AWSResourceReference{ - ID: pointer.String(testAMI), + ID: ptr.To(testAMI), }, expectedUserTags: baseUserTagsFromCD, }, @@ -383,7 +383,7 @@ func TestAWSActuator(t *testing.T) { }, expectedEC2MetadataAuth: "Optional", expectedAMI: 
&machineapi.AWSResourceReference{ - ID: pointer.String(testAMI), + ID: ptr.To(testAMI), }, expectedUserTags: baseUserTagsFromCD, }, @@ -400,7 +400,7 @@ func TestAWSActuator(t *testing.T) { }, expectedKMSKey: fakeKMSKeyARN, expectedAMI: &machineapi.AWSResourceReference{ - ID: pointer.String(testAMI), + ID: ptr.To(testAMI), }, expectedUserTags: baseUserTagsFromCD, }, @@ -428,7 +428,7 @@ func TestAWSActuator(t *testing.T) { Reason: "ConfigurationSupported", }, expectedAMI: &machineapi.AWSResourceReference{ - ID: pointer.String(testAMI), + ID: ptr.To(testAMI), }, expectedUserTags: baseUserTagsFromCD, }, @@ -512,7 +512,7 @@ func TestAWSActuator(t *testing.T) { generateAWSMachineSetName("zone2"): 1, }, expectedAMI: &machineapi.AWSResourceReference{ - ID: pointer.String(testAMI), + ID: ptr.To(testAMI), }, expectExtraSG: true, expectedUserTags: baseUserTagsFromCD, @@ -582,7 +582,7 @@ func TestAWSActuator(t *testing.T) { generateAWSMachineSetName("zone2"): 1, }, expectedAMI: &machineapi.AWSResourceReference{ - ID: pointer.String(testAMI), + ID: ptr.To(testAMI), }, expectedSGIDs: []string{"sg-one", "sg-two"}, expectedUserTags: baseUserTagsFromCD, @@ -640,7 +640,7 @@ func TestAWSActuator(t *testing.T) { generateAWSMachineSetName("zone2"): 1, }, expectedAMI: &machineapi.AWSResourceReference{ - ID: pointer.String(testAMI), + ID: ptr.To(testAMI), }, expectedUserTags: map[string]string{ "pool-label": "pool-value", @@ -814,7 +814,7 @@ func TestGetAWSAMIID(t *testing.T) { func mockDescribeAvailabilityZones(client *mockaws.MockClient, zones []string) *gomock.Call { input := &ec2.DescribeAvailabilityZonesInput{ Filters: []ec2types.Filter{{ - Name: pointer.String("region-name"), + Name: ptr.To("region-name"), Values: []string{testRegion}, }}, } diff --git a/pkg/controller/machinepool/azureactuator_test.go b/pkg/controller/machinepool/azureactuator_test.go index 962cd3f283c..1fda67bc181 100644 --- a/pkg/controller/machinepool/azureactuator_test.go +++ b/pkg/controller/machinepool/azureactuator_test.go @@ -11,7 +11,7 @@ import ( "github.com/golang/mock/gomock" "github.com/stretchr/testify/assert" corev1 "k8s.io/api/core/v1" - "k8s.io/utils/pointer" + "k8s.io/utils/ptr" machineapi "github.com/openshift/api/machine/v1beta1" @@ -139,7 +139,7 @@ func TestAzureActuator(t *testing.T) { clusterDeployment: testAzureClusterDeployment(), pool: func() *hivev1.MachinePool { p := testAzurePool() - p.Spec.Replicas = pointer.Int64(5) + p.Spec.Replicas = ptr.To(int64(5)) return p }(), mockAzureClient: func(mockCtrl *gomock.Controller, client *mockazure.MockClient) { @@ -342,7 +342,7 @@ func TestAzureActuator(t *testing.T) { clusterDeployment: testAzureClusterDeployment412(), pool: func() *hivev1.MachinePool { p := testAzurePool() - p.Spec.Replicas = pointer.Int64(5) + p.Spec.Replicas = ptr.To(int64(5)) return p }(), mockAzureClient: func(mockCtrl *gomock.Controller, client *mockazure.MockClient) { @@ -556,10 +556,10 @@ func mockListResourceSKUs(mockCtrl *gomock.Controller, client *mockazure.MockCli page.EXPECT().Values().Return( []compute.ResourceSku{ { - Name: pointer.String(testInstanceType), + Name: ptr.To(testInstanceType), LocationInfo: &[]compute.ResourceSkuLocationInfo{ { - Location: pointer.String(testRegion), + Location: ptr.To(testRegion), Zones: &zones, }, }, @@ -612,7 +612,7 @@ func testAzureClusterDeployment() *hivev1.ClusterDeployment { } cd.Spec.ClusterMetadata.Platform = &hivev1.ClusterPlatformMetadata{ Azure: &hivev1azure.Metadata{ - ResourceGroupName: pointer.String("foo-12345-rg"), + ResourceGroupName: 
ptr.To("foo-12345-rg"), }, } return cd diff --git a/pkg/controller/machinepool/gcpactuator.go b/pkg/controller/machinepool/gcpactuator.go index 0d2712cc3b9..05654e6475e 100644 --- a/pkg/controller/machinepool/gcpactuator.go +++ b/pkg/controller/machinepool/gcpactuator.go @@ -13,7 +13,8 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" - "k8s.io/utils/pointer" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/utils/ptr" "sigs.k8s.io/controller-runtime/pkg/client" machineapi "github.com/openshift/api/machine/v1beta1" @@ -383,7 +384,7 @@ func (a *GCPActuator) obtainLease(pool *hivev1.MachinePool, cd *hivev1.ClusterDe Kind: "MachinePool", Name: pool.Name, UID: pool.UID, - Controller: pointer.Bool(true), + Controller: ptr.To(true), }, }, }, @@ -434,14 +435,14 @@ func (a *GCPActuator) findAvailableLeaseChars(cd *hivev1.ClusterDeployment, leas } func requireLeases(remoteMachineSets []machineapi.MachineSet, logger log.FieldLogger) bool { - poolNames := make(map[string]bool) + poolNames := sets.New[string]() for _, ms := range remoteMachineSets { nameParts := strings.Split(ms.Name, "-") if len(nameParts) < 3 { continue } poolName := nameParts[len(nameParts)-2] - poolNames[poolName] = true + poolNames.Insert(poolName) } // If there are machinesets with a pool name of "w" and no machinesets with a pool name of "worker", then assume // that the "w" pool is the worker pool created by the installer. If the installer-created "w" worker pool still @@ -450,7 +451,7 @@ func requireLeases(remoteMachineSets []machineapi.MachineSet, logger log.FieldLo // installer-created worker pool when there are Hive-managed pools that are not using leases. Hive will block // through validation MachinePools with a pool name of "w", but the user could still create such machinesets on // the cluster manually. 
- if poolNames["w"] && !poolNames["worker"] { + if poolNames.Has("w") && !poolNames.Has("worker") { logger.Debug("leases are required since there is a \"w\" machine pool in the cluster that is likely the installer-created worker pool") return true } diff --git a/pkg/controller/machinepool/ibmcloudactuator_test.go b/pkg/controller/machinepool/ibmcloudactuator_test.go index 0f3443ab2cf..ed784eb5ccd 100644 --- a/pkg/controller/machinepool/ibmcloudactuator_test.go +++ b/pkg/controller/machinepool/ibmcloudactuator_test.go @@ -39,7 +39,7 @@ func TestIBMCloudActuator(t *testing.T) { clusterDeployment: testIBMCloudClusterDeployment(), pool: testIBMCloudPool(), mockIBMClient: func(client *mockibm.MockAPI) { - mockGetVPCZonesForRegion(client, []string{"test-region-1", "test-region-2", "test-region-3"}, testRegion) + mockGetVPCZonesForRegion(client, []string{"test-region-1", "test-region-2", "test-region-3"}) }, expectedMachineSetReplicas: map[string]int32{ generateIBMCloudMachineSetName("worker", "1"): 1, @@ -66,7 +66,7 @@ func TestIBMCloudActuator(t *testing.T) { clusterDeployment: testIBMCloudClusterDeployment(), pool: testIBMCloudPool(), mockIBMClient: func(client *mockibm.MockAPI) { - mockGetVPCZonesForRegion(client, []string{}, testRegion) + mockGetVPCZonesForRegion(client, []string{}) }, expectedErr: true, }, @@ -81,7 +81,7 @@ func TestIBMCloudActuator(t *testing.T) { return p }(), mockIBMClient: func(client *mockibm.MockAPI) { - mockGetVPCZonesForRegion(client, []string{"test-region-1", "test-region-2", "test-region-3"}, testRegion) + mockGetVPCZonesForRegion(client, []string{"test-region-1", "test-region-2", "test-region-3"}) }, expectedMachineSetReplicas: map[string]int32{ generateIBMCloudMachineSetName("worker", "1"): 1, @@ -105,7 +105,7 @@ func TestIBMCloudActuator(t *testing.T) { return p }(), mockIBMClient: func(client *mockibm.MockAPI) { - mockGetVPCZonesForRegion(client, []string{"test-region-1", "test-region-2", "test-region-3"}, testRegion) + mockGetVPCZonesForRegion(client, []string{"test-region-1", "test-region-2", "test-region-3"}) }, expectedMachineSetReplicas: map[string]int32{ generateIBMCloudMachineSetName("worker", "1"): 1, @@ -195,6 +195,6 @@ func generateIBMCloudMachineSetName(leaseChar, zone string) string { return fmt.Sprintf("%s-%s-%s", testInfraID, leaseChar, zone) } -func mockGetVPCZonesForRegion(ibmClient *mockibm.MockAPI, zones []string, region string) { +func mockGetVPCZonesForRegion(ibmClient *mockibm.MockAPI, zones []string) { ibmClient.EXPECT().GetVPCZonesForRegion(gomock.Any(), testRegion).Return(zones, nil).Times(1) } diff --git a/pkg/controller/machinepool/leaseexceptions.go b/pkg/controller/machinepool/leaseexceptions.go index d0e66acd41a..9f6e40ec8aa 100644 --- a/pkg/controller/machinepool/leaseexceptions.go +++ b/pkg/controller/machinepool/leaseexceptions.go @@ -108,7 +108,7 @@ func (r *ReconcileMachinePool) resolveControllerRef(namespace string, controller } // When a lease is created, update the expectations of the machinepool that owns the lease. 
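// trackLeaseAdd's signature change below is one instance of two mechanical
// rewrites applied throughout this diff: interface{} becomes any (a built-in
// alias since Go 1.18, so the types are identical), and the deprecated
// k8s.io/utils/pointer helpers become the generic ptr.To. A minimal sketch
// with invented values:
package main

import (
	"fmt"

	"k8s.io/utils/ptr"
)

func main() {
	replicas := ptr.To(int32(1)) // *int32; replaces pointer.Int32Ptr(1)
	name := ptr.To("worker")     // *string; replaces pointer.String("worker")

	var obj any = name // `any` and interface{} are interchangeable
	fmt.Println(*replicas, *name, obj != nil)
}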
-func (r *ReconcileMachinePool) trackLeaseAdd(obj interface{}) { +func (r *ReconcileMachinePool) trackLeaseAdd(obj any) { r.logger.Debug("tracking lease add") lease := obj.(*hivev1.MachinePoolNameLease) if lease.DeletionTimestamp != nil { diff --git a/pkg/controller/machinepool/machinepool_controller.go b/pkg/controller/machinepool/machinepool_controller.go index 61d2fdfeaaf..d40dd51e843 100644 --- a/pkg/controller/machinepool/machinepool_controller.go +++ b/pkg/controller/machinepool/machinepool_controller.go @@ -23,7 +23,7 @@ import ( "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/sets" - "k8s.io/utils/pointer" + "k8s.io/utils/ptr" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/controller" "sigs.k8s.io/controller-runtime/pkg/event" @@ -1151,7 +1151,7 @@ func (r *ReconcileMachinePool) syncClusterAutoscaler( ScaleDown: &autoscalingv1.ScaleDownConfig{ Enabled: true, }, - BalanceSimilarNodeGroups: pointer.Bool(true), + BalanceSimilarNodeGroups: ptr.To(true), }, } if err := remoteClusterAPIClient.Create(context.Background(), defaultClusterAutoscaler); err != nil { diff --git a/pkg/controller/machinepool/machinepool_controller_test.go b/pkg/controller/machinepool/machinepool_controller_test.go index 2fe04b368fe..1e4cdb6e646 100644 --- a/pkg/controller/machinepool/machinepool_controller_test.go +++ b/pkg/controller/machinepool/machinepool_controller_test.go @@ -22,7 +22,7 @@ import ( "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/sets" - "k8s.io/utils/pointer" + "k8s.io/utils/ptr" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/reconcile" @@ -1039,7 +1039,7 @@ func TestRemoteMachineSetReconcile(t *testing.T) { testMachineSetWithAZ("foo-12345-worker-us-east-1c", "worker", true, 1, 0, "us-east-1c"), func() runtime.Object { a := testClusterAutoscaler("1") - a.Spec.BalanceSimilarNodeGroups = pointer.Bool(false) + a.Spec.BalanceSimilarNodeGroups = ptr.To(false) return a }(), testMachineAutoscaler("foo-12345-worker-us-east-1a", "1", 1, 2), @@ -1063,7 +1063,7 @@ func TestRemoteMachineSetReconcile(t *testing.T) { }, expectedRemoteClusterAutoscalers: func() []autoscalingv1.ClusterAutoscaler { a := testClusterAutoscaler("1") - a.Spec.BalanceSimilarNodeGroups = pointer.Bool(false) + a.Spec.BalanceSimilarNodeGroups = ptr.To(false) return []autoscalingv1.ClusterAutoscaler{*a} }(), }, @@ -1670,8 +1670,8 @@ func Test_summarizeMachinesError(t *testing.T) { testMachineSetMachine("machine-1", "worker", testName), func() *machineapi.Machine { m := testMachineSetMachine("machine-2", "worker", testName) - m.Status.ErrorReason = (*machineapi.MachineStatusError)(pointer.String("GoneNotComingBack")) - m.Status.ErrorMessage = pointer.String("The machine is not found") + m.Status.ErrorReason = (*machineapi.MachineStatusError)(ptr.To("GoneNotComingBack")) + m.Status.ErrorMessage = ptr.To("The machine is not found") return m }(), testMachineSetMachine("machine-3", "worker", testName), @@ -1686,14 +1686,14 @@ func Test_summarizeMachinesError(t *testing.T) { testMachineSetMachine("machine-1", "worker", testName), func() *machineapi.Machine { m := testMachineSetMachine("machine-2", "worker", testName) - m.Status.ErrorReason = (*machineapi.MachineStatusError)(pointer.String("GoneNotComingBack")) - m.Status.ErrorMessage = pointer.String("The machine is not found") + m.Status.ErrorReason = (*machineapi.MachineStatusError)(ptr.To("GoneNotComingBack")) + 
m.Status.ErrorMessage = ptr.To("The machine is not found") return m }(), func() *machineapi.Machine { m := testMachineSetMachine("machine-3", "worker", testName) - m.Status.ErrorReason = (*machineapi.MachineStatusError)(pointer.String("InsufficientResources")) - m.Status.ErrorMessage = pointer.String("No available quota") + m.Status.ErrorReason = (*machineapi.MachineStatusError)(ptr.To("InsufficientResources")) + m.Status.ErrorMessage = ptr.To("No available quota") return m }(), testMachineSet(testName, "worker", false, 3, 0), @@ -1912,7 +1912,7 @@ func testClusterAutoscaler(resourceVersion string) *autoscalingv1.ClusterAutosca ScaleDown: &autoscalingv1.ScaleDownConfig{ Enabled: true, }, - BalanceSimilarNodeGroups: pointer.Bool(true), + BalanceSimilarNodeGroups: ptr.To(true), }, } } diff --git a/pkg/controller/machinepool/secrets.go b/pkg/controller/machinepool/secrets.go deleted file mode 100644 index 2571ab3e5d4..00000000000 --- a/pkg/controller/machinepool/secrets.go +++ /dev/null @@ -1,6 +0,0 @@ -package machinepool - -const ( - // workerUserDataName is the name of a secret in the cluster used for obtaining user data from MCO. - workerUserDataName = "worker-user-data" -) diff --git a/pkg/controller/metrics/metrics.go b/pkg/controller/metrics/metrics.go index 64773493888..09320323dbd 100644 --- a/pkg/controller/metrics/metrics.go +++ b/pkg/controller/metrics/metrics.go @@ -12,6 +12,7 @@ import ( log "github.com/sirupsen/logrus" batchv1 "k8s.io/api/batch/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/sets" "k8s.io/apimachinery/pkg/util/wait" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/manager" @@ -294,12 +295,7 @@ func (mc *Calculator) Start(ctx context.Context) error { } } - accumulator.setMetrics(metricClusterDeploymentsTotal, - metricClusterDeploymentsInstalledTotal, - metricClusterDeploymentsUninstalledTotal, - metricClusterDeploymentsDeprovisioningTotal, - metricClusterDeploymentsWithConditionTotal, - mcLog) + accumulator.setMetrics(metricClusterDeploymentsTotal, metricClusterDeploymentsInstalledTotal, metricClusterDeploymentsUninstalledTotal, metricClusterDeploymentsDeprovisioningTotal, metricClusterDeploymentsWithConditionTotal) // Also add metrics only for clusters created in last 48h accumulator, err = newClusterAccumulator("48h", []string{"0h", "1h", "2h", "8h", "24h"}) @@ -311,12 +307,7 @@ func (mc *Calculator) Start(ctx context.Context) error { accumulator.processCluster(&cd) } - accumulator.setMetrics(metricClusterDeploymentsTotal, - metricClusterDeploymentsInstalledTotal, - metricClusterDeploymentsUninstalledTotal, - metricClusterDeploymentsDeprovisioningTotal, - metricClusterDeploymentsWithConditionTotal, - mcLog) + accumulator.setMetrics(metricClusterDeploymentsTotal, metricClusterDeploymentsInstalledTotal, metricClusterDeploymentsUninstalledTotal, metricClusterDeploymentsDeprovisioningTotal, metricClusterDeploymentsWithConditionTotal) } mcLog.Debug("calculating metrics across all install jobs") @@ -516,8 +507,7 @@ type clusterAccumulator struct { // clusterTypesSet will contain every cluster type we encounter during processing. // Used to zero out some values which may no longer exist when setting the final metrics. - // Maps cluster type to a meaningless bool. 
- clusterTypesSet map[string]bool + clusterTypesSet sets.Set[string] } const ( @@ -538,7 +528,7 @@ func newClusterAccumulator(ageFilter string, durationBuckets []string) (*cluster deprovisioning: map[string]map[string]int{}, uninstalled: map[string]map[string]int{}, conditions: map[hivev1.ClusterDeploymentConditionType]map[string]int{}, - clusterTypesSet: map[string]bool{}, + clusterTypesSet: sets.New[string](), } var err error if ageFilter != infinity { @@ -607,7 +597,7 @@ func (ca *clusterAccumulator) processCluster(cd *hivev1.ClusterDeployment) { clusterType := GetLabelValue(cd, hivev1.HiveClusterTypeLabel) powerState := GetPowerStateValue(cd.Status.PowerState) ca.ensureClusterTypeBuckets(clusterType, powerState) - ca.clusterTypesSet[clusterType] = true + ca.clusterTypesSet.Insert(clusterType) ca.total[powerState][clusterType]++ @@ -649,7 +639,7 @@ func (ca *clusterAccumulator) processCluster(cd *hivev1.ClusterDeployment) { } } -func (ca *clusterAccumulator) setMetrics(total, installed, uninstalled, deprovisioning, conditions *prometheus.GaugeVec, mcLog log.FieldLogger) { +func (ca *clusterAccumulator) setMetrics(total, installed, uninstalled, deprovisioning, conditions *prometheus.GaugeVec) { for k, v := range ca.total { for clusterType := range ca.clusterTypesSet { diff --git a/pkg/controller/remoteingress/remoteingress_controller.go b/pkg/controller/remoteingress/remoteingress_controller.go index 7064aadbd1a..6e74e0d8fce 100644 --- a/pkg/controller/remoteingress/remoteingress_controller.go +++ b/pkg/controller/remoteingress/remoteingress_controller.go @@ -84,7 +84,11 @@ func Add(mgr manager.Manager) error { // NewReconciler returns a new reconcile.Reconciler func NewReconciler(mgr manager.Manager, rateLimiter flowcontrol.RateLimiter) reconcile.Reconciler { logger := log.WithField("controller", ControllerName) - helper, err := resource.NewHelperWithMetricsFromRESTConfig(mgr.GetConfig(), ControllerName, logger) + helper, err := resource.NewHelper( + logger, + resource.FromRESTConfig(mgr.GetConfig()), + resource.WithControllerName(ControllerName), + resource.WithMetrics()) if err != nil { // Hard exit if we can't create this controller logger.WithError(err).Fatal("unable to create resource helper") diff --git a/pkg/controller/remoteingress/remoteingress_controller_test.go b/pkg/controller/remoteingress/remoteingress_controller_test.go index 5e96e6ae105..fc09d58b90f 100644 --- a/pkg/controller/remoteingress/remoteingress_controller_test.go +++ b/pkg/controller/remoteingress/remoteingress_controller_test.go @@ -18,6 +18,7 @@ import ( "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/sets" "sigs.k8s.io/controller-runtime/pkg/reconcile" @@ -716,10 +717,10 @@ func testSecretForCertificateBundle(cb hivev1.CertificateBundleSpec) corev1.Secr func addCertificateBundlesForIngressList(cd *hivev1.ClusterDeployment) []hivev1.CertificateBundleSpec { certBundles := []hivev1.CertificateBundleSpec{} - certBundleAlreadyProcessed := map[string]bool{} + certBundleAlreadyProcessed := sets.New[string]() for _, ingress := range cd.Spec.Ingress { - if certBundleAlreadyProcessed[ingress.ServingCertificate] { + if certBundleAlreadyProcessed.Has(ingress.ServingCertificate) { continue } cb := hivev1.CertificateBundleSpec{ @@ -731,7 +732,7 @@ func addCertificateBundlesForIngressList(cd *hivev1.ClusterDeployment) []hivev1. 
certBundles = append(certBundles, cb) // no need to make multiple certbundle entries for the same certbundle - certBundleAlreadyProcessed[ingress.ServingCertificate] = true + certBundleAlreadyProcessed.Insert(ingress.ServingCertificate) } return certBundles diff --git a/pkg/controller/syncidentityprovider/syncidentityprovider_controller.go b/pkg/controller/syncidentityprovider/syncidentityprovider_controller.go index a5946a588e5..3273d93c720 100644 --- a/pkg/controller/syncidentityprovider/syncidentityprovider_controller.go +++ b/pkg/controller/syncidentityprovider/syncidentityprovider_controller.go @@ -81,13 +81,13 @@ func AddToManager(mgr manager.Manager, r reconcile.Reconciler, concurrentReconci reconciler := r.(*ReconcileSyncIdentityProviders) // Watch for changes to SyncIdentityProvider - err = c.Watch(source.Kind(mgr.GetCache(), &hivev1.SyncIdentityProvider{}, handler.TypedEnqueueRequestsFromMapFunc[*hivev1.SyncIdentityProvider](reconciler.syncIdentityProviderWatchHandler))) + err = c.Watch(source.Kind(mgr.GetCache(), &hivev1.SyncIdentityProvider{}, handler.TypedEnqueueRequestsFromMapFunc(reconciler.syncIdentityProviderWatchHandler))) if err != nil { return err } // Watch for changes to SelectorSyncIdentityProvider - err = c.Watch(source.Kind(mgr.GetCache(), &hivev1.SelectorSyncIdentityProvider{}, handler.TypedEnqueueRequestsFromMapFunc[*hivev1.SelectorSyncIdentityProvider](reconciler.selectorSyncIdentityProviderWatchHandler))) + err = c.Watch(source.Kind(mgr.GetCache(), &hivev1.SelectorSyncIdentityProvider{}, handler.TypedEnqueueRequestsFromMapFunc(reconciler.selectorSyncIdentityProviderWatchHandler))) if err != nil { return err } diff --git a/pkg/controller/unreachable/unreachable_controller_test.go b/pkg/controller/unreachable/unreachable_controller_test.go index 7c70b1d47c7..ce2dba5e8be 100644 --- a/pkg/controller/unreachable/unreachable_controller_test.go +++ b/pkg/controller/unreachable/unreachable_controller_test.go @@ -24,11 +24,11 @@ import ( "github.com/golang/mock/gomock" log "github.com/sirupsen/logrus" "github.com/stretchr/testify/assert" - "k8s.io/utils/pointer" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" + "k8s.io/utils/ptr" "sigs.k8s.io/controller-runtime/pkg/reconcile" hivev1 "github.com/openshift/hive/apis/hive/v1" @@ -82,7 +82,7 @@ func TestReconcile(t *testing.T) { withUnreachableCondition(corev1.ConditionUnknown, time.Now()), withActiveAPIURLOverrideCondition(corev1.ConditionUnknown), ), - errorConnecting: pointer.BoolPtr(true), + errorConnecting: ptr.To(true), expectedUnreachableStatus: corev1.ConditionTrue, expectedActiveOverrideStatus: corev1.ConditionUnknown, expectRequeue: true, @@ -93,7 +93,7 @@ func TestReconcile(t *testing.T) { withUnreachableCondition(corev1.ConditionFalse, time.Now().Add(-maxUnreachableDuration)), withActiveAPIURLOverrideCondition(corev1.ConditionUnknown), ), - errorConnecting: pointer.BoolPtr(true), + errorConnecting: ptr.To(true), expectedUnreachableStatus: corev1.ConditionTrue, expectedActiveOverrideStatus: corev1.ConditionUnknown, expectRequeue: true, @@ -104,7 +104,7 @@ func TestReconcile(t *testing.T) { withUnreachableCondition(corev1.ConditionTrue, time.Now()), withActiveAPIURLOverrideCondition(corev1.ConditionUnknown), ), - errorConnecting: pointer.BoolPtr(true), + errorConnecting: ptr.To(true), expectedUnreachableStatus: corev1.ConditionTrue, expectedActiveOverrideStatus: corev1.ConditionUnknown, expectRequeue: true, @@ -115,7 +115,7 @@ func TestReconcile(t 
*testing.T) { withUnreachableCondition(corev1.ConditionUnknown, time.Now()), withActiveAPIURLOverrideCondition(corev1.ConditionUnknown), ), - errorConnecting: pointer.BoolPtr(false), + errorConnecting: ptr.To(false), expectedUnreachableStatus: corev1.ConditionFalse, expectedActiveOverrideStatus: corev1.ConditionUnknown, expectRequeueAfter: true, @@ -126,7 +126,7 @@ func TestReconcile(t *testing.T) { withUnreachableCondition(corev1.ConditionFalse, time.Now().Add(-maxUnreachableDuration)), withActiveAPIURLOverrideCondition(corev1.ConditionUnknown), ), - errorConnecting: pointer.BoolPtr(false), + errorConnecting: ptr.To(false), expectedUnreachableStatus: corev1.ConditionFalse, expectedActiveOverrideStatus: corev1.ConditionUnknown, expectRequeueAfter: true, @@ -137,7 +137,7 @@ func TestReconcile(t *testing.T) { withUnreachableCondition(corev1.ConditionTrue, time.Now()), withActiveAPIURLOverrideCondition(corev1.ConditionUnknown), ), - errorConnecting: pointer.BoolPtr(false), + errorConnecting: ptr.To(false), expectedUnreachableStatus: corev1.ConditionFalse, expectedActiveOverrideStatus: corev1.ConditionUnknown, expectRequeueAfter: true, @@ -149,7 +149,7 @@ func TestReconcile(t *testing.T) { withActiveAPIURLOverrideCondition(corev1.ConditionUnknown), withAPIURLOverride(), ), - errorConnecting: pointer.BoolPtr(false), + errorConnecting: ptr.To(false), expectedUnreachableStatus: corev1.ConditionFalse, expectedActiveOverrideStatus: corev1.ConditionTrue, expectRequeueAfter: true, @@ -161,7 +161,7 @@ func TestReconcile(t *testing.T) { withUnreachableCondition(corev1.ConditionFalse, time.Now()), withActiveAPIURLOverrideCondition(corev1.ConditionUnknown), ), - errorConnecting: pointer.BoolPtr(false), + errorConnecting: ptr.To(false), expectedUnreachableStatus: corev1.ConditionFalse, expectedActiveOverrideStatus: corev1.ConditionTrue, expectRequeueAfter: true, @@ -173,8 +173,8 @@ func TestReconcile(t *testing.T) { withUnreachableCondition(corev1.ConditionUnknown, time.Now()), withActiveAPIURLOverrideCondition(corev1.ConditionUnknown), ), - errorConnecting: pointer.BoolPtr(true), - errorConnectingSecondary: pointer.BoolPtr(false), + errorConnecting: ptr.To(true), + errorConnectingSecondary: ptr.To(false), expectedUnreachableStatus: corev1.ConditionFalse, expectedActiveOverrideStatus: corev1.ConditionFalse, expectRequeue: true, @@ -186,7 +186,7 @@ func TestReconcile(t *testing.T) { withUnreachableCondition(corev1.ConditionFalse, time.Now()), withActiveAPIURLOverrideCondition(corev1.ConditionFalse), ), - errorConnecting: pointer.BoolPtr(true), + errorConnecting: ptr.To(true), expectedUnreachableStatus: corev1.ConditionFalse, expectedActiveOverrideStatus: corev1.ConditionFalse, expectRequeue: true, @@ -198,8 +198,8 @@ func TestReconcile(t *testing.T) { withUnreachableCondition(corev1.ConditionFalse, time.Now().Add(-maxUnreachableDuration)), withActiveAPIURLOverrideCondition(corev1.ConditionFalse), ), - errorConnecting: pointer.BoolPtr(true), - errorConnectingSecondary: pointer.BoolPtr(false), + errorConnecting: ptr.To(true), + errorConnectingSecondary: ptr.To(false), expectedUnreachableStatus: corev1.ConditionFalse, expectedActiveOverrideStatus: corev1.ConditionFalse, expectRequeue: true, @@ -211,7 +211,7 @@ func TestReconcile(t *testing.T) { withUnreachableCondition(corev1.ConditionFalse, time.Now()), withActiveAPIURLOverrideCondition(corev1.ConditionFalse), ), - errorConnecting: pointer.BoolPtr(false), + errorConnecting: ptr.To(false), expectedUnreachableStatus: corev1.ConditionFalse, 
expectedActiveOverrideStatus: corev1.ConditionTrue, expectRequeueAfter: true, @@ -223,8 +223,8 @@ func TestReconcile(t *testing.T) { withUnreachableCondition(corev1.ConditionFalse, time.Now().Add(-maxUnreachableDuration)), withActiveAPIURLOverrideCondition(corev1.ConditionTrue), ), - errorConnecting: pointer.BoolPtr(true), - errorConnectingSecondary: pointer.BoolPtr(false), + errorConnecting: ptr.To(true), + errorConnectingSecondary: ptr.To(false), expectedUnreachableStatus: corev1.ConditionFalse, expectedActiveOverrideStatus: corev1.ConditionFalse, expectRequeue: true, diff --git a/pkg/controller/utils/clientwrapper.go b/pkg/controller/utils/clientwrapper.go index a40fe9ab34d..6f8024ce9c8 100644 --- a/pkg/controller/utils/clientwrapper.go +++ b/pkg/controller/utils/clientwrapper.go @@ -158,7 +158,8 @@ func (cmt *ControllerMetricsTripper) RoundTrip(req *http.Request) (*http.Respons // to parse a resource from a path. func parsePath(path string) (string, error) { tokens := strings.Split(path[1:], "/") - if tokens[0] == "api" { + switch tokens[0] { + case "api": // Handle core resources: if len(tokens) == 3 || len(tokens) == 4 { return strings.Join([]string{"core", tokens[1], tokens[2]}, "/"), nil @@ -167,7 +168,7 @@ func parsePath(path string) (string, error) { if len(tokens) > 4 && tokens[2] == "namespaces" { return strings.Join([]string{"core", tokens[1], tokens[4]}, "/"), nil } - } else if tokens[0] == "apis" { + case "apis": // Handle resources with apigroups: if len(tokens) == 4 || len(tokens) == 5 { return strings.Join([]string{tokens[1], tokens[2], tokens[3]}, "/"), nil diff --git a/pkg/controller/utils/expectations.go b/pkg/controller/utils/expectations.go index ccc5dc444ae..aae53da9754 100644 --- a/pkg/controller/utils/expectations.go +++ b/pkg/controller/utils/expectations.go @@ -52,7 +52,7 @@ const ( // * Controllers that don't set expectations will get woken up for every matching controllee // ExpKeyFunc to parse out the key from a ControlleeExpectation -var ExpKeyFunc = func(obj interface{}) (string, error) { +var ExpKeyFunc = func(obj any) (string, error) { if e, ok := obj.(*ControlleeExpectations); ok { return e.key, nil } diff --git a/pkg/controller/utils/logtagger.go b/pkg/controller/utils/logtagger.go index 1e4d25db69a..409b8b7b59e 100644 --- a/pkg/controller/utils/logtagger.go +++ b/pkg/controller/utils/logtagger.go @@ -48,8 +48,8 @@ func (s StringLogTagger) GetAdditionalLogFieldsJSON() *string { var _ AdditionalLogFieldHavinThing = StringLogTagger{} -func parseLogFields(jsonMap string) (map[string]interface{}, error) { - kvmap := map[string]interface{}{} +func parseLogFields(jsonMap string) (map[string]any, error) { + kvmap := map[string]any{} if err := json.Unmarshal([]byte(jsonMap), &kvmap); err != nil { return nil, err } @@ -64,7 +64,7 @@ func parseLogFields(jsonMap string) (map[string]interface{}, error) { // no such fields are found, both returns are nil -- this is not considered an error. If // parsing succeeds, the first return is the unmarshaled map and the second return is nil. If // parsing fails, the map is nil and the error is bubbled up. 
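// A hedged usage sketch for ExtractLogFields, assuming the
// AdditionalLogFieldHavinThing interface requires only
// GetAdditionalLogFieldsJSON() *string and that logrus is imported as log.
// The jsonTagger type and its field are invented for illustration.
type jsonTagger struct{ raw *string }

func (j jsonTagger) GetAdditionalLogFieldsJSON() *string { return j.raw }

func exampleExtractLogFields() {
	s := `{"team": "hive", "env": "ci"}`
	fields, err := ExtractLogFields(jsonTagger{raw: &s})
	if err != nil {
		log.WithError(err).Error("malformed additional log fields")
		return
	}
	// fields is a map[string]any, directly convertible to logrus Fields:
	log.WithFields(log.Fields(fields)).Info("tagged entry")
}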
-func ExtractLogFields[O AdditionalLogFieldHavinThing](obj O) (map[string]interface{}, error) { +func ExtractLogFields[O AdditionalLogFieldHavinThing](obj O) (map[string]any, error) { addl_log_fields := obj.GetAdditionalLogFieldsJSON() if addl_log_fields == nil { return nil, nil diff --git a/pkg/controller/utils/logtagger_test.go b/pkg/controller/utils/logtagger_test.go index c167052cd4c..b94f5f23be1 100644 --- a/pkg/controller/utils/logtagger_test.go +++ b/pkg/controller/utils/logtagger_test.go @@ -21,7 +21,7 @@ func TestAddLogFields(t *testing.T) { tests := []struct { name string annotations map[string]string - want map[string]interface{} + want map[string]any }{ { name: "no annotations", @@ -41,12 +41,12 @@ { name: "empty json", annotations: map[string]string{alfa: `{}`}, - want: map[string]interface{}{"component": "hive"}, + want: map[string]any{"component": "hive"}, }, { name: "one field", annotations: map[string]string{alfa: `{"foo": "bar"}`}, - want: map[string]interface{}{ + want: map[string]any{ "foo": "bar", "component": "hive", }, @@ -54,7 +54,7 @@ { name: "multiple fields", annotations: map[string]string{alfa: `{"foo": "bar", "HELLO": "WORLD"}`}, - want: map[string]interface{}{ + want: map[string]any{ "foo": "bar", "HELLO": "WORLD", "component": "hive", }, @@ -64,8 +64,8 @@ name: "complex value", // logrus.WithFields happily accepts this (and marshals to a string when emitting) annotations: map[string]string{alfa: `{"foo": {"bar": "baz"}}`}, - want: map[string]interface{}{ - "foo": map[string]interface{}{"bar": "baz"}, + want: map[string]any{ + "foo": map[string]any{"bar": "baz"}, "component": "hive", }, }, diff --git a/pkg/controller/utils/nutanixutils/nutanix.go b/pkg/controller/utils/nutanixutils/nutanix.go index 142426b2d68..2a40b83af45 100644 --- a/pkg/controller/utils/nutanixutils/nutanix.go +++ b/pkg/controller/utils/nutanixutils/nutanix.go @@ -87,7 +87,7 @@ func convertFailureDomains[SourceFD any, TargetFD any, TargetPE any, SRR any]( // - []nutanixinstaller.PrismElement: A slice of unique converted Installer PrismElements (nutanixinstaller.PrismElement). // - []string: A slice of unique subnet UUIDs gathered from all Hive failure domains. func ConvertHiveFailureDomains(hiveFailureDomains []nutanix.FailureDomain) ([]nutanixinstaller.FailureDomain, []nutanixinstaller.PrismElement, []string) { - return convertFailureDomains[nutanix.FailureDomain, nutanixinstaller.FailureDomain, nutanixinstaller.PrismElement, nutanixinstaller.StorageResourceReference]( + return convertFailureDomains( hiveFailureDomains, convertHiveToInstallerPrismElement, convertHiveToInstallerStorageResource, @@ -111,7 +111,7 @@ func ConvertHiveFailureDomains(hiveFailureDomains []nutanix.FailureDomain) ([]nu // - []nutanix.PrismElement: A slice of unique converted Hive PrismElements (nutanix.PrismElement). // - []string: A slice of unique subnet UUIDs gathered from all Installer failure domains.
func ConvertInstallerFailureDomains(installerFailureDomains []nutanixinstaller.FailureDomain) ([]nutanix.FailureDomain, []nutanix.PrismElement, []string) { - return convertFailureDomains[nutanixinstaller.FailureDomain, nutanix.FailureDomain, nutanix.PrismElement, nutanix.StorageResourceReference]( + return convertFailureDomains( installerFailureDomains, convertInstallerToHivePrismElement, convertInstallerToHiveStorageResource, diff --git a/pkg/controller/utils/rawext.go b/pkg/controller/utils/rawext.go deleted file mode 100644 index 21b4ee304d3..00000000000 --- a/pkg/controller/utils/rawext.go +++ /dev/null @@ -1,28 +0,0 @@ -package utils - -import ( - "k8s.io/apimachinery/pkg/api/meta" - "k8s.io/apimachinery/pkg/runtime" -) - -// AddTypeMeta adds type metadata to objects in a list of RawExtension -// TypeMeta is needed for proper serialization/deserialization -func AddTypeMeta(objects []runtime.RawExtension, scheme *runtime.Scheme) ([]runtime.RawExtension, error) { - result := []runtime.RawExtension{} - for i := range objects { - object := objects[i].Object - gvks, _, err := scheme.ObjectKinds(object) - if err != nil { - return nil, err - } - accessor, err := meta.TypeAccessor(object) - if err != nil { - return nil, err - } - apiVersion, kind := gvks[0].ToAPIVersionAndKind() - accessor.SetAPIVersion(apiVersion) - accessor.SetKind(kind) - result = append(result, runtime.RawExtension{Object: object}) - } - return result, nil -} diff --git a/pkg/controller/utils/statefulset.go b/pkg/controller/utils/statefulset.go index 3d3e27ace0b..9c80e702eb7 100644 --- a/pkg/controller/utils/statefulset.go +++ b/pkg/controller/utils/statefulset.go @@ -11,32 +11,15 @@ import ( "strings" hivev1 "github.com/openshift/hive/apis/hive/v1" - "github.com/openshift/hive/pkg/util/scheme" "github.com/pkg/errors" log "github.com/sirupsen/logrus" appsv1 "k8s.io/api/apps/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/serializer" "k8s.io/apimachinery/pkg/types" "sigs.k8s.io/controller-runtime/pkg/client" ) -var ( - appsScheme = scheme.GetScheme() - appsCodecs = serializer.NewCodecFactory(appsScheme) -) - -// ReadStatefulsetOrDie converts a statefulset asset into an actual instance of a statefulset. -func ReadStatefulsetOrDie(objBytes []byte) *appsv1.StatefulSet { - requiredObj, err := runtime.Decode(appsCodecs.UniversalDecoder(appsv1.SchemeGroupVersion), objBytes) - if err != nil { - panic(err) - } - return requiredObj.(*appsv1.StatefulSet) -} - // CalculateStatefulSetSpecHash returns a hash of the statefulset.Spec. func CalculateStatefulSetSpecHash(statefulset *appsv1.StatefulSet) (string, error) { diff --git a/pkg/controller/utils/utils.go b/pkg/controller/utils/utils.go index bf9e6d1ab6f..64642ae2793 100644 --- a/pkg/controller/utils/utils.go +++ b/pkg/controller/utils/utils.go @@ -173,7 +173,7 @@ func GetControllerConfig(client client.Client, controllerName hivev1.ControllerN // MergeJsons will merge the global and local pull secret and return it func MergeJsons(globalPullSecret string, localPullSecret string, cdLog log.FieldLogger) (string, error) { - type dockerConfig map[string]interface{} + type dockerConfig map[string]any type dockerConfigJSON struct { Auths dockerConfig `json:"auths"` } @@ -205,7 +205,7 @@ func MergeJsons(globalPullSecret string, localPullSecret string, cdLog log.Field } // GetChecksumOfObject returns the md5sum hash of the object passed in. 
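// A standalone reconstruction of what GetChecksumOfObject (diffed just below)
// does under the hood: JSON-encode, then md5. The exact hex formatting is an
// assumption; only the marshal-then-hash shape is taken from the source.
package main

import (
	"crypto/md5"
	"encoding/json"
	"fmt"
)

func checksum(object any) (string, error) {
	b, err := json.Marshal(object)
	if err != nil {
		return "", err
	}
	return fmt.Sprintf("%x", md5.Sum(b)), nil
}

func main() {
	sum, _ := checksum(map[string]any{"replicas": 3})
	fmt.Println(sum) // 32-character md5 hex digest
}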
-func GetChecksumOfObject(object interface{}) (string, error) { +func GetChecksumOfObject(object any) (string, error) { b, err := json.Marshal(object) if err != nil { return "", err @@ -214,7 +214,7 @@ } // GetChecksumOfObjects returns the md5sum hash of the objects passed in. -func GetChecksumOfObjects(objects ...interface{}) (string, error) { +func GetChecksumOfObjects(objects ...any) (string, error) { return GetChecksumOfObject(objects) } diff --git a/pkg/controller/velerobackup/velerobackup_controller.go b/pkg/controller/velerobackup/velerobackup_controller.go index a2400f7c60e..1a8061f8412 100644 --- a/pkg/controller/velerobackup/velerobackup_controller.go +++ b/pkg/controller/velerobackup/velerobackup_controller.go @@ -314,7 +314,7 @@ func (r *ReconcileBackup) calculateObjectsChecksumWithoutStatus(logger log.Field for i, object := range objects { var meta *metav1.ObjectMeta - var spec interface{} + var spec any switch t := object.(type) { case *hivev1.ClusterDeployment: diff --git a/pkg/creds/aws/aws.go b/pkg/creds/aws/aws.go new file mode 100644 index 00000000000..dab2005f43c --- /dev/null +++ b/pkg/creds/aws/aws.go @@ -0,0 +1,88 @@ +package aws + +import ( + "errors" + "os" + "path/filepath" + + log "github.com/sirupsen/logrus" + ini "gopkg.in/ini.v1" + + installertypes "github.com/openshift/installer/pkg/types" + + "sigs.k8s.io/controller-runtime/pkg/client" + + "github.com/openshift/hive/contrib/pkg/utils" + "github.com/openshift/hive/pkg/awsclient" + "github.com/openshift/hive/pkg/constants" +) + +// GetAWSCreds reads AWS credentials from either the specified credentials file, +// the standard environment variables, or a default credentials file (~/.aws/credentials). +// The defaultCredsFile will only be used if credsFile is empty and the environment variables +// are not set.
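// A hedged usage sketch for GetAWSCreds, defined next. Resolution order:
// the explicit credsFile argument wins, then AWS_ACCESS_KEY_ID and
// AWS_SECRET_ACCESS_KEY from the environment, then defaultCredsFile. The
// default path below mirrors typical callers and is an assumption.
func exampleGetAWSCreds() {
	home, _ := os.UserHomeDir()
	accessKeyID, secretAccessKey, err := GetAWSCreds("", filepath.Join(home, ".aws", "credentials"))
	if err != nil {
		log.WithError(err).Fatal("no usable AWS credentials found")
	}
	_, _ = accessKeyID, secretAccessKey
}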
+func GetAWSCreds(credsFile, defaultCredsFile string) (string, string, error) { + credsFilePath := defaultCredsFile + switch { + case credsFile != "": + credsFilePath = credsFile + default: + secretAccessKey := os.Getenv("AWS_SECRET_ACCESS_KEY") + accessKeyID := os.Getenv("AWS_ACCESS_KEY_ID") + if len(secretAccessKey) > 0 && len(accessKeyID) > 0 { + return accessKeyID, secretAccessKey, nil + } + } + credFile, err := ini.Load(credsFilePath) + if err != nil { + log.Error("Cannot load AWS credentials") + return "", "", err + } + defaultSection, err := credFile.GetSection("default") + if err != nil { + log.Error("Cannot get default section from AWS credentials file") + return "", "", err + } + accessKeyIDValue := defaultSection.Key("aws_access_key_id") + secretAccessKeyValue := defaultSection.Key("aws_secret_access_key") + if accessKeyIDValue == nil || secretAccessKeyValue == nil { + log.Error("AWS credentials file missing keys in default section") + } + return accessKeyIDValue.String(), secretAccessKeyValue.String(), nil +} + +var awsConfigForbidCredentialProcess utils.ProjectToDirFileFilter = func(key string, contents []byte) (basename string, newContents []byte, err error) { + // First, only process aws_config + bn, newContents, err := utils.ProjectOnlyTheseKeys(constants.AWSConfigSecretKey)(key, contents) + // If that passed, scrub for credential_process + if err == nil && bn != "" && awsclient.ContainsCredentialProcess(newContents) { + return "", nil, errors.New("credential_process is insecure and thus forbidden") + } + return bn, newContents, err +} + +// ConfigureCreds loads a secret designated by the environment variables CLUSTERDEPLOYMENT_NAMESPACE +// and CREDS_SECRET_NAME and configures AWS credential environment variables and config files +// accordingly. +func ConfigureCreds(c client.Client, metadata *installertypes.ClusterMetadata) { + credsSecret := utils.LoadSecretOrDie(c, "CREDS_SECRET_NAME") + if credsSecret == nil { + return + } + // Should we bounce if any of the following already exist? + if id := string(credsSecret.Data[constants.AWSAccessKeyIDSecretKey]); id != "" { + os.Setenv("AWS_ACCESS_KEY_ID", id) + } + if secret := string(credsSecret.Data[constants.AWSSecretAccessKeySecretKey]); secret != "" { + os.Setenv("AWS_SECRET_ACCESS_KEY", secret) + } + if config := credsSecret.Data[constants.AWSConfigSecretKey]; len(config) != 0 { + // Lay this down as a file, but forbid credential_process + utils.ProjectToDir(credsSecret, constants.AWSCredsMount, awsConfigForbidCredentialProcess) + os.Setenv("AWS_CONFIG_FILE", filepath.Join(constants.AWSCredsMount, constants.AWSConfigSecretKey)) + } + // This would normally allow credential_process in the config file, but we checked for that above. 
+ os.Setenv("AWS_SDK_LOAD_CONFIG", "true") + // Install cluster proxy trusted CA bundle + utils.InstallCerts(constants.TrustedCABundleDir) +} diff --git a/contrib/pkg/utils/azure/azure.go b/pkg/creds/azure/azure.go similarity index 100% rename from contrib/pkg/utils/azure/azure.go rename to pkg/creds/azure/azure.go diff --git a/pkg/creds/creds.go b/pkg/creds/creds.go new file mode 100644 index 00000000000..5087fa510a1 --- /dev/null +++ b/pkg/creds/creds.go @@ -0,0 +1,26 @@ +package creds + +import ( + "sigs.k8s.io/controller-runtime/pkg/client" + + "github.com/openshift/installer/pkg/types" + + "github.com/openshift/hive/pkg/constants" + awsutil "github.com/openshift/hive/pkg/creds/aws" + azurecreds "github.com/openshift/hive/pkg/creds/azure" + gcpcreds "github.com/openshift/hive/pkg/creds/gcp" + ibmcloudutil "github.com/openshift/hive/pkg/creds/ibmcloud" + nutanixutil "github.com/openshift/hive/pkg/creds/nutanix" + openstackutil "github.com/openshift/hive/pkg/creds/openstack" + vsphereutil "github.com/openshift/hive/pkg/creds/vsphere" +) + +var ConfigureCreds = map[string]func(client.Client, *types.ClusterMetadata){ + constants.PlatformAWS: awsutil.ConfigureCreds, + constants.PlatformAzure: azurecreds.ConfigureCreds, + constants.PlatformGCP: gcpcreds.ConfigureCreds, + constants.PlatformIBMCloud: ibmcloudutil.ConfigureCreds, + constants.PlatformNutanix: nutanixutil.ConfigureCreds, + constants.PlatformOpenStack: openstackutil.ConfigureCreds, + constants.PlatformVSphere: vsphereutil.ConfigureCreds, +} diff --git a/contrib/pkg/utils/gcp/gcp.go b/pkg/creds/gcp/gcp.go similarity index 100% rename from contrib/pkg/utils/gcp/gcp.go rename to pkg/creds/gcp/gcp.go diff --git a/contrib/pkg/utils/ibmcloud/ibmcloud.go b/pkg/creds/ibmcloud/ibmcloud.go similarity index 100% rename from contrib/pkg/utils/ibmcloud/ibmcloud.go rename to pkg/creds/ibmcloud/ibmcloud.go diff --git a/contrib/pkg/utils/nutanix/nutanix.go b/pkg/creds/nutanix/nutanix.go similarity index 100% rename from contrib/pkg/utils/nutanix/nutanix.go rename to pkg/creds/nutanix/nutanix.go diff --git a/contrib/pkg/utils/openstack/openstack.go b/pkg/creds/openstack/openstack.go similarity index 100% rename from contrib/pkg/utils/openstack/openstack.go rename to pkg/creds/openstack/openstack.go diff --git a/contrib/pkg/utils/vsphere/vsphere.go b/pkg/creds/vsphere/vsphere.go similarity index 100% rename from contrib/pkg/utils/vsphere/vsphere.go rename to pkg/creds/vsphere/vsphere.go diff --git a/pkg/imageset/updateinstaller.go b/pkg/imageset/updateinstaller.go index 8d26e01026f..13806e0ab73 100644 --- a/pkg/imageset/updateinstaller.go +++ b/pkg/imageset/updateinstaller.go @@ -264,10 +264,7 @@ func (o *UpdateInstallerImageOptions) setImageResolutionErrorCondition(cd *hivev } func getClient(kubeConfig *rest.Config, fieldManager string) (client.Client, error) { - scheme := scheme.GetScheme() - managerOptions := manager.Options{ - Scheme: scheme, MapperProvider: apiutil.NewDynamicRESTMapper, } httpClient, err := rest.HTTPClientFor(kubeConfig) @@ -279,7 +276,7 @@ func getClient(kubeConfig *rest.Config, fieldManager string) (client.Client, err return nil, fmt.Errorf("failed to get API Group-Resources") } kubeClient, err := client.New(kubeConfig, client.Options{ - Scheme: scheme, + Scheme: scheme.GetScheme(), Mapper: mapper, }) if err != nil { diff --git a/pkg/install/generate.go b/pkg/install/generate.go index 6c33cef1a60..8902c1d8bb8 100644 --- a/pkg/install/generate.go +++ b/pkg/install/generate.go @@ -21,7 +21,6 @@ import ( metav1 
"k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" - "k8s.io/utils/pointer" "k8s.io/utils/ptr" apihelpers "github.com/openshift/hive/apis/helpers" @@ -456,9 +455,9 @@ func GenerateInstallerJob( Labels: labels, }, Spec: batchv1.JobSpec{ - BackoffLimit: pointer.Int32Ptr(0), - Completions: pointer.Int32Ptr(1), - ActiveDeadlineSeconds: pointer.Int64Ptr(int64(provisionJobDeadline.Seconds())), + BackoffLimit: ptr.To(int32(0)), + Completions: ptr.To(int32(1)), + ActiveDeadlineSeconds: ptr.To(int64(provisionJobDeadline.Seconds())), Template: corev1.PodTemplateSpec{ ObjectMeta: metav1.ObjectMeta{ Labels: labels, diff --git a/pkg/installmanager/dnscleanup.go b/pkg/installmanager/dnscleanup.go index b0e8f5467c5..2bccbcfa84c 100644 --- a/pkg/installmanager/dnscleanup.go +++ b/pkg/installmanager/dnscleanup.go @@ -10,12 +10,12 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" hivev1 "github.com/openshift/hive/apis/hive/v1" - azureutils "github.com/openshift/hive/contrib/pkg/utils/azure" - gcputils "github.com/openshift/hive/contrib/pkg/utils/gcp" "github.com/openshift/hive/pkg/awsclient" "github.com/openshift/hive/pkg/azureclient" dns "github.com/openshift/hive/pkg/controller/dnszone" controllerutils "github.com/openshift/hive/pkg/controller/utils" + azurecreds "github.com/openshift/hive/pkg/creds/azure" + gcpcreds "github.com/openshift/hive/pkg/creds/gcp" "github.com/openshift/hive/pkg/gcpclient" ) @@ -79,7 +79,7 @@ func cleanupAzureDNSZone(dnsZone *hivev1.DNSZone, logger log.FieldLogger) error logger = logger.WithField("dnsZoneID", dnsZone.Spec.Zone) logger.Info("cleaning up DNSZone") - creds, err := azureutils.GetCreds("") + creds, err := azurecreds.GetCreds("") if err != nil { logger.WithError(err).Error("failed to get Azure creds") return err @@ -111,7 +111,7 @@ func cleanupGCPDNSZone(dnsZone *hivev1.DNSZone, logger log.FieldLogger) error { logger = logger.WithField("zoneName", *dnsZone.Status.GCP.ZoneName) logger.Info("cleaning up DNSZone") - creds, err := gcputils.GetCreds("") + creds, err := gcpcreds.GetCreds("") if err != nil { logger.WithError(err).Error("failed to get GCP creds") return err diff --git a/pkg/installmanager/helper_test.go b/pkg/installmanager/helper_test.go index f13ec7d52a4..2e976b9d3b1 100644 --- a/pkg/installmanager/helper_test.go +++ b/pkg/installmanager/helper_test.go @@ -9,7 +9,7 @@ import ( corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" - "k8s.io/utils/pointer" + "k8s.io/utils/ptr" "sigs.k8s.io/controller-runtime/pkg/client" hivev1 "github.com/openshift/hive/apis/hive/v1" @@ -58,7 +58,7 @@ func testClusterProvisionWithInfraIDSet() *hivev1.ClusterProvision { Name: testDeploymentName, }, Stage: hivev1.ClusterProvisionStageProvisioning, - InfraID: pointer.String("dummy-infra-id"), + InfraID: ptr.To("dummy-infra-id"), }, } } diff --git a/pkg/installmanager/ibm_metadata_test.go b/pkg/installmanager/ibm_metadata_test.go index 3414b538145..92cf65a307b 100644 --- a/pkg/installmanager/ibm_metadata_test.go +++ b/pkg/installmanager/ibm_metadata_test.go @@ -9,7 +9,8 @@ import ( "github.com/openshift/hive/pkg/ibmclient" mockibm "github.com/openshift/hive/pkg/ibmclient/mock" "github.com/stretchr/testify/assert" - "k8s.io/utils/pointer" + + "k8s.io/utils/ptr" ) func TestGetCISInstanceCRN(t *testing.T) { @@ -89,7 +90,7 @@ func TestGetAccountID(t *testing.T) { { name: "AccountID Found", existingAPIKey: &iamidentityv1.APIKey{ - AccountID: 
pointer.String("testaccountid"), + AccountID: ptr.To("testaccountid"), }, expectErr: false, }, diff --git a/pkg/installmanager/installmanager.go b/pkg/installmanager/installmanager.go index 640ac91692a..41ffb34ad4c 100644 --- a/pkg/installmanager/installmanager.go +++ b/pkg/installmanager/installmanager.go @@ -60,17 +60,11 @@ import ( jsoniter "github.com/json-iterator/go" hivev1 "github.com/openshift/hive/apis/hive/v1" contributils "github.com/openshift/hive/contrib/pkg/utils" - awsutils "github.com/openshift/hive/contrib/pkg/utils/aws" - azureutils "github.com/openshift/hive/contrib/pkg/utils/azure" - gcputils "github.com/openshift/hive/contrib/pkg/utils/gcp" - ibmutils "github.com/openshift/hive/contrib/pkg/utils/ibmcloud" - nutanixutils "github.com/openshift/hive/contrib/pkg/utils/nutanix" - openstackutils "github.com/openshift/hive/contrib/pkg/utils/openstack" - vsphereutils "github.com/openshift/hive/contrib/pkg/utils/vsphere" "github.com/openshift/hive/pkg/awsclient" "github.com/openshift/hive/pkg/constants" "github.com/openshift/hive/pkg/controller/machinepool" "github.com/openshift/hive/pkg/controller/utils" + "github.com/openshift/hive/pkg/creds" "github.com/openshift/hive/pkg/gcpclient" "github.com/openshift/hive/pkg/ibmclient" "github.com/openshift/hive/pkg/resource" @@ -538,22 +532,7 @@ func (m *InstallManager) Run() error { func loadSecrets(m *InstallManager, cd *hivev1.ClusterDeployment) { // Configure credentials (including certs) appropriately according to the cloud provider - switch { - case cd.Spec.Platform.AWS != nil: - awsutils.ConfigureCreds(m.DynamicClient, nil) - case cd.Spec.Platform.Azure != nil: - azureutils.ConfigureCreds(m.DynamicClient, nil) - case cd.Spec.Platform.GCP != nil: - gcputils.ConfigureCreds(m.DynamicClient, nil) - case cd.Spec.Platform.OpenStack != nil: - openstackutils.ConfigureCreds(m.DynamicClient, nil) - case cd.Spec.Platform.VSphere != nil: - vsphereutils.ConfigureCreds(m.DynamicClient, nil) - case cd.Spec.Platform.IBMCloud != nil: - ibmutils.ConfigureCreds(m.DynamicClient, nil) - case cd.Spec.Platform.Nutanix != nil: - nutanixutils.ConfigureCreds(m.DynamicClient, nil) - } + creds.ConfigureCreds[utils.GetClusterPlatform(cd)](m.DynamicClient, nil) // Load up the install config and pull secret. These env vars are required; else we'll panic. contributils.ProjectToDir(contributils.LoadSecretOrDie(m.DynamicClient, "INSTALLCONFIG_SECRET_NAME"), "/installconfig", nil) @@ -1090,7 +1069,7 @@ func patchWorkerMachineSetManifest(manifestBytes []byte, pool *hivev1.MachinePoo } securityGroupFilterValue := pool.Annotations[constants.ExtraWorkerSecurityGroupAnnotation] - var vpcIDFilterValue map[string]interface{} = map[string]interface{}{ + var vpcIDFilterValue map[string]any = map[string]any{ "name": "vpc-id", "values": []string{vpcID}, } diff --git a/pkg/operator/util/apply.go b/pkg/operator/hive/apply.go similarity index 77% rename from pkg/operator/util/apply.go rename to pkg/operator/hive/apply.go index f3dd7cdcd55..043315c12b7 100644 --- a/pkg/operator/util/apply.go +++ b/pkg/operator/hive/apply.go @@ -1,4 +1,4 @@ -package util +package hive import ( "github.com/openshift/library-go/pkg/operator/resource/resourceread" @@ -18,13 +18,13 @@ import ( "github.com/openshift/hive/pkg/util/scheme" ) -// RTOApplyOpt (runtime.Object apply option) modifies a runtime.Object in preparation for applying it. 
-type RTOApplyOpt func(runtime.Object, log.FieldLogger) error +// rtoApplyOpt (runtime.Object apply option) modifies a runtime.Object in preparation for applying it. +type rtoApplyOpt func(runtime.Object, log.FieldLogger) error -// WithGarbageCollection returns a RTOApplyOpt that adds an owner reference to parent to the +// withGarbageCollection returns an rtoApplyOpt that adds an owner reference to parent to the // runtime object so the latter gets cleaned up when the parent is deleted. Errors only happen // if the runtime object can't be interpreted as a metav1.Object or meta.Type. -func WithGarbageCollection(parent v1.Object) RTOApplyOpt { +func withGarbageCollection(parent v1.Object) rtoApplyOpt { return func(runtimeObj runtime.Object, hLog log.FieldLogger) error { obj, err := meta.Accessor(runtimeObj) if err != nil { @@ -52,9 +52,9 @@ } } -// WithNamespaceOverride returns a RTOApplyOpt that sets the namespace of the runtime object. +// withNamespaceOverride returns an rtoApplyOpt that sets the namespace of the runtime object. // There are no error cases. -func WithNamespaceOverride(namespaceOverride string) RTOApplyOpt { +func withNamespaceOverride(namespaceOverride string) rtoApplyOpt { return func(runtimeObj runtime.Object, hLog log.FieldLogger) error { obj, err := meta.Accessor(runtimeObj) if err != nil { @@ -66,11 +66,11 @@ } } -// CRBWithSubjectNSOverride sets the namespace of each Subject to namespaceOverride if it is +// crbWithSubjectNSOverride sets the namespace of each Subject to namespaceOverride if it is // - a ServiceAccount subject // - otherwise unset // Errors if the runtime object is not a *ClusterRoleBinding -func CRBWithSubjectNSOverride(namespaceOverride string) RTOApplyOpt { +func crbWithSubjectNSOverride(namespaceOverride string) rtoApplyOpt { return func(rto runtime.Object, hLog log.FieldLogger) error { rb, ok := rto.(*rbacv1.ClusterRoleBinding) if !ok { @@ -85,49 +85,49 @@ } } -// ToRuntimeObject defines a function that produces a runtime object. It is intended for use +// toRuntimeObject defines a function that produces a runtime object. It is intended for use // in closures to supply such objects from different sources (asset paths, byte arrays) to // ApplyRuntimeObject(). -type ToRuntimeObject func(log.FieldLogger) (runtime.Object, error) +type toRuntimeObject func(log.FieldLogger) (runtime.Object, error) -// Passthrough's func just returns the input runtime object. -func Passthrough(rto runtime.Object) ToRuntimeObject { +// passthrough's func just returns the input runtime object. +func passthrough(rto runtime.Object) toRuntimeObject { return func(fl log.FieldLogger) (runtime.Object, error) { return rto, nil } } -// FromAssetPath's func loads a runtime object from a known asset path in bindata. -func FromAssetPath(assetPath string) ToRuntimeObject { +// fromAssetPath's func loads a runtime object from a known asset path in bindata.
+func fromAssetPath(assetPath string) toRuntimeObject { return func(hLog log.FieldLogger) (runtime.Object, error) { hLog.WithField("assetPath", assetPath).Info("loading runtime object from asset") return readRuntimeObject(assetPath) } } -// CRBFromAssetPath is a special case of FromAssetPath that returns a *ClusterRoleBinding +// crbFromAssetPath is a special case of fromAssetPath that returns a *ClusterRoleBinding // (a specific instance of a runtime object) from a known asset path in bindata. Panics if // the asset is not a CRB, or if the asset can't be loaded from the specified path. -func CRBFromAssetPath(roleBindingAssetPath string) ToRuntimeObject { +func crbFromAssetPath(roleBindingAssetPath string) toRuntimeObject { return func(hLog log.FieldLogger) (runtime.Object, error) { hLog.WithField("assetPath", roleBindingAssetPath).Info("loading ClusterRoleBinding from asset") return resourceread.ReadClusterRoleBindingV1OrDie(assets.MustAsset(roleBindingAssetPath)), nil } } -// FromBytes produces a func that decodes a byte array into a runtime object. -func FromBytes(assetBytes []byte) ToRuntimeObject { +// fromBytes produces a func that decodes a byte array into a runtime object. +func fromBytes(assetBytes []byte) toRuntimeObject { return func(hLog log.FieldLogger) (runtime.Object, error) { hLog.Info("decoding runtime object from bytes") return decodeRuntimeObject(assetBytes) } } -// ApplyRuntimeObject +// applyRuntimeObject // - Executes rtoFactory to produce a runtime object. // - Modifies the runtime object according to opts. // - Applies the runtime object to the cluster via h. -func ApplyRuntimeObject(h resource.Helper, rtoFactory ToRuntimeObject, hLog log.FieldLogger, opts ...RTOApplyOpt) (resource.ApplyResult, error) { +func applyRuntimeObject(h resource.Helper, rtoFactory toRuntimeObject, hLog log.FieldLogger, opts ...rtoApplyOpt) (resource.ApplyResult, error) { requiredObj, err := rtoFactory(hLog) if err != nil { hLog.WithError(err).Error("failed to convert to runtime object") @@ -142,23 +142,23 @@ func ApplyRuntimeObject(h resource.Helper, rtoFactory ToRuntimeObject, hLog log.
return h.ApplyRuntimeObject(requiredObj, scheme.GetScheme()) } -func DeleteAssetByPathWithNSOverride(h resource.Helper, assetPath, namespaceOverride string, hiveconfig *hivev1.HiveConfig) error { +func deleteAssetByPathWithNSOverride(h resource.Helper, assetPath, namespaceOverride string, hiveconfig *hivev1.HiveConfig) error { requiredObj, err := readRuntimeObject(assetPath) if err != nil { return errors.Wrapf(err, "unable to decode asset: %s", assetPath) } - return DeleteRuntimeObjectWithNSOverride(h, requiredObj, namespaceOverride, hiveconfig) + return deleteRuntimeObjectWithNSOverride(h, requiredObj, namespaceOverride, hiveconfig) } -func DeleteAssetBytesWithNSOverride(h resource.Helper, assetBytes []byte, namespaceOverride string, hiveconfig *hivev1.HiveConfig) error { +func deleteAssetBytesWithNSOverride(h resource.Helper, assetBytes []byte, namespaceOverride string, hiveconfig *hivev1.HiveConfig) error { rtObj, err := decodeRuntimeObject(assetBytes) if err != nil { return errors.Wrap(err, "unable to decode asset") } - return DeleteRuntimeObjectWithNSOverride(h, rtObj, namespaceOverride, hiveconfig) + return deleteRuntimeObjectWithNSOverride(h, rtObj, namespaceOverride, hiveconfig) } -func DeleteRuntimeObjectWithNSOverride(h resource.Helper, requiredObj runtime.Object, namespaceOverride string, hiveconfig *hivev1.HiveConfig) error { +func deleteRuntimeObjectWithNSOverride(h resource.Helper, requiredObj runtime.Object, namespaceOverride string, hiveconfig *hivev1.HiveConfig) error { objA, _ := meta.Accessor(requiredObj) objT, _ := meta.TypeAccessor(requiredObj) if err := h.Delete(objT.GetAPIVersion(), objT.GetKind(), namespaceOverride, objA.GetName()); err != nil { diff --git a/pkg/operator/util/conditions.go b/pkg/operator/hive/conditions.go similarity index 99% rename from pkg/operator/util/conditions.go rename to pkg/operator/hive/conditions.go index 15b1d692dc8..8e7459920b5 100644 --- a/pkg/operator/util/conditions.go +++ b/pkg/operator/hive/conditions.go @@ -1,4 +1,4 @@ -package util +package hive import ( hivev1 "github.com/openshift/hive/apis/hive/v1" diff --git a/pkg/operator/hive/configmap.go b/pkg/operator/hive/configmap.go index aa913da0421..af826e1c92b 100644 --- a/pkg/operator/hive/configmap.go +++ b/pkg/operator/hive/configmap.go @@ -20,7 +20,6 @@ import ( hivecontractsv1alpha1 "github.com/openshift/hive/apis/hivecontracts/v1alpha1" "github.com/openshift/hive/pkg/constants" "github.com/openshift/hive/pkg/controller/utils" - "github.com/openshift/hive/pkg/operator/util" "github.com/openshift/hive/pkg/resource" "github.com/openshift/hive/pkg/util/contracts" "k8s.io/apimachinery/pkg/util/sets" @@ -340,7 +339,7 @@ func (r *ReconcileHiveConfig) deployConfigMap(hLog log.FieldLogger, h resource.H } } - result, err := util.ApplyRuntimeObject(h, util.Passthrough(cm), hLog, util.WithGarbageCollection(instance)) + result, err := applyRuntimeObject(h, passthrough(cm), hLog, withGarbageCollection(instance)) if err != nil { cmLog.WithError(err).Error("error applying configmap") return "", err diff --git a/pkg/operator/hive/hive.go b/pkg/operator/hive/hive.go index 6acb98f5e15..18228a38b6c 100644 --- a/pkg/operator/hive/hive.go +++ b/pkg/operator/hive/hive.go @@ -30,7 +30,6 @@ import ( "github.com/openshift/hive/pkg/controller/images" "github.com/openshift/hive/pkg/controller/utils" "github.com/openshift/hive/pkg/operator/assets" - "github.com/openshift/hive/pkg/operator/util" "github.com/openshift/hive/pkg/resource" ) @@ -65,7 +64,7 @@ func (r *ReconcileHiveConfig) deployHive(hLog 
log.FieldLogger, h resource.Helper for _, asset := range assetsToClean { hLog.Infof("Deleting asset %s from old target namespace %s", asset, ns) // DeleteAssetWithNSOverride already no-ops for IsNotFound - if err := util.DeleteAssetByPathWithNSOverride(h, asset, ns, instance); err != nil { + if err := deleteAssetByPathWithNSOverride(h, asset, ns, instance); err != nil { return errors.Wrapf(err, "error deleting asset %s from old target namespace %s", asset, ns) } } @@ -299,7 +298,7 @@ func (r *ReconcileHiveConfig) deployHive(hLog log.FieldLogger, h resource.Helper // Load namespaced assets, decode them, set to our target namespace, and apply: for _, assetPath := range namespacedAssets { - if _, err := util.ApplyRuntimeObject(h, util.FromAssetPath(assetPath), hLog, util.WithNamespaceOverride(hiveNSName), util.WithGarbageCollection(instance)); err != nil { + if _, err := applyRuntimeObject(h, fromAssetPath(assetPath), hLog, withNamespaceOverride(hiveNSName), withGarbageCollection(instance)); err != nil { hLog.WithError(err).Error("error applying object with namespace override") return err } @@ -312,7 +311,7 @@ func (r *ReconcileHiveConfig) deployHive(hLog log.FieldLogger, h resource.Helper "config/controllers/hive_controllers_role.yaml", } for _, a := range applyAssets { - if _, err := util.ApplyRuntimeObject(h, util.FromAssetPath(a), hLog, util.WithGarbageCollection(instance)); err != nil { + if _, err := applyRuntimeObject(h, fromAssetPath(a), hLog, withGarbageCollection(instance)); err != nil { hLog.WithField("asset", a).WithError(err).Error("error applying asset") return err } @@ -325,7 +324,7 @@ func (r *ReconcileHiveConfig) deployHive(hLog log.FieldLogger, h resource.Helper } for _, crbAsset := range clusterRoleBindingAssets { - if _, err := util.ApplyRuntimeObject(h, util.CRBFromAssetPath(crbAsset), hLog, util.CRBWithSubjectNSOverride(hiveNSName), util.WithGarbageCollection(instance)); err != nil { + if _, err := applyRuntimeObject(h, crbFromAssetPath(crbAsset), hLog, crbWithSubjectNSOverride(hiveNSName), withGarbageCollection(instance)); err != nil { hLog.WithError(err).Error("error applying ClusterRoleBinding with namespace override") return err } @@ -346,7 +345,7 @@ func (r *ReconcileHiveConfig) deployHive(hLog log.FieldLogger, h resource.Helper if r.isOpenShift { hLog.Info("deploying OpenShift specific assets") for _, a := range openshiftSpecificAssets { - _, err = util.ApplyRuntimeObject(h, util.FromAssetPath(a), hLog, util.WithGarbageCollection(instance)) + _, err = applyRuntimeObject(h, fromAssetPath(a), hLog, withGarbageCollection(instance)) if err != nil { return err } @@ -363,7 +362,7 @@ func (r *ReconcileHiveConfig) deployHive(hLog log.FieldLogger, h resource.Helper } hiveDeployment.Namespace = hiveNSName - result, err := util.ApplyRuntimeObject(h, util.Passthrough(hiveDeployment), hLog, util.WithGarbageCollection(instance)) + result, err := applyRuntimeObject(h, passthrough(hiveDeployment), hLog, withGarbageCollection(instance)) if err != nil { hLog.WithError(err).Error("error applying deployment") return err @@ -426,7 +425,7 @@ func (r *ReconcileHiveConfig) includeAdditionalCAs(hLog log.FieldLogger, h resou "ca.crt": additionalCA.Bytes(), }, } - result, err := util.ApplyRuntimeObject(h, util.Passthrough(caSecret), hLog, util.WithGarbageCollection(instance)) + result, err := applyRuntimeObject(h, passthrough(caSecret), hLog, withGarbageCollection(instance)) if err != nil { hLog.WithError(err).Error("error applying additional cert secret") return err diff --git 
a/pkg/operator/hive/hive_controller.go b/pkg/operator/hive/hive_controller.go index 4e22290c28e..dea069edd6a 100644 --- a/pkg/operator/hive/hive_controller.go +++ b/pkg/operator/hive/hive_controller.go @@ -42,7 +42,6 @@ import ( "github.com/openshift/hive/pkg/constants" controllerutils "github.com/openshift/hive/pkg/controller/utils" "github.com/openshift/hive/pkg/operator/metrics" - "github.com/openshift/hive/pkg/operator/util" ) const ( @@ -231,7 +230,7 @@ func add(mgr manager.Manager, r reconcile.Reconciler) error { // Monitor CRDs so that we can keep latest list of supported contracts err = c.Watch(source.Kind(mgr.GetCache(), &apiextv1.CustomResourceDefinition{}, - handler.TypedEnqueueRequestsFromMapFunc[*apiextv1.CustomResourceDefinition](mapToHiveConfig[*apiextv1.CustomResourceDefinition](r, "CRD")))) // FIXME + handler.TypedEnqueueRequestsFromMapFunc(mapToHiveConfig[*apiextv1.CustomResourceDefinition](r, "CRD")))) // FIXME if err != nil { return err } @@ -241,7 +240,7 @@ func add(mgr manager.Manager, r reconcile.Reconciler) error { // former to the latter. Note that Proxy is Openshift-specific. if r.(*ReconcileHiveConfig).isOpenShift { err = c.Watch(source.Kind(mgr.GetCache(), &configv1.Proxy{}, - handler.TypedEnqueueRequestsFromMapFunc[*configv1.Proxy](mapToHiveConfig[*configv1.Proxy](r, "Proxy")))) + handler.TypedEnqueueRequestsFromMapFunc(mapToHiveConfig[*configv1.Proxy](r, "Proxy")))) if err != nil { return err } @@ -386,7 +385,7 @@ func (r *ReconcileHiveConfig) Reconcile(ctx context.Context, request reconcile.R hiveNSName := GetHiveNamespace(instance) // Initialize HiveConfig conditions if not present - newConditions, changed := util.InitializeHiveConfigConditions(instance.Status.Conditions, HiveConfigConditions) + newConditions, changed := InitializeHiveConfigConditions(instance.Status.Conditions, HiveConfigConditions) if changed { instance.Status.Conditions = newConditions hLog.Info("initializing hive controller conditions") @@ -401,22 +400,22 @@ func (r *ReconcileHiveConfig) Reconcile(ctx context.Context, request reconcile.R // can change at runtime, hence the runtime watch setup -- only if we're running on OpenShift. 
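// Dropping the explicit instantiation on TypedEnqueueRequestsFromMapFunc above is safe
// because Go infers the type parameters from the mapToHiveConfig argument. mapToHiveConfig
// itself is not shown in this diff; judging by the call sites it is a generic TypedMapFunc
// that funnels every event on the watched kind to the singleton HiveConfig, roughly
// (the body and the hiveConfigName constant are assumptions for illustration):
//
//	func mapToHiveConfig[T client.Object](r reconcile.Reconciler, kind string) handler.TypedMapFunc[T, reconcile.Request] {
//		return func(ctx context.Context, obj T) []reconcile.Request {
//			// Any change to the watched kind re-reconciles the one HiveConfig.
//			return []reconcile.Request{{NamespacedName: types.NamespacedName{Name: hiveConfigName}}}
//		}
//	}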
if r.isOpenShift { if err := r.establishConfigMapWatch(hLog, hiveNSName); err != nil { - instance.Status.Conditions = util.SetHiveConfigCondition(instance.Status.Conditions, hivev1.HiveReadyCondition, corev1.ConditionFalse, "ErrorEstablishingConfigMapWatch", err.Error()) + instance.Status.Conditions = SetHiveConfigCondition(instance.Status.Conditions, hivev1.HiveReadyCondition, corev1.ConditionFalse, "ErrorEstablishingConfigMapWatch", err.Error()) r.updateHiveConfigStatus(origHiveConfig, instance, hLog, false) return reconcile.Result{}, err } } if err := r.establishSecretWatch(hLog, hiveNSName); err != nil { - instance.Status.Conditions = util.SetHiveConfigCondition(instance.Status.Conditions, hivev1.HiveReadyCondition, corev1.ConditionFalse, "ErrorEstablishingSecretWatch", err.Error()) + instance.Status.Conditions = SetHiveConfigCondition(instance.Status.Conditions, hivev1.HiveReadyCondition, corev1.ConditionFalse, "ErrorEstablishingSecretWatch", err.Error()) r.updateHiveConfigStatus(origHiveConfig, instance, hLog, false) return reconcile.Result{}, err } - h, err := resource.NewHelperFromRESTConfig(r.restConfig, "operator", hLog) + h, err := resource.NewHelper(hLog, resource.FromRESTConfig(r.restConfig), resource.WithControllerName("operator")) if err != nil { hLog.WithError(err).Error("error creating resource helper") - instance.Status.Conditions = util.SetHiveConfigCondition(instance.Status.Conditions, hivev1.HiveReadyCondition, corev1.ConditionFalse, "ErrorCreatingResourceHelper", err.Error()) + instance.Status.Conditions = SetHiveConfigCondition(instance.Status.Conditions, hivev1.HiveReadyCondition, corev1.ConditionFalse, "ErrorCreatingResourceHelper", err.Error()) r.updateHiveConfigStatus(origHiveConfig, instance, hLog, false) return reconcile.Result{}, err } @@ -433,7 +432,7 @@ func (r *ReconcileHiveConfig) Reconcile(ctx context.Context, request reconcile.R hLog.WithField("hiveNS", hiveNSName).Debug("target namespace already exists") } else { hLog.WithError(err).Error("error creating hive target namespace") - instance.Status.Conditions = util.SetHiveConfigCondition(instance.Status.Conditions, hivev1.HiveReadyCondition, corev1.ConditionFalse, "ErrorCreatingHiveNamespace", err.Error()) + instance.Status.Conditions = SetHiveConfigCondition(instance.Status.Conditions, hivev1.HiveReadyCondition, corev1.ConditionFalse, "ErrorCreatingHiveNamespace", err.Error()) r.updateHiveConfigStatus(origHiveConfig, instance, hLog, false) return reconcile.Result{}, err } @@ -449,7 +448,7 @@ func (r *ReconcileHiveConfig) Reconcile(ctx context.Context, request reconcile.R nsList := &corev1.NamespaceList{} if err := r.List(context.TODO(), nsList, "", metav1.ListOptions{LabelSelector: fmt.Sprintf("%s=true", targetNamespaceLabel)}); err != nil { hLog.WithError(err).Error("error retrieving list of target namespaces") - instance.Status.Conditions = util.SetHiveConfigCondition(instance.Status.Conditions, hivev1.HiveReadyCondition, corev1.ConditionFalse, "ErrorListingTargetNamespaces", err.Error()) + instance.Status.Conditions = SetHiveConfigCondition(instance.Status.Conditions, hivev1.HiveReadyCondition, corev1.ConditionFalse, "ErrorListingTargetNamespaces", err.Error()) r.updateHiveConfigStatus(origHiveConfig, instance, hLog, false) return reconcile.Result{}, err } @@ -501,7 +500,7 @@ func (r *ReconcileHiveConfig) Reconcile(ctx context.Context, request reconcile.R managedDomainsConfigHash, err := r.deployConfigMap(hLog, h, instance, managedDomainsConfigMapInfo, namespacesToClean) if err != nil { 
hLog.WithError(err).Error("error setting up managed domains") - instance.Status.Conditions = util.SetHiveConfigCondition(instance.Status.Conditions, hivev1.HiveReadyCondition, corev1.ConditionFalse, "ErrorSettingUpManagedDomains", err.Error()) + instance.Status.Conditions = SetHiveConfigCondition(instance.Status.Conditions, hivev1.HiveReadyCondition, corev1.ConditionFalse, "ErrorSettingUpManagedDomains", err.Error()) r.updateHiveConfigStatus(origHiveConfig, instance, hLog, false) return reconcile.Result{}, err } @@ -509,7 +508,7 @@ func (r *ReconcileHiveConfig) Reconcile(ctx context.Context, request reconcile.R awsPlConfigHash, err := r.deployConfigMap(hLog, h, instance, awsPrivateLinkConfigMapInfo, namespacesToClean) if err != nil { hLog.WithError(err).Error("error deploying aws privatelink configmap") - instance.Status.Conditions = util.SetHiveConfigCondition(instance.Status.Conditions, hivev1.HiveReadyCondition, corev1.ConditionFalse, "ErrorDeployingAWSPrivatelinkConfigmap", err.Error()) + instance.Status.Conditions = SetHiveConfigCondition(instance.Status.Conditions, hivev1.HiveReadyCondition, corev1.ConditionFalse, "ErrorDeployingAWSPrivatelinkConfigmap", err.Error()) r.updateHiveConfigStatus(origHiveConfig, instance, hLog, false) return reconcile.Result{}, err } @@ -517,7 +516,7 @@ func (r *ReconcileHiveConfig) Reconcile(ctx context.Context, request reconcile.R plConfigHash, err := r.deployConfigMap(hLog, h, instance, privateLinkConfigMapInfo, namespacesToClean) if err != nil { hLog.WithError(err).Error("error deploying privatelink configmap") - instance.Status.Conditions = util.SetHiveConfigCondition(instance.Status.Conditions, hivev1.HiveReadyCondition, corev1.ConditionFalse, "ErrorDeployingPrivatelinkConfigmap", err.Error()) + instance.Status.Conditions = SetHiveConfigCondition(instance.Status.Conditions, hivev1.HiveReadyCondition, corev1.ConditionFalse, "ErrorDeployingPrivatelinkConfigmap", err.Error()) r.updateHiveConfigStatus(origHiveConfig, instance, hLog, false) return reconcile.Result{}, err } @@ -525,7 +524,7 @@ func (r *ReconcileHiveConfig) Reconcile(ctx context.Context, request reconcile.R fpConfigHash, err := r.deployConfigMap(hLog, h, instance, failedProvisionConfigMapInfo, namespacesToClean) if err != nil { hLog.WithError(err).Error("error deploying failed provision configmap") - instance.Status.Conditions = util.SetHiveConfigCondition(instance.Status.Conditions, hivev1.HiveReadyCondition, corev1.ConditionFalse, "ErrorDeployingFailedProvisionConfigmap", err.Error()) + instance.Status.Conditions = SetHiveConfigCondition(instance.Status.Conditions, hivev1.HiveReadyCondition, corev1.ConditionFalse, "ErrorDeployingFailedProvisionConfigmap", err.Error()) r.updateHiveConfigStatus(origHiveConfig, instance, hLog, false) return reconcile.Result{}, err } @@ -533,7 +532,7 @@ func (r *ReconcileHiveConfig) Reconcile(ctx context.Context, request reconcile.R mcConfigHash, err := r.deployConfigMap(hLog, h, instance, metricsConfigConfigMapInfo, namespacesToClean) if err != nil { hLog.WithError(err).Error("error deploying metrics config configmap") - instance.Status.Conditions = util.SetHiveConfigCondition(instance.Status.Conditions, hivev1.HiveReadyCondition, corev1.ConditionFalse, "ErrorDeployingMetricsConfigConfigmap", err.Error()) + instance.Status.Conditions = SetHiveConfigCondition(instance.Status.Conditions, hivev1.HiveReadyCondition, corev1.ConditionFalse, "ErrorDeployingMetricsConfigConfigmap", err.Error()) r.updateHiveConfigStatus(origHiveConfig, instance, hLog, false) return 
reconcile.Result{}, err } @@ -548,7 +547,7 @@ func (r *ReconcileHiveConfig) Reconcile(ctx context.Context, request reconcile.R confighash, err := r.deployConfigMap(hLog, h, instance, hiveControllersConfigMapInfo, namespacesToClean) if err != nil { hLog.WithError(err).Error("error deploying controllers configmap") - instance.Status.Conditions = util.SetHiveConfigCondition(instance.Status.Conditions, hivev1.HiveReadyCondition, corev1.ConditionFalse, "ErrorDeployingControllersConfigmap", err.Error()) + instance.Status.Conditions = SetHiveConfigCondition(instance.Status.Conditions, hivev1.HiveReadyCondition, corev1.ConditionFalse, "ErrorDeployingControllersConfigmap", err.Error()) r.updateHiveConfigStatus(origHiveConfig, instance, hLog, false) return reconcile.Result{}, err } @@ -559,7 +558,7 @@ func (r *ReconcileHiveConfig) Reconcile(ctx context.Context, request reconcile.R fgConfigHash, err := r.deployConfigMap(hLog, h, instance, featureGatesConfigMapInfo, namespacesToClean) if err != nil { hLog.WithError(err).Error("error deploying feature gates configmap") - instance.Status.Conditions = util.SetHiveConfigCondition(instance.Status.Conditions, hivev1.HiveReadyCondition, corev1.ConditionFalse, "ErrorDeployingFeatureGatesConfigmap", err.Error()) + instance.Status.Conditions = SetHiveConfigCondition(instance.Status.Conditions, hivev1.HiveReadyCondition, corev1.ConditionFalse, "ErrorDeployingFeatureGatesConfigmap", err.Error()) r.updateHiveConfigStatus(origHiveConfig, instance, hLog, false) return reconcile.Result{}, err } @@ -567,7 +566,7 @@ func (r *ReconcileHiveConfig) Reconcile(ctx context.Context, request reconcile.R err = r.deployHive(hLog, h, instance, namespacesToClean, confighash, managedDomainsConfigHash, fpConfigHash, mcConfigHash, scConfigHash) if err != nil { hLog.WithError(err).Error("error deploying Hive") - instance.Status.Conditions = util.SetHiveConfigCondition(instance.Status.Conditions, hivev1.HiveReadyCondition, corev1.ConditionFalse, "ErrorDeployingHive", err.Error()) + instance.Status.Conditions = SetHiveConfigCondition(instance.Status.Conditions, hivev1.HiveReadyCondition, corev1.ConditionFalse, "ErrorDeployingHive", err.Error()) r.updateHiveConfigStatus(origHiveConfig, instance, hLog, false) return reconcile.Result{}, err } @@ -575,14 +574,14 @@ func (r *ReconcileHiveConfig) Reconcile(ctx context.Context, request reconcile.R err = r.deployStatefulSet(clusterSyncCfg, hLog, h, instance, confighash, namespacesToClean) if err != nil { hLog.WithError(err).Error("error deploying ClusterSync") - instance.Status.Conditions = util.SetHiveConfigCondition(instance.Status.Conditions, hivev1.HiveReadyCondition, corev1.ConditionFalse, "ErrorDeployingClusterSync", err.Error()) + instance.Status.Conditions = SetHiveConfigCondition(instance.Status.Conditions, hivev1.HiveReadyCondition, corev1.ConditionFalse, "ErrorDeployingClusterSync", err.Error()) r.updateHiveConfigStatus(origHiveConfig, instance, hLog, false) return reconcile.Result{}, err } err = r.deployStatefulSet(machinePoolCfg, hLog, h, instance, confighash, namespacesToClean) if err != nil { hLog.WithError(err).Error("error deploying MachinePool") - instance.Status.Conditions = util.SetHiveConfigCondition(instance.Status.Conditions, hivev1.HiveReadyCondition, corev1.ConditionFalse, "ErrorDeployingMachinePool", err.Error()) + instance.Status.Conditions = SetHiveConfigCondition(instance.Status.Conditions, hivev1.HiveReadyCondition, corev1.ConditionFalse, "ErrorDeployingMachinePool", err.Error()) 
r.updateHiveConfigStatus(origHiveConfig, instance, hLog, false) return reconcile.Result{}, err } @@ -595,7 +594,7 @@ func (r *ReconcileHiveConfig) Reconcile(ctx context.Context, request reconcile.R err = r.deployHiveAdmission(hLog, h, instance, namespacesToClean, managedDomainsConfigHash, fgConfigHash, awsPlConfigHash, plConfigHash, scConfigHash) if err != nil { hLog.WithError(err).Error("error deploying HiveAdmission") - instance.Status.Conditions = util.SetHiveConfigCondition(instance.Status.Conditions, hivev1.HiveReadyCondition, corev1.ConditionFalse, "ErrorDeployingHiveAdmission", err.Error()) + instance.Status.Conditions = SetHiveConfigCondition(instance.Status.Conditions, hivev1.HiveReadyCondition, corev1.ConditionFalse, "ErrorDeployingHiveAdmission", err.Error()) r.updateHiveConfigStatus(origHiveConfig, instance, hLog, false) return reconcile.Result{}, err } @@ -615,7 +614,7 @@ func (r *ReconcileHiveConfig) Reconcile(ctx context.Context, request reconcile.R } } - instance.Status.Conditions = util.SetHiveConfigCondition(instance.Status.Conditions, hivev1.HiveReadyCondition, corev1.ConditionTrue, "DeploymentSuccess", "Hive is deployed successfully") + instance.Status.Conditions = SetHiveConfigCondition(instance.Status.Conditions, hivev1.HiveReadyCondition, corev1.ConditionTrue, "DeploymentSuccess", "Hive is deployed successfully") if err := r.updateHiveConfigStatus(origHiveConfig, instance, hLog, true); err != nil { return reconcile.Result{}, err } diff --git a/pkg/operator/hive/hiveadmission.go b/pkg/operator/hive/hiveadmission.go index 1030af6e759..c80d61cff1c 100644 --- a/pkg/operator/hive/hiveadmission.go +++ b/pkg/operator/hive/hiveadmission.go @@ -11,9 +11,7 @@ import ( hiveconstants "github.com/openshift/hive/pkg/constants" controllerutils "github.com/openshift/hive/pkg/controller/utils" "github.com/openshift/hive/pkg/operator/assets" - "github.com/openshift/hive/pkg/operator/util" "github.com/openshift/hive/pkg/resource" - "github.com/openshift/hive/pkg/util/scheme" "github.com/openshift/library-go/pkg/operator/resource/resourceread" @@ -72,7 +70,7 @@ func (r *ReconcileHiveConfig) deployHiveAdmission(hLog log.FieldLogger, h resour for _, asset := range assetsToClean { hLog.Infof("Deleting asset %s from old target namespace %s", asset, ns) // DeleteAssetWithNSOverride already no-ops for IsNotFound - if err := util.DeleteAssetByPathWithNSOverride(h, asset, ns, instance); err != nil { + if err := deleteAssetByPathWithNSOverride(h, asset, ns, instance); err != nil { return errors.Wrapf(err, "error deleting asset %s from old target namespace %s", asset, ns) } } @@ -82,7 +80,7 @@ func (r *ReconcileHiveConfig) deployHiveAdmission(hLog log.FieldLogger, h resour // Load namespaced assets, decode them, set to our target namespace, and apply: for _, assetPath := range namespacedAssets { - if _, err := util.ApplyRuntimeObject(h, util.FromAssetPath(assetPath), hLog, util.WithNamespaceOverride(hiveNSName), util.WithGarbageCollection(instance)); err != nil { + if _, err := applyRuntimeObject(h, fromAssetPath(assetPath), hLog, withNamespaceOverride(hiveNSName), withGarbageCollection(instance)); err != nil { hLog.WithError(err).Error("error applying object with namespace override") return err } @@ -94,7 +92,7 @@ func (r *ReconcileHiveConfig) deployHiveAdmission(hLog log.FieldLogger, h resour "config/hiveadmission/hiveadmission_rbac_role.yaml", } for _, a := range applyAssets { - if _, err := util.ApplyRuntimeObject(h, util.FromAssetPath(a), hLog, util.WithGarbageCollection(instance)); err != 
nil { + if _, err := applyRuntimeObject(h, fromAssetPath(a), hLog, withGarbageCollection(instance)); err != nil { return err } } @@ -104,7 +102,7 @@ func (r *ReconcileHiveConfig) deployHiveAdmission(hLog log.FieldLogger, h resour "config/hiveadmission/hiveadmission_rbac_role_binding.yaml", } for _, crbAsset := range clusterRoleBindingAssets { - if _, err := util.ApplyRuntimeObject(h, util.CRBFromAssetPath(crbAsset), hLog, util.CRBWithSubjectNSOverride(hiveNSName), util.WithGarbageCollection(instance)); err != nil { + if _, err := applyRuntimeObject(h, crbFromAssetPath(crbAsset), hLog, crbWithSubjectNSOverride(hiveNSName), withGarbageCollection(instance)); err != nil { hLog.WithError(err).Error("error applying ClusterRoleBinding with namespace override") return err } @@ -152,18 +150,15 @@ func (r *ReconcileHiveConfig) deployHiveAdmission(hLog log.FieldLogger, h resour addConfigVolume(&hiveAdmDeployment.Spec.Template.Spec, r.supportedContractsConfigMapInfo(hLog), hiveAdmContainer) addReleaseImageVerificationConfigMapEnv(hiveAdmContainer, instance) - scheme := scheme.GetScheme() - validatingWebhooks := make([]*admregv1.ValidatingWebhookConfiguration, len(webhookAssets)) for i, yaml := range webhookAssets { - asset = assets.MustAsset(yaml) - wh := util.ReadValidatingWebhookConfigurationV1OrDie(asset, scheme) - validatingWebhooks[i] = wh + validatingWebhooks[i] = readRuntimeObjectOrDie[*admregv1.ValidatingWebhookConfiguration]( + admregv1.SchemeGroupVersion, assets.MustAsset(yaml)) } hLog.Debug("reading apiservice") - asset = assets.MustAsset("config/hiveadmission/apiservice.yaml") - apiService := util.ReadAPIServiceV1Beta1OrDie(asset, scheme) + apiService := readRuntimeObjectOrDie[*apiregistrationv1.APIService]( + apiregistrationv1.SchemeGroupVersion, assets.MustAsset("config/hiveadmission/apiservice.yaml")) apiService.Spec.Service.Namespace = hiveNSName err = r.injectCerts(apiService, validatingWebhooks, nil, hiveNSName, hLog) @@ -193,14 +188,14 @@ func (r *ReconcileHiveConfig) deployHiveAdmission(hLog log.FieldLogger, h resour hiveAdmDeployment.Spec.Template.Spec.ImagePullSecrets = append(hiveAdmDeployment.Spec.Template.Spec.ImagePullSecrets, *ref) } - result, err := util.ApplyRuntimeObject(h, util.Passthrough(hiveAdmDeployment), hLog, util.WithGarbageCollection(instance)) + result, err := applyRuntimeObject(h, passthrough(hiveAdmDeployment), hLog, withGarbageCollection(instance)) if err != nil { hLog.WithError(err).Error("error applying deployment") return err } hLog.WithField("result", result).Info("hiveadmission deployment applied") - result, err = util.ApplyRuntimeObject(h, util.Passthrough(apiService), hLog, util.WithGarbageCollection(instance)) + result, err = applyRuntimeObject(h, passthrough(apiService), hLog, withGarbageCollection(instance)) if err != nil { hLog.WithError(err).Error("error applying apiservice") return err @@ -208,7 +203,7 @@ func (r *ReconcileHiveConfig) deployHiveAdmission(hLog log.FieldLogger, h resour hLog.Infof("apiservice applied (%s)", result) for _, webhook := range validatingWebhooks { - result, err = util.ApplyRuntimeObject(h, util.Passthrough(webhook), hLog, util.WithGarbageCollection(instance)) + result, err = applyRuntimeObject(h, passthrough(webhook), hLog, withGarbageCollection(instance)) if err != nil { hLog.WithField("webhook", webhook.Name).WithError(err).Errorf("error applying validating webhook") return err diff --git a/pkg/operator/hive/operatorutils.go b/pkg/operator/hive/operatorutils.go index f379fafa69e..e93fe0100d3 100644 --- 
a/pkg/operator/hive/operatorutils.go +++ b/pkg/operator/hive/operatorutils.go @@ -11,14 +11,19 @@ import ( corev1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/runtime/serializer" "k8s.io/client-go/dynamic" hivev1 "github.com/openshift/hive/apis/hive/v1" "github.com/openshift/hive/pkg/constants" controllerutils "github.com/openshift/hive/pkg/controller/utils" + "github.com/openshift/hive/pkg/util/scheme" ) +var appsCodecs = serializer.NewCodecFactory(scheme.GetScheme()) + func GetHiveNamespace(config *hivev1.HiveConfig) string { if config.Spec.TargetNamespace == "" { return constants.DefaultHiveNamespace @@ -46,7 +51,7 @@ type gvrNSName struct { name string } -func computeHash(data interface{}, additionalHashes ...string) string { +func computeHash(data any, additionalHashes ...string) string { hasher := md5.New() hasher.Write([]byte(fmt.Sprintf("%v", data))) for _, h := range additionalHashes { @@ -95,3 +100,12 @@ func getImagePullSecretReference(config *hivev1.HiveConfig) *corev1.LocalObjectR } return nil } + +func readRuntimeObjectOrDie[T any](sgv schema.GroupVersion, objBytes []byte) T { + requiredObj, err := runtime.Decode(appsCodecs.UniversalDecoder(sgv), objBytes) + if err != nil { + panic(err) + } + return requiredObj.(T) + +} diff --git a/pkg/operator/hive/sharded_controllers.go b/pkg/operator/hive/sharded_controllers.go index 9422fcf869c..861dc0df14b 100644 --- a/pkg/operator/hive/sharded_controllers.go +++ b/pkg/operator/hive/sharded_controllers.go @@ -16,7 +16,6 @@ import ( "github.com/openshift/hive/pkg/controller/images" controllerutils "github.com/openshift/hive/pkg/controller/utils" "github.com/openshift/hive/pkg/operator/assets" - "github.com/openshift/hive/pkg/operator/util" "github.com/openshift/hive/pkg/resource" ) @@ -107,7 +106,7 @@ func (r *ReconcileHiveConfig) deployStatefulSet(c ssCfg, hLog log.FieldLogger, h for _, a := range assetsToClean { hLog.Infof("Deleting asset %s from old target namespace %s", a.path, ns) // DeleteAsset*WithNSOverride already no-ops for IsNotFound - if err := util.DeleteAssetBytesWithNSOverride(h, a.processed, ns, hiveconfig); err != nil { + if err := deleteAssetBytesWithNSOverride(h, a.processed, ns, hiveconfig); err != nil { return errors.Wrapf(err, "error deleting asset %s from old target namespace %s", a.path, ns) } } @@ -115,7 +114,8 @@ func (r *ReconcileHiveConfig) deployStatefulSet(c ssCfg, hLog log.FieldLogger, h hLog.Debug("reading statefulset") // Safe to ignore error because we processed this asset above - newStatefulSet := controllerutils.ReadStatefulsetOrDie(ssAsset.processed) + + newStatefulSet := readRuntimeObjectOrDie[*appsv1.StatefulSet](appsv1.SchemeGroupVersion, ssAsset.processed) container, err := containerByName(&newStatefulSet.Spec.Template.Spec, string(c.name)) if err != nil { return err @@ -157,7 +157,7 @@ func (r *ReconcileHiveConfig) deployStatefulSet(c ssCfg, hLog log.FieldLogger, h // Load namespaced assets, decode them, set to our target namespace, and apply: for _, a := range namespacedAssets { - if _, err := util.ApplyRuntimeObject(h, util.FromBytes(a.processed), hLog, util.WithNamespaceOverride(hiveNSName), util.WithGarbageCollection(hiveconfig)); err != nil { + if _, err := applyRuntimeObject(h, fromBytes(a.processed), hLog, withNamespaceOverride(hiveNSName), withGarbageCollection(hiveconfig)); err != nil { 
hLog.WithError(err).WithField("asset", a.path).Error("error applying object with namespace override") return err } @@ -243,7 +243,7 @@ func (r *ReconcileHiveConfig) deployStatefulSet(c ssCfg, hLog log.FieldLogger, h } newStatefulSet.Namespace = hiveNSName - result, err := util.ApplyRuntimeObject(h, util.Passthrough(newStatefulSet), hLog, util.WithGarbageCollection(hiveconfig)) + result, err := applyRuntimeObject(h, passthrough(newStatefulSet), hLog, withGarbageCollection(hiveconfig)) if err != nil { hLog.WithError(err).Error("error applying statefulset") return err diff --git a/pkg/operator/util/admissionregistration.go b/pkg/operator/util/admissionregistration.go deleted file mode 100644 index 5c04c4508b1..00000000000 --- a/pkg/operator/util/admissionregistration.go +++ /dev/null @@ -1,19 +0,0 @@ -package util - -import ( - admregv1 "k8s.io/api/admissionregistration/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/serializer" -) - -// ReadValidatingWebhookConfigurationV1OrDie reads a ValidatingWebhookConfiguration, -// as this is not yet added to library-go. -func ReadValidatingWebhookConfigurationV1OrDie(objBytes []byte, scheme *runtime.Scheme) *admregv1.ValidatingWebhookConfiguration { - apiExtensionsCodecs := serializer.NewCodecFactory(scheme) - - requiredObj, err := runtime.Decode(apiExtensionsCodecs.UniversalDecoder(admregv1.SchemeGroupVersion), objBytes) - if err != nil { - panic(err) - } - return requiredObj.(*admregv1.ValidatingWebhookConfiguration) -} diff --git a/pkg/operator/util/apiregistration.go b/pkg/operator/util/apiregistration.go deleted file mode 100644 index 268c6356c0c..00000000000 --- a/pkg/operator/util/apiregistration.go +++ /dev/null @@ -1,18 +0,0 @@ -package util - -import ( - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/serializer" - apiregistrationv1 "k8s.io/kube-aggregator/pkg/apis/apiregistration/v1" -) - -// ReadAPIServiceV1Beta1OrDie reads an APIService, as this is not yet added to library-go. 
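// Both read-or-die helpers deleted here (ReadValidatingWebhookConfigurationV1OrDie above,
// ReadAPIServiceV1Beta1OrDie below) are subsumed by the generic readRuntimeObjectOrDie
// added in operatorutils.go: one shared codec factory, with the concrete type supplied as
// a type parameter. The replacement call sites from this PR:
//
//	wh := readRuntimeObjectOrDie[*admregv1.ValidatingWebhookConfiguration](
//		admregv1.SchemeGroupVersion, assets.MustAsset(yaml))
//	apiService := readRuntimeObjectOrDie[*apiregistrationv1.APIService](
//		apiregistrationv1.SchemeGroupVersion, assets.MustAsset("config/hiveadmission/apiservice.yaml"))
//
// Note the GroupVersion/type pairing is unchecked at compile time: decoding is driven by
// the GroupVersion, and the trailing type assertion panics on a mismatch.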
-func ReadAPIServiceV1Beta1OrDie(objBytes []byte, scheme *runtime.Scheme) *apiregistrationv1.APIService { - apiExtensionsCodecs := serializer.NewCodecFactory(scheme) - - requiredObj, err := runtime.Decode(apiExtensionsCodecs.UniversalDecoder(apiregistrationv1.SchemeGroupVersion), objBytes) - if err != nil { - panic(err) - } - return requiredObj.(*apiregistrationv1.APIService) -} diff --git a/pkg/remoteclient/remoteclient.go b/pkg/remoteclient/remoteclient.go index c9d3618456e..b98843dc6eb 100644 --- a/pkg/remoteclient/remoteclient.go +++ b/pkg/remoteclient/remoteclient.go @@ -82,13 +82,13 @@ func ConnectToRemoteCluster( localClient client.Client, logger log.FieldLogger, ) (remoteClient client.Client, unreachable, requeue bool) { - var rawRemoteClient interface{} + var rawRemoteClient any rawRemoteClient, unreachable, requeue = connectToRemoteCluster( cd, remoteClientBuilder, localClient, logger, - func(builder Builder) (interface{}, error) { return builder.Build() }, + func(builder Builder) (any, error) { return builder.Build() }, ) if unreachable { return @@ -102,8 +102,8 @@ func connectToRemoteCluster( remoteClientBuilder Builder, localClient client.Client, logger log.FieldLogger, - buildFunc func(builder Builder) (interface{}, error), -) (remoteClient interface{}, unreachable, requeue bool) { + buildFunc func(builder Builder) (any, error), +) (remoteClient any, unreachable, requeue bool) { if u, _ := Unreachable(cd); u { logger.Debug("skipping cluster with unreachable condition") unreachable = true diff --git a/pkg/resource/helper.go b/pkg/resource/helper.go index 560494ca3f3..153dfc989bf 100644 --- a/pkg/resource/helper.go +++ b/pkg/resource/helper.go @@ -50,6 +50,34 @@ type helper struct { openAPISchema openapi.Resources } +type HelperOpt func(*helper) + +func FromRESTConfig(restConfig *rest.Config) HelperOpt { + return func(h *helper) { + h.restConfig = restConfig + h.getFactory = h.getRESTConfigFactory + } +} + +func FromKubeconfig(kubeconfig []byte) HelperOpt { + return func(h *helper) { + h.kubeconfig = kubeconfig + h.getFactory = h.getKubeconfigFactory + } +} + +func WithMetrics() HelperOpt { + return func(h *helper) { + h.metricsEnabled = true + } +} + +func WithControllerName(cn hivev1.ControllerName) HelperOpt { + return func(h *helper) { + h.controllerName = cn + } +} + // cacheOpenAPISchema builds the very expensive OpenAPISchema (>3s commonly) once, and stores // the resulting schema on the helper for re-use, particularly in Apply when run many times against // one cluster. @@ -65,46 +93,20 @@ func (r *helper) cacheOpenAPISchema() error { return nil } -// NewHelperFromRESTConfig returns a new object that allows apply and patch operations -func NewHelperFromRESTConfig(restConfig *rest.Config, controllerName hivev1.ControllerName, logger log.FieldLogger) (Helper, error) { - r := &helper{ - logger: logger, - cacheDir: getCacheDir(logger), - restConfig: restConfig, - controllerName: controllerName, - } - r.getFactory = r.getRESTConfigFactory - err := r.cacheOpenAPISchema() - return r, err -} - -// NewHelperWithMetricsFromRESTConfig returns a new object that allows apply and patch operations, with metrics tracking enabled. 
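// With the HelperOpt functional options introduced above, each retired constructor becomes
// a composition over the single NewHelper. Equivalents (the REST-config form appears
// verbatim elsewhere in this PR; the metrics variant is inferred from the option set):
//
//	// was NewHelperFromRESTConfig(cfg, name, logger):
//	resource.NewHelper(logger, resource.FromRESTConfig(cfg), resource.WithControllerName(name))
//	// was NewHelperWithMetricsFromRESTConfig(cfg, name, logger):
//	resource.NewHelper(logger, resource.FromRESTConfig(cfg), resource.WithControllerName(name), resource.WithMetrics())
//	// was NewHelper(kubeconfig, logger):
//	resource.NewHelper(logger, resource.FromKubeconfig(kubeconfig))
//
// Exactly one of FromRESTConfig/FromKubeconfig must be supplied, since only those options
// set the helper's getFactory; NewHelper itself no longer defaults it.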
-func NewHelperWithMetricsFromRESTConfig(restConfig *rest.Config, controllerName hivev1.ControllerName, logger log.FieldLogger) (Helper, error) { - r := &helper{ - logger: logger, - metricsEnabled: true, - controllerName: controllerName, - cacheDir: getCacheDir(logger), - restConfig: restConfig, - } - r.getFactory = r.getRESTConfigFactory - err := r.cacheOpenAPISchema() - return r, err -} - // NewHelper returns a new object that allows apply and patch operations -func NewHelper(kubeconfig []byte, logger log.FieldLogger) (Helper, error) { +func NewHelper(logger log.FieldLogger, opts ...HelperOpt) (Helper, error) { r := &helper{ - logger: logger, - cacheDir: getCacheDir(logger), - kubeconfig: kubeconfig, + logger: logger, + cacheDir: getCacheDir(), + } + for _, o := range opts { + o(r) } - r.getFactory = r.getKubeconfigFactory err := r.cacheOpenAPISchema() return r, err } -func getCacheDir(logger log.FieldLogger) string { +func getCacheDir() string { if envCacheDir := os.Getenv(cacheDirEnvKey); len(envCacheDir) > 0 { return envCacheDir } diff --git a/pkg/test/assert/assertions.go b/pkg/test/assert/assertions.go index 924752d7aef..d798fa12213 100644 --- a/pkg/test/assert/assertions.go +++ b/pkg/test/assert/assertions.go @@ -19,7 +19,7 @@ import ( // BetweenTimes asserts that the time is within the time window, inclusive of the start and end times. // // assert.BetweenTimes(t, time.Now(), time.Now().Add(-10*time.Second), time.Now().Add(10*time.Second)) -func BetweenTimes(t *testing.T, actual, startTime, endTime time.Time, msgAndArgs ...interface{}) bool { +func BetweenTimes(t *testing.T, actual, startTime, endTime time.Time, msgAndArgs ...any) bool { if actual.Before(startTime) { return testifyassert.Fail(t, fmt.Sprintf("Actual time %v is before start time %v", actual, startTime), msgAndArgs...) 
} diff --git a/pkg/test/clusterprovision/clusterprovision.go b/pkg/test/clusterprovision/clusterprovision.go index 3f4d7bdf4e8..84a8a4d465e 100644 --- a/pkg/test/clusterprovision/clusterprovision.go +++ b/pkg/test/clusterprovision/clusterprovision.go @@ -6,7 +6,7 @@ import ( corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/utils/pointer" + "k8s.io/utils/ptr" hivev1 "github.com/openshift/hive/apis/hive/v1" "github.com/openshift/hive/pkg/constants" @@ -99,8 +99,8 @@ func WithStage(stage hivev1.ClusterProvisionStage) Option { func Successful(clusterID, infraID, kubeconfigSecretName, passwordSecretName string) Option { return func(clusterProvision *hivev1.ClusterProvision) { clusterProvision.Spec.Stage = hivev1.ClusterProvisionStageComplete - clusterProvision.Spec.ClusterID = pointer.String(clusterID) - clusterProvision.Spec.InfraID = pointer.String(infraID) + clusterProvision.Spec.ClusterID = ptr.To(clusterID) + clusterProvision.Spec.InfraID = ptr.To(infraID) clusterProvision.Spec.AdminKubeconfigSecretRef = &corev1.LocalObjectReference{Name: kubeconfigSecretName} clusterProvision.Spec.AdminPasswordSecretRef = &corev1.LocalObjectReference{Name: passwordSecretName} } diff --git a/pkg/test/machinepool/machinepool.go b/pkg/test/machinepool/machinepool.go index bbf4d0455ac..17598415969 100644 --- a/pkg/test/machinepool/machinepool.go +++ b/pkg/test/machinepool/machinepool.go @@ -5,7 +5,6 @@ import ( corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/runtime" - "k8s.io/utils/pointer" "k8s.io/utils/ptr" hivev1 "github.com/openshift/hive/apis/hive/v1" @@ -153,7 +152,7 @@ func WithAnnotations(annotations map[string]string) Option { func WithReplicas(replicas int64) Option { return func(mp *hivev1.MachinePool) { - mp.Spec.Replicas = pointer.Int64(replicas) + mp.Spec.Replicas = ptr.To(replicas) } } diff --git a/pkg/test/persistentvolumeclaim/persistentvolumeclaim.go b/pkg/test/persistentvolumeclaim/persistentvolumeclaim.go deleted file mode 100644 index f674047200e..00000000000 --- a/pkg/test/persistentvolumeclaim/persistentvolumeclaim.go +++ /dev/null @@ -1,72 +0,0 @@ -package persistentvolumeclaim - -import ( - corev1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/runtime" - - "github.com/openshift/hive/pkg/test/generic" -) - -// Option defines a function signature for any function that wants to be passed into Build -type Option func(*corev1.PersistentVolumeClaim) - -// Build runs each of the functions passed in to generate the object. -func Build(opts ...Option) *corev1.PersistentVolumeClaim { - retval := &corev1.PersistentVolumeClaim{} - for _, o := range opts { - o(retval) - } - - return retval -} - -type Builder interface { - Build(opts ...Option) *corev1.PersistentVolumeClaim - - Options(opts ...Option) Builder - - GenericOptions(opts ...generic.Option) Builder -} - -func BasicBuilder() Builder { - return &builder{} -} - -func FullBuilder(namespace, name string, typer runtime.ObjectTyper) Builder { - b := &builder{} - return b.GenericOptions( - generic.WithTypeMeta(typer), - generic.WithResourceVersion("1"), - generic.WithNamespace(namespace), - generic.WithName(name), - ) -} - -type builder struct { - options []Option -} - -func (b *builder) Build(opts ...Option) *corev1.PersistentVolumeClaim { - return Build(append(b.options, opts...)...) 
-} - -func (b *builder) Options(opts ...Option) Builder { - return &builder{ - options: append(b.options, opts...), - } -} - -func (b *builder) GenericOptions(opts ...generic.Option) Builder { - options := make([]Option, len(opts)) - for i, o := range opts { - options[i] = Generic(o) - } - return b.Options(options...) -} - -// Generic allows common functions applicable to all objects to be used as Options to Build -func Generic(opt generic.Option) Option { - return func(checkpoint *corev1.PersistentVolumeClaim) { - opt(checkpoint) - } -} diff --git a/pkg/test/statefulset/statefulset.go b/pkg/test/statefulset/statefulset.go index 2b1be8d9a30..c252e61013e 100644 --- a/pkg/test/statefulset/statefulset.go +++ b/pkg/test/statefulset/statefulset.go @@ -1,12 +1,12 @@ package statefulset import ( + appsv1 "k8s.io/api/apps/v1" "k8s.io/apimachinery/pkg/runtime" - "k8s.io/utils/pointer" + "k8s.io/utils/ptr" hivev1 "github.com/openshift/hive/apis/hive/v1" "github.com/openshift/hive/pkg/test/generic" - appsv1 "k8s.io/api/apps/v1" ) // Option defines a function signature for any function that wants to be passed into Build @@ -86,7 +86,7 @@ func WithNamespace(namespace string) Option { // WithReplicas sets the spec.Replicas field when building an object with Build. func WithReplicas(replicas int32) Option { return func(statefulset *appsv1.StatefulSet) { - statefulset.Spec.Replicas = pointer.Int32Ptr(replicas) + statefulset.Spec.Replicas = ptr.To(replicas) } } diff --git a/pkg/util/annotations/annotations.go b/pkg/util/annotations/annotations.go deleted file mode 100644 index e239dc24fc2..00000000000 --- a/pkg/util/annotations/annotations.go +++ /dev/null @@ -1,14 +0,0 @@ -package annotations - -// AddAnnotation returns a map with the given key and value added to the given map. -func AddAnnotation(annotations map[string]string, annotationKey, annotationValue string) map[string]string { - if annotationKey == "" { - // Don't need to add an annotation. 
- return annotations - } - if annotations == nil { - annotations = make(map[string]string) - } - annotations[annotationKey] = annotationValue - return annotations -} diff --git a/pkg/util/logrus/logr.go b/pkg/util/logrus/logr.go index 385f0165c22..1a1c57dfc02 100644 --- a/pkg/util/logrus/logr.go +++ b/pkg/util/logrus/logr.go @@ -21,12 +21,12 @@ func NewLogr(logger log.FieldLogger) logr.Logger { func (lgr) Init(logr.RuntimeInfo) {} // Info implements logr.LogSink -func (l lgr) Info(level int, msg string, keyAndValues ...interface{}) { +func (l lgr) Info(level int, msg string, keyAndValues ...any) { l.logger.WithFields(keyAndValuesToFields(keyAndValues...)).Debug(msg) } // Error implements logr.LogSink -func (l lgr) Error(err error, msg string, keyAndValues ...interface{}) { +func (l lgr) Error(err error, msg string, keyAndValues ...any) { l.logger.WithError(err).WithFields(keyAndValuesToFields(keyAndValues...)).Error(msg) } @@ -41,11 +41,11 @@ func (l lgr) WithName(name string) logr.LogSink { } // WithValues implements logr.LogSink -func (l lgr) WithValues(keyAndValues ...interface{}) logr.LogSink { +func (l lgr) WithValues(keyAndValues ...any) logr.LogSink { return lgr{logger: l.logger.WithFields(keyAndValuesToFields(keyAndValues...))} } -func keyAndValuesToFields(keyAndValues ...interface{}) log.Fields { +func keyAndValuesToFields(keyAndValues ...any) log.Fields { fields := log.Fields{} for idx := 0; idx < len(keyAndValues); { fields[keyAndValues[idx].(string)] = "" diff --git a/pkg/util/logrus/logr_test.go b/pkg/util/logrus/logr_test.go index 6588fb300a5..6a8351f2d01 100644 --- a/pkg/util/logrus/logr_test.go +++ b/pkg/util/logrus/logr_test.go @@ -12,22 +12,22 @@ import ( func Test_keyAndValuesToFields(t *testing.T) { cases := []struct { - input []interface{} + input []any output log.Fields }{{ input: nil, output: log.Fields{}, }, { - input: []interface{}{}, + input: []any{}, output: log.Fields{}, }, { - input: []interface{}{"key1", "value1", "key2", 1, "key3", 3.0, "key4", []int{1, 2}}, + input: []any{"key1", "value1", "key2", 1, "key3", 3.0, "key4", []int{1, 2}}, output: log.Fields{"key1": "value1", "key2": 1, "key3": 3.0, "key4": []int{1, 2}}, }, { - input: []interface{}{"key1"}, + input: []any{"key1"}, output: log.Fields{"key1": ""}, }, { - input: []interface{}{"key1", "value1", "key2"}, + input: []any{"key1", "value1", "key2"}, output: log.Fields{"key1": "value1", "key2": ""}, }} for _, test := range cases { diff --git a/pkg/validating-webhooks/hive/v1/clusterdeployment_validating_admission_hook.go b/pkg/validating-webhooks/hive/v1/clusterdeployment_validating_admission_hook.go index 706139efae9..5c328cd44d0 100644 --- a/pkg/validating-webhooks/hive/v1/clusterdeployment_validating_admission_hook.go +++ b/pkg/validating-webhooks/hive/v1/clusterdeployment_validating_admission_hook.go @@ -205,6 +205,18 @@ func (a *ClusterDeploymentValidatingAdmissionHook) shouldValidate(admissionSpec return true } +func creationHooksDisabled(o metav1.Object) bool { + v, ok := o.GetLabels()[constants.DisableCreationWebHookForDisasterRecovery] + if !ok { + return false + } + b, err := strconv.ParseBool(v) + if err != nil { + return false + } + return b +} + // validateCreate specifically validates create operations for ClusterDeployment objects. 
func (a *ClusterDeploymentValidatingAdmissionHook) validateCreate(admissionSpec *admissionv1beta1.AdmissionRequest) *admissionv1beta1.AdmissionResponse { contextLogger := log.WithFields(log.Fields{ diff --git a/pkg/validating-webhooks/hive/v1/clusterdeployment_validating_admission_hook_test.go b/pkg/validating-webhooks/hive/v1/clusterdeployment_validating_admission_hook_test.go index 40501008da3..656e7dcda87 100644 --- a/pkg/validating-webhooks/hive/v1/clusterdeployment_validating_admission_hook_test.go +++ b/pkg/validating-webhooks/hive/v1/clusterdeployment_validating_admission_hook_test.go @@ -198,7 +198,7 @@ func validClusterDeploymentDifferentMutableValue() *hivev1.ClusterDeployment { func TestClusterDeploymentValidatingResource(t *testing.T) { // Arrange - data := NewClusterDeploymentValidatingAdmissionHook(*createDecoder(t)) + data := NewClusterDeploymentValidatingAdmissionHook(*createDecoder()) expectedPlural := schema.GroupVersionResource{ Group: "admission.hive.openshift.io", Version: "v1", @@ -216,7 +216,7 @@ func TestClusterDeploymentValidatingResource(t *testing.T) { func TestClusterDeploymentInitialize(t *testing.T) { // Arrange - data := NewClusterDeploymentValidatingAdmissionHook(*createDecoder(t)) + data := NewClusterDeploymentValidatingAdmissionHook(*createDecoder()) // Act err := data.Initialize(nil, nil) @@ -1790,7 +1790,7 @@ func TestClusterDeploymentValidate(t *testing.T) { t.Run(tc.name, func(t *testing.T) { // Arrange data := ClusterDeploymentValidatingAdmissionHook{ - decoder: *createDecoder(t), + decoder: *createDecoder(), validManagedDomains: validTestManagedDomains, fs: &featureSet{ FeatureGatesEnabled: &hivev1.FeatureGatesEnabled{ @@ -1882,6 +1882,6 @@ func TestNewClusterDeploymentValidatingAdmissionHook(t *testing.T) { t.Fatalf("unexpected: %v", err) } os.Setenv(constants.ManagedDomainsFileEnvVar, tempFile.Name()) - webhook := NewClusterDeploymentValidatingAdmissionHook(*createDecoder(t)) + webhook := NewClusterDeploymentValidatingAdmissionHook(*createDecoder()) assert.Equal(t, webhook.validManagedDomains, expectedDomains, "valid domains must match expected") } diff --git a/pkg/validating-webhooks/hive/v1/clusterimageset_validating_admission_hook_test.go b/pkg/validating-webhooks/hive/v1/clusterimageset_validating_admission_hook_test.go index dfce72edf20..4115c0182b5 100644 --- a/pkg/validating-webhooks/hive/v1/clusterimageset_validating_admission_hook_test.go +++ b/pkg/validating-webhooks/hive/v1/clusterimageset_validating_admission_hook_test.go @@ -14,7 +14,7 @@ import ( func TestClusterImageSetValidatingResource(t *testing.T) { // Arrange - data := NewClusterImageSetValidatingAdmissionHook(*createDecoder(t)) + data := NewClusterImageSetValidatingAdmissionHook(*createDecoder()) expectedPlural := schema.GroupVersionResource{ Group: "admission.hive.openshift.io", Version: "v1", @@ -32,7 +32,7 @@ func TestClusterImageSetValidatingResource(t *testing.T) { func TestClusterImageSetInitialize(t *testing.T) { // Arrange - data := NewClusterImageSetValidatingAdmissionHook(*createDecoder(t)) + data := NewClusterImageSetValidatingAdmissionHook(*createDecoder()) // Act err := data.Initialize(nil, nil) @@ -156,7 +156,7 @@ func TestClusterImageSetValidate(t *testing.T) { for _, tc := range cases { t.Run(tc.name, func(t *testing.T) { // Arrange - data := NewClusterImageSetValidatingAdmissionHook(*createDecoder(t)) + data := NewClusterImageSetValidatingAdmissionHook(*createDecoder()) newObject := &hivev1.ClusterImageSet{ Spec: tc.newSpec, } diff --git 
a/pkg/validating-webhooks/hive/v1/clusterpool_validating_admission_hook_test.go b/pkg/validating-webhooks/hive/v1/clusterpool_validating_admission_hook_test.go
index dba178aa950..c2fec6430b8 100644
--- a/pkg/validating-webhooks/hive/v1/clusterpool_validating_admission_hook_test.go
+++ b/pkg/validating-webhooks/hive/v1/clusterpool_validating_admission_hook_test.go
@@ -62,7 +62,7 @@ func invalidOpenStackClusterPool() *hivev1.ClusterPool {
 }
 
 func TestClusterPoolInitialize(t *testing.T) {
-	data := NewClusterPoolValidatingAdmissionHook(*createDecoder(t))
+	data := NewClusterPoolValidatingAdmissionHook(*createDecoder())
 	err := data.Initialize(nil, nil)
 	assert.Nil(t, err)
 }
@@ -228,7 +228,7 @@ func TestClusterPoolValidate(t *testing.T) {
 		t.Run(tc.name, func(t *testing.T) {
 			// Arrange
 			data := ClusterPoolValidatingAdmissionHook{
-				decoder: *createDecoder(t),
+				decoder: *createDecoder(),
 			}
 
 			if tc.gvr == nil {
diff --git a/pkg/validating-webhooks/hive/v1/clusterprovision_validating_admission_hook_test.go b/pkg/validating-webhooks/hive/v1/clusterprovision_validating_admission_hook_test.go
index 68fc355f538..29fda773703 100644
--- a/pkg/validating-webhooks/hive/v1/clusterprovision_validating_admission_hook_test.go
+++ b/pkg/validating-webhooks/hive/v1/clusterprovision_validating_admission_hook_test.go
@@ -11,7 +11,7 @@ import (
 	corev1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/runtime"
-	"k8s.io/utils/pointer"
+	"k8s.io/utils/ptr"
 
 	hivev1 "github.com/openshift/hive/apis/hive/v1"
 )
@@ -54,7 +54,7 @@ func Test_ClusterProvisionAdmission_Validate_Kind(t *testing.T) {
 	}
 	for _, tc := range cases {
 		t.Run(tc.name, func(t *testing.T) {
-			cut := NewClusterProvisionValidatingAdmissionHook(*createDecoder(t))
+			cut := NewClusterProvisionValidatingAdmissionHook(*createDecoder())
 			cut.Initialize(nil, nil)
 			request := &admissionv1beta1.AdmissionRequest{
 				Resource: metav1.GroupVersionResource{
@@ -92,7 +92,7 @@ func Test_ClusterProvisionAdmission_Validate_Operation(t *testing.T) {
 	}
 	for _, tc := range cases {
 		t.Run(tc.name, func(t *testing.T) {
-			cut := NewClusterProvisionValidatingAdmissionHook(*createDecoder(t))
+			cut := NewClusterProvisionValidatingAdmissionHook(*createDecoder())
 			cut.Initialize(nil, nil)
 			request := &admissionv1beta1.AdmissionRequest{
 				Resource: metav1.GroupVersionResource{
@@ -233,7 +233,7 @@ func Test_ClusterProvisionAdmission_Validate_Create(t *testing.T) {
 			name: "prev cluster ID set for pre-installed",
 			provision: func() *hivev1.ClusterProvision {
 				p := testPreInstalledClusterProvision()
-				p.Spec.PrevClusterID = pointer.String("test-cluster-id")
+				p.Spec.PrevClusterID = ptr.To("test-cluster-id")
 				return p
 			}(),
 		},
@@ -241,14 +241,14 @@
 			name: "prev infra ID set for pre-installed",
 			provision: func() *hivev1.ClusterProvision {
 				p := testPreInstalledClusterProvision()
-				p.Spec.PrevInfraID = pointer.String("test-infra-id")
+				p.Spec.PrevInfraID = ptr.To("test-infra-id")
 				return p
 			}(),
 		},
 	}
 	for _, tc := range cases {
 		t.Run(tc.name, func(t *testing.T) {
-			cut := NewClusterProvisionValidatingAdmissionHook(*createDecoder(t))
+			cut := NewClusterProvisionValidatingAdmissionHook(*createDecoder())
 			cut.Initialize(nil, nil)
 			rawProvision, err := json.Marshal(tc.provision)
 			if !assert.NoError(t, err, "unexpected error marshalling provision") {
@@ -329,7 +329,7 @@ func Test_ClusterProvisionAdmission_Validate_Update(t *testing.T) {
 			old: testClusterProvision(),
 			new: func() *hivev1.ClusterProvision {
 				p := testClusterProvision()
-				p.Spec.ClusterID = pointer.String("new-cluster-id")
+				p.Spec.ClusterID = ptr.To("new-cluster-id")
 				return p
 			}(),
 			expectAllowed: true,
@@ -339,7 +339,7 @@
 			old: testCompletedClusterProvision(),
 			new: func() *hivev1.ClusterProvision {
 				p := testCompletedClusterProvision()
-				p.Spec.ClusterID = pointer.String("new-cluster-id")
+				p.Spec.ClusterID = ptr.To("new-cluster-id")
 				return p
 			}(),
 		},
@@ -348,7 +348,7 @@
 			old: testClusterProvision(),
 			new: func() *hivev1.ClusterProvision {
 				p := testClusterProvision()
-				p.Spec.InfraID = pointer.String("new-infra-id")
+				p.Spec.InfraID = ptr.To("new-infra-id")
 				return p
 			}(),
 			expectAllowed: true,
@@ -358,7 +358,7 @@
 			old: testCompletedClusterProvision(),
 			new: func() *hivev1.ClusterProvision {
 				p := testCompletedClusterProvision()
-				p.Spec.InfraID = pointer.String("new-infra-id")
+				p.Spec.InfraID = ptr.To("new-infra-id")
 				return p
 			}(),
 		},
@@ -367,7 +367,7 @@
 			old: testClusterProvision(),
 			new: func() *hivev1.ClusterProvision {
 				p := testClusterProvision()
-				p.Spec.InstallLog = pointer.String("new-install-log")
+				p.Spec.InstallLog = ptr.To("new-install-log")
 				return p
 			}(),
 			expectAllowed: true,
@@ -377,7 +377,7 @@
 			old: testCompletedClusterProvision(),
 			new: func() *hivev1.ClusterProvision {
 				p := testCompletedClusterProvision()
-				p.Spec.InstallLog = pointer.String("new-install-log")
+				p.Spec.InstallLog = ptr.To("new-install-log")
 				return p
 			}(),
 			expectAllowed: true,
@@ -445,7 +445,7 @@
 			old: testClusterProvision(),
 			new: func() *hivev1.ClusterProvision {
 				p := testClusterProvision()
-				p.Spec.PrevClusterID = pointer.String("new-prev-cluster-id")
+				p.Spec.PrevClusterID = ptr.To("new-prev-cluster-id")
 				return p
 			}(),
 		},
@@ -454,7 +454,7 @@
 			old: testCompletedClusterProvision(),
 			new: func() *hivev1.ClusterProvision {
 				p := testCompletedClusterProvision()
-				p.Spec.PrevClusterID = pointer.String("new-prev-cluster-id")
+				p.Spec.PrevClusterID = ptr.To("new-prev-cluster-id")
 				return p
 			}(),
 		},
@@ -463,7 +463,7 @@
 			old: testClusterProvision(),
 			new: func() *hivev1.ClusterProvision {
 				p := testClusterProvision()
-				p.Spec.PrevInfraID = pointer.String("new-prev-infra-id")
+				p.Spec.PrevInfraID = ptr.To("new-prev-infra-id")
 				return p
 			}(),
 		},
@@ -472,14 +472,14 @@
 			old: testCompletedClusterProvision(),
 			new: func() *hivev1.ClusterProvision {
 				p := testCompletedClusterProvision()
-				p.Spec.PrevInfraID = pointer.String("new-prev-infra-id")
+				p.Spec.PrevInfraID = ptr.To("new-prev-infra-id")
 				return p
 			}(),
 		},
 	}
 	for _, tc := range cases {
 		t.Run(tc.name, func(t *testing.T) {
-			cut := NewClusterProvisionValidatingAdmissionHook(*createDecoder(t))
+			cut := NewClusterProvisionValidatingAdmissionHook(*createDecoder())
 			cut.Initialize(nil, nil)
 			oldAsJSON, err := json.Marshal(tc.old)
 			if !assert.NoError(t, err, "unexpected error marshalling old provision") {
@@ -532,7 +532,7 @@ func Test_ClusterProvisionAdmission_Validate_Update_StageTransition(t *testing.T
 			t.Run(
 				fmt.Sprintf("%s to %s", oldStage, newStage),
 				func(t *testing.T) {
-					cut := NewClusterProvisionValidatingAdmissionHook(*createDecoder(t))
+					cut := NewClusterProvisionValidatingAdmissionHook(*createDecoder())
 					cut.Initialize(nil, nil)
 					oldProvision := testCompletedClusterProvision()
 					oldProvision.Spec.Stage = oldStage
@@ -589,8 +589,8 @@ func testClusterProvision() *hivev1.ClusterProvision {
 			},
 			Attempt: 0,
 			Stage:   hivev1.ClusterProvisionStageInitializing,
-			PrevClusterID: pointer.String("test-prev-cluster-id"),
-			PrevInfraID:   pointer.String("test-prev-infra-id"),
+			PrevClusterID: ptr.To("test-prev-cluster-id"),
+			PrevInfraID:   ptr.To("test-prev-infra-id"),
 		},
 	}
 }
@@ -598,14 +598,14 @@ func testClusterProvision() *hivev1.ClusterProvision {
 
 func testCompletedClusterProvision() *hivev1.ClusterProvision {
 	provision := testClusterProvision()
 	provision.Spec.Stage = hivev1.ClusterProvisionStageComplete
-	provision.Spec.ClusterID = pointer.String("test-cluster-id")
-	provision.Spec.InfraID = pointer.String("test-infra-id")
-	provision.Spec.InstallLog = pointer.String("test-install-log")
+	provision.Spec.ClusterID = ptr.To("test-cluster-id")
+	provision.Spec.InfraID = ptr.To("test-infra-id")
+	provision.Spec.InstallLog = ptr.To("test-install-log")
 	provision.Spec.MetadataJSON = []byte("\"test-metadata\"")
 	provision.Spec.AdminKubeconfigSecretRef = &corev1.LocalObjectReference{Name: "test-admin-kubeconfig"}
 	provision.Spec.AdminPasswordSecretRef = &corev1.LocalObjectReference{Name: "test-admin-password"}
-	provision.Spec.PrevClusterID = pointer.String("test-prev-cluster-id")
-	provision.Spec.PrevInfraID = pointer.String("test-prev-infra-id")
+	provision.Spec.PrevClusterID = ptr.To("test-prev-cluster-id")
+	provision.Spec.PrevInfraID = ptr.To("test-prev-infra-id")
 	return provision
 }
@@ -619,8 +619,8 @@ func testPreInstalledClusterProvision() *hivev1.ClusterProvision {
 				Name: "test-deployment",
 			},
 			Stage:     hivev1.ClusterProvisionStageComplete,
-			ClusterID: pointer.String("test-prev-cluster-id"),
-			InfraID:   pointer.String("test-prev-infra-id"),
+			ClusterID: ptr.To("test-prev-cluster-id"),
+			InfraID:   ptr.To("test-prev-infra-id"),
 			AdminKubeconfigSecretRef: &corev1.LocalObjectReference{Name: "test-admin-kubeconfig"},
 			AdminPasswordSecretRef:   &corev1.LocalObjectReference{Name: "test-admin-password"},
 		},
diff --git a/pkg/validating-webhooks/hive/v1/decoder_test.go b/pkg/validating-webhooks/hive/v1/decoder_test.go
index de4f5c2f3fb..2d6c91a1954 100644
--- a/pkg/validating-webhooks/hive/v1/decoder_test.go
+++ b/pkg/validating-webhooks/hive/v1/decoder_test.go
@@ -1,13 +1,11 @@
 package v1
 
 import (
-	"testing"
-
 	"github.com/openshift/hive/pkg/util/scheme"
 	"sigs.k8s.io/controller-runtime/pkg/webhook/admission"
 )
 
-func createDecoder(t *testing.T) *admission.Decoder {
+func createDecoder() *admission.Decoder {
 	scheme := scheme.GetScheme()
 	decoder := admission.NewDecoder(scheme)
 	return &decoder
diff --git a/pkg/validating-webhooks/hive/v1/dnszone_validating_admission_hook_test.go b/pkg/validating-webhooks/hive/v1/dnszone_validating_admission_hook_test.go
index 8ff45a03d51..b531c970043 100644
--- a/pkg/validating-webhooks/hive/v1/dnszone_validating_admission_hook_test.go
+++ b/pkg/validating-webhooks/hive/v1/dnszone_validating_admission_hook_test.go
@@ -14,7 +14,7 @@ import (
 
 func TestDNSZoneValidatingResource(t *testing.T) {
 	// Arrange
-	data := NewDNSZoneValidatingAdmissionHook(*createDecoder(t))
+	data := NewDNSZoneValidatingAdmissionHook(*createDecoder())
 	expectedPlural := schema.GroupVersionResource{
 		Group:    "admission.hive.openshift.io",
 		Version:  "v1",
@@ -32,7 +32,7 @@ func TestDNSZoneValidatingResource(t *testing.T) {
 
 func TestDNSZoneInitialize(t *testing.T) {
 	// Arrange
-	data := NewDNSZoneValidatingAdmissionHook(*createDecoder(t))
+	data := NewDNSZoneValidatingAdmissionHook(*createDecoder())
 
 	// Act
 	err := data.Initialize(nil, nil)
@@ -141,7 +141,7 @@ func TestDNSZoneValidate(t *testing.T) {
 	for _, tc := range cases {
 		t.Run(tc.name, func(t *testing.T) {
 			// Arrange
-			data := NewDNSZoneValidatingAdmissionHook(*createDecoder(t))
+			data := NewDNSZoneValidatingAdmissionHook(*createDecoder())
 			newObject := &hivev1.DNSZone{
 				Spec: hivev1.DNSZoneSpec{
 					Zone: tc.newZoneStr,
diff --git a/pkg/validating-webhooks/hive/v1/feature_gates.go b/pkg/validating-webhooks/hive/v1/feature_gates.go
index 0d125e51d48..fd1fe7d02eb 100644
--- a/pkg/validating-webhooks/hive/v1/feature_gates.go
+++ b/pkg/validating-webhooks/hive/v1/feature_gates.go
@@ -48,7 +48,7 @@ func existsOnlyWhenFeatureGate(fs *featureSet, obj *unstructured.Unstructured, f
 // equalOnlyWhenFeatureGate ensures that the fieldPath specified in the obj is equal to the expected value when
 // the featureGate is enabled.
 // NOTE: the path to the field cannot include array / slice.
-func equalOnlyWhenFeatureGate(fs *featureSet, obj *unstructured.Unstructured, fieldPath string, featureGate string, expected interface{}) field.ErrorList {
+func equalOnlyWhenFeatureGate(fs *featureSet, obj *unstructured.Unstructured, fieldPath string, featureGate string, expected any) field.ErrorList {
 	allErrs := field.ErrorList{}
 
 	p := strings.Split(fieldPath, ".")
diff --git a/pkg/validating-webhooks/hive/v1/feature_gates_test.go b/pkg/validating-webhooks/hive/v1/feature_gates_test.go
index a0b1b0c6027..cc0935ba565 100644
--- a/pkg/validating-webhooks/hive/v1/feature_gates_test.go
+++ b/pkg/validating-webhooks/hive/v1/feature_gates_test.go
@@ -161,7 +161,7 @@ func Test_equalOnlyWhenFeatureGate(t *testing.T) {
 		obj          string
 		enabledGates []string
 		field        string
-		value        interface{}
+		value        any
 		err          string
 	}{{
diff --git a/pkg/validating-webhooks/hive/v1/machinepool_validating_admission_hook_test.go b/pkg/validating-webhooks/hive/v1/machinepool_validating_admission_hook_test.go
index dbb79378fbe..2c96a409116 100644
--- a/pkg/validating-webhooks/hive/v1/machinepool_validating_admission_hook_test.go
+++ b/pkg/validating-webhooks/hive/v1/machinepool_validating_admission_hook_test.go
@@ -11,7 +11,7 @@ import (
 	corev1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/runtime"
-	"k8s.io/utils/pointer"
+	"k8s.io/utils/ptr"
 
 	hivev1 "github.com/openshift/hive/apis/hive/v1"
 	hivev1aws "github.com/openshift/hive/apis/hive/v1/aws"
@@ -59,7 +59,7 @@ func Test_MachinePoolAdmission_Validate_Kind(t *testing.T) {
 	}
 	for _, tc := range cases {
 		t.Run(tc.name, func(t *testing.T) {
-			cut := NewMachinePoolValidatingAdmissionHook(*createDecoder(t))
+			cut := NewMachinePoolValidatingAdmissionHook(*createDecoder())
 			cut.Initialize(nil, nil)
 			request := &admissionv1beta1.AdmissionRequest{
 				Resource: metav1.GroupVersionResource{
@@ -97,7 +97,7 @@ func Test_MachinePoolAdmission_Validate_Operation(t *testing.T) {
 	}
 	for _, tc := range cases {
 		t.Run(tc.name, func(t *testing.T) {
-			cut := NewMachinePoolValidatingAdmissionHook(*createDecoder(t))
+			cut := NewMachinePoolValidatingAdmissionHook(*createDecoder())
 			cut.Initialize(nil, nil)
 			request := &admissionv1beta1.AdmissionRequest{
 				Resource: metav1.GroupVersionResource{
@@ -171,7 +171,7 @@
 			name: "zero replicas",
 			provision: func() *hivev1.MachinePool {
 				pool := testMachinePool()
-				pool.Spec.Replicas = pointer.Int64(0)
+				pool.Spec.Replicas = ptr.To(int64(0))
 				return pool
 			}(),
 			expectAllowed: true,
@@ -180,7 +180,7 @@
 			name: "positive replicas",
 			provision: func() *hivev1.MachinePool {
 				pool := testMachinePool()
-				pool.Spec.Replicas = pointer.Int64(1)
+				pool.Spec.Replicas = ptr.To(int64(1))
 				return pool
 			}(),
 			expectAllowed: true,
@@ -189,7 +189,7 @@
 			name: "negative replicas",
 			provision: func() *hivev1.MachinePool {
 				pool := testMachinePool()
-				pool.Spec.Replicas = pointer.Int64(-1)
+				pool.Spec.Replicas = ptr.To(int64(-1))
 				return pool
 			}(),
 		},
@@ -197,7 +197,7 @@
 			name: "replicas and autoscaling",
 			provision: func() *hivev1.MachinePool {
 				pool := testMachinePool()
-				pool.Spec.Replicas = pointer.Int64(1)
+				pool.Spec.Replicas = ptr.To(int64(1))
 				pool.Spec.Autoscaling = &hivev1.MachinePoolAutoscaling{}
 				return pool
 			}(),
@@ -493,7 +493,7 @@
 	}
 	for _, tc := range cases {
 		t.Run(tc.name, func(t *testing.T) {
-			cut := NewMachinePoolValidatingAdmissionHook(*createDecoder(t))
+			cut := NewMachinePoolValidatingAdmissionHook(*createDecoder())
 			cut.Initialize(nil, nil)
 			rawProvision, err := json.Marshal(tc.provision)
 			if !assert.NoError(t, err, "unexpected error marshalling provision") {
@@ -550,7 +550,7 @@ func Test_MachinePoolAdmission_Validate_Update(t *testing.T) {
 			old: testMachinePool(),
 			new: func() *hivev1.MachinePool {
 				pool := testMachinePool()
-				pool.Spec.Replicas = pointer.Int64(5)
+				pool.Spec.Replicas = ptr.To(int64(5))
 				return pool
 			}(),
 			expectAllowed: true,
@@ -606,7 +606,7 @@
 	}
 	for _, tc := range cases {
 		t.Run(tc.name, func(t *testing.T) {
-			cut := NewMachinePoolValidatingAdmissionHook(*createDecoder(t))
+			cut := NewMachinePoolValidatingAdmissionHook(*createDecoder())
 			cut.Initialize(nil, nil)
 			oldAsJSON, err := json.Marshal(tc.old)
 			if !assert.NoError(t, err, "unexpected error marshalling old provision") {
diff --git a/pkg/validating-webhooks/hive/v1/selector_syncset_validating_admission_hook_test.go b/pkg/validating-webhooks/hive/v1/selector_syncset_validating_admission_hook_test.go
index 31b123cb5b6..1e9c5e63eff 100644
--- a/pkg/validating-webhooks/hive/v1/selector_syncset_validating_admission_hook_test.go
+++ b/pkg/validating-webhooks/hive/v1/selector_syncset_validating_admission_hook_test.go
@@ -14,7 +14,7 @@ import (
 
 func TestSelectorSyncSetValidatingResource(t *testing.T) {
 	// Arrange
-	data := NewSelectorSyncSetValidatingAdmissionHook(*createDecoder(t))
+	data := NewSelectorSyncSetValidatingAdmissionHook(*createDecoder())
 	expectedPlural := schema.GroupVersionResource{
 		Group:    "admission.hive.openshift.io",
 		Version:  "v1",
@@ -32,7 +32,7 @@ func TestSelectorSyncSetValidatingResource(t *testing.T) {
 
 func TestSelectorSyncSetInitialize(t *testing.T) {
 	// Arrange
-	data := NewSelectorSyncSetValidatingAdmissionHook(*createDecoder(t))
+	data := NewSelectorSyncSetValidatingAdmissionHook(*createDecoder())
 
 	// Act
 	err := data.Initialize(nil, nil)
@@ -360,7 +360,7 @@ func TestSelectorSyncSetValidate(t *testing.T) {
 	for _, tc := range cases {
 		t.Run(tc.name, func(t *testing.T) {
 			// Arrange
-			data := NewSelectorSyncSetValidatingAdmissionHook(*createDecoder(t))
+			data := NewSelectorSyncSetValidatingAdmissionHook(*createDecoder())
 
 			objectRaw, _ := json.Marshal(tc.selectorSyncSet)
diff --git a/pkg/validating-webhooks/hive/v1/syncset_validating_admission_hook.go b/pkg/validating-webhooks/hive/v1/syncset_validating_admission_hook.go
index 91cf6f6298d..bd8a5e52cd7 100644
--- a/pkg/validating-webhooks/hive/v1/syncset_validating_admission_hook.go
+++ b/pkg/validating-webhooks/hive/v1/syncset_validating_admission_hook.go
@@ -26,14 +26,14 @@ const (
 	syncSetResource = "syncsets"
 )
 
-var invalidResourceGroupKinds = map[string]map[string]bool{
-	"authorization.openshift.io": {
-		"Role":                true,
-		"RoleBinding":         true,
-		"ClusterRole":         true,
-		"ClusterRoleBinding":  true,
-		"SubjectAccessReview": true,
-	},
+var invalidResourceGroupKinds = map[string]sets.Set[string]{
+	"authorization.openshift.io": sets.New(
+		"Role",
+		"RoleBinding",
+		"ClusterRole",
+		"ClusterRoleBinding",
+		"SubjectAccessReview",
+	),
 }
 
 // HIVE-2807: "" is defaulted to "strategic" in code.
@@ -294,7 +294,7 @@ func validateResource(resource runtime.RawExtension, fldPath *field.Path) field.
 		return allErrs
 	}
 
-	if invalidResourceGroupKinds[u.GroupVersionKind().Group][u.GetKind()] {
+	if invalidResourceGroupKinds[u.GroupVersionKind().Group].Has(u.GetKind()) {
 		allErrs = append(allErrs, field.Invalid(fldPath.Child("APIVersion"), u.GetAPIVersion(), "must use kubernetes group for this resource kind"))
 	}
diff --git a/pkg/validating-webhooks/hive/v1/syncset_validating_admission_hook_test.go b/pkg/validating-webhooks/hive/v1/syncset_validating_admission_hook_test.go
index 0147aef5855..53f0689ff3f 100644
--- a/pkg/validating-webhooks/hive/v1/syncset_validating_admission_hook_test.go
+++ b/pkg/validating-webhooks/hive/v1/syncset_validating_admission_hook_test.go
@@ -24,7 +24,7 @@ const (
 
 func TestSyncSetValidatingResource(t *testing.T) {
 	// Arrange
-	data := NewSyncSetValidatingAdmissionHook(*createDecoder(t))
+	data := NewSyncSetValidatingAdmissionHook(*createDecoder())
 	expectedPlural := schema.GroupVersionResource{
 		Group:    "admission.hive.openshift.io",
 		Version:  "v1",
@@ -42,7 +42,7 @@ func TestSyncSetValidatingResource(t *testing.T) {
 
 func TestSyncSetInitialize(t *testing.T) {
 	// Arrange
-	data := NewSyncSetValidatingAdmissionHook(*createDecoder(t))
+	data := NewSyncSetValidatingAdmissionHook(*createDecoder())
 
 	// Act
 	err := data.Initialize(nil, nil)
@@ -396,7 +396,7 @@ func TestSyncSetValidate(t *testing.T) {
 	for _, tc := range cases {
 		t.Run(tc.name, func(t *testing.T) {
 			// Arrange
-			data := NewSyncSetValidatingAdmissionHook(*createDecoder(t))
+			data := NewSyncSetValidatingAdmissionHook(*createDecoder())
 
 			objectRaw, _ := json.Marshal(tc.syncSet)
diff --git a/pkg/validating-webhooks/hive/v1/utils.go b/pkg/validating-webhooks/hive/v1/utils.go
deleted file mode 100644
index 52c9bcef950..00000000000
--- a/pkg/validating-webhooks/hive/v1/utils.go
+++ /dev/null
@@ -1,20 +0,0 @@
-package v1
-
-import (
-	"strconv"
-
-	"github.com/openshift/hive/pkg/constants"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-)
-
-func creationHooksDisabled(o metav1.Object) bool {
-	v, ok := o.GetLabels()[constants.DisableCreationWebHookForDisasterRecovery]
-	if !ok {
-		return false
-	}
-	b, err := strconv.ParseBool(v)
-	if err != nil {
-		return false
-	}
-	return b
-}
diff --git a/test/e2e/common/apiservice.go b/test/e2e/common/apiservice.go
index 768d23ab24f..9689fd41070 100644
--- a/test/e2e/common/apiservice.go
+++ b/test/e2e/common/apiservice.go
@@ -35,7 +35,7 @@ func WaitForAPIService(c apiregv1client.ApiregistrationV1Interface, name string,
 	logger.Infof("Waiting for APIService")
 	stop := make(chan struct{})
 	done := make(chan struct{})
-	onObject := func(obj interface{}) {
+	onObject := func(obj any) {
 		apiService, ok := obj.(*apiregv1.APIService)
 		if !ok {
 			logger.Warningf("object not APIService: %v", obj)
@@ -46,17 +46,16 @@
 		}
 	}
 	watchList := cache.NewListWatchFromClient(c.RESTClient(), "apiservices", "", fields.OneTermEqualSelector("metadata.name", name))
-	_, controller := cache.NewInformer(
-		watchList,
-		&apiregv1.APIService{},
-		0,
-		cache.ResourceEventHandlerFuncs{
+	_, controller := cache.NewInformerWithOptions(cache.InformerOptions{
+		ListerWatcher: watchList,
+		ObjectType:    &apiregv1.APIService{},
+		Handler: cache.ResourceEventHandlerFuncs{
 			AddFunc: onObject,
-			UpdateFunc: func(oldObject, newObject interface{}) {
+			UpdateFunc: func(oldObject, newObject any) {
 				onObject(newObject)
 			},
 		},
-	)
+	})
 
 	go controller.Run(stop)
 	defer func() { stop <- struct{}{} }()
diff --git a/test/e2e/common/deployment.go b/test/e2e/common/deployment.go
index 62e4730642a..c2037a3aaac 100644
--- a/test/e2e/common/deployment.go
+++ b/test/e2e/common/deployment.go
@@ -36,7 +36,7 @@ func WaitForDeployment(c kclient.Interface, namespace, name string, testFunc fun
 	logger.Infof("Waiting for deployment")
 	stop := make(chan struct{})
 	done := make(chan struct{})
-	onObject := func(obj interface{}) {
+	onObject := func(obj any) {
 		deployment, ok := obj.(*appsv1.Deployment)
 		if !ok {
 			logger.Warningf("object not deployment: %v", obj)
@@ -47,17 +47,16 @@ func WaitForDeployment(c kclient.Interface, namespace, name string, testFunc fun
 		}
 	}
 	watchList := cache.NewListWatchFromClient(c.AppsV1().RESTClient(), "deployments", namespace, fields.OneTermEqualSelector("metadata.name", name))
-	_, controller := cache.NewInformer(
-		watchList,
-		&appsv1.Deployment{},
-		0,
-		cache.ResourceEventHandlerFuncs{
+	_, controller := cache.NewInformerWithOptions(cache.InformerOptions{
+		ListerWatcher: watchList,
+		ObjectType:    &appsv1.Deployment{},
+		Handler: cache.ResourceEventHandlerFuncs{
 			AddFunc: onObject,
-			UpdateFunc: func(oldObject, newObject interface{}) {
+			UpdateFunc: func(oldObject, newObject any) {
 				onObject(newObject)
 			},
 		},
-	)
+	})
 
 	go controller.Run(stop)
 	defer func() { stop <- struct{}{} }()
diff --git a/test/e2e/common/diff.go b/test/e2e/common/diff.go
index d02a3f49f13..d871357502f 100644
--- a/test/e2e/common/diff.go
+++ b/test/e2e/common/diff.go
@@ -2,10 +2,11 @@ package common
 
 import (
 	"encoding/json"
-	"github.com/evanphx/json-patch"
+
+	jsonpatch "github.com/evanphx/json-patch"
 )
 
-func JSONDiff(a, b interface{}) ([]byte, error) {
+func JSONDiff(a, b any) ([]byte, error) {
 	jsonA, err := json.Marshal(a)
 	if err != nil {
 		return nil, err
diff --git a/test/e2e/common/machine.go b/test/e2e/common/machine.go
index b6365c81c4f..f5655f81868 100644
--- a/test/e2e/common/machine.go
+++ b/test/e2e/common/machine.go
@@ -46,9 +46,9 @@ func WaitForMachines(cfg *rest.Config, testFunc func([]*machinev1.Machine) bool,
 	}
 	informer.AddEventHandler(
 		&clientcache.ResourceEventHandlerFuncs{
-			AddFunc:    func(obj interface{}) { onUpdate() },
-			UpdateFunc: func(oldObj, newObj interface{}) { onUpdate() },
-			DeleteFunc: func(obj interface{}) { onUpdate() },
+			AddFunc:    func(obj any) { onUpdate() },
+			UpdateFunc: func(oldObj, newObj any) { onUpdate() },
+			DeleteFunc: func(obj any) { onUpdate() },
 		})
 
 	ctx, stop := context.WithCancel(context.Background())
diff --git a/test/e2e/common/machineset.go b/test/e2e/common/machineset.go
index 8a37c9c076f..f93d6884f0e 100644
--- a/test/e2e/common/machineset.go
+++ b/test/e2e/common/machineset.go
@@ -44,9 +44,9 @@ func WaitForMachineSets(cfg *rest.Config, testFunc func([]*machinev1.MachineSet)
 	}
 	informer.AddEventHandler(
 		&clientcache.ResourceEventHandlerFuncs{
-			AddFunc:    func(obj interface{}) { onUpdate() },
-			UpdateFunc: func(oldObj, newObj interface{}) { onUpdate() },
-			DeleteFunc: func(obj interface{}) { onUpdate() },
+			AddFunc:    func(obj any) { onUpdate() },
+			UpdateFunc: func(oldObj, newObj any) { onUpdate() },
+			DeleteFunc: func(obj any) { onUpdate() },
 		})
 
 	ctx, stop := context.WithCancel(context.Background())
diff --git a/test/e2e/common/node.go b/test/e2e/common/node.go
index ec7fd37f470..46836548aa0 100644
--- a/test/e2e/common/node.go
+++ b/test/e2e/common/node.go
@@ -39,9 +39,9 @@ func WaitForNodes(cfg *rest.Config, testFunc func([]*corev1.Node) bool, timeOut
 	}
 	informer.AddEventHandler(
 		&clientcache.ResourceEventHandlerFuncs{
-			AddFunc:    func(obj interface{}) { onUpdate() },
-			UpdateFunc: func(oldObj, newObj interface{}) { onUpdate() },
-			DeleteFunc: func(obj interface{}) { onUpdate() },
+			AddFunc:    func(obj any) { onUpdate() },
+			UpdateFunc: func(oldObj, newObj any) { onUpdate() },
+			DeleteFunc: func(obj any) { onUpdate() },
 		})
 
 	ctx, stop := context.WithCancel(context.Background())
diff --git a/test/e2e/common/service.go b/test/e2e/common/service.go
index 6e236482856..3bb6a162e9b 100644
--- a/test/e2e/common/service.go
+++ b/test/e2e/common/service.go
@@ -17,7 +17,7 @@ func WaitForService(c kclient.Interface, namespace, name string, testFunc func(*
 	logger.Infof("Waiting for service")
 	stop := make(chan struct{})
 	done := make(chan struct{})
-	onObject := func(obj interface{}) {
+	onObject := func(obj any) {
 		service, ok := obj.(*corev1.Service)
 		if !ok {
 			logger.Warningf("object not service: %v", obj)
@@ -28,17 +28,16 @@
 		}
 	}
 	watchList := cache.NewListWatchFromClient(c.CoreV1().RESTClient(), "services", namespace, fields.OneTermEqualSelector("metadata.name", name))
-	_, controller := cache.NewInformer(
-		watchList,
-		&corev1.Service{},
-		0,
-		cache.ResourceEventHandlerFuncs{
+	_, controller := cache.NewInformerWithOptions(cache.InformerOptions{
+		ListerWatcher: watchList,
+		ObjectType:    &corev1.Service{},
+		Handler: cache.ResourceEventHandlerFuncs{
 			AddFunc: onObject,
-			UpdateFunc: func(oldObject, newObject interface{}) {
+			UpdateFunc: func(oldObject, newObject any) {
 				onObject(newObject)
 			},
 		},
-	)
+	})
 
 	go controller.Run(stop)
 	defer func() { stop <- struct{}{} }()
diff --git a/test/e2e/destroycluster/destroy_test.go b/test/e2e/destroycluster/destroy_test.go
index 2b1a278b4e2..d925f4c974e 100644
--- a/test/e2e/destroycluster/destroy_test.go
+++ b/test/e2e/destroycluster/destroy_test.go
@@ -46,7 +46,10 @@ func TestDestroyCluster(t *testing.T) {
 		logger.Warn("Cluster deployment did not finish installing")
 	}
 
-	fail := failTestFunc(t, logger)
+	fail := func(format string, args ...any) {
+		logger.Error(fmt.Sprintf(format, args...))
+		t.Fatalf(format, args...)
+	}
 
 	c := common.MustGetClient()
 	logger.Info("Deleting cluster deployment")
@@ -221,10 +224,3 @@ func waitForClusterDeploymentToGoAway(cd *hivev1.ClusterDeployment, cl client.Wi
 	}
 	return nil
 }
-
-func failTestFunc(t *testing.T, logger *log.Entry) func(string, ...interface{}) {
-	return func(format string, args ...interface{}) {
-		log.Error(fmt.Sprintf(format, args...))
-		t.Fatalf(format, args...)
-	}
-}
diff --git a/test/e2e/postinstall/machinesets/infra_test.go b/test/e2e/postinstall/machinesets/infra_test.go
index 39b9890e8a0..417bcd0ebce 100644
--- a/test/e2e/postinstall/machinesets/infra_test.go
+++ b/test/e2e/postinstall/machinesets/infra_test.go
@@ -17,7 +17,7 @@ import (
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/client-go/rest"
 	"k8s.io/client-go/util/retry"
-	"k8s.io/utils/pointer"
+	"k8s.io/utils/ptr"
 	"sigs.k8s.io/controller-runtime/pkg/client"
 
 	machinev1 "github.com/openshift/api/machine/v1beta1"
@@ -64,7 +64,7 @@ func TestScaleMachinePool(t *testing.T) {
 		logger.Infof("expected Machine name prefix: %s", machinePrefix)
 
 		logger.Info("scaling pool to 1 replicas")
-		pool.Spec.Replicas = pointer.Int64(1)
+		pool.Spec.Replicas = ptr.To(int64(1))
 		return c.Update(context.TODO(), pool)
 	})
 	require.NoError(t, err, "cannot update worker machine pool to reduce replicas")
@@ -81,7 +81,7 @@
 		require.NotNilf(t, pool, "worker machine pool does not exist: %s", workerMachinePoolName)
 
 		logger.Info("scaling pool back to 3 replicas")
-		pool.Spec.Replicas = pointer.Int64(3)
+		pool.Spec.Replicas = ptr.To(int64(3))
 		return c.Update(context.TODO(), pool)
 	})
 	require.NoError(t, err, "cannot update worker machine pool to increase replicas")
@@ -111,7 +111,7 @@ func TestNewMachinePool(t *testing.T) {
 		Spec: hivev1.MachinePoolSpec{
 			ClusterDeploymentRef: corev1.LocalObjectReference{Name: cd.Name},
 			Name:                 infraMachinePoolName,
-			Replicas:             pointer.Int64(3),
+			Replicas:             ptr.To(int64(3)),
 			Labels: map[string]string{
 				"openshift.io/machine-type": infraMachinePoolName,
 			},
@@ -282,7 +282,8 @@ func TestAutoscalingMachinePool(t *testing.T) {
 
 	logger.Info("lowering autoscaler delay so scaling down happens faster")
 	clusterAutoscaler := &autoscalingv1.ClusterAutoscaler{}
-	for i := 0; i < 10; i++ {
+poll:
+	for range 10 {
 		switch err := rc.Get(context.Background(), client.ObjectKey{Name: "default"}, clusterAutoscaler); {
 		case apierrors.IsNotFound(err):
 			t.Log("waiting for Hive to create cluster autoscaler")
@@ -291,7 +292,7 @@ func TestAutoscalingMachinePool(t *testing.T) {
 			t.Fatalf("could not get the cluster autoscaler: %v", err)
 		default:
 			t.Log("found cluster autoscaler")
-			break
+			break poll
 		}
 	}
 	machineSetList := &machinev1.MachineSetList{}
@@ -308,10 +309,10 @@ func TestAutoscalingMachinePool(t *testing.T) {
 
 	if clusterAutoscaler.Spec.ScaleDown == nil {
 		clusterAutoscaler.Spec.ScaleDown = &autoscalingv1.ScaleDownConfig{}
 	}
-	clusterAutoscaler.Spec.ScaleDown.DelayAfterAdd = pointer.String("10s")
-	clusterAutoscaler.Spec.ScaleDown.DelayAfterDelete = pointer.String("10s")
-	clusterAutoscaler.Spec.ScaleDown.DelayAfterFailure = pointer.String("10s")
-	clusterAutoscaler.Spec.ScaleDown.UnneededTime = pointer.String("10s")
+	clusterAutoscaler.Spec.ScaleDown.DelayAfterAdd = ptr.To("10s")
+	clusterAutoscaler.Spec.ScaleDown.DelayAfterDelete = ptr.To("10s")
+	clusterAutoscaler.Spec.ScaleDown.DelayAfterFailure = ptr.To("10s")
+	clusterAutoscaler.Spec.ScaleDown.UnneededTime = ptr.To("10s")
 	err = rc.Update(context.Background(), clusterAutoscaler)
 	require.NoError(t, err, "could not update the cluster autoscaler")
@@ -326,7 +327,7 @@ func TestAutoscalingMachinePool(t *testing.T) {
 			Name: "busybox",
 		},
 		Spec: appsv1.DeploymentSpec{
-			Replicas: pointer.Int32(100),
+			Replicas: ptr.To(int32(100)),
 			Selector: &metav1.LabelSelector{
 				MatchLabels: map[string]string{
 					"scaling-app": "busybox",
@@ -355,14 +356,14 @@
 							},
 						},
 						SecurityContext: &corev1.SecurityContext{
-							AllowPrivilegeEscalation: pointer.Bool(false),
+							AllowPrivilegeEscalation: ptr.To(false),
 							Capabilities: &corev1.Capabilities{
 								Drop: []corev1.Capability{"ALL"},
 							},
 						},
 					}},
 					SecurityContext: &corev1.PodSecurityContext{
-						RunAsNonRoot: pointer.Bool(true),
+						RunAsNonRoot: ptr.To(true),
 						SeccompProfile: &corev1.SeccompProfile{
 							Type: corev1.SeccompProfileTypeRuntimeDefault,
 						},
@@ -396,7 +397,7 @@ func TestAutoscalingMachinePool(t *testing.T) {
 		pool = common.GetMachinePool(c, cd, "worker")
 		require.NotNil(t, pool, "worker machine pool does not exist")
 
-		pool.Spec.Replicas = pointer.Int64(3)
+		pool.Spec.Replicas = ptr.To(int64(3))
 		pool.Spec.Autoscaling = nil
 		return c.Update(context.TODO(), pool)
 	})