-
Notifications
You must be signed in to change notification settings - Fork 220
Add a render command to facilitate install-time customizations #309
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
Changes from all commits
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
| Original file line number | Diff line number | Diff line change |
|---|---|---|
| @@ -0,0 +1 @@ | ||
| /ingress-operator |
| Original file line number | Diff line number | Diff line change |
|---|---|---|
| @@ -1,226 +1,22 @@ | ||
| package main | ||
|
|
||
| import ( | ||
| "context" | ||
| "fmt" | ||
| "os" | ||
|
|
||
| "github.com/ghodss/yaml" | ||
| "github.com/spf13/cobra" | ||
|
|
||
| "github.com/openshift/cluster-ingress-operator/pkg/dns" | ||
| awsdns "github.com/openshift/cluster-ingress-operator/pkg/dns/aws" | ||
| azuredns "github.com/openshift/cluster-ingress-operator/pkg/dns/azure" | ||
| gcpdns "github.com/openshift/cluster-ingress-operator/pkg/dns/gcp" | ||
| logf "github.com/openshift/cluster-ingress-operator/pkg/log" | ||
| "github.com/openshift/cluster-ingress-operator/pkg/manifests" | ||
| "github.com/openshift/cluster-ingress-operator/pkg/operator" | ||
| operatorclient "github.com/openshift/cluster-ingress-operator/pkg/operator/client" | ||
| operatorconfig "github.com/openshift/cluster-ingress-operator/pkg/operator/config" | ||
| statuscontroller "github.com/openshift/cluster-ingress-operator/pkg/operator/controller/status" | ||
|
|
||
| configv1 "github.com/openshift/api/config/v1" | ||
|
|
||
| corev1 "k8s.io/api/core/v1" | ||
|
|
||
| "k8s.io/apimachinery/pkg/types" | ||
|
|
||
| "sigs.k8s.io/controller-runtime/pkg/client" | ||
| "sigs.k8s.io/controller-runtime/pkg/client/config" | ||
| "sigs.k8s.io/controller-runtime/pkg/metrics" | ||
| "sigs.k8s.io/controller-runtime/pkg/runtime/signals" | ||
| ) | ||
|
|
||
| const ( | ||
| // cloudCredentialsSecretName is the name of the secret in the | ||
| // operator's namespace that will hold the credentials that the operator | ||
| // will use to authenticate with the cloud API. | ||
| cloudCredentialsSecretName = "cloud-credentials" | ||
| ) | ||
|
|
||
| var log = logf.Logger.WithName("entrypoint") | ||
| var log = logf.Logger.WithName("main") | ||
|
|
||
| func main() { | ||
| metrics.DefaultBindAddress = ":60000" | ||
|
|
||
| // Get a kube client. | ||
| kubeConfig, err := config.GetConfig() | ||
| if err != nil { | ||
| log.Error(err, "failed to get kube config") | ||
| os.Exit(1) | ||
| } | ||
| kubeClient, err := operatorclient.NewClient(kubeConfig) | ||
| if err != nil { | ||
| log.Error(err, "failed to create kube client") | ||
| os.Exit(1) | ||
| } | ||
|
|
||
| // Collect operator configuration. | ||
| operatorNamespace := os.Getenv("WATCH_NAMESPACE") | ||
| if len(operatorNamespace) == 0 { | ||
| operatorNamespace = manifests.DefaultOperatorNamespace | ||
| } | ||
| log.Info("using operator namespace", "namespace", operatorNamespace) | ||
|
|
||
| ingressControllerImage := os.Getenv("IMAGE") | ||
| if len(ingressControllerImage) == 0 { | ||
| log.Error(fmt.Errorf("missing environment variable"), "'IMAGE' environment variable must be set") | ||
| os.Exit(1) | ||
| } | ||
| releaseVersion := os.Getenv("RELEASE_VERSION") | ||
| if len(releaseVersion) == 0 { | ||
| releaseVersion = statuscontroller.UnknownVersionValue | ||
| log.Info("RELEASE_VERSION environment variable missing", "release version", statuscontroller.UnknownVersionValue) | ||
| } | ||
|
|
||
| // Retrieve the cluster infrastructure config. | ||
| infraConfig := &configv1.Infrastructure{} | ||
| err = kubeClient.Get(context.TODO(), types.NamespacedName{Name: "cluster"}, infraConfig) | ||
| if err != nil { | ||
| log.Error(err, "failed to get infrastructure 'config'") | ||
| os.Exit(1) | ||
| } | ||
|
|
||
| dnsConfig := &configv1.DNS{} | ||
| err = kubeClient.Get(context.TODO(), types.NamespacedName{Name: "cluster"}, dnsConfig) | ||
| if err != nil { | ||
| log.Error(err, "failed to get dns 'cluster'") | ||
| os.Exit(1) | ||
| } | ||
| var rootCmd = &cobra.Command{Use: "ingress-operator"} | ||
| rootCmd.AddCommand(NewStartCommand()) | ||
| rootCmd.AddCommand(NewRenderCommand()) | ||
|
|
||
| platformStatus, err := getPlatformStatus(kubeClient, infraConfig) | ||
| if err != nil { | ||
| log.Error(err, "failed to get platform status") | ||
| if err := rootCmd.Execute(); err != nil { | ||
| log.Error(err, "error") | ||
| os.Exit(1) | ||
| } | ||
|
|
||
| operatorConfig := operatorconfig.Config{ | ||
| OperatorReleaseVersion: releaseVersion, | ||
| Namespace: operatorNamespace, | ||
| IngressControllerImage: ingressControllerImage, | ||
| } | ||
|
|
||
| // Set up the DNS manager. | ||
| dnsProvider, err := createDNSProvider(kubeClient, operatorConfig, dnsConfig, platformStatus) | ||
| if err != nil { | ||
| log.Error(err, "failed to create DNS manager") | ||
| os.Exit(1) | ||
| } | ||
|
|
||
| // Set up and start the operator. | ||
| op, err := operator.New(operatorConfig, dnsProvider, kubeConfig) | ||
| if err != nil { | ||
| log.Error(err, "failed to create operator") | ||
| os.Exit(1) | ||
| } | ||
| if err := op.Start(signals.SetupSignalHandler()); err != nil { | ||
| log.Error(err, "failed to start operator") | ||
| os.Exit(1) | ||
| } | ||
| } | ||
|
|
||
| // createDNSManager creates a DNS manager compatible with the given cluster | ||
| // configuration. | ||
| func createDNSProvider(cl client.Client, operatorConfig operatorconfig.Config, dnsConfig *configv1.DNS, platformStatus *configv1.PlatformStatus) (dns.Provider, error) { | ||
| var dnsProvider dns.Provider | ||
| userAgent := fmt.Sprintf("OpenShift/%s (ingress-operator)", operatorConfig.OperatorReleaseVersion) | ||
|
|
||
| switch platformStatus.Type { | ||
| case configv1.AWSPlatformType: | ||
| creds := &corev1.Secret{} | ||
| err := cl.Get(context.TODO(), types.NamespacedName{Namespace: operatorConfig.Namespace, Name: cloudCredentialsSecretName}, creds) | ||
| if err != nil { | ||
| return nil, fmt.Errorf("failed to get cloud credentials from secret %s/%s: %v", creds.Namespace, creds.Name, err) | ||
| } | ||
| provider, err := awsdns.NewProvider(awsdns.Config{ | ||
| AccessID: string(creds.Data["aws_access_key_id"]), | ||
| AccessKey: string(creds.Data["aws_secret_access_key"]), | ||
| DNS: dnsConfig, | ||
| Region: platformStatus.AWS.Region, | ||
| }, operatorConfig.OperatorReleaseVersion) | ||
| if err != nil { | ||
| return nil, fmt.Errorf("failed to create AWS DNS manager: %v", err) | ||
| } | ||
| dnsProvider = provider | ||
| case configv1.AzurePlatformType: | ||
| creds := &corev1.Secret{} | ||
| err := cl.Get(context.TODO(), types.NamespacedName{Namespace: operatorConfig.Namespace, Name: cloudCredentialsSecretName}, creds) | ||
| if err != nil { | ||
| return nil, fmt.Errorf("failed to get cloud credentials from secret %s/%s: %v", creds.Namespace, creds.Name, err) | ||
| } | ||
| provider, err := azuredns.NewProvider(azuredns.Config{ | ||
| Environment: "AzurePublicCloud", | ||
| ClientID: string(creds.Data["azure_client_id"]), | ||
| ClientSecret: string(creds.Data["azure_client_secret"]), | ||
| TenantID: string(creds.Data["azure_tenant_id"]), | ||
| SubscriptionID: string(creds.Data["azure_subscription_id"]), | ||
| DNS: dnsConfig, | ||
| }, operatorConfig.OperatorReleaseVersion) | ||
| if err != nil { | ||
| return nil, fmt.Errorf("failed to create Azure DNS manager: %v", err) | ||
| } | ||
| dnsProvider = provider | ||
| case configv1.GCPPlatformType: | ||
| creds := &corev1.Secret{} | ||
| err := cl.Get(context.TODO(), types.NamespacedName{Namespace: operatorConfig.Namespace, Name: cloudCredentialsSecretName}, creds) | ||
| if err != nil { | ||
| return nil, fmt.Errorf("failed to get cloud credentials from secret %s/%s: %v", creds.Namespace, creds.Name, err) | ||
| } | ||
| provider, err := gcpdns.New(gcpdns.Config{ | ||
| Project: platformStatus.GCP.ProjectID, | ||
| CredentialsJSON: creds.Data["service_account.json"], | ||
| UserAgent: userAgent, | ||
| }) | ||
| if err != nil { | ||
| return nil, fmt.Errorf("failed to create GCP DNS provider: %v", err) | ||
| } | ||
| dnsProvider = provider | ||
| default: | ||
| dnsProvider = &dns.FakeProvider{} | ||
| } | ||
| return dnsProvider, nil | ||
| } | ||
|
|
||
| // getPlatformStatus provides a backwards-compatible way to look up platform status. AWS is the | ||
| // special case. 4.1 clusters on AWS expose the region config only through install-config. New AWS clusters | ||
| // and all other 4.2+ platforms are configured via platform status. | ||
| func getPlatformStatus(client client.Client, infra *configv1.Infrastructure) (*configv1.PlatformStatus, error) { | ||
| if status := infra.Status.PlatformStatus; status != nil { | ||
| // Only AWS needs backwards compatibility with install-config | ||
| if status.Type != configv1.AWSPlatformType { | ||
| return status, nil | ||
| } | ||
|
|
||
| // Check whether the cluster config is already migrated | ||
| if status.AWS != nil && len(status.AWS.Region) > 0 { | ||
| return status, nil | ||
| } | ||
| } | ||
|
|
||
| // Otherwise build a platform status from the deprecated install-config | ||
| type installConfig struct { | ||
| Platform struct { | ||
| AWS struct { | ||
| Region string `json:"region"` | ||
| } `json:"aws"` | ||
| } `json:"platform"` | ||
| } | ||
| clusterConfigName := types.NamespacedName{Namespace: "kube-system", Name: "cluster-config-v1"} | ||
| clusterConfig := &corev1.ConfigMap{} | ||
| if err := client.Get(context.TODO(), clusterConfigName, clusterConfig); err != nil { | ||
| return nil, fmt.Errorf("failed to get configmap %s: %v", clusterConfigName, err) | ||
| } | ||
| data, ok := clusterConfig.Data["install-config"] | ||
| if !ok { | ||
| return nil, fmt.Errorf("missing install-config in configmap") | ||
| } | ||
| var ic installConfig | ||
| if err := yaml.Unmarshal([]byte(data), &ic); err != nil { | ||
| return nil, fmt.Errorf("invalid install-config: %v\njson:\n%s", err, data) | ||
| } | ||
| return &configv1.PlatformStatus{ | ||
| Type: infra.Status.Platform, | ||
| AWS: &configv1.AWSPlatformStatus{ | ||
| Region: ic.Platform.AWS.Region, | ||
| }, | ||
| }, nil | ||
| } |
| Original file line number | Diff line number | Diff line change |
|---|---|---|
| @@ -0,0 +1,59 @@ | ||
| package main | ||
|
|
||
| import ( | ||
| "fmt" | ||
| "io/ioutil" | ||
| "os" | ||
| "path/filepath" | ||
|
|
||
| "github.com/spf13/cobra" | ||
|
|
||
| "github.com/openshift/cluster-ingress-operator/pkg/manifests" | ||
| ) | ||
|
|
||
| func NewRenderCommand() *cobra.Command { | ||
| var options struct { | ||
| OutputDir string | ||
| Prefix string | ||
| } | ||
|
|
||
| var command = &cobra.Command{ | ||
| Use: "render", | ||
| Short: "Render base manifests", | ||
| Long: `render emits the base manifest files necessary to support the creation of an ingresscontroller resource.`, | ||
| Run: func(cmd *cobra.Command, args []string) { | ||
| if err := render(options.OutputDir, options.Prefix); err != nil { | ||
| log.Error(err, "error rendering") | ||
| os.Exit(1) | ||
| } | ||
| }, | ||
| } | ||
|
|
||
| command.Flags().StringVarP(&options.OutputDir, "output-dir", "o", "", "manifest output directory.") | ||
| command.Flags().StringVarP(&options.Prefix, "prefix", "p", "", "optional prefix for rendered filenames.") | ||
| if err := command.MarkFlagRequired("output-dir"); err != nil { | ||
| panic(err) | ||
| } | ||
|
|
||
| return command | ||
| } | ||
|
|
||
| func render(dir string, prefix string) error { | ||
| files := []string{ | ||
| manifests.CustomResourceDefinitionManifest, | ||
| manifests.NamespaceManifest, | ||
|
Contributor
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. Could we also write out a default ingresscontroller resource so the user does not need to write that from scratch?
Contributor
Author
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. Can we have a separate followup discussion about the default handling in that regard? It's not required for this part of the implementation and could have negative consequences to consider. |
||
| } | ||
|
|
||
| if err := os.MkdirAll(dir, 0750); err != nil { | ||
| return fmt.Errorf("failed to create output directory %q: %v", dir, err) | ||
| } | ||
|
|
||
| for _, file := range files { | ||
| outputFile := filepath.Join(dir, prefix+filepath.Base(file)) | ||
ironcladlou marked this conversation as resolved.
Show resolved
Hide resolved
|
||
| if err := ioutil.WriteFile(outputFile, manifests.MustAsset(file), 0640); err != nil { | ||
| return fmt.Errorf("failed to write %q: %v", outputFile, err) | ||
| } | ||
| fmt.Printf("wrote %s\n", outputFile) | ||
| } | ||
| return nil | ||
| } | ||
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
This new user should be unnecessary, and by using, the installer (as an image consumer) must override the user when doing things like running
`render` with a volume-mounted output directory (because the `ingress-operator` user doesn't have permissions to write to the volume mount point). Although I can work around it in the installer, if we have no reason to run as a new user, removing the
`useradd` makes things more consistent with the other images used for the same purpose.