1 change: 1 addition & 0 deletions .dockerignore
@@ -0,0 +1 @@
/ingress-operator
4 changes: 1 addition & 3 deletions Dockerfile
@@ -6,10 +6,8 @@ RUN make build
FROM registry.svc.ci.openshift.org/openshift/origin-v4.0:base
COPY --from=builder /ingress-operator/ingress-operator /usr/bin/
COPY manifests /manifests
RUN useradd ingress-operator
USER ingress-operator
Contributor (author) commented:
This new user should be unnecessary, and by using it, the installer (as an image consumer) must override the user when doing things like running render with a volume-mounted output directory (because the ingress-operator user doesn't have permission to write to the volume mount point).

Although I can work around it in the installer, if we have no reason to run as a new user, removing the useradd makes things more consistent with the other images used for the same purpose. (See the note after this file's diff.)

ENTRYPOINT ["/usr/bin/ingress-operator"]
LABEL io.openshift.release.operator true
LABEL io.openshift.release.operator="true"
LABEL io.k8s.display-name="OpenShift ingress-operator" \
io.k8s.description="This is a component of OpenShift Container Platform and manages the lifecycle of ingress controller components." \
maintainer="Dan Mace <dmace@redhat.com>"
2 changes: 0 additions & 2 deletions Dockerfile.rhel7
@@ -6,8 +6,6 @@ RUN make build
FROM registry.svc.ci.openshift.org/ocp/4.0:base
COPY --from=builder /ingress-operator/ingress-operator /usr/bin/
COPY manifests /manifests
RUN useradd ingress-operator
USER ingress-operator
ENTRYPOINT ["/usr/bin/ingress-operator"]
LABEL io.openshift.release.operator true
LABEL io.k8s.display-name="OpenShift ingress-operator" \
218 changes: 7 additions & 211 deletions cmd/ingress-operator/main.go
@@ -1,226 +1,22 @@
package main

import (
"context"
"fmt"
"os"

"github.com/ghodss/yaml"
"github.com/spf13/cobra"

"github.com/openshift/cluster-ingress-operator/pkg/dns"
awsdns "github.com/openshift/cluster-ingress-operator/pkg/dns/aws"
azuredns "github.com/openshift/cluster-ingress-operator/pkg/dns/azure"
gcpdns "github.com/openshift/cluster-ingress-operator/pkg/dns/gcp"
logf "github.com/openshift/cluster-ingress-operator/pkg/log"
"github.com/openshift/cluster-ingress-operator/pkg/manifests"
"github.com/openshift/cluster-ingress-operator/pkg/operator"
operatorclient "github.com/openshift/cluster-ingress-operator/pkg/operator/client"
operatorconfig "github.com/openshift/cluster-ingress-operator/pkg/operator/config"
statuscontroller "github.com/openshift/cluster-ingress-operator/pkg/operator/controller/status"

configv1 "github.com/openshift/api/config/v1"

corev1 "k8s.io/api/core/v1"

"k8s.io/apimachinery/pkg/types"

"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/client/config"
"sigs.k8s.io/controller-runtime/pkg/metrics"
"sigs.k8s.io/controller-runtime/pkg/runtime/signals"
)

const (
// cloudCredentialsSecretName is the name of the secret in the
// operator's namespace that will hold the credentials that the operator
// will use to authenticate with the cloud API.
cloudCredentialsSecretName = "cloud-credentials"
)

var log = logf.Logger.WithName("entrypoint")
var log = logf.Logger.WithName("main")

func main() {
metrics.DefaultBindAddress = ":60000"

// Get a kube client.
kubeConfig, err := config.GetConfig()
if err != nil {
log.Error(err, "failed to get kube config")
os.Exit(1)
}
kubeClient, err := operatorclient.NewClient(kubeConfig)
if err != nil {
log.Error(err, "failed to create kube client")
os.Exit(1)
}

// Collect operator configuration.
operatorNamespace := os.Getenv("WATCH_NAMESPACE")
if len(operatorNamespace) == 0 {
operatorNamespace = manifests.DefaultOperatorNamespace
}
log.Info("using operator namespace", "namespace", operatorNamespace)

ingressControllerImage := os.Getenv("IMAGE")
if len(ingressControllerImage) == 0 {
log.Error(fmt.Errorf("missing environment variable"), "'IMAGE' environment variable must be set")
os.Exit(1)
}
releaseVersion := os.Getenv("RELEASE_VERSION")
if len(releaseVersion) == 0 {
releaseVersion = statuscontroller.UnknownVersionValue
log.Info("RELEASE_VERSION environment variable missing", "release version", statuscontroller.UnknownVersionValue)
}

// Retrieve the cluster infrastructure config.
infraConfig := &configv1.Infrastructure{}
err = kubeClient.Get(context.TODO(), types.NamespacedName{Name: "cluster"}, infraConfig)
if err != nil {
log.Error(err, "failed to get infrastructure 'config'")
os.Exit(1)
}

dnsConfig := &configv1.DNS{}
err = kubeClient.Get(context.TODO(), types.NamespacedName{Name: "cluster"}, dnsConfig)
if err != nil {
log.Error(err, "failed to get dns 'cluster'")
os.Exit(1)
}
var rootCmd = &cobra.Command{Use: "ingress-operator"}
rootCmd.AddCommand(NewStartCommand())
rootCmd.AddCommand(NewRenderCommand())

platformStatus, err := getPlatformStatus(kubeClient, infraConfig)
if err != nil {
log.Error(err, "failed to get platform status")
if err := rootCmd.Execute(); err != nil {
log.Error(err, "error")
os.Exit(1)
}

operatorConfig := operatorconfig.Config{
OperatorReleaseVersion: releaseVersion,
Namespace: operatorNamespace,
IngressControllerImage: ingressControllerImage,
}

// Set up the DNS manager.
dnsProvider, err := createDNSProvider(kubeClient, operatorConfig, dnsConfig, platformStatus)
if err != nil {
log.Error(err, "failed to create DNS manager")
os.Exit(1)
}

// Set up and start the operator.
op, err := operator.New(operatorConfig, dnsProvider, kubeConfig)
if err != nil {
log.Error(err, "failed to create operator")
os.Exit(1)
}
if err := op.Start(signals.SetupSignalHandler()); err != nil {
log.Error(err, "failed to start operator")
os.Exit(1)
}
}

// createDNSProvider creates a DNS provider compatible with the given cluster
// configuration.
func createDNSProvider(cl client.Client, operatorConfig operatorconfig.Config, dnsConfig *configv1.DNS, platformStatus *configv1.PlatformStatus) (dns.Provider, error) {
var dnsProvider dns.Provider
userAgent := fmt.Sprintf("OpenShift/%s (ingress-operator)", operatorConfig.OperatorReleaseVersion)

switch platformStatus.Type {
case configv1.AWSPlatformType:
creds := &corev1.Secret{}
err := cl.Get(context.TODO(), types.NamespacedName{Namespace: operatorConfig.Namespace, Name: cloudCredentialsSecretName}, creds)
if err != nil {
return nil, fmt.Errorf("failed to get cloud credentials from secret %s/%s: %v", creds.Namespace, creds.Name, err)
}
provider, err := awsdns.NewProvider(awsdns.Config{
AccessID: string(creds.Data["aws_access_key_id"]),
AccessKey: string(creds.Data["aws_secret_access_key"]),
DNS: dnsConfig,
Region: platformStatus.AWS.Region,
}, operatorConfig.OperatorReleaseVersion)
if err != nil {
return nil, fmt.Errorf("failed to create AWS DNS manager: %v", err)
}
dnsProvider = provider
case configv1.AzurePlatformType:
creds := &corev1.Secret{}
err := cl.Get(context.TODO(), types.NamespacedName{Namespace: operatorConfig.Namespace, Name: cloudCredentialsSecretName}, creds)
if err != nil {
return nil, fmt.Errorf("failed to get cloud credentials from secret %s/%s: %v", creds.Namespace, creds.Name, err)
}
provider, err := azuredns.NewProvider(azuredns.Config{
Environment: "AzurePublicCloud",
ClientID: string(creds.Data["azure_client_id"]),
ClientSecret: string(creds.Data["azure_client_secret"]),
TenantID: string(creds.Data["azure_tenant_id"]),
SubscriptionID: string(creds.Data["azure_subscription_id"]),
DNS: dnsConfig,
}, operatorConfig.OperatorReleaseVersion)
if err != nil {
return nil, fmt.Errorf("failed to create Azure DNS manager: %v", err)
}
dnsProvider = provider
case configv1.GCPPlatformType:
creds := &corev1.Secret{}
err := cl.Get(context.TODO(), types.NamespacedName{Namespace: operatorConfig.Namespace, Name: cloudCredentialsSecretName}, creds)
if err != nil {
return nil, fmt.Errorf("failed to get cloud credentials from secret %s/%s: %v", creds.Namespace, creds.Name, err)
}
provider, err := gcpdns.New(gcpdns.Config{
Project: platformStatus.GCP.ProjectID,
CredentialsJSON: creds.Data["service_account.json"],
UserAgent: userAgent,
})
if err != nil {
return nil, fmt.Errorf("failed to create GCP DNS provider: %v", err)
}
dnsProvider = provider
default:
dnsProvider = &dns.FakeProvider{}
}
return dnsProvider, nil
}

// getPlatformStatus provides a backwards-compatible way to look up platform status. AWS is the
// special case. 4.1 clusters on AWS expose the region config only through install-config. New AWS clusters
// and all other 4.2+ platforms are configured via platform status.
func getPlatformStatus(client client.Client, infra *configv1.Infrastructure) (*configv1.PlatformStatus, error) {
if status := infra.Status.PlatformStatus; status != nil {
// Only AWS needs backwards compatibility with install-config
if status.Type != configv1.AWSPlatformType {
return status, nil
}

// Check whether the cluster config is already migrated
if status.AWS != nil && len(status.AWS.Region) > 0 {
return status, nil
}
}

// Otherwise build a platform status from the deprecated install-config
type installConfig struct {
Platform struct {
AWS struct {
Region string `json:"region"`
} `json:"aws"`
} `json:"platform"`
}
clusterConfigName := types.NamespacedName{Namespace: "kube-system", Name: "cluster-config-v1"}
clusterConfig := &corev1.ConfigMap{}
if err := client.Get(context.TODO(), clusterConfigName, clusterConfig); err != nil {
return nil, fmt.Errorf("failed to get configmap %s: %v", clusterConfigName, err)
}
data, ok := clusterConfig.Data["install-config"]
if !ok {
return nil, fmt.Errorf("missing install-config in configmap")
}
var ic installConfig
if err := yaml.Unmarshal([]byte(data), &ic); err != nil {
return nil, fmt.Errorf("invalid install-config: %v\njson:\n%s", err, data)
}
return &configv1.PlatformStatus{
Type: infra.Status.Platform,
AWS: &configv1.AWSPlatformStatus{
Region: ic.Platform.AWS.Region,
},
}, nil
}
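
The start subcommand's implementation (start.go) is not included in the excerpt above. As a minimal sketch, assuming the logic removed from main() simply moved into a cobra subcommand, NewStartCommand might look like the following; start() is a hypothetical helper standing in for the removed kube-client setup, environment-variable handling, DNS provider creation, and operator startup:

package main

import (
	"os"

	"github.com/spf13/cobra"
)

// NewStartCommand wraps operator startup in a cobra subcommand.
// Sketch only: start() is a hypothetical helper standing in for the
// former main() body shown as removed above.
func NewStartCommand() *cobra.Command {
	return &cobra.Command{
		Use:   "start",
		Short: "Start the ingress operator",
		Run: func(cmd *cobra.Command, args []string) {
			if err := start(); err != nil {
				log.Error(err, "failed to start operator")
				os.Exit(1)
			}
		},
	}
}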
59 changes: 59 additions & 0 deletions cmd/ingress-operator/render.go
@@ -0,0 +1,59 @@
package main

import (
"fmt"
"io/ioutil"
"os"
"path/filepath"

"github.com/spf13/cobra"

"github.com/openshift/cluster-ingress-operator/pkg/manifests"
)

func NewRenderCommand() *cobra.Command {
var options struct {
OutputDir string
Prefix string
}

var command = &cobra.Command{
Use: "render",
Short: "Render base manifests",
Long: `render emits the base manifest files necessary to support the creation of an ingresscontroller resource.`,
Run: func(cmd *cobra.Command, args []string) {
if err := render(options.OutputDir, options.Prefix); err != nil {
log.Error(err, "error rendering")
os.Exit(1)
}
},
}

command.Flags().StringVarP(&options.OutputDir, "output-dir", "o", "", "manifest output directory.")
command.Flags().StringVarP(&options.Prefix, "prefix", "p", "", "optional prefix for rendered filenames.")
if err := command.MarkFlagRequired("output-dir"); err != nil {
panic(err)
}

return command
}

func render(dir string, prefix string) error {
files := []string{
manifests.CustomResourceDefinitionManifest,
manifests.NamespaceManifest,
Contributor commented:
Could we also write out a default ingresscontroller resource so the user does not need to write that from scratch?

Contributor (author) replied:
Can we have a separate follow-up discussion about the default handling in that regard? It's not required for this part of the implementation and could have negative consequences to consider.

}

if err := os.MkdirAll(dir, 0750); err != nil {
return fmt.Errorf("failed to create output directory %q: %v", dir, err)
}

for _, file := range files {
outputFile := filepath.Join(dir, prefix+filepath.Base(file))
if err := ioutil.WriteFile(outputFile, manifests.MustAsset(file), 0640); err != nil {
return fmt.Errorf("failed to write %q: %v", outputFile, err)
}
fmt.Printf("wrote %s\n", outputFile)
}
return nil
}
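
As a usage example with hypothetical paths, ingress-operator render --output-dir /assets/manifests --prefix 00-ingress- creates /assets/manifests if necessary and writes each bundled manifest into it, prepending 00-ingress- to the manifest's base filename.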