diff --git a/hack/common b/hack/common index bedb779d7c..73202324fc 100644 --- a/hack/common +++ b/hack/common @@ -189,6 +189,48 @@ function push_image() { skopeo copy --dest-tls-verify=false docker-daemon:"$1" docker://"$2" } +# debug_print [max_count [interval]] +# Example usage in hack/testing/test-*.sh +# debug_print & +# TEST_NAMESPACE=${NAMESPACE} go test ./test/e2e/ \ +# -root=$(pwd) \ +# -kubeconfig=${KUBECONFIG} \ +# -globalMan ${global_manifest} \ +# -namespacedMan ${manifest} \ +# -v \ +# -parallel=1 \ +# -singleNamespace | tee -a $ARTIFACT_DIR/test.log +# +function debug_print() { + local filename=$(basename "$0") + local max=${1:-120} + local interval=${2:-10} + while [ $max -gt 0 ]; + do + clo=$( oc -n ${NAMESPACE} get pods -l name=cluster-logging-operator -o jsonpath='{.items[0].metadata.name}' ) + if [ -n "$clo" ]; then + date >> $ARTIFACT_DIR/$filename.clo.log || : + oc -n ${NAMESPACE} logs $clo >> $ARTIFACT_DIR/$filename.clo.log || : + echo "-------------------------------------------------" >> $ARTIFACT_DIR/$filename.clo.log || : + date >> $ARTIFACT_DIR/$filename.clo.images || : + oc -n ${NAMESPACE} exec $clo -- env | egrep _IMAGE >> $ARTIFACT_DIR/$filename.clo.images || : + echo "-------------------------------------------------" >> $ARTIFACT_DIR/$filename.clo.images || : + else + date >> $ARTIFACT_DIR/$filename.noclo.log || : + oc -n ${NAMESPACE} get deployments >> $ARTIFACT_DIR/$filename.noclo.log || : + echo "-------------------------------------------------" >> $ARTIFACT_DIR/$filename.noclo.log || : + fi + date >> $ARTIFACT_DIR/$filename.events.log || : + oc -n ${NAMESPACE} get events >> $ARTIFACT_DIR/$filename.events.log || : + echo "-------------------------------------------------" >> $ARTIFACT_DIR/$filename.events.log || : + date >> $ARTIFACT_DIR/$filename.all.log || : + oc -n ${NAMESPACE} get all >> $ARTIFACT_DIR/$filename.all.log || : + echo "-------------------------------------------------" >> $ARTIFACT_DIR/$filename.all.log || : + 
sleep $interval + max=$( expr $max - 1 ) || : + done +} + if [ $REMOTE_REGISTRY = false ] ; then : # skip else diff --git a/manifests/4.3/cluster-logging.v4.3.0.clusterserviceversion.yaml b/manifests/4.3/cluster-logging.v4.3.0.clusterserviceversion.yaml index 3fe98caef6..07aac7949c 100644 --- a/manifests/4.3/cluster-logging.v4.3.0.clusterserviceversion.yaml +++ b/manifests/4.3/cluster-logging.v4.3.0.clusterserviceversion.yaml @@ -25,7 +25,7 @@ metadata: "metadata": { "name": "instance", "namespace": "openshift-logging" - }, + }, "spec": { "managementState": "Managed", "logStore": { @@ -36,8 +36,8 @@ metadata: "storage": { "storageClassName": "gp2", "size": "200G" - } } + } }, "visualization": { "type": "kibana", diff --git a/pkg/constants/constants.go b/pkg/constants/constants.go new file mode 100644 index 0000000000..8b77f610e2 --- /dev/null +++ b/pkg/constants/constants.go @@ -0,0 +1,21 @@ +package constants + +const ( + SingletonName = "instance" + OpenshiftNS = "openshift-logging" + // global proxy / trusted ca bundle consts + ProxyName = "cluster" + TrustedCABundleKey = "ca-bundle.crt" + InjectTrustedCABundleLabel = "config.openshift.io/inject-trusted-cabundle" + TrustedCABundleMountFile = "tls-ca-bundle.pem" + TrustedCABundleMountDir = "/etc/pki/ca-trust/extracted/pem/" + TrustedCABundleHashName = "logging.openshift.io/hash" + FluentdTrustedCAName = "fluentd-trusted-ca-bundle" + KibanaTrustedCAName = "kibana-trusted-ca-bundle" + // internal elasticsearch FQDN to prevent to connect to the global proxy + ElasticsearchFQDN = "elasticsearch.openshift-logging.svc.cluster.local" + ElasticsearchPort = "9200" + LogStoreService = ElasticsearchFQDN + ":" + ElasticsearchPort +) + +var ReconcileForGlobalProxyList = []string{FluentdTrustedCAName, KibanaTrustedCAName} diff --git a/pkg/controller/add_controllers.go b/pkg/controller/add_controllers.go index 19f8282932..da94bd3e8e 100644 --- a/pkg/controller/add_controllers.go +++ b/pkg/controller/add_controllers.go @@ -4,9 
+4,10 @@ import ( "github.com/openshift/cluster-logging-operator/pkg/controller/clusterlogging" "github.com/openshift/cluster-logging-operator/pkg/controller/collector" "github.com/openshift/cluster-logging-operator/pkg/controller/forwarding" + "github.com/openshift/cluster-logging-operator/pkg/controller/proxyconfig" ) func init() { // AddToManagerFuncs is a list of functions to create controllers and add them to a manager. - AddToManagerFuncs = append(AddToManagerFuncs, clusterlogging.Add, forwarding.Add, collector.Add) + AddToManagerFuncs = append(AddToManagerFuncs, clusterlogging.Add, forwarding.Add, collector.Add, proxyconfig.Add) } diff --git a/pkg/controller/clusterlogging/clusterlogging_controller.go b/pkg/controller/clusterlogging/clusterlogging_controller.go index e621e2ab60..c150c26470 100644 --- a/pkg/controller/clusterlogging/clusterlogging_controller.go +++ b/pkg/controller/clusterlogging/clusterlogging_controller.go @@ -6,7 +6,9 @@ import ( loggingv1 "github.com/openshift/cluster-logging-operator/pkg/apis/logging/v1" logforwarding "github.com/openshift/cluster-logging-operator/pkg/apis/logging/v1alpha1" + "github.com/openshift/cluster-logging-operator/pkg/constants" "github.com/openshift/cluster-logging-operator/pkg/k8shandler" + "github.com/sirupsen/logrus" "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/runtime" @@ -23,9 +25,7 @@ import ( var log = logf.Log.WithName("controller_clusterlogging") const ( - singletonName = "instance" singletonMessage = "ClusterLogging is a singleton. Only an instance named 'instance' is allowed" - openshiftNS = "openshift-logging" ) // Add creates a new ClusterLogging Controller and adds it to the Manager. The Manager will set fields on the Controller @@ -76,6 +76,11 @@ var ( // The Controller will requeue the Request to be processed again if the returned error is non-nil or // Result.Requeue is true, otherwise upon completion it will remove the work from the queue. 
func (r *ReconcileClusterLogging) Reconcile(request reconcile.Request) (reconcile.Result, error) { + if request.Name != constants.SingletonName { + // TODO: update status + return reconcile.Result{}, nil + } + logrus.Debugf("Clusterlogging reconcile request.Name: '%s'", request.Name) // Fetch the ClusterLogging instance instance := &loggingv1.ClusterLogging{} err := r.client.Get(context.TODO(), request.NamespacedName, instance) @@ -90,18 +95,12 @@ func (r *ReconcileClusterLogging) Reconcile(request reconcile.Request) (reconcil return reconcile.Result{}, err } - if instance.Name != singletonName { - // TODO: update status - - return reconcile.Result{}, nil - } - if instance.Spec.ManagementState == loggingv1.ManagementStateUnmanaged { return reconcile.Result{}, nil } forwardinginstance := &logforwarding.LogForwarding{} - fiName := types.NamespacedName{Name: singletonName, Namespace: openshiftNS} + fiName := types.NamespacedName{Name: constants.SingletonName, Namespace: constants.OpenshiftNS} err = r.client.Get(context.TODO(), fiName, forwardinginstance) if err != nil && !errors.IsNotFound(err) { // Request object not found, could have been deleted after reconcile request. 
diff --git a/pkg/controller/collector/collector_controller.go b/pkg/controller/collector/collector_controller.go index fd79413981..b86edaccf7 100644 --- a/pkg/controller/collector/collector_controller.go +++ b/pkg/controller/collector/collector_controller.go @@ -6,6 +6,7 @@ import ( loggingv1 "github.com/openshift/cluster-logging-operator/pkg/apis/logging/v1" collector "github.com/openshift/cluster-logging-operator/pkg/apis/logging/v1alpha1" + "github.com/openshift/cluster-logging-operator/pkg/constants" "github.com/openshift/cluster-logging-operator/pkg/k8shandler" "github.com/openshift/cluster-logging-operator/pkg/logger" "github.com/openshift/cluster-logging-operator/pkg/utils" @@ -25,10 +26,7 @@ import ( var log = logf.Log.WithName("controller_collector") const ( - singletonName = "instance" - singletonMessage = "Collector is a singleton. Only an instance named 'instance' is allowed" - openshiftNS = "openshift-logging" - + singletonMessage = "Collector is a singleton. Only an instance named 'instance' is allowed" promtailAnnotation = "clusterlogging.openshift.io/promtaildevpreview" ) @@ -101,10 +99,10 @@ func (r *ReconcileCollector) Reconcile(request reconcile.Request) (reconcile.Res //check for instancename and then update status var reconcileErr error = nil - if instance.Name == singletonName && value == "enabled" { + if instance.Name == constants.SingletonName && value == "enabled" { clInstance := &loggingv1.ClusterLogging{} - clName := types.NamespacedName{Name: singletonName, Namespace: openshiftNS} + clName := types.NamespacedName{Name: constants.SingletonName, Namespace: constants.OpenshiftNS} err = r.client.Get(context.TODO(), clName, clInstance) if err != nil && !errors.IsNotFound(err) { // Request object not found, could have been deleted after reconcile request. 
diff --git a/pkg/controller/forwarding/forwarding_controller.go b/pkg/controller/forwarding/forwarding_controller.go index 60aeb10808..8150079fa0 100644 --- a/pkg/controller/forwarding/forwarding_controller.go +++ b/pkg/controller/forwarding/forwarding_controller.go @@ -6,6 +6,7 @@ import ( loggingv1 "github.com/openshift/cluster-logging-operator/pkg/apis/logging/v1" logforwarding "github.com/openshift/cluster-logging-operator/pkg/apis/logging/v1alpha1" + "github.com/openshift/cluster-logging-operator/pkg/constants" "github.com/openshift/cluster-logging-operator/pkg/k8shandler" "github.com/openshift/cluster-logging-operator/pkg/logger" @@ -24,9 +25,7 @@ import ( var log = logf.Log.WithName("controller_forwarding") const ( - singletonName = "instance" singletonMessage = "LogForwarding is a singleton. Only an instance named 'instance' is allowed" - openshiftNS = "openshift-logging" ) // Add creates a new Controller and adds it to the Manager. The Manager will set fields on the Controller @@ -97,14 +96,14 @@ func (r *ReconcileForwarding) Reconcile(request reconcile.Request) (reconcile.Re //check for instancename and then update status var reconcileErr error = nil - if instance.Name != singletonName { + if instance.Name != constants.SingletonName { instance.Status = logforwarding.NewForwardingStatus(logforwarding.LogForwardingStateRejected, logforwarding.LogForwardingReasonName, singletonMessage) } else { instance.Status = logforwarding.NewForwardingStatus(logforwarding.LogForwardingStateAccepted, logforwarding.LogForwardingReasonName, "") logger.Debug("logforwarding-controller fetching ClusterLogging instance...") clInstance := &loggingv1.ClusterLogging{} - clName := types.NamespacedName{Name: singletonName, Namespace: openshiftNS} + clName := types.NamespacedName{Name: constants.SingletonName, Namespace: constants.OpenshiftNS} err = r.client.Get(context.TODO(), clName, clInstance) if err != nil && !errors.IsNotFound(err) { // Request object not found, could have been 
deleted after reconcile request. diff --git a/pkg/controller/proxyconfig/proxyconfig_controller.go b/pkg/controller/proxyconfig/proxyconfig_controller.go new file mode 100644 index 0000000000..96023a4ee7 --- /dev/null +++ b/pkg/controller/proxyconfig/proxyconfig_controller.go @@ -0,0 +1,156 @@ +package proxyconfig + +import ( + "context" + "time" + + configv1 "github.com/openshift/api/config/v1" + loggingv1 "github.com/openshift/cluster-logging-operator/pkg/apis/logging/v1" + logforwarding "github.com/openshift/cluster-logging-operator/pkg/apis/logging/v1alpha1" + "github.com/openshift/cluster-logging-operator/pkg/constants" + "github.com/openshift/cluster-logging-operator/pkg/k8shandler" + "github.com/openshift/cluster-logging-operator/pkg/utils" + "github.com/sirupsen/logrus" + corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller" + "sigs.k8s.io/controller-runtime/pkg/event" + "sigs.k8s.io/controller-runtime/pkg/handler" + "sigs.k8s.io/controller-runtime/pkg/manager" + "sigs.k8s.io/controller-runtime/pkg/predicate" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + logf "sigs.k8s.io/controller-runtime/pkg/runtime/log" + "sigs.k8s.io/controller-runtime/pkg/source" +) + +var ( + log = logf.Log.WithName("controller_proxyconfig") + reconcilePeriod = 30 * time.Second + reconcileResult = reconcile.Result{RequeueAfter: reconcilePeriod} +) + +// Add creates a new ClusterLogging Controller and adds it to the Manager. The Manager will set fields on the Controller +// and Start it when the Manager is Started. 
+func Add(mgr manager.Manager) error { + return add(mgr, newReconciler(mgr)) +} + +// newReconciler returns a new reconcile.Reconciler +func newReconciler(mgr manager.Manager) reconcile.Reconciler { + if err := configv1.Install(mgr.GetScheme()); err != nil { + return &ReconcileProxyConfig{} + } + + return &ReconcileProxyConfig{client: mgr.GetClient(), scheme: mgr.GetScheme()} +} + +// add adds a new Controller to mgr with r as the reconcile.Reconciler +func add(mgr manager.Manager, r reconcile.Reconciler) error { + // Create a new controller + c, err := controller.New("proxyconfig-controller", mgr, controller.Options{Reconciler: r}) + if err != nil { + return err + } + + // Watch for changes to the additional trust bundle configmap in "openshift-logging". + pred := predicate.Funcs{ + UpdateFunc: func(e event.UpdateEvent) bool { return handleConfigMap(e.MetaNew) }, + DeleteFunc: func(e event.DeleteEvent) bool { return handleConfigMap(e.Meta) }, + CreateFunc: func(e event.CreateEvent) bool { return handleConfigMap(e.Meta) }, + GenericFunc: func(e event.GenericEvent) bool { return handleConfigMap(e.Meta) }, + } + if err = c.Watch(&source.Kind{Type: &corev1.ConfigMap{}}, &handler.EnqueueRequestForObject{}, pred); err != nil { + return err + } + + // Watch for changes to the proxy resource. + if err = c.Watch(&source.Kind{Type: &configv1.Proxy{}}, &handler.EnqueueRequestForObject{}); err != nil { + return err + } + + return nil +} + +var _ reconcile.Reconciler = &ReconcileProxyConfig{} + +// ReconcileProxyConfig reconciles a ClusterLogging object +type ReconcileProxyConfig struct { + // This client, initialized using mgr.Client() above, is a split client + // that reads objects from the cache and writes to the apiserver + client client.Client + scheme *runtime.Scheme +} + +// Reconcile reads that state of the cluster for a cluster-scoped named "cluster" as well as +// trusted CA bundle configmap objects for the collector and the visualization resources. 
+// When the user configured and/or system certs are updated, the change is propagated to the +// configmap objects and this reconciler triggers to restart those pods. +func (r *ReconcileProxyConfig) Reconcile(request reconcile.Request) (reconcile.Result, error) { + loggingNamespacedName := types.NamespacedName{Name: constants.SingletonName, Namespace: constants.OpenshiftNS} + proxyNamespacedName := types.NamespacedName{Name: constants.ProxyName} + var proxyConfig *configv1.Proxy = nil + var trustBundle *corev1.ConfigMap = nil + if request.NamespacedName == proxyNamespacedName { + proxyConfig = &configv1.Proxy{} + if err := r.client.Get(context.TODO(), request.NamespacedName, proxyConfig); err != nil { + if apierrors.IsNotFound(err) { + // Request object not found, could have been deleted after reconcile request. + // Return and don't requeue + return reconcile.Result{}, nil + } + // Error reading the object - just return without requeuing. + return reconcile.Result{}, err + } + } else if utils.ContainsString(constants.ReconcileForGlobalProxyList, request.Name) { + trustBundle = &corev1.ConfigMap{} + logrus.Debugf("Trust bundle configmap reconcile request.Namespace/request.Name: '%s/%s'", request.Namespace, request.Name) + if err := r.client.Get(context.TODO(), request.NamespacedName, trustBundle); err != nil { + if !apierrors.IsNotFound(err) { + // Error reading the object - just return without requeuing. + return reconcile.Result{}, err + } + } + } else { + return reconcile.Result{}, nil + } + + // Fetch the ClusterLogging instance + instance := &loggingv1.ClusterLogging{} + if err := r.client.Get(context.TODO(), loggingNamespacedName, instance); err != nil { + if apierrors.IsNotFound(err) { + // Request object not found, could have been deleted after reconcile request. + // Owned objects are automatically garbage collected. For additional cleanup logic use finalizers.
+ // Return and don't requeue + return reconcile.Result{}, nil + } + // Error reading the object - just return without requeuing. + return reconcile.Result{}, err + } + + if instance.Spec.ManagementState == loggingv1.ManagementStateUnmanaged { + return reconcile.Result{}, nil + } + + forwardinginstance := &logforwarding.LogForwarding{} + err := r.client.Get(context.TODO(), loggingNamespacedName, forwardinginstance) + if err != nil && !apierrors.IsNotFound(err) { + // Error reading the object - just return without requeuing. + return reconcile.Result{}, err + } + + if err := k8shandler.ReconcileForGlobalProxy(instance, forwardinginstance, proxyConfig, trustBundle, r.client); err != nil { + // Failed to reconcile - requeuing. + return reconcileResult, err + } + + return reconcile.Result{}, nil +} + +// handleConfigMap returns true if meta namespace is "openshift-logging". +func handleConfigMap(meta metav1.Object) bool { + return meta.GetNamespace() == constants.OpenshiftNS && utils.ContainsString(constants.ReconcileForGlobalProxyList, meta.GetName()) +} diff --git a/pkg/k8shandler/collection.go b/pkg/k8shandler/collection.go index 85aeec4946..470dbf7eaf 100644 --- a/pkg/k8shandler/collection.go +++ b/pkg/k8shandler/collection.go @@ -6,6 +6,7 @@ import ( "strings" "time" + "github.com/openshift/cluster-logging-operator/pkg/constants" "github.com/openshift/cluster-logging-operator/pkg/logger" "github.com/openshift/cluster-logging-operator/pkg/utils" "github.com/sirupsen/logrus" @@ -14,6 +15,7 @@ import ( "k8s.io/apimachinery/pkg/util/wait" "k8s.io/client-go/util/retry" + configv1 "github.com/openshift/api/config/v1" logging "github.com/openshift/cluster-logging-operator/pkg/apis/logging/v1" apps "k8s.io/api/apps/v1" core "k8s.io/api/core/v1" @@ -36,7 +38,7 @@ var ( var serviceAccountLogCollectorUID types.UID //CreateOrUpdateCollection component of the cluster -func (clusterRequest *ClusterLoggingRequest) CreateOrUpdateCollection() (err error) { +func (clusterRequest 
*ClusterLoggingRequest) CreateOrUpdateCollection(proxyConfig *configv1.Proxy, trustedCABundleCM *core.ConfigMap) (err error) { cluster := clusterRequest.cluster collectorConfig := "" collectorConfHash := "" @@ -79,7 +81,13 @@ func (clusterRequest *ClusterLoggingRequest) CreateOrUpdateCollection() (err err return } - if err = clusterRequest.createOrUpdateFluentdDaemonset(collectorConfHash); err != nil { + // Create or update cluster proxy trusted CA bundle. + err = clusterRequest.createOrUpdateTrustedCABundleConfigMap(constants.FluentdTrustedCAName) + if err != nil { + return + } + + if err = clusterRequest.createOrUpdateFluentdDaemonset(collectorConfHash, proxyConfig, trustedCABundleCM); err != nil { return } diff --git a/pkg/k8shandler/configmap.go b/pkg/k8shandler/configmap.go index a8303f92ff..594654481e 100644 --- a/pkg/k8shandler/configmap.go +++ b/pkg/k8shandler/configmap.go @@ -2,11 +2,12 @@ package k8shandler import ( "fmt" - - "k8s.io/apimachinery/pkg/api/errors" + "reflect" core "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/util/retry" ) //NewConfigMap stubs an instance of Configmap @@ -24,6 +25,35 @@ func NewConfigMap(configmapName string, namespace string, data map[string]string } } +func (clusterRequest *ClusterLoggingRequest) CreateOrUpdateConfigMap(configMap *core.ConfigMap) error { + err := clusterRequest.Create(configMap) + if err != nil { + if !errors.IsAlreadyExists(err) { + return fmt.Errorf("Failure constructing trusted CA bundle configmap: %v", err) + } + + current := &core.ConfigMap{} + + retryErr := retry.RetryOnConflict(retry.DefaultRetry, func() error { + if err = clusterRequest.Get(configMap.Name, current); err != nil { + if errors.IsNotFound(err) { + // the object doesn't exist -- it was likely culled + // recreate it on the next time through if necessary + return nil + } + return fmt.Errorf("Failed to get %v configmap for %q: %v", configMap.Name, 
clusterRequest.cluster.Name, err) + } + if reflect.DeepEqual(configMap.Data, current.Data) { + return nil + } + current.Data = configMap.Data + return clusterRequest.Update(current) + }) + return retryErr + } + return nil +} + //RemoveConfigMap with a given name and namespace func (clusterRequest *ClusterLoggingRequest) RemoveConfigMap(configmapName string) error { diff --git a/pkg/k8shandler/fluentd.go b/pkg/k8shandler/fluentd.go index 8f50e8997c..99eff142c2 100644 --- a/pkg/k8shandler/fluentd.go +++ b/pkg/k8shandler/fluentd.go @@ -1,7 +1,6 @@ package k8shandler import ( - "context" "fmt" "reflect" "time" @@ -9,13 +8,13 @@ import ( monitoringv1 "github.com/coreos/prometheus-operator/pkg/apis/monitoring/v1" logging "github.com/openshift/cluster-logging-operator/pkg/apis/logging/v1" logforward "github.com/openshift/cluster-logging-operator/pkg/apis/logging/v1alpha1" + "github.com/openshift/cluster-logging-operator/pkg/constants" "github.com/openshift/cluster-logging-operator/pkg/logger" "github.com/openshift/cluster-logging-operator/pkg/utils" "github.com/sirupsen/logrus" core "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/intstr" "k8s.io/client-go/util/retry" @@ -48,6 +47,10 @@ func (clusterRequest *ClusterLoggingRequest) removeFluentd() (err error) { return } + if err = clusterRequest.RemoveConfigMap(constants.FluentdTrustedCAName); err != nil { + return + } + if err = clusterRequest.RemoveDaemonset(fluentdName); err != nil { return } @@ -215,7 +218,7 @@ func (clusterRequest *ClusterLoggingRequest) createOrUpdateFluentdSecret() error return nil } -func newFluentdPodSpec(cluster *logging.ClusterLogging, elasticsearchAppName string, elasticsearchInfraName string, proxyConfig *configv1.Proxy, pipelineSpec logforward.ForwardingSpec) v1.PodSpec { +func newFluentdPodSpec(cluster *logging.ClusterLogging, elasticsearchAppName string, 
elasticsearchInfraName string, proxyConfig *configv1.Proxy, trustedCABundleCM *core.ConfigMap, pipelineSpec logforward.ForwardingSpec) v1.PodSpec { collectionSpec := logging.CollectionSpec{} if cluster.Spec.Collection != nil { collectionSpec = *cluster.Spec.Collection @@ -241,7 +244,7 @@ func newFluentdPodSpec(cluster *logging.ClusterLogging, elasticsearchAppName str } fluentdContainer.Env = []v1.EnvVar{ - {Name: "NODE_NAME", ValueFrom: &v1.EnvVarSource{FieldRef: &v1.ObjectFieldSelector{FieldPath: "spec.nodeName"}}}, + {Name: "NODE_NAME", ValueFrom: &v1.EnvVarSource{FieldRef: &v1.ObjectFieldSelector{APIVersion: "v1", FieldPath: "spec.nodeName"}}}, {Name: "MERGE_JSON_LOG", Value: "false"}, {Name: "PRESERVE_JSON_LOG", Value: "true"}, {Name: "K8S_HOST_URL", Value: "https://kubernetes.default.svc"}, @@ -252,18 +255,12 @@ func newFluentdPodSpec(cluster *logging.ClusterLogging, elasticsearchAppName str {Name: "FILE_BUFFER_LIMIT", Value: "256Mi"}, {Name: "FLUENTD_CPU_LIMIT", ValueFrom: &v1.EnvVarSource{ResourceFieldRef: &v1.ResourceFieldSelector{ContainerName: "fluentd", Resource: "limits.cpu"}}}, {Name: "FLUENTD_MEMORY_LIMIT", ValueFrom: &v1.EnvVarSource{ResourceFieldRef: &v1.ResourceFieldSelector{ContainerName: "fluentd", Resource: "limits.memory"}}}, - {Name: "NODE_IPV4", ValueFrom: &v1.EnvVarSource{FieldRef: &v1.ObjectFieldSelector{FieldPath: "status.hostIP"}}}, + {Name: "NODE_IPV4", ValueFrom: &v1.EnvVarSource{FieldRef: &v1.ObjectFieldSelector{APIVersion: "v1", FieldPath: "status.hostIP"}}}, {Name: "CDM_KEEP_EMPTY_FIELDS", Value: "message"}, // by default, keep empty messages } - if proxyConfig != nil { - proxyEnv := []v1.EnvVar{ - {Name: "HTTP_PROXY", Value: proxyConfig.Status.HTTPProxy}, - {Name: "HTTPS_PROXY", Value: proxyConfig.Status.HTTPSProxy}, - {Name: "NO_PROXY", Value: proxyConfig.Status.NoProxy}, - } - fluentdContainer.Env = append(fluentdContainer.Env, proxyEnv...) 
- } + proxyEnv := utils.SetProxyEnvVars(proxyConfig) + fluentdContainer.Env = append(fluentdContainer.Env, proxyEnv...) fluentdContainer.VolumeMounts = []v1.VolumeMount{ {Name: "runlogjournal", MountPath: "/run/log/journal"}, @@ -285,11 +282,16 @@ func newFluentdPodSpec(cluster *logging.ClusterLogging, elasticsearchAppName str } } - if proxyConfig != nil && len(proxyConfig.Spec.TrustedCA.Name) > 0 { - proxyCA := []v1.VolumeMount{ - {Name: "proxytrustedca", MountPath: "/etc/fluent/proxy"}, - } - fluentdContainer.VolumeMounts = append(fluentdContainer.VolumeMounts, proxyCA...) + addTrustedCAVolume := false + // If trusted CA bundle ConfigMap exists and its hash value is non-zero, mount the bundle. + if trustedCABundleCM != nil && hasTrustedCABundle(trustedCABundleCM) { + addTrustedCAVolume = true + fluentdContainer.VolumeMounts = append(fluentdContainer.VolumeMounts, + v1.VolumeMount{ + Name: constants.FluentdTrustedCAName, + ReadOnly: true, + MountPath: constants.TrustedCABundleMountDir, + }) } fluentdContainer.SecurityContext = &v1.SecurityContext{ @@ -337,11 +339,26 @@ func newFluentdPodSpec(cluster *logging.ClusterLogging, elasticsearchAppName str } } - if proxyConfig != nil && len(proxyConfig.Spec.TrustedCA.Name) > 0 { - proxyCAVolume := []v1.Volume{ - {Name: "proxytrustedca", VolumeSource: v1.VolumeSource{Secret: &v1.SecretVolumeSource{SecretName: proxyConfig.Spec.TrustedCA.Name}}}, - } - fluentdPodSpec.Volumes = append(fluentdPodSpec.Volumes, proxyCAVolume...) 
+ if addTrustedCAVolume { + optional := true + fluentdPodSpec.Volumes = append(fluentdPodSpec.Volumes, + v1.Volume{ + Name: constants.FluentdTrustedCAName, + VolumeSource: v1.VolumeSource{ + ConfigMap: &v1.ConfigMapVolumeSource{ + LocalObjectReference: v1.LocalObjectReference{ + Name: constants.FluentdTrustedCAName, + }, + Optional: &optional, + Items: []v1.KeyToPath{ + { + Key: constants.TrustedCABundleKey, + Path: constants.TrustedCABundleMountFile, + }, + }, + }, + }, + }) } fluentdPodSpec.PriorityClassName = clusterLoggingPriorityClassName @@ -351,18 +368,11 @@ func newFluentdPodSpec(cluster *logging.ClusterLogging, elasticsearchAppName str return fluentdPodSpec } -func (clusterRequest *ClusterLoggingRequest) createOrUpdateFluentdDaemonset(pipelineConfHash string) (err error) { +func (clusterRequest *ClusterLoggingRequest) createOrUpdateFluentdDaemonset(pipelineConfHash string, proxyConfig *configv1.Proxy, trustedCABundleCM *core.ConfigMap) (err error) { cluster := clusterRequest.cluster - proxy := &configv1.Proxy{} - if err = clusterRequest.client.Get(context.TODO(), types.NamespacedName{Name: "cluster"}, proxy); err != nil { - if !errors.IsNotFound(err) { - logrus.Debugf("fluentd: Failed to get proxy: %v\n", err) - } - } - - fluentdPodSpec := newFluentdPodSpec(cluster, "elasticsearch", "elasticsearch", proxy, clusterRequest.ForwardingSpec) + fluentdPodSpec := newFluentdPodSpec(cluster, "elasticsearch", "elasticsearch", proxyConfig, trustedCABundleCM, clusterRequest.ForwardingSpec) fluentdDaemonset := NewDaemonSet("fluentd", cluster.Namespace, "fluentd", "fluentd", fluentdPodSpec) fluentdDaemonset.Spec.Template.Spec.Containers[0].Env = updateEnvVar(v1.EnvVar{Name: "FLUENT_CONF_HASH", Value: pipelineConfHash}, fluentdDaemonset.Spec.Template.Spec.Containers[0].Env) @@ -383,7 +393,7 @@ func (clusterRequest *ClusterLoggingRequest) createOrUpdateFluentdDaemonset(pipe if clusterRequest.isManaged() { retryErr := retry.RetryOnConflict(retry.DefaultRetry, func() error 
{ - return clusterRequest.updateFluentdDaemonsetIfRequired(fluentdDaemonset) + return clusterRequest.updateFluentdDaemonsetIfRequired(fluentdDaemonset, trustedCABundleCM) }) if retryErr != nil { return retryErr @@ -393,7 +403,7 @@ func (clusterRequest *ClusterLoggingRequest) createOrUpdateFluentdDaemonset(pipe return nil } -func (clusterRequest *ClusterLoggingRequest) updateFluentdDaemonsetIfRequired(desired *apps.DaemonSet) (err error) { +func (clusterRequest *ClusterLoggingRequest) updateFluentdDaemonsetIfRequired(desired *apps.DaemonSet, trustedCABundleCM *core.ConfigMap) (err error) { logger.DebugObject("desired fluent update: %v", desired) current := &apps.DaemonSet{} @@ -409,6 +419,17 @@ func (clusterRequest *ClusterLoggingRequest) updateFluentdDaemonsetIfRequired(de flushBuffer := isBufferFlushRequired(current, desired) desired, different := isDaemonsetDifferent(current, desired) + // Check whether the trusted CA certs have been updated by comparing the hash values in the annotation. + newTrustedCAHashedValue := calcTrustedCAHashValue(trustedCABundleCM) + trustedCAHashedValue := current.Spec.Template.ObjectMeta.Annotations[constants.TrustedCABundleHashName] + if trustedCAHashedValue != newTrustedCAHashedValue { + different = true + if desired.Spec.Template.ObjectMeta.Annotations == nil { + desired.Spec.Template.ObjectMeta.Annotations = make(map[string]string) + } + desired.Spec.Template.ObjectMeta.Annotations[constants.TrustedCABundleHashName] = newTrustedCAHashedValue + } + if different { current.Spec = desired.Spec if flushBuffer { diff --git a/pkg/k8shandler/fluentd_test.go b/pkg/k8shandler/fluentd_test.go index 25c11c46e6..933fbba8c9 100644 --- a/pkg/k8shandler/fluentd_test.go +++ b/pkg/k8shandler/fluentd_test.go @@ -1,12 +1,14 @@ package k8shandler import ( + "fmt" "reflect" "testing" configv1 "github.com/openshift/api/config/v1" logging "github.com/openshift/cluster-logging-operator/pkg/apis/logging/v1" logforward
"github.com/openshift/cluster-logging-operator/pkg/apis/logging/v1alpha1" + "github.com/openshift/cluster-logging-operator/pkg/constants" "github.com/openshift/cluster-logging-operator/pkg/utils" v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" @@ -16,7 +18,7 @@ import ( func TestNewFluentdPodSpecWhenFieldsAreUndefined(t *testing.T) { cluster := &logging.ClusterLogging{} - podSpec := newFluentdPodSpec(cluster, "test-app-name", "test-infra-name", nil, logforward.ForwardingSpec{}) + podSpec := newFluentdPodSpec(cluster, "test-app-name", "test-infra-name", nil, nil, logforward.ForwardingSpec{}) if len(podSpec.Containers) != 1 { t.Error("Exp. there to be 1 fluentd container") @@ -52,7 +54,7 @@ func TestNewFluentdPodSpecWhenResourcesAreDefined(t *testing.T) { }, }, } - podSpec := newFluentdPodSpec(cluster, "test-app-name", "test-infra-name", nil, logforward.ForwardingSpec{}) + podSpec := newFluentdPodSpec(cluster, "test-app-name", "test-infra-name", nil, nil, logforward.ForwardingSpec{}) if len(podSpec.Containers) != 1 { t.Error("Exp. there to be 1 fluentd container") @@ -94,7 +96,7 @@ func TestFluentdPodSpecHasTaintTolerations(t *testing.T) { }, }, } - podSpec := newFluentdPodSpec(cluster, "test-app-name", "test-infra-name", nil, logforward.ForwardingSpec{}) + podSpec := newFluentdPodSpec(cluster, "test-app-name", "test-infra-name", nil, nil, logforward.ForwardingSpec{}) if !reflect.DeepEqual(podSpec.Tolerations, expectedTolerations) { t.Errorf("Exp. the tolerations to be %v but was %v", expectedTolerations, podSpec.Tolerations) @@ -117,7 +119,7 @@ func TestNewFluentdPodSpecWhenSelectorIsDefined(t *testing.T) { }, }, } - podSpec := newFluentdPodSpec(cluster, "test-app-name", "test-infra-name", nil, logforward.ForwardingSpec{}) + podSpec := newFluentdPodSpec(cluster, "test-app-name", "test-infra-name", nil, nil, logforward.ForwardingSpec{}) if !reflect.DeepEqual(podSpec.NodeSelector, expSelector) { t.Errorf("Exp. 
the nodeSelector to be %q but was %q", expSelector, podSpec.NodeSelector) @@ -149,7 +151,7 @@ func TestNewFluentdPodNoTolerations(t *testing.T) { }, } - podSpec := newFluentdPodSpec(cluster, "test-app-name", "test-infra-name", nil, logforward.ForwardingSpec{}) + podSpec := newFluentdPodSpec(cluster, "test-app-name", "test-infra-name", nil, nil, logforward.ForwardingSpec{}) tolerations := podSpec.Tolerations if !utils.AreTolerationsSame(tolerations, expTolerations) { @@ -194,7 +196,7 @@ func TestNewFluentdPodWithTolerations(t *testing.T) { }, } - podSpec := newFluentdPodSpec(cluster, "test-app-name", "test-infra-name", nil, logforward.ForwardingSpec{}) + podSpec := newFluentdPodSpec(cluster, "test-app-name", "test-infra-name", nil, nil, logforward.ForwardingSpec{}) tolerations := podSpec.Tolerations if !utils.AreTolerationsSame(tolerations, expTolerations) { @@ -207,7 +209,7 @@ func TestNewFluentdPodSpecWhenProxyConfigExists(t *testing.T) { cluster := &logging.ClusterLogging{} httpproxy := "http://proxy-user@test.example.com/3128/" noproxy := ".cluster.local,localhost" - trustedca := "user-ca-bundle" + caBundle := fmt.Sprint("-----BEGIN CERTIFICATE-----\n\n-----END CERTIFICATE-----\n") podSpec := newFluentdPodSpec(cluster, "test-app-name", "test-infra-name", &configv1.Proxy{ TypeMeta: metav1.TypeMeta{ @@ -218,7 +220,7 @@ func TestNewFluentdPodSpecWhenProxyConfigExists(t *testing.T) { HTTPProxy: httpproxy, HTTPSProxy: httpproxy, TrustedCA: configv1.ConfigMapNameReference{ - Name: trustedca, + Name: "user-ca-bundle", }, }, Status: configv1.ProxyStatus{ @@ -227,6 +229,15 @@ func TestNewFluentdPodSpecWhenProxyConfigExists(t *testing.T) { NoProxy: noproxy, }, }, + &v1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "openshift-logging", + Name: constants.FluentdTrustedCAName, + }, + Data: map[string]string{ + constants.TrustedCABundleKey: caBundle, + }, + }, logforward.ForwardingSpec{}, ) @@ -234,14 +245,14 @@ func TestNewFluentdPodSpecWhenProxyConfigExists(t 
*testing.T) { t.Error("Exp. there to be 1 fluentd container") } - checkProxyEnvVar(t, podSpec, "HTTP_PROXY", httpproxy) - checkProxyEnvVar(t, podSpec, "HTTPS_PROXY", httpproxy) - checkProxyEnvVar(t, podSpec, "NO_PROXY", noproxy) + checkFluentdProxyEnvVar(t, podSpec, "HTTP_PROXY", httpproxy) + checkFluentdProxyEnvVar(t, podSpec, "HTTPS_PROXY", httpproxy) + checkFluentdProxyEnvVar(t, podSpec, "NO_PROXY", noproxy) - checkProxyVolumesAndVolumeMounts(t, podSpec, trustedca) + checkFluentdProxyVolumesAndVolumeMounts(t, podSpec, constants.FluentdTrustedCAName) } -func checkProxyEnvVar(t *testing.T, podSpec v1.PodSpec, name string, value string) { +func checkFluentdProxyEnvVar(t *testing.T, podSpec v1.PodSpec, name string, value string) { env := podSpec.Containers[0].Env found := false for _, elem := range env { @@ -257,34 +268,32 @@ func checkProxyEnvVar(t *testing.T, podSpec v1.PodSpec, name string, value strin } } -func checkProxyVolumesAndVolumeMounts(t *testing.T, podSpec v1.PodSpec, trustedca string) { - name := "proxytrustedca" - volumes := podSpec.Volumes +func checkFluentdProxyVolumesAndVolumeMounts(t *testing.T, podSpec v1.PodSpec, trustedca string) { + volumemounts := podSpec.Containers[0].VolumeMounts found := false - for _, elem := range volumes { - if elem.Name == name { + for _, elem := range volumemounts { + if elem.Name == trustedca { found = true - if elem.VolumeSource.Secret.SecretName != trustedca { - t.Errorf("Volume %s: expected %s, actual %s", name, trustedca, elem.VolumeSource.Secret.SecretName) + if elem.MountPath != constants.TrustedCABundleMountDir { + t.Errorf("VolumeMounts %s: expected %s, actual %s", trustedca, constants.TrustedCABundleMountDir, elem.MountPath) } } } if !found { - t.Errorf("Volume %s not found", name) + t.Errorf("VolumeMounts %s not found", trustedca) } - volumemounts := podSpec.Containers[0].VolumeMounts - value := "/etc/fluent/proxy" + volumes := podSpec.Volumes found = false - for _, elem := range volumemounts { - if 
elem.Name == name { + for _, elem := range volumes { + if elem.Name == trustedca { found = true - if elem.MountPath != value { - t.Errorf("VolumeMounts %s: expected %s, actual %s", name, value, elem.MountPath) + if elem.VolumeSource.ConfigMap.LocalObjectReference.Name != trustedca { + t.Errorf("Volume %s: expected %s, actual %s", trustedca, trustedca, elem.VolumeSource.ConfigMap.LocalObjectReference.Name) + } } } if !found { - t.Errorf("VolumeMounts %s not found", name) + t.Errorf("Volume %s not found", trustedca) + } } diff --git a/pkg/k8shandler/forwarding.go b/pkg/k8shandler/forwarding.go index cb9d7e2172..52b5a8810a 100644 --- a/pkg/k8shandler/forwarding.go +++ b/pkg/k8shandler/forwarding.go @@ -8,6 +8,7 @@ import ( logging "github.com/openshift/cluster-logging-operator/pkg/apis/logging/v1" logforward "github.com/openshift/cluster-logging-operator/pkg/apis/logging/v1alpha1" + "github.com/openshift/cluster-logging-operator/pkg/constants" "github.com/openshift/cluster-logging-operator/pkg/generators/forwarding" "github.com/openshift/cluster-logging-operator/pkg/logger" "github.com/openshift/cluster-logging-operator/pkg/utils" @@ -16,7 +17,6 @@ import ( const ( internalOutputName = "clo-default-output-es" collectorSecretName = "fluentd" - logStoreService = "elasticsearch.openshift-logging.svc:9200" defaultAppPipelineName = "clo-default-app-pipeline" defaultInfraPipelineName = "clo-default-infra-pipeline" secureForwardConfHash = "8163d9a59a20ada8ab58c2535a3a4924" @@ -82,7 +82,7 @@ func (clusterRequest *ClusterLoggingRequest) normalizeLogForwarding(namespace st logforward.OutputSpec{ Name: internalOutputName, Type: logforward.OutputTypeElasticsearch, - Endpoint: logStoreService, + Endpoint: constants.LogStoreService, Secret: &logforward.OutputSecretSpec{ Name: collectorSecretName, }, diff --git a/pkg/k8shandler/reconciler.go b/pkg/k8shandler/reconciler.go index 0010bf4406..4a75ee2087 100644 --- a/pkg/k8shandler/reconciler.go +++ b/pkg/k8shandler/reconciler.go @@ -3,9 +3,11 @@ package
k8shandler import ( "fmt" + configv1 "github.com/openshift/api/config/v1" logging "github.com/openshift/cluster-logging-operator/pkg/apis/logging/v1" logforwarding "github.com/openshift/cluster-logging-operator/pkg/apis/logging/v1alpha1" "github.com/openshift/cluster-logging-operator/pkg/logger" + corev1 "k8s.io/api/core/v1" client "sigs.k8s.io/controller-runtime/pkg/client" ) @@ -31,7 +33,7 @@ func Reconcile(requestCluster *logging.ClusterLogging, forwarding *logforwarding } // Reconcile Visualization - if err = clusterLoggingRequest.CreateOrUpdateVisualization(); err != nil { + if err = clusterLoggingRequest.CreateOrUpdateVisualization(nil, nil); err != nil { return fmt.Errorf("Unable to create or update visualization for %q: %v", clusterLoggingRequest.cluster.Name, err) } @@ -41,7 +43,31 @@ func Reconcile(requestCluster *logging.ClusterLogging, forwarding *logforwarding } // Reconcile Collection - if err = clusterLoggingRequest.CreateOrUpdateCollection(); err != nil { + if err = clusterLoggingRequest.CreateOrUpdateCollection(nil, nil); err != nil { + return fmt.Errorf("Unable to create or update collection for %q: %v", clusterLoggingRequest.cluster.Name, err) + } + + return nil +} + +func ReconcileForGlobalProxy(requestCluster *logging.ClusterLogging, forwarding *logforwarding.LogForwarding, proxyConfig *configv1.Proxy, trustedCABundleCM *corev1.ConfigMap, requestClient client.Client) (err error) { + + clusterLoggingRequest := ClusterLoggingRequest{ + client: requestClient, + cluster: requestCluster, + ForwardingRequest: forwarding, + } + if forwarding != nil { + clusterLoggingRequest.ForwardingSpec = forwarding.Spec + } + + // Reconcile Visualization + if err = clusterLoggingRequest.CreateOrUpdateVisualization(proxyConfig, trustedCABundleCM); err != nil { + return fmt.Errorf("Unable to create or update visualization for %q: %v", clusterLoggingRequest.cluster.Name, err) + } + + // Reconcile Collection + if err = 
clusterLoggingRequest.CreateOrUpdateCollection(proxyConfig, trustedCABundleCM); err != nil { return fmt.Errorf("Unable to create or update collection for %q: %v", clusterLoggingRequest.cluster.Name, err) } diff --git a/pkg/k8shandler/trustedcabundle.go b/pkg/k8shandler/trustedcabundle.go new file mode 100644 index 0000000000..6349aa40eb --- /dev/null +++ b/pkg/k8shandler/trustedcabundle.go @@ -0,0 +1,54 @@ +package k8shandler + +import ( + "github.com/openshift/cluster-logging-operator/pkg/constants" + "github.com/openshift/cluster-logging-operator/pkg/utils" + "github.com/sirupsen/logrus" + core "k8s.io/api/core/v1" +) + +/* + * Create or update Trusted CA Bundle ConfigMap + * By setting label "config.openshift.io/inject-trusted-cabundle: true", the cert is automatically filled/updated. + */ +func (clusterRequest *ClusterLoggingRequest) createOrUpdateTrustedCABundleConfigMap(configMapName string) error { + logrus.Debug("createOrUpdateTrustedCABundleConfigMap...") + configMap := NewConfigMap( + configMapName, + clusterRequest.cluster.Namespace, + map[string]string{ + constants.TrustedCABundleKey: "", + }, + ) + configMap.ObjectMeta.Labels = make(map[string]string) + configMap.ObjectMeta.Labels[constants.InjectTrustedCABundleLabel] = "true" + + utils.AddOwnerRefToObject(configMap, utils.AsOwner(clusterRequest.cluster)) + + err := clusterRequest.CreateOrUpdateConfigMap(configMap) + return err +} + +func hasTrustedCABundle(configMap *core.ConfigMap) bool { + if configMap == nil { + return false + } + caBundle, ok := configMap.Data[constants.TrustedCABundleKey] + if ok && caBundle != "" { + return true + } else { + return false + } +} + +func calcTrustedCAHashValue(configMap *core.ConfigMap) string { + hashValue := "0" + if configMap == nil { + return hashValue + } + caBundle, ok := configMap.Data[constants.TrustedCABundleKey] + if ok && caBundle != "" { + hashValue = utils.CalculateMD5Hash(caBundle) + } + return hashValue +} diff --git 
a/pkg/k8shandler/visualization.go b/pkg/k8shandler/visualization.go index 51e784a469..6dc963d981 100644 --- a/pkg/k8shandler/visualization.go +++ b/pkg/k8shandler/visualization.go @@ -6,12 +6,14 @@ import ( "reflect" "strings" + "github.com/openshift/cluster-logging-operator/pkg/constants" "github.com/openshift/cluster-logging-operator/pkg/utils" "github.com/sirupsen/logrus" "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/util/intstr" "k8s.io/client-go/util/retry" + configv1 "github.com/openshift/api/config/v1" logging "github.com/openshift/cluster-logging-operator/pkg/apis/logging/v1" apps "k8s.io/api/apps/v1" v1 "k8s.io/api/core/v1" @@ -31,7 +33,7 @@ var ( ) // CreateOrUpdateVisualization reconciles visualization component for cluster logging -func (clusterRequest *ClusterLoggingRequest) CreateOrUpdateVisualization() (err error) { +func (clusterRequest *ClusterLoggingRequest) CreateOrUpdateVisualization(proxyConfig *configv1.Proxy, trustedCABundleCM *v1.ConfigMap) (err error) { if clusterRequest.cluster.Spec.Visualization == nil || clusterRequest.cluster.Spec.Visualization.Type == "" { clusterRequest.removeKibana() return nil @@ -66,7 +68,13 @@ func (clusterRequest *ClusterLoggingRequest) CreateOrUpdateVisualization() (err return } - if err = clusterRequest.createOrUpdateKibanaDeployment(); err != nil { + // Create cluster proxy trusted CA bundle. 
+ err = clusterRequest.createOrUpdateTrustedCABundleConfigMap(constants.KibanaTrustedCAName) + if err != nil { + return + } + + if err = clusterRequest.createOrUpdateKibanaDeployment(proxyConfig, trustedCABundleCM); err != nil { return } @@ -129,6 +137,10 @@ func (clusterRequest *ClusterLoggingRequest) removeKibana() (err error) { return } + if err = clusterRequest.RemoveConfigMap(constants.KibanaTrustedCAName); err != nil { + return + } + if err = clusterRequest.RemoveService(name); err != nil { return } @@ -146,9 +158,9 @@ return nil } -func (clusterRequest *ClusterLoggingRequest) createOrUpdateKibanaDeployment() (err error) { +func (clusterRequest *ClusterLoggingRequest) createOrUpdateKibanaDeployment(proxyConfig *configv1.Proxy, trustedCABundleCM *v1.ConfigMap) (err error) { - kibanaPodSpec := newKibanaPodSpec(clusterRequest.cluster, "kibana", "elasticsearch") + kibanaPodSpec := newKibanaPodSpec(clusterRequest.cluster, "kibana", constants.ElasticsearchFQDN, proxyConfig, trustedCABundleCM) kibanaDeployment := NewDeployment( "kibana", clusterRequest.cluster.Namespace, @@ -178,14 +190,102 @@ func (clusterRequest *ClusterLoggingRequest) createOrUpdateKibanaDeployment() (e } return fmt.Errorf("Failed to get Kibana deployment: %v", err) } - current.Spec = kibanaDeployment.Spec - return clusterRequest.Update(current) + + current, different := isDeploymentDifferent(current, kibanaDeployment) + + // Check trustedCA certs have been updated or not by comparing the hash values in annotation. 
+ newTrustedCAHashedValue := calcTrustedCAHashValue(trustedCABundleCM) + trustedCAHashedValue, _ := current.Spec.Template.ObjectMeta.Annotations[constants.TrustedCABundleHashName] + if trustedCAHashedValue != newTrustedCAHashedValue { + different = true + if kibanaDeployment.Spec.Template.ObjectMeta.Annotations == nil { + kibanaDeployment.Spec.Template.ObjectMeta.Annotations = make(map[string]string) + } + kibanaDeployment.Spec.Template.ObjectMeta.Annotations[constants.TrustedCABundleHashName] = newTrustedCAHashedValue + } + + if different { + current.Spec = kibanaDeployment.Spec + return clusterRequest.Update(current) + } + return nil }) } return nil } +func isDeploymentDifferent(current *apps.Deployment, desired *apps.Deployment) (*apps.Deployment, bool) { + + different := false + + // is this needed? + if !utils.AreMapsSame(current.Spec.Template.Spec.NodeSelector, desired.Spec.Template.Spec.NodeSelector) { + logrus.Debugf("Visualization nodeSelector change found, updating '%s'", current.Name) + current.Spec.Template.Spec.NodeSelector = desired.Spec.Template.Spec.NodeSelector + different = true + } + + // is this needed? 
+ if !utils.AreTolerationsSame(current.Spec.Template.Spec.Tolerations, desired.Spec.Template.Spec.Tolerations) { + logrus.Debugf("Visualization tolerations change found, updating '%s'", current.Name) + current.Spec.Template.Spec.Tolerations = desired.Spec.Template.Spec.Tolerations + different = true + } + + if isDeploymentImageDifference(current, desired) { + logrus.Debugf("Visualization image change found, updating %q", current.Name) + current = updateCurrentDeploymentImages(current, desired) + different = true + } + + if utils.AreResourcesDifferent(current, desired) { + logrus.Debugf("Visualization resource(s) change found, updating %q", current.Name) + different = true + } + + if !utils.EnvValueEqual(current.Spec.Template.Spec.Containers[0].Env, desired.Spec.Template.Spec.Containers[0].Env) { + current.Spec.Template.Spec.Containers[0].Env = desired.Spec.Template.Spec.Containers[0].Env + different = true + } + + return current, different +} + +func isDeploymentImageDifference(current *apps.Deployment, desired *apps.Deployment) bool { + + for _, curr := range current.Spec.Template.Spec.Containers { + for _, des := range desired.Spec.Template.Spec.Containers { + // Only compare the images of containers with the same name + if curr.Name == des.Name { + if curr.Image != des.Image { + return true + } + } + } + } + + return false +} + +func updateCurrentDeploymentImages(current *apps.Deployment, desired *apps.Deployment) *apps.Deployment { + + containers := current.Spec.Template.Spec.Containers + + for index, curr := range current.Spec.Template.Spec.Containers { + for _, des := range desired.Spec.Template.Spec.Containers { + // Only compare the images of containers with the same name + if curr.Name == des.Name { + if curr.Image != des.Image { + containers[index].Image = des.Image + } + } + } + } + + return current +} + func (clusterRequest *ClusterLoggingRequest) createOrUpdateKibanaRoute() error { cluster := clusterRequest.cluster @@ -356,7 +456,7 @@ func 
(clusterRequest *ClusterLoggingRequest) createOrUpdateKibanaSecret() error return nil } -func newKibanaPodSpec(cluster *logging.ClusterLogging, kibanaName string, elasticsearchName string) v1.PodSpec { +func newKibanaPodSpec(cluster *logging.ClusterLogging, kibanaName string, elasticsearchName string, proxyConfig *configv1.Proxy, trustedCABundleCM *v1.ConfigMap) v1.PodSpec { visSpec := logging.VisualizationSpec{} if cluster.Spec.Visualization != nil { visSpec = *cluster.Spec.Visualization @@ -454,6 +554,9 @@ func newKibanaPodSpec(cluster *logging.ClusterLogging, kibanaName string, elasti }, } + proxyEnv := utils.SetProxyEnvVars(proxyConfig) + kibanaProxyContainer.Env = append(kibanaProxyContainer.Env, proxyEnv...) + kibanaProxyContainer.Ports = []v1.ContainerPort{ {Name: "oaproxy", ContainerPort: 3000}, } @@ -462,6 +565,18 @@ func newKibanaPodSpec(cluster *logging.ClusterLogging, kibanaName string, elasti {Name: "kibana-proxy", ReadOnly: true, MountPath: "/secret"}, } + addTrustedCAVolume := false + // If trusted CA bundle ConfigMap exists and its hash value is non-zero, mount the bundle. 
+ if trustedCABundleCM != nil && hasTrustedCABundle(trustedCABundleCM) { + addTrustedCAVolume = true + kibanaProxyContainer.VolumeMounts = append(kibanaProxyContainer.VolumeMounts, + v1.VolumeMount{ + Name: constants.KibanaTrustedCAName, + ReadOnly: true, + MountPath: constants.TrustedCABundleMountDir, + }) + } + kibanaPodSpec := NewPodSpec( "kibana", []v1.Container{kibanaContainer, kibanaProxyContainer}, @@ -483,6 +598,28 @@ func newKibanaPodSpec(cluster *logging.ClusterLogging, kibanaName string, elasti visSpec.Tolerations, ) + if addTrustedCAVolume { + optional := true + kibanaPodSpec.Volumes = append(kibanaPodSpec.Volumes, + v1.Volume{ + Name: constants.KibanaTrustedCAName, + VolumeSource: v1.VolumeSource{ + ConfigMap: &v1.ConfigMapVolumeSource{ + LocalObjectReference: v1.LocalObjectReference{ + Name: constants.KibanaTrustedCAName, + }, + Optional: &optional, + Items: []v1.KeyToPath{ + { + Key: constants.TrustedCABundleKey, + Path: constants.TrustedCABundleMountFile, + }, + }, + }, + }, + }) + } + kibanaPodSpec.Affinity = &v1.Affinity{ PodAntiAffinity: &v1.PodAntiAffinity{ PreferredDuringSchedulingIgnoredDuringExecution: []v1.WeightedPodAffinityTerm{ diff --git a/pkg/k8shandler/visualization_test.go b/pkg/k8shandler/visualization_test.go index 332daa139b..468b8dfd3f 100644 --- a/pkg/k8shandler/visualization_test.go +++ b/pkg/k8shandler/visualization_test.go @@ -1,20 +1,24 @@ package k8shandler import ( + "fmt" "reflect" "strings" "testing" + configv1 "github.com/openshift/api/config/v1" logging "github.com/openshift/cluster-logging-operator/pkg/apis/logging/v1" + "github.com/openshift/cluster-logging-operator/pkg/constants" "github.com/openshift/cluster-logging-operator/pkg/utils" v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) func TestNewKibanaPodSpecSetsProxyToUseServiceAccountAsOAuthClient(t *testing.T) { clusterlogging := &logging.ClusterLogging{} - spec := newKibanaPodSpec(clusterlogging, 
"kibana", "elasticsearch") + spec := newKibanaPodSpec(clusterlogging, "kibana", "elasticsearch", nil, nil) for _, arg := range spec.Containers[1].Args { keyValue := strings.Split(arg, "=") if len(keyValue) >= 2 && keyValue[0] == "-client-id" { @@ -33,7 +37,7 @@ func TestNewKibanaPodSpecSetsProxyToUseServiceAccountAsOAuthClient(t *testing.T) func TestNewKibanaPodSpecWhenFieldsAreUndefined(t *testing.T) { cluster := &logging.ClusterLogging{} - podSpec := newKibanaPodSpec(cluster, "test-app-name", "elasticsearch") + podSpec := newKibanaPodSpec(cluster, "test-app-name", "elasticsearch", nil, nil) if len(podSpec.Containers) != 2 { t.Error("Exp. there to be 2 container") @@ -81,7 +85,7 @@ func TestNewKibanaPodSpecWhenResourcesAreDefined(t *testing.T) { }, }, } - podSpec := newKibanaPodSpec(cluster, "test-app-name", "elasticsearch") + podSpec := newKibanaPodSpec(cluster, "test-app-name", "elasticsearch", nil, nil) limitMemory := resource.MustParse("100Gi") requestMemory := resource.MustParse("120Gi") @@ -134,7 +138,7 @@ func TestNewKibanaPodSpecWhenNodeSelectorIsDefined(t *testing.T) { }, }, } - podSpec := newKibanaPodSpec(cluster, "test-app-name", "elasticsearch") + podSpec := newKibanaPodSpec(cluster, "test-app-name", "elasticsearch", nil, nil) //check kibana if !reflect.DeepEqual(podSpec.NodeSelector, expSelector) { @@ -155,7 +159,7 @@ func TestNewKibanaPodNoTolerations(t *testing.T) { }, } - podSpec := newKibanaPodSpec(cluster, "test-app-name", "test-infra-name") + podSpec := newKibanaPodSpec(cluster, "test-app-name", "test-infra-name", nil, nil) tolerations := podSpec.Tolerations if !utils.AreTolerationsSame(tolerations, expTolerations) { @@ -184,10 +188,103 @@ func TestNewKibanaPodWithTolerations(t *testing.T) { }, } - podSpec := newKibanaPodSpec(cluster, "test-app-name", "test-infra-name") + podSpec := newKibanaPodSpec(cluster, "test-app-name", "test-infra-name", nil, nil) tolerations := podSpec.Tolerations if !utils.AreTolerationsSame(tolerations, expTolerations) 
{ t.Errorf("Exp. the tolerations to be %v but was %v", expTolerations, tolerations) } } + +func TestNewKibanaPodSpecWhenProxyConfigExists(t *testing.T) { + + cluster := &logging.ClusterLogging{} + httpproxy := "http://proxy-user@test.example.com/3128/" + noproxy := ".cluster.local,localhost" + caBundle := fmt.Sprint("-----BEGIN CERTIFICATE-----\n\n-----END CERTIFICATE-----\n") + podSpec := newKibanaPodSpec(cluster, "test-app-name", "test-infra-name", + &configv1.Proxy{ + TypeMeta: metav1.TypeMeta{ + Kind: "Proxy", + APIVersion: "config.openshift.io/v1", + }, + Spec: configv1.ProxySpec{ + HTTPProxy: httpproxy, + HTTPSProxy: httpproxy, + TrustedCA: configv1.ConfigMapNameReference{ + Name: "user-ca-bundle", + }, + }, + Status: configv1.ProxyStatus{ + HTTPProxy: httpproxy, + HTTPSProxy: httpproxy, + NoProxy: noproxy, + }, + }, + &v1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "openshift-logging", + Name: constants.KibanaTrustedCAName, + }, + Data: map[string]string{ + constants.TrustedCABundleKey: caBundle, + }, + }, + ) + + if len(podSpec.Containers) != 2 { + t.Error("Exp. 
there to be 2 kibana container") + } + + checkKibanaProxyEnvVar(t, podSpec, "HTTP_PROXY", httpproxy) + checkKibanaProxyEnvVar(t, podSpec, "HTTPS_PROXY", httpproxy) + checkKibanaProxyEnvVar(t, podSpec, "NO_PROXY", noproxy) + + checkKibanaProxyVolumesAndVolumeMounts(t, podSpec, constants.KibanaTrustedCAName) +} + +func checkKibanaProxyEnvVar(t *testing.T, podSpec v1.PodSpec, name string, value string) { + env := podSpec.Containers[1].Env + found := false + for _, elem := range env { + if elem.Name == name { + found = true + if elem.Value != value { + t.Errorf("EnvVar %s: expected %s, actual %s", name, value, elem.Value) + } + } + } + if !found { + t.Errorf("EnvVar %s not found", name) + } +} + +func checkKibanaProxyVolumesAndVolumeMounts(t *testing.T, podSpec v1.PodSpec, trustedca string) { + volumemounts := podSpec.Containers[1].VolumeMounts + found := false + for _, elem := range volumemounts { + if elem.Name == trustedca { + found = true + if elem.MountPath != constants.TrustedCABundleMountDir { + t.Errorf("VolumeMounts %s: expected %s, actual %s", trustedca, constants.TrustedCABundleMountDir, elem.MountPath) + } + } + } + if !found { + t.Errorf("VolumeMounts %s not found", trustedca) + } + + volumes := podSpec.Volumes + found = false + for _, elem := range volumes { + if elem.Name == trustedca { + found = true + if elem.VolumeSource.ConfigMap.LocalObjectReference.Name != trustedca { + t.Errorf("Volume %s: expected %s, actual %s", trustedca, trustedca, elem.VolumeSource.ConfigMap.LocalObjectReference.Name) + } + } + } + if !found { + t.Errorf("Volume %s not found", trustedca) + } +} diff --git a/pkg/utils/utils.go b/pkg/utils/utils.go index e5bdd0f331..2e18c5cb26 100644 --- a/pkg/utils/utils.go +++ b/pkg/utils/utils.go @@ -14,6 +14,7 @@ import ( "github.com/sirupsen/logrus" + configv1 "github.com/openshift/api/config/v1" logging "github.com/openshift/cluster-logging-operator/pkg/apis/logging/v1" v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -313,17
+314,73 @@ func EnvVarSourceEqual(esource1, esource2 v1.EnvVarSource) bool { (esource1.SecretKeyRef == nil && esource2.SecretKeyRef != nil) { return false } + var rval bool if esource1.FieldRef != nil { - return reflect.DeepEqual(*esource1.FieldRef, *esource2.FieldRef) + if rval = reflect.DeepEqual(*esource1.FieldRef, *esource2.FieldRef); !rval { + return rval + } } if esource1.ResourceFieldRef != nil { - return reflect.DeepEqual(*esource1.ResourceFieldRef, *esource2.ResourceFieldRef) + if rval = EnvVarResourceFieldSelectorEqual(*esource1.ResourceFieldRef, *esource2.ResourceFieldRef); !rval { + return rval + } } if esource1.ConfigMapKeyRef != nil { - return reflect.DeepEqual(*esource1.ConfigMapKeyRef, *esource2.ConfigMapKeyRef) + if rval = reflect.DeepEqual(*esource1.ConfigMapKeyRef, *esource2.ConfigMapKeyRef); !rval { + return rval + } } if esource1.SecretKeyRef != nil { - return reflect.DeepEqual(*esource1.SecretKeyRef, *esource2.SecretKeyRef) + if rval = reflect.DeepEqual(*esource1.SecretKeyRef, *esource2.SecretKeyRef); !rval { + return rval + } } return true } + +func EnvVarResourceFieldSelectorEqual(resource1, resource2 v1.ResourceFieldSelector) bool { + if (resource1.ContainerName == resource2.ContainerName) && + (resource1.Resource == resource2.Resource) && + (resource1.Divisor.Cmp(resource2.Divisor) == 0) { + return true + } + return false +} + +func SetProxyEnvVars(proxyConfig *configv1.Proxy) []v1.EnvVar { + envVars := []v1.EnvVar{} + if proxyConfig == nil { + return envVars + } + if len(proxyConfig.Status.HTTPSProxy) != 0 { + envVars = append(envVars, v1.EnvVar{ + Name: "HTTPS_PROXY", + Value: proxyConfig.Status.HTTPSProxy, + }, + v1.EnvVar{ + Name: "https_proxy", + Value: proxyConfig.Status.HTTPSProxy, + }) + } + if len(proxyConfig.Status.HTTPProxy) != 0 { + envVars = append(envVars, v1.EnvVar{ + Name: "HTTP_PROXY", + Value: proxyConfig.Status.HTTPProxy, + }, + v1.EnvVar{ + Name: "http_proxy", + Value: proxyConfig.Status.HTTPProxy, + }) + } + if 
len(proxyConfig.Status.NoProxy) != 0 { + envVars = append(envVars, v1.EnvVar{ + Name: "NO_PROXY", + Value: proxyConfig.Status.NoProxy, + }, + v1.EnvVar{ + Name: "no_proxy", + Value: proxyConfig.Status.NoProxy, + }) + } + return envVars +} diff --git a/test/helpers/framework.go b/test/helpers/framework.go index d78413cd32..7c708b756b 100644 --- a/test/helpers/framework.go +++ b/test/helpers/framework.go @@ -22,9 +22,9 @@ import ( cl "github.com/openshift/cluster-logging-operator/pkg/apis/logging/v1" logforwarding "github.com/openshift/cluster-logging-operator/pkg/apis/logging/v1alpha1" k8shandler "github.com/openshift/cluster-logging-operator/pkg/k8shandler" + "github.com/openshift/cluster-logging-operator/pkg/logger" "github.com/openshift/cluster-logging-operator/pkg/utils" e2eutil "github.com/openshift/cluster-logging-operator/test/e2e" - "github.com/openshift/cluster-logging-operator/pkg/logger" ) const (