From dbf962889a792471f6947501626428f8a36acfb3 Mon Sep 17 00:00:00 2001
From: Pavol Loffay
Date: Wed, 17 Apr 2024 09:23:16 +0200
Subject: [PATCH] Remove v1alpha1 webhooks (#2861)
* Remove v1alpha1 webhooks
Signed-off-by: Pavol Loffay
* Fix
Signed-off-by: Pavol Loffay
* Fix
Signed-off-by: Pavol Loffay
---------
Signed-off-by: Pavol Loffay
---
.chloggen/v1beta1-remove-webhook.yaml | 16 +
apis/v1alpha1/collector_webhook.go | 462 -------
apis/v1alpha1/collector_webhook_test.go | 1137 -----------------
apis/v1alpha1/opampbridge_webhook_test.go | 3 +
apis/v1beta1/collector_webhook.go | 362 +++++-
apis/v1beta1/collector_webhook_test.go | 1121 +++++++++++++++-
apis/v1beta1/config.go | 23 +
apis/v1beta1/config_test.go | 68 +
...emetry-operator.clusterserviceversion.yaml | 59 -
config/webhook/manifests.yaml | 59 -
controllers/suite_test.go | 4 -
.../collector/adapters/config_to_ports.go | 37 +-
.../adapters/config_to_ports_test.go | 75 +-
internal/manifests/collector/container.go | 2 +-
internal/manifests/collector/ingress.go | 17 +-
internal/manifests/collector/service.go | 15 +-
internal/manifests/collector/service_test.go | 34 +-
.../podmutation/webhookhandler_suite_test.go | 4 -
main.go | 4 -
pkg/collector/upgrade/suite_test.go | 4 -
20 files changed, 1618 insertions(+), 1888 deletions(-)
create mode 100755 .chloggen/v1beta1-remove-webhook.yaml
delete mode 100644 apis/v1alpha1/collector_webhook.go
delete mode 100644 apis/v1alpha1/collector_webhook_test.go
diff --git a/.chloggen/v1beta1-remove-webhook.yaml b/.chloggen/v1beta1-remove-webhook.yaml
new file mode 100755
index 0000000000..6cb185f239
--- /dev/null
+++ b/.chloggen/v1beta1-remove-webhook.yaml
@@ -0,0 +1,16 @@
+# One of 'breaking', 'deprecation', 'new_component', 'enhancement', 'bug_fix'
+change_type: enhancement
+
+# The name of the component, or a single word describing the area of concern (e.g. collector, target allocator, auto-instrumentation, opamp, github action)
+component: collector
+
+# A brief description of the change. Surround your text with quotes ("") if it needs to start with a backtick (`).
+note: Remove collector v1alpha1 defaulting and validating webhooks.
+
+# One or more tracking issues related to the change
+issues: [2736]
+
+# (Optional) One or more lines of additional information to render under the primary note.
+# These lines will be padded with 2 spaces and then inserted directly into the document.
+# Use pipe (|) for multiline entries.
+subtext: The functionality was moved to the collector v1beta1 webhooks.
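
For reference, the "moved to the collector v1beta1 webhooks" subtext corresponds to the v1beta1 mutating-webhook registration this patch extends; the marker (quoted verbatim from the v1beta1 hunk below) is:

// +kubebuilder:webhook:path=/mutate-opentelemetry-io-v1beta1-opentelemetrycollector,mutating=true,failurePolicy=fail,groups=opentelemetry.io,resources=opentelemetrycollectors,verbs=create;update,versions=v1beta1,name=mopentelemetrycollectorbeta.kb.io,sideEffects=none,admissionReviewVersions=v1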
diff --git a/apis/v1alpha1/collector_webhook.go b/apis/v1alpha1/collector_webhook.go
deleted file mode 100644
index 1c451ac52e..0000000000
--- a/apis/v1alpha1/collector_webhook.go
+++ /dev/null
@@ -1,462 +0,0 @@
-// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package v1alpha1
-
-import (
- "context"
- "fmt"
- "strings"
-
- "github.com/go-logr/logr"
- v1 "k8s.io/api/authorization/v1"
- autoscalingv2 "k8s.io/api/autoscaling/v2"
- rbacv1 "k8s.io/api/rbac/v1"
- "k8s.io/apimachinery/pkg/runtime"
- "k8s.io/apimachinery/pkg/util/intstr"
- "k8s.io/apimachinery/pkg/util/validation"
- ctrl "sigs.k8s.io/controller-runtime"
- "sigs.k8s.io/controller-runtime/pkg/webhook/admission"
-
- "github.com/open-telemetry/opentelemetry-operator/internal/config"
- ta "github.com/open-telemetry/opentelemetry-operator/internal/manifests/targetallocator/adapters"
- "github.com/open-telemetry/opentelemetry-operator/internal/rbac"
-)
-
-var (
- _ admission.CustomValidator = &CollectorWebhook{}
- _ admission.CustomDefaulter = &CollectorWebhook{}
-
- // targetAllocatorCRPolicyRules are the policy rules required for the CR functionality.
- targetAllocatorCRPolicyRules = []*rbacv1.PolicyRule{
- {
- APIGroups: []string{"monitoring.coreos.com"},
- Resources: []string{"servicemonitors", "podmonitors"},
- Verbs: []string{"*"},
- }, {
- APIGroups: []string{""},
- Resources: []string{"nodes", "nodes/metrics", "services", "endpoints", "pods", "namespaces"},
- Verbs: []string{"get", "list", "watch"},
- }, {
- APIGroups: []string{""},
- Resources: []string{"configmaps"},
- Verbs: []string{"get"},
- }, {
- APIGroups: []string{"discovery.k8s.io"},
- Resources: []string{"endpointslices"},
- Verbs: []string{"get", "list", "watch"},
- }, {
- APIGroups: []string{"networking.k8s.io"},
- Resources: []string{"ingresses"},
- Verbs: []string{"get", "list", "watch"},
- }, {
- NonResourceURLs: []string{"/metrics"},
- Verbs: []string{"get"},
- },
- }
-)
-
-// +kubebuilder:webhook:path=/mutate-opentelemetry-io-v1alpha1-opentelemetrycollector,mutating=true,failurePolicy=fail,groups=opentelemetry.io,resources=opentelemetrycollectors,verbs=create;update,versions=v1alpha1,name=mopentelemetrycollector.kb.io,sideEffects=none,admissionReviewVersions=v1
-// +kubebuilder:webhook:verbs=create;update,path=/validate-opentelemetry-io-v1alpha1-opentelemetrycollector,mutating=false,failurePolicy=fail,groups=opentelemetry.io,resources=opentelemetrycollectors,versions=v1alpha1,name=vopentelemetrycollectorcreateupdate.kb.io,sideEffects=none,admissionReviewVersions=v1
-// +kubebuilder:webhook:verbs=delete,path=/validate-opentelemetry-io-v1alpha1-opentelemetrycollector,mutating=false,failurePolicy=ignore,groups=opentelemetry.io,resources=opentelemetrycollectors,versions=v1alpha1,name=vopentelemetrycollectordelete.kb.io,sideEffects=none,admissionReviewVersions=v1
-// +kubebuilder:object:generate=false
-
-type CollectorWebhook struct {
- logger logr.Logger
- cfg config.Config
- scheme *runtime.Scheme
- reviewer *rbac.Reviewer
-}
-
-func (c CollectorWebhook) Default(ctx context.Context, obj runtime.Object) error {
- otelcol, ok := obj.(*OpenTelemetryCollector)
- if !ok {
- return fmt.Errorf("expected an OpenTelemetryCollector, received %T", obj)
- }
- return c.defaulter(otelcol)
-}
-
-func (c CollectorWebhook) ValidateCreate(ctx context.Context, obj runtime.Object) (admission.Warnings, error) {
- otelcol, ok := obj.(*OpenTelemetryCollector)
- if !ok {
- return nil, fmt.Errorf("expected an OpenTelemetryCollector, received %T", obj)
- }
- return c.validate(ctx, otelcol)
-}
-
-func (c CollectorWebhook) ValidateUpdate(ctx context.Context, oldObj, newObj runtime.Object) (admission.Warnings, error) {
- otelcol, ok := newObj.(*OpenTelemetryCollector)
- if !ok {
- return nil, fmt.Errorf("expected an OpenTelemetryCollector, received %T", newObj)
- }
- return c.validate(ctx, otelcol)
-}
-
-func (c CollectorWebhook) ValidateDelete(ctx context.Context, obj runtime.Object) (admission.Warnings, error) {
- otelcol, ok := obj.(*OpenTelemetryCollector)
- if !ok || otelcol == nil {
- return nil, fmt.Errorf("expected an OpenTelemetryCollector, received %T", obj)
- }
- return c.validate(ctx, otelcol)
-}
-
-func (c CollectorWebhook) defaulter(r *OpenTelemetryCollector) error {
- if len(r.Spec.Mode) == 0 {
- r.Spec.Mode = ModeDeployment
- }
- if len(r.Spec.UpgradeStrategy) == 0 {
- r.Spec.UpgradeStrategy = UpgradeStrategyAutomatic
- }
-
- if r.Labels == nil {
- r.Labels = map[string]string{}
- }
- if r.Labels["app.kubernetes.io/managed-by"] == "" {
- r.Labels["app.kubernetes.io/managed-by"] = "opentelemetry-operator"
- }
-
- // We can default to one because the dependent objects (Deployment and
- // HorizontalPodAutoscaler) default to 1 as well.
- one := int32(1)
- if r.Spec.Replicas == nil {
- r.Spec.Replicas = &one
- }
- if r.Spec.TargetAllocator.Enabled && r.Spec.TargetAllocator.Replicas == nil {
- r.Spec.TargetAllocator.Replicas = &one
- }
-
- if r.Spec.MaxReplicas != nil || (r.Spec.Autoscaler != nil && r.Spec.Autoscaler.MaxReplicas != nil) {
- if r.Spec.Autoscaler == nil {
- r.Spec.Autoscaler = &AutoscalerSpec{}
- }
-
- if r.Spec.Autoscaler.MaxReplicas == nil {
- r.Spec.Autoscaler.MaxReplicas = r.Spec.MaxReplicas
- }
- if r.Spec.Autoscaler.MinReplicas == nil {
- if r.Spec.MinReplicas != nil {
- r.Spec.Autoscaler.MinReplicas = r.Spec.MinReplicas
- } else {
- r.Spec.Autoscaler.MinReplicas = r.Spec.Replicas
- }
- }
-
- if r.Spec.Autoscaler.TargetMemoryUtilization == nil && r.Spec.Autoscaler.TargetCPUUtilization == nil {
- defaultCPUTarget := int32(90)
- r.Spec.Autoscaler.TargetCPUUtilization = &defaultCPUTarget
- }
- }
-
- // If a PDB isn't provided, we set MaxUnavailable to 1. This works even with
- // a single replica: it doesn't block node drains, yet it protects
- // deployments with replicas > 1 from drain-induced disruption
- // out of the box.
- if r.Spec.PodDisruptionBudget == nil {
- r.Spec.PodDisruptionBudget = &PodDisruptionBudgetSpec{
- MaxUnavailable: &intstr.IntOrString{
- Type: intstr.Int,
- IntVal: 1,
- },
- }
- }
-
- // If the target allocator is enabled with a strategy that supports a PDB
- // (consistent-hashing) and no PDB is provided, we set MaxUnavailable to 1.
- // This works even with a single replica: it doesn't block node drains, yet
- // it protects deployments with replicas > 1 from drain-induced disruption
- // out of the box.
- if r.Spec.TargetAllocator.Enabled &&
- r.Spec.TargetAllocator.AllocationStrategy == OpenTelemetryTargetAllocatorAllocationStrategyConsistentHashing &&
- r.Spec.TargetAllocator.PodDisruptionBudget == nil {
- r.Spec.TargetAllocator.PodDisruptionBudget = &PodDisruptionBudgetSpec{
- MaxUnavailable: &intstr.IntOrString{
- Type: intstr.Int,
- IntVal: 1,
- },
- }
- }
-
- if r.Spec.Ingress.Type == IngressTypeRoute && r.Spec.Ingress.Route.Termination == "" {
- r.Spec.Ingress.Route.Termination = TLSRouteTerminationTypeEdge
- }
- if r.Spec.Ingress.Type == IngressTypeNginx && r.Spec.Ingress.RuleType == "" {
- r.Spec.Ingress.RuleType = IngressRuleTypePath
- }
- // If someone upgrades to a later version without upgrading their CRD, they will not have a management state set.
- // This results in a default state of unmanaged, preventing reconciliation from continuing.
- if len(r.Spec.ManagementState) == 0 {
- r.Spec.ManagementState = ManagementStateManaged
- }
- return nil
-}
-
-func (c CollectorWebhook) validate(ctx context.Context, r *OpenTelemetryCollector) (admission.Warnings, error) {
- warnings := admission.Warnings{}
- // validate volumeClaimTemplates
- if r.Spec.Mode != ModeStatefulSet && len(r.Spec.VolumeClaimTemplates) > 0 {
- return warnings, fmt.Errorf("the OpenTelemetry Collector mode is set to %s, which does not support the attribute 'volumeClaimTemplates'", r.Spec.Mode)
- }
-
- // validate tolerations
- if r.Spec.Mode == ModeSidecar && len(r.Spec.Tolerations) > 0 {
- return warnings, fmt.Errorf("the OpenTelemetry Collector mode is set to %s, which does not support the attribute 'tolerations'", r.Spec.Mode)
- }
-
- // validate priorityClassName
- if r.Spec.Mode == ModeSidecar && r.Spec.PriorityClassName != "" {
- return warnings, fmt.Errorf("the OpenTelemetry Collector mode is set to %s, which does not support the attribute 'priorityClassName'", r.Spec.Mode)
- }
-
- // validate affinity
- if r.Spec.Mode == ModeSidecar && r.Spec.Affinity != nil {
- return warnings, fmt.Errorf("the OpenTelemetry Collector mode is set to %s, which does not support the attribute 'affinity'", r.Spec.Mode)
- }
-
- if r.Spec.Mode == ModeSidecar && len(r.Spec.AdditionalContainers) > 0 {
- return warnings, fmt.Errorf("the OpenTelemetry Collector mode is set to %s, which does not support the attribute 'AdditionalContainers'", r.Spec.Mode)
- }
-
- // validate target allocator configs
- if r.Spec.TargetAllocator.Enabled {
- taWarnings, err := c.validateTargetAllocatorConfig(ctx, r)
- if taWarnings != nil {
- warnings = append(warnings, taWarnings...)
- }
- if err != nil {
- return warnings, err
- }
- }
-
- // validate port config
- for _, p := range r.Spec.Ports {
- nameErrs := validation.IsValidPortName(p.Name)
- numErrs := validation.IsValidPortNum(int(p.Port))
- if len(nameErrs) > 0 || len(numErrs) > 0 {
- return warnings, fmt.Errorf("the OpenTelemetry Spec Ports configuration is incorrect, port name '%s' errors: %s, num '%d' errors: %s",
- p.Name, nameErrs, p.Port, numErrs)
- }
- }
-
- var maxReplicas *int32
- if r.Spec.Autoscaler != nil && r.Spec.Autoscaler.MaxReplicas != nil {
- maxReplicas = r.Spec.Autoscaler.MaxReplicas
- }
-
- // check deprecated .Spec.MaxReplicas if maxReplicas is not set
- if maxReplicas == nil && r.Spec.MaxReplicas != nil {
- warnings = append(warnings, "MaxReplicas is deprecated")
- maxReplicas = r.Spec.MaxReplicas
- }
-
- var minReplicas *int32
- if r.Spec.Autoscaler != nil && r.Spec.Autoscaler.MinReplicas != nil {
- minReplicas = r.Spec.Autoscaler.MinReplicas
- }
-
- // check deprecated .Spec.MinReplicas if minReplicas is not set
- if minReplicas == nil {
- if r.Spec.MinReplicas != nil {
- warnings = append(warnings, "MinReplicas is deprecated")
- minReplicas = r.Spec.MinReplicas
- } else {
- minReplicas = r.Spec.Replicas
- }
- }
-
- if r.Spec.Ingress.Type == IngressTypeNginx && r.Spec.Mode == ModeSidecar {
- return warnings, fmt.Errorf("the OpenTelemetry Spec Ingress configuration is incorrect. Ingress can only be used in combination with the modes: %s, %s, %s",
- ModeDeployment, ModeDaemonSet, ModeStatefulSet,
- )
- }
-
- // validate autoscale with horizontal pod autoscaler
- if maxReplicas != nil {
- if *maxReplicas < int32(1) {
- return warnings, fmt.Errorf("the OpenTelemetry Spec autoscale configuration is incorrect, maxReplicas should be defined and one or more")
- }
-
- if r.Spec.Replicas != nil && *r.Spec.Replicas > *maxReplicas {
- return warnings, fmt.Errorf("the OpenTelemetry Spec autoscale configuration is incorrect, replicas must not be greater than maxReplicas")
- }
-
- if minReplicas != nil && *minReplicas > *maxReplicas {
- return warnings, fmt.Errorf("the OpenTelemetry Spec autoscale configuration is incorrect, minReplicas must not be greater than maxReplicas")
- }
-
- if minReplicas != nil && *minReplicas < int32(1) {
- return warnings, fmt.Errorf("the OpenTelemetry Spec autoscale configuration is incorrect, minReplicas should be one or more")
- }
-
- if r.Spec.Autoscaler != nil {
- return warnings, checkAutoscalerSpec(r.Spec.Autoscaler)
- }
- }
-
- if r.Spec.Ingress.Type == IngressTypeNginx && r.Spec.Mode == ModeSidecar {
- return warnings, fmt.Errorf("the OpenTelemetry Spec Ingress configuiration is incorrect. Ingress can only be used in combination with the modes: %s, %s, %s",
- ModeDeployment, ModeDaemonSet, ModeStatefulSet,
- )
- }
- if r.Spec.Ingress.RuleType == IngressRuleTypeSubdomain && (r.Spec.Ingress.Hostname == "" || r.Spec.Ingress.Hostname == "*") {
- return warnings, fmt.Errorf("a valid Ingress hostname has to be defined for subdomain ruleType")
- }
-
- if r.Spec.LivenessProbe != nil {
- if r.Spec.LivenessProbe.InitialDelaySeconds != nil && *r.Spec.LivenessProbe.InitialDelaySeconds < 0 {
- return warnings, fmt.Errorf("the OpenTelemetry Spec LivenessProbe InitialDelaySeconds configuration is incorrect. InitialDelaySeconds should be greater than or equal to 0")
- }
- if r.Spec.LivenessProbe.PeriodSeconds != nil && *r.Spec.LivenessProbe.PeriodSeconds < 1 {
- return warnings, fmt.Errorf("the OpenTelemetry Spec LivenessProbe PeriodSeconds configuration is incorrect. PeriodSeconds should be greater than or equal to 1")
- }
- if r.Spec.LivenessProbe.TimeoutSeconds != nil && *r.Spec.LivenessProbe.TimeoutSeconds < 1 {
- return warnings, fmt.Errorf("the OpenTelemetry Spec LivenessProbe TimeoutSeconds configuration is incorrect. TimeoutSeconds should be greater than or equal to 1")
- }
- if r.Spec.LivenessProbe.SuccessThreshold != nil && *r.Spec.LivenessProbe.SuccessThreshold < 1 {
- return warnings, fmt.Errorf("the OpenTelemetry Spec LivenessProbe SuccessThreshold configuration is incorrect. SuccessThreshold should be greater than or equal to 1")
- }
- if r.Spec.LivenessProbe.FailureThreshold != nil && *r.Spec.LivenessProbe.FailureThreshold < 1 {
- return warnings, fmt.Errorf("the OpenTelemetry Spec LivenessProbe FailureThreshold configuration is incorrect. FailureThreshold should be greater than or equal to 1")
- }
- if r.Spec.LivenessProbe.TerminationGracePeriodSeconds != nil && *r.Spec.LivenessProbe.TerminationGracePeriodSeconds < 1 {
- return warnings, fmt.Errorf("the OpenTelemetry Spec LivenessProbe TerminationGracePeriodSeconds configuration is incorrect. TerminationGracePeriodSeconds should be greater than or equal to 1")
- }
- }
-
- // validate updateStrategy for DaemonSet
- if r.Spec.Mode != ModeDaemonSet && len(r.Spec.UpdateStrategy.Type) > 0 {
- return warnings, fmt.Errorf("the OpenTelemetry Collector mode is set to %s, which does not support the attribute 'updateStrategy'", r.Spec.Mode)
- }
-
- // validate updateStrategy for Deployment
- if r.Spec.Mode != ModeDeployment && len(r.Spec.DeploymentUpdateStrategy.Type) > 0 {
- return warnings, fmt.Errorf("the OpenTelemetry Collector mode is set to %s, which does not support the attribute 'deploymentUpdateStrategy'", r.Spec.Mode)
- }
-
- return warnings, nil
-}
-
-func (c CollectorWebhook) validateTargetAllocatorConfig(ctx context.Context, r *OpenTelemetryCollector) (admission.Warnings, error) {
- if r.Spec.Mode != ModeStatefulSet && r.Spec.Mode != ModeDaemonSet {
- return nil, fmt.Errorf("the OpenTelemetry Collector mode is set to %s, which does not support the target allocation deployment", r.Spec.Mode)
- }
-
- if r.Spec.Mode == ModeDaemonSet && r.Spec.TargetAllocator.AllocationStrategy != OpenTelemetryTargetAllocatorAllocationStrategyPerNode {
- return nil, fmt.Errorf("the OpenTelemetry Collector mode is set to %s, which must be used with target allocation strategy %s ", r.Spec.Mode, OpenTelemetryTargetAllocatorAllocationStrategyPerNode)
- }
-
- if r.Spec.TargetAllocator.AllocationStrategy == OpenTelemetryTargetAllocatorAllocationStrategyPerNode && r.Spec.Mode != ModeDaemonSet {
- return nil, fmt.Errorf("target allocation strategy %s is only supported in OpenTelemetry Collector mode %s", OpenTelemetryTargetAllocatorAllocationStrategyPerNode, ModeDaemonSet)
- }
-
- // validate Prometheus config for target allocation
- promCfg, err := ta.ConfigToPromConfig(r.Spec.Config)
- if err != nil {
- return nil, fmt.Errorf("the OpenTelemetry Spec Prometheus configuration is incorrect, %w", err)
- }
- err = ta.ValidatePromConfig(promCfg, r.Spec.TargetAllocator.Enabled)
- if err != nil {
- return nil, fmt.Errorf("the OpenTelemetry Spec Prometheus configuration is incorrect, %w", err)
- }
- err = ta.ValidateTargetAllocatorConfig(r.Spec.TargetAllocator.PrometheusCR.Enabled, promCfg)
- if err != nil {
- return nil, fmt.Errorf("the OpenTelemetry Spec Prometheus configuration is incorrect, %w", err)
- }
- // if the prometheusCR is enabled, it needs a suite of permissions to function
- if r.Spec.TargetAllocator.PrometheusCR.Enabled {
- if subjectAccessReviews, err := c.reviewer.CheckPolicyRules(ctx, r.GetNamespace(), r.Spec.TargetAllocator.ServiceAccount, targetAllocatorCRPolicyRules...); err != nil {
- return nil, fmt.Errorf("unable to check rbac rules %w", err)
- } else if allowed, deniedReviews := rbac.AllSubjectAccessReviewsAllowed(subjectAccessReviews); !allowed {
- return warningsGroupedByResource(deniedReviews), nil
- }
- }
-
- return nil, nil
-}
-
-func checkAutoscalerSpec(autoscaler *AutoscalerSpec) error {
- if autoscaler.Behavior != nil {
- if autoscaler.Behavior.ScaleDown != nil && autoscaler.Behavior.ScaleDown.StabilizationWindowSeconds != nil &&
- *autoscaler.Behavior.ScaleDown.StabilizationWindowSeconds < int32(1) {
- return fmt.Errorf("the OpenTelemetry Spec autoscale configuration is incorrect, scaleDown should be one or more")
- }
-
- if autoscaler.Behavior.ScaleUp != nil && autoscaler.Behavior.ScaleUp.StabilizationWindowSeconds != nil &&
- *autoscaler.Behavior.ScaleUp.StabilizationWindowSeconds < int32(1) {
- return fmt.Errorf("the OpenTelemetry Spec autoscale configuration is incorrect, scaleUp should be one or more")
- }
- }
- if autoscaler.TargetCPUUtilization != nil && (*autoscaler.TargetCPUUtilization < int32(1) || *autoscaler.TargetCPUUtilization > int32(99)) {
- return fmt.Errorf("the OpenTelemetry Spec autoscale configuration is incorrect, targetCPUUtilization should be greater than 0 and less than 100")
- }
- if autoscaler.TargetMemoryUtilization != nil && (*autoscaler.TargetMemoryUtilization < int32(1) || *autoscaler.TargetMemoryUtilization > int32(99)) {
- return fmt.Errorf("the OpenTelemetry Spec autoscale configuration is incorrect, targetMemoryUtilization should be greater than 0 and less than 100")
- }
-
- for _, metric := range autoscaler.Metrics {
- if metric.Type != autoscalingv2.PodsMetricSourceType {
- return fmt.Errorf("the OpenTelemetry Spec autoscale configuration is incorrect, metric type unsupported. Expected metric of source type Pod")
- }
-
- // pod metric targets only support value and averageValue.
- if metric.Pods.Target.Type == autoscalingv2.AverageValueMetricType {
- if val, ok := metric.Pods.Target.AverageValue.AsInt64(); !ok || val < int64(1) {
- return fmt.Errorf("the OpenTelemetry Spec autoscale configuration is incorrect, average value should be greater than 0")
- }
- } else if metric.Pods.Target.Type == autoscalingv2.ValueMetricType {
- if val, ok := metric.Pods.Target.Value.AsInt64(); !ok || val < int64(1) {
- return fmt.Errorf("the OpenTelemetry Spec autoscale configuration is incorrect, value should be greater than 0")
- }
- } else {
- return fmt.Errorf("the OpenTelemetry Spec autoscale configuration is incorrect, invalid pods target type")
- }
- }
-
- return nil
-}
-
-// warningsGroupedByResource is a helper to take the missing permissions and format them as warnings.
-func warningsGroupedByResource(reviews []*v1.SubjectAccessReview) []string {
- fullResourceToVerbs := make(map[string][]string)
- for _, review := range reviews {
- if review.Spec.ResourceAttributes != nil {
- key := fmt.Sprintf("%s/%s", review.Spec.ResourceAttributes.Group, review.Spec.ResourceAttributes.Resource)
- if len(review.Spec.ResourceAttributes.Group) == 0 {
- key = review.Spec.ResourceAttributes.Resource
- }
- fullResourceToVerbs[key] = append(fullResourceToVerbs[key], review.Spec.ResourceAttributes.Verb)
- } else if review.Spec.NonResourceAttributes != nil {
- key := fmt.Sprintf("nonResourceURL: %s", review.Spec.NonResourceAttributes.Path)
- fullResourceToVerbs[key] = append(fullResourceToVerbs[key], review.Spec.NonResourceAttributes.Verb)
- }
- }
- var warnings []string
- for fullResource, verbs := range fullResourceToVerbs {
- warnings = append(warnings, fmt.Sprintf("missing the following rules for %s: [%s]", fullResource, strings.Join(verbs, ",")))
- }
- return warnings
-}
-
-func SetupCollectorWebhook(mgr ctrl.Manager, cfg config.Config, reviewer *rbac.Reviewer) error {
- cvw := &CollectorWebhook{
- reviewer: reviewer,
- logger: mgr.GetLogger().WithValues("handler", "CollectorWebhook", "version", "v1alpha1"),
- scheme: mgr.GetScheme(),
- cfg: cfg,
- }
- return ctrl.NewWebhookManagedBy(mgr).
- For(&OpenTelemetryCollector{}).
- WithValidator(cvw).
- WithDefaulter(cvw).
- Complete()
-}
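
The registration pattern removed above carries over to v1beta1 largely unchanged. A minimal sketch of the v1beta1 equivalent, assuming the constructor and field names mirror the v1alpha1 version just deleted (the full v1beta1 file is not shown in this patch):

// Sketch only: assumed to mirror the deleted v1alpha1 SetupCollectorWebhook,
// registering the v1beta1 OpenTelemetryCollector instead.
func SetupCollectorWebhook(mgr ctrl.Manager, cfg config.Config, reviewer *rbac.Reviewer) error {
	cvw := &CollectorWebhook{
		reviewer: reviewer,
		logger:   mgr.GetLogger().WithValues("handler", "CollectorWebhook", "version", "v1beta1"),
		scheme:   mgr.GetScheme(),
		cfg:      cfg,
	}
	return ctrl.NewWebhookManagedBy(mgr).
		For(&OpenTelemetryCollector{}). // the v1beta1 type
		WithValidator(cvw).
		WithDefaulter(cvw).
		Complete()
}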
diff --git a/apis/v1alpha1/collector_webhook_test.go b/apis/v1alpha1/collector_webhook_test.go
deleted file mode 100644
index 794c4cba65..0000000000
--- a/apis/v1alpha1/collector_webhook_test.go
+++ /dev/null
@@ -1,1137 +0,0 @@
-// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package v1alpha1
-
-import (
- "context"
- "fmt"
- "os"
- "testing"
-
- "github.com/go-logr/logr"
- "github.com/stretchr/testify/assert"
- appsv1 "k8s.io/api/apps/v1"
- authv1 "k8s.io/api/authorization/v1"
- autoscalingv2 "k8s.io/api/autoscaling/v2"
- v1 "k8s.io/api/core/v1"
- "k8s.io/apimachinery/pkg/api/resource"
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- "k8s.io/apimachinery/pkg/runtime"
- "k8s.io/apimachinery/pkg/util/intstr"
- "k8s.io/client-go/kubernetes/fake"
- "k8s.io/client-go/kubernetes/scheme"
- kubeTesting "k8s.io/client-go/testing"
-
- "github.com/open-telemetry/opentelemetry-operator/internal/config"
- "github.com/open-telemetry/opentelemetry-operator/internal/rbac"
-)
-
-var (
- testScheme *runtime.Scheme = scheme.Scheme
-)
-
-func TestOTELColDefaultingWebhook(t *testing.T) {
- one := int32(1)
- five := int32(5)
- defaultCPUTarget := int32(90)
-
- if err := AddToScheme(testScheme); err != nil {
- fmt.Printf("failed to register scheme: %v", err)
- os.Exit(1)
- }
-
- tests := []struct {
- name string
- otelcol OpenTelemetryCollector
- expected OpenTelemetryCollector
- }{
- {
- name: "all fields default",
- otelcol: OpenTelemetryCollector{},
- expected: OpenTelemetryCollector{
- ObjectMeta: metav1.ObjectMeta{
- Labels: map[string]string{
- "app.kubernetes.io/managed-by": "opentelemetry-operator",
- },
- },
- Spec: OpenTelemetryCollectorSpec{
- Mode: ModeDeployment,
- Replicas: &one,
- UpgradeStrategy: UpgradeStrategyAutomatic,
- ManagementState: ManagementStateManaged,
- PodDisruptionBudget: &PodDisruptionBudgetSpec{
- MaxUnavailable: &intstr.IntOrString{
- Type: intstr.Int,
- IntVal: 1,
- },
- },
- },
- },
- },
- {
- name: "provided values in spec",
- otelcol: OpenTelemetryCollector{
- Spec: OpenTelemetryCollectorSpec{
- Mode: ModeSidecar,
- Replicas: &five,
- UpgradeStrategy: "adhoc",
- },
- },
- expected: OpenTelemetryCollector{
- ObjectMeta: metav1.ObjectMeta{
- Labels: map[string]string{
- "app.kubernetes.io/managed-by": "opentelemetry-operator",
- },
- },
- Spec: OpenTelemetryCollectorSpec{
- Mode: ModeSidecar,
- Replicas: &five,
- UpgradeStrategy: "adhoc",
- ManagementState: ManagementStateManaged,
- PodDisruptionBudget: &PodDisruptionBudgetSpec{
- MaxUnavailable: &intstr.IntOrString{
- Type: intstr.Int,
- IntVal: 1,
- },
- },
- },
- },
- },
- {
- name: "doesn't override unmanaged",
- otelcol: OpenTelemetryCollector{
- Spec: OpenTelemetryCollectorSpec{
- ManagementState: ManagementStateUnmanaged,
- Mode: ModeSidecar,
- Replicas: &five,
- UpgradeStrategy: "adhoc",
- },
- },
- expected: OpenTelemetryCollector{
- ObjectMeta: metav1.ObjectMeta{
- Labels: map[string]string{
- "app.kubernetes.io/managed-by": "opentelemetry-operator",
- },
- },
- Spec: OpenTelemetryCollectorSpec{
- Mode: ModeSidecar,
- Replicas: &five,
- UpgradeStrategy: "adhoc",
- ManagementState: ManagementStateUnmanaged,
- PodDisruptionBudget: &PodDisruptionBudgetSpec{
- MaxUnavailable: &intstr.IntOrString{
- Type: intstr.Int,
- IntVal: 1,
- },
- },
- },
- },
- },
- {
- name: "Setting Autoscaler MaxReplicas",
- otelcol: OpenTelemetryCollector{
- Spec: OpenTelemetryCollectorSpec{
- Autoscaler: &AutoscalerSpec{
- MaxReplicas: &five,
- MinReplicas: &one,
- },
- },
- },
- expected: OpenTelemetryCollector{
- ObjectMeta: metav1.ObjectMeta{
- Labels: map[string]string{
- "app.kubernetes.io/managed-by": "opentelemetry-operator",
- },
- },
- Spec: OpenTelemetryCollectorSpec{
- Mode: ModeDeployment,
- Replicas: &one,
- UpgradeStrategy: UpgradeStrategyAutomatic,
- ManagementState: ManagementStateManaged,
- Autoscaler: &AutoscalerSpec{
- TargetCPUUtilization: &defaultCPUTarget,
- MaxReplicas: &five,
- MinReplicas: &one,
- },
- PodDisruptionBudget: &PodDisruptionBudgetSpec{
- MaxUnavailable: &intstr.IntOrString{
- Type: intstr.Int,
- IntVal: 1,
- },
- },
- },
- },
- },
- {
- name: "MaxReplicas but no Autoscale",
- otelcol: OpenTelemetryCollector{
- Spec: OpenTelemetryCollectorSpec{
- MaxReplicas: &five,
- },
- },
- expected: OpenTelemetryCollector{
- ObjectMeta: metav1.ObjectMeta{
- Labels: map[string]string{
- "app.kubernetes.io/managed-by": "opentelemetry-operator",
- },
- },
- Spec: OpenTelemetryCollectorSpec{
- Mode: ModeDeployment,
- Replicas: &one,
- UpgradeStrategy: UpgradeStrategyAutomatic,
- ManagementState: ManagementStateManaged,
- Autoscaler: &AutoscalerSpec{
- TargetCPUUtilization: &defaultCPUTarget,
- // webhook Default adds MaxReplicas to Autoscaler because
- // OpenTelemetryCollector.Spec.MaxReplicas is deprecated.
- MaxReplicas: &five,
- MinReplicas: &one,
- },
- MaxReplicas: &five,
- PodDisruptionBudget: &PodDisruptionBudgetSpec{
- MaxUnavailable: &intstr.IntOrString{
- Type: intstr.Int,
- IntVal: 1,
- },
- },
- },
- },
- },
- {
- name: "Missing route termination",
- otelcol: OpenTelemetryCollector{
- Spec: OpenTelemetryCollectorSpec{
- Mode: ModeDeployment,
- Ingress: Ingress{
- Type: IngressTypeRoute,
- },
- },
- },
- expected: OpenTelemetryCollector{
- ObjectMeta: metav1.ObjectMeta{
- Labels: map[string]string{
- "app.kubernetes.io/managed-by": "opentelemetry-operator",
- },
- },
- Spec: OpenTelemetryCollectorSpec{
- Mode: ModeDeployment,
- ManagementState: ManagementStateManaged,
- Ingress: Ingress{
- Type: IngressTypeRoute,
- Route: OpenShiftRoute{
- Termination: TLSRouteTerminationTypeEdge,
- },
- },
- Replicas: &one,
- UpgradeStrategy: UpgradeStrategyAutomatic,
- PodDisruptionBudget: &PodDisruptionBudgetSpec{
- MaxUnavailable: &intstr.IntOrString{
- Type: intstr.Int,
- IntVal: 1,
- },
- },
- },
- },
- },
- {
- name: "Defined PDB for collector",
- otelcol: OpenTelemetryCollector{
- Spec: OpenTelemetryCollectorSpec{
- Mode: ModeDeployment,
- PodDisruptionBudget: &PodDisruptionBudgetSpec{
- MinAvailable: &intstr.IntOrString{
- Type: intstr.String,
- StrVal: "10%",
- },
- },
- },
- },
- expected: OpenTelemetryCollector{
- ObjectMeta: metav1.ObjectMeta{
- Labels: map[string]string{
- "app.kubernetes.io/managed-by": "opentelemetry-operator",
- },
- },
- Spec: OpenTelemetryCollectorSpec{
- Mode: ModeDeployment,
- Replicas: &one,
- UpgradeStrategy: UpgradeStrategyAutomatic,
- ManagementState: ManagementStateManaged,
- PodDisruptionBudget: &PodDisruptionBudgetSpec{
- MinAvailable: &intstr.IntOrString{
- Type: intstr.String,
- StrVal: "10%",
- },
- },
- },
- },
- },
- {
- name: "Defined PDB for target allocator",
- otelcol: OpenTelemetryCollector{
- Spec: OpenTelemetryCollectorSpec{
- Mode: ModeDeployment,
- TargetAllocator: OpenTelemetryTargetAllocator{
- Enabled: true,
- AllocationStrategy: OpenTelemetryTargetAllocatorAllocationStrategyConsistentHashing,
- PodDisruptionBudget: &PodDisruptionBudgetSpec{
- MinAvailable: &intstr.IntOrString{
- Type: intstr.String,
- StrVal: "10%",
- },
- },
- },
- },
- },
- expected: OpenTelemetryCollector{
- ObjectMeta: metav1.ObjectMeta{
- Labels: map[string]string{
- "app.kubernetes.io/managed-by": "opentelemetry-operator",
- },
- },
- Spec: OpenTelemetryCollectorSpec{
- Mode: ModeDeployment,
- Replicas: &one,
- UpgradeStrategy: UpgradeStrategyAutomatic,
- ManagementState: ManagementStateManaged,
- PodDisruptionBudget: &PodDisruptionBudgetSpec{
- MaxUnavailable: &intstr.IntOrString{
- Type: intstr.Int,
- IntVal: 1,
- },
- },
- TargetAllocator: OpenTelemetryTargetAllocator{
- Enabled: true,
- Replicas: &one,
- AllocationStrategy: OpenTelemetryTargetAllocatorAllocationStrategyConsistentHashing,
- PodDisruptionBudget: &PodDisruptionBudgetSpec{
- MinAvailable: &intstr.IntOrString{
- Type: intstr.String,
- StrVal: "10%",
- },
- },
- },
- },
- },
- },
- {
- name: "Undefined PDB for target allocator and consistent-hashing strategy",
- otelcol: OpenTelemetryCollector{
- Spec: OpenTelemetryCollectorSpec{
- Mode: ModeDeployment,
- TargetAllocator: OpenTelemetryTargetAllocator{
- Enabled: true,
- Replicas: &one,
- AllocationStrategy: OpenTelemetryTargetAllocatorAllocationStrategyConsistentHashing,
- },
- },
- },
- expected: OpenTelemetryCollector{
- ObjectMeta: metav1.ObjectMeta{
- Labels: map[string]string{
- "app.kubernetes.io/managed-by": "opentelemetry-operator",
- },
- },
- Spec: OpenTelemetryCollectorSpec{
- Mode: ModeDeployment,
- Replicas: &one,
- UpgradeStrategy: UpgradeStrategyAutomatic,
- ManagementState: ManagementStateManaged,
- PodDisruptionBudget: &PodDisruptionBudgetSpec{
- MaxUnavailable: &intstr.IntOrString{
- Type: intstr.Int,
- IntVal: 1,
- },
- },
- TargetAllocator: OpenTelemetryTargetAllocator{
- Enabled: true,
- Replicas: &one,
- AllocationStrategy: OpenTelemetryTargetAllocatorAllocationStrategyConsistentHashing,
- PodDisruptionBudget: &PodDisruptionBudgetSpec{
- MaxUnavailable: &intstr.IntOrString{
- Type: intstr.Int,
- IntVal: 1,
- },
- },
- },
- },
- },
- },
- {
- name: "Undefined PDB for target allocator and not consistent-hashing strategy",
- otelcol: OpenTelemetryCollector{
- Spec: OpenTelemetryCollectorSpec{
- Mode: ModeDeployment,
- TargetAllocator: OpenTelemetryTargetAllocator{
- Enabled: true,
- AllocationStrategy: OpenTelemetryTargetAllocatorAllocationStrategyLeastWeighted,
- },
- },
- },
- expected: OpenTelemetryCollector{
- ObjectMeta: metav1.ObjectMeta{
- Labels: map[string]string{
- "app.kubernetes.io/managed-by": "opentelemetry-operator",
- },
- },
- Spec: OpenTelemetryCollectorSpec{
- Mode: ModeDeployment,
- Replicas: &one,
- UpgradeStrategy: UpgradeStrategyAutomatic,
- ManagementState: ManagementStateManaged,
- PodDisruptionBudget: &PodDisruptionBudgetSpec{
- MaxUnavailable: &intstr.IntOrString{
- Type: intstr.Int,
- IntVal: 1,
- },
- },
- TargetAllocator: OpenTelemetryTargetAllocator{
- Enabled: true,
- Replicas: &one,
- AllocationStrategy: OpenTelemetryTargetAllocatorAllocationStrategyLeastWeighted,
- },
- },
- },
- },
- }
-
- for _, test := range tests {
- test := test
- t.Run(test.name, func(t *testing.T) {
- cvw := &CollectorWebhook{
- logger: logr.Discard(),
- scheme: testScheme,
- cfg: config.New(
- config.WithCollectorImage("collector:v0.0.0"),
- config.WithTargetAllocatorImage("ta:v0.0.0"),
- ),
- }
- ctx := context.Background()
- err := cvw.Default(ctx, &test.otelcol)
- assert.NoError(t, err)
- assert.Equal(t, test.expected, test.otelcol)
- })
- }
-}
-
- // TODO: many of these tests use .Spec.MaxReplicas and .Spec.MinReplicas. These fields are
- // deprecated and moved to .Spec.Autoscaler. It's fine to use them to test that the old CRD
- // is still supported, but they should eventually be updated.
-func TestOTELColValidatingWebhook(t *testing.T) {
- minusOne := int32(-1)
- zero := int32(0)
- zero64 := int64(0)
- one := int32(1)
- three := int32(3)
- five := int32(5)
-
- tests := []struct { //nolint:govet
- name string
- otelcol OpenTelemetryCollector
- expectedErr string
- expectedWarnings []string
- shouldFailSar bool
- }{
- {
- name: "valid empty spec",
- otelcol: OpenTelemetryCollector{},
- },
- {
- name: "valid full spec",
- otelcol: OpenTelemetryCollector{
- Spec: OpenTelemetryCollectorSpec{
- Mode: ModeStatefulSet,
- MinReplicas: &one,
- Replicas: &three,
- MaxReplicas: &five,
- UpgradeStrategy: "adhoc",
- TargetAllocator: OpenTelemetryTargetAllocator{
- Enabled: true,
- },
- Config: `receivers:
- examplereceiver:
- endpoint: "0.0.0.0:12345"
- examplereceiver/settings:
- endpoint: "0.0.0.0:12346"
- prometheus:
- config:
- scrape_configs:
- - job_name: otel-collector
- scrape_interval: 10s
- jaeger/custom:
- protocols:
- thrift_http:
- endpoint: 0.0.0.0:15268
-`,
- Ports: []PortsSpec{
- {
- ServicePort: v1.ServicePort{
- Name: "port1",
- Port: 5555,
- },
- },
- {
- ServicePort: v1.ServicePort{
- Name: "port2",
- Port: 5554,
- Protocol: v1.ProtocolUDP,
- },
- },
- },
- Autoscaler: &AutoscalerSpec{
- Behavior: &autoscalingv2.HorizontalPodAutoscalerBehavior{
- ScaleDown: &autoscalingv2.HPAScalingRules{
- StabilizationWindowSeconds: &three,
- },
- ScaleUp: &autoscalingv2.HPAScalingRules{
- StabilizationWindowSeconds: &five,
- },
- },
- TargetCPUUtilization: &five,
- },
- },
- },
- expectedWarnings: []string{
- "MaxReplicas is deprecated",
- "MinReplicas is deprecated",
- },
- },
- {
- name: "prom CR admissions warning",
- shouldFailSar: true, // force failure
- otelcol: OpenTelemetryCollector{
- Spec: OpenTelemetryCollectorSpec{
- Mode: ModeStatefulSet,
- MinReplicas: &one,
- Replicas: &three,
- MaxReplicas: &five,
- UpgradeStrategy: "adhoc",
- TargetAllocator: OpenTelemetryTargetAllocator{
- Enabled: true,
- PrometheusCR: OpenTelemetryTargetAllocatorPrometheusCR{Enabled: true},
- },
- Config: `receivers:
- examplereceiver:
- endpoint: "0.0.0.0:12345"
- examplereceiver/settings:
- endpoint: "0.0.0.0:12346"
- prometheus:
- config:
- scrape_configs:
- - job_name: otel-collector
- scrape_interval: 10s
- jaeger/custom:
- protocols:
- thrift_http:
- endpoint: 0.0.0.0:15268
-`,
- Ports: []PortsSpec{
- {
- ServicePort: v1.ServicePort{
- Name: "port1",
- Port: 5555,
- },
- },
- {
- ServicePort: v1.ServicePort{
- Name: "port2",
- Port: 5554,
- Protocol: v1.ProtocolUDP,
- },
- },
- },
- Autoscaler: &AutoscalerSpec{
- Behavior: &autoscalingv2.HorizontalPodAutoscalerBehavior{
- ScaleDown: &autoscalingv2.HPAScalingRules{
- StabilizationWindowSeconds: &three,
- },
- ScaleUp: &autoscalingv2.HPAScalingRules{
- StabilizationWindowSeconds: &five,
- },
- },
- TargetCPUUtilization: &five,
- },
- },
- },
- expectedWarnings: []string{
- "missing the following rules for monitoring.coreos.com/servicemonitors: [*]",
- "missing the following rules for monitoring.coreos.com/podmonitors: [*]",
- "missing the following rules for nodes/metrics: [get,list,watch]",
- "missing the following rules for services: [get,list,watch]",
- "missing the following rules for endpoints: [get,list,watch]",
- "missing the following rules for namespaces: [get,list,watch]",
- "missing the following rules for networking.k8s.io/ingresses: [get,list,watch]",
- "missing the following rules for nodes: [get,list,watch]",
- "missing the following rules for pods: [get,list,watch]",
- "missing the following rules for configmaps: [get]",
- "missing the following rules for discovery.k8s.io/endpointslices: [get,list,watch]",
- "missing the following rules for nonResourceURL: /metrics: [get]",
- "MaxReplicas is deprecated",
- "MinReplicas is deprecated",
- },
- },
- {
- name: "prom CR no admissions warning",
- shouldFailSar: false, // force SAR okay
- otelcol: OpenTelemetryCollector{
- Spec: OpenTelemetryCollectorSpec{
- Mode: ModeStatefulSet,
- Replicas: &three,
- UpgradeStrategy: "adhoc",
- TargetAllocator: OpenTelemetryTargetAllocator{
- Enabled: true,
- PrometheusCR: OpenTelemetryTargetAllocatorPrometheusCR{Enabled: true},
- },
- Config: `receivers:
- examplereceiver:
- endpoint: "0.0.0.0:12345"
- examplereceiver/settings:
- endpoint: "0.0.0.0:12346"
- prometheus:
- config:
- scrape_configs:
- - job_name: otel-collector
- scrape_interval: 10s
- jaeger/custom:
- protocols:
- thrift_http:
- endpoint: 0.0.0.0:15268
-`,
- Ports: []PortsSpec{
- {
- ServicePort: v1.ServicePort{
- Name: "port1",
- Port: 5555,
- },
- },
- {
- ServicePort: v1.ServicePort{
- Name: "port2",
- Port: 5554,
- Protocol: v1.ProtocolUDP,
- },
- },
- },
- Autoscaler: &AutoscalerSpec{
- Behavior: &autoscalingv2.HorizontalPodAutoscalerBehavior{
- ScaleDown: &autoscalingv2.HPAScalingRules{
- StabilizationWindowSeconds: &three,
- },
- ScaleUp: &autoscalingv2.HPAScalingRules{
- StabilizationWindowSeconds: &five,
- },
- },
- TargetCPUUtilization: &five,
- },
- },
- },
- },
- {
- name: "invalid mode with volume claim templates",
- otelcol: OpenTelemetryCollector{
- Spec: OpenTelemetryCollectorSpec{
- Mode: ModeSidecar,
- VolumeClaimTemplates: []v1.PersistentVolumeClaim{{}, {}},
- },
- },
- expectedErr: "does not support the attribute 'volumeClaimTemplates'",
- },
- {
- name: "invalid mode with tolerations",
- otelcol: OpenTelemetryCollector{
- Spec: OpenTelemetryCollectorSpec{
- Mode: ModeSidecar,
- Tolerations: []v1.Toleration{{}, {}},
- },
- },
- expectedErr: "does not support the attribute 'tolerations'",
- },
- {
- name: "invalid mode with target allocator",
- otelcol: OpenTelemetryCollector{
- Spec: OpenTelemetryCollectorSpec{
- Mode: ModeDeployment,
- TargetAllocator: OpenTelemetryTargetAllocator{
- Enabled: true,
- },
- },
- },
- expectedErr: "does not support the target allocation deployment",
- },
- {
- name: "invalid target allocator config",
- otelcol: OpenTelemetryCollector{
- Spec: OpenTelemetryCollectorSpec{
- Mode: ModeStatefulSet,
- TargetAllocator: OpenTelemetryTargetAllocator{
- Enabled: true,
- },
- },
- },
- expectedErr: "the OpenTelemetry Spec Prometheus configuration is incorrect",
- },
- {
- name: "invalid target allocation strategy",
- otelcol: OpenTelemetryCollector{
- Spec: OpenTelemetryCollectorSpec{
- Mode: ModeDaemonSet,
- TargetAllocator: OpenTelemetryTargetAllocator{
- Enabled: true,
- AllocationStrategy: OpenTelemetryTargetAllocatorAllocationStrategyLeastWeighted,
- },
- },
- },
- expectedErr: "mode is set to daemonset, which must be used with target allocation strategy per-node",
- },
- {
- name: "invalid port name",
- otelcol: OpenTelemetryCollector{
- Spec: OpenTelemetryCollectorSpec{
- Ports: []PortsSpec{
- {
- ServicePort: v1.ServicePort{
- // this port name contains a non-alphanumeric character, which is invalid.
- Name: "-testš¦port",
- Port: 12345,
- Protocol: v1.ProtocolTCP,
- },
- },
- },
- },
- },
- expectedErr: "the OpenTelemetry Spec Ports configuration is incorrect",
- },
- {
- name: "invalid port name, too long",
- otelcol: OpenTelemetryCollector{
- Spec: OpenTelemetryCollectorSpec{
- Ports: []PortsSpec{
- {
- ServicePort: v1.ServicePort{
- Name: "aaaabbbbccccdddd", // len: 16, too long
- Port: 5555,
- },
- },
- },
- },
- },
- expectedErr: "the OpenTelemetry Spec Ports configuration is incorrect",
- },
- {
- name: "invalid port num",
- otelcol: OpenTelemetryCollector{
- Spec: OpenTelemetryCollectorSpec{
- Ports: []PortsSpec{
- {
- ServicePort: v1.ServicePort{
- Name: "aaaabbbbccccddd", // len: 15
- // no port set means it's 0, which is invalid
- },
- },
- },
- },
- },
- expectedErr: "the OpenTelemetry Spec Ports configuration is incorrect",
- },
- {
- name: "invalid max replicas",
- otelcol: OpenTelemetryCollector{
- Spec: OpenTelemetryCollectorSpec{
- MaxReplicas: &zero,
- },
- },
- expectedErr: "maxReplicas should be defined and one or more",
- expectedWarnings: []string{"MaxReplicas is deprecated"},
- },
- {
- name: "invalid replicas, greater than max",
- otelcol: OpenTelemetryCollector{
- Spec: OpenTelemetryCollectorSpec{
- MaxReplicas: &three,
- Replicas: &five,
- },
- },
- expectedErr: "replicas must not be greater than maxReplicas",
- expectedWarnings: []string{"MaxReplicas is deprecated"},
- },
- {
- name: "invalid min replicas, greater than max",
- otelcol: OpenTelemetryCollector{
- Spec: OpenTelemetryCollectorSpec{
- MaxReplicas: &three,
- MinReplicas: &five,
- },
- },
- expectedErr: "minReplicas must not be greater than maxReplicas",
- expectedWarnings: []string{"MaxReplicas is deprecated", "MinReplicas is deprecated"},
- },
- {
- name: "invalid min replicas, lesser than 1",
- otelcol: OpenTelemetryCollector{
- Spec: OpenTelemetryCollectorSpec{
- MaxReplicas: &three,
- MinReplicas: &zero,
- },
- },
- expectedErr: "minReplicas should be one or more",
- expectedWarnings: []string{"MaxReplicas is deprecated", "MinReplicas is deprecated"},
- },
- {
- name: "invalid autoscaler scale down",
- otelcol: OpenTelemetryCollector{
- Spec: OpenTelemetryCollectorSpec{
- MaxReplicas: &three,
- Autoscaler: &AutoscalerSpec{
- Behavior: &autoscalingv2.HorizontalPodAutoscalerBehavior{
- ScaleDown: &autoscalingv2.HPAScalingRules{
- StabilizationWindowSeconds: &zero,
- },
- },
- },
- },
- },
- expectedErr: "scaleDown should be one or more",
- expectedWarnings: []string{"MaxReplicas is deprecated"},
- },
- {
- name: "invalid autoscaler scale up",
- otelcol: OpenTelemetryCollector{
- Spec: OpenTelemetryCollectorSpec{
- MaxReplicas: &three,
- Autoscaler: &AutoscalerSpec{
- Behavior: &autoscalingv2.HorizontalPodAutoscalerBehavior{
- ScaleUp: &autoscalingv2.HPAScalingRules{
- StabilizationWindowSeconds: &zero,
- },
- },
- },
- },
- },
- expectedErr: "scaleUp should be one or more",
- expectedWarnings: []string{"MaxReplicas is deprecated"},
- },
- {
- name: "invalid autoscaler target cpu utilization",
- otelcol: OpenTelemetryCollector{
- Spec: OpenTelemetryCollectorSpec{
- MaxReplicas: &three,
- Autoscaler: &AutoscalerSpec{
- TargetCPUUtilization: &zero,
- },
- },
- },
- expectedErr: "targetCPUUtilization should be greater than 0 and less than 100",
- expectedWarnings: []string{"MaxReplicas is deprecated"},
- },
- {
- name: "autoscaler minReplicas is less than maxReplicas",
- otelcol: OpenTelemetryCollector{
- Spec: OpenTelemetryCollectorSpec{
- Autoscaler: &AutoscalerSpec{
- MaxReplicas: &one,
- MinReplicas: &five,
- },
- },
- },
- expectedErr: "the OpenTelemetry Spec autoscale configuration is incorrect, minReplicas must not be greater than maxReplicas",
- },
- {
- name: "invalid autoscaler metric type",
- otelcol: OpenTelemetryCollector{
- Spec: OpenTelemetryCollectorSpec{
- MaxReplicas: &three,
- Autoscaler: &AutoscalerSpec{
- Metrics: []MetricSpec{
- {
- Type: autoscalingv2.ResourceMetricSourceType,
- },
- },
- },
- },
- },
- expectedErr: "the OpenTelemetry Spec autoscale configuration is incorrect, metric type unsupported. Expected metric of source type Pod",
- expectedWarnings: []string{"MaxReplicas is deprecated"},
- },
- {
- name: "invalid pod metric average value",
- otelcol: OpenTelemetryCollector{
- Spec: OpenTelemetryCollectorSpec{
- MaxReplicas: &three,
- Autoscaler: &AutoscalerSpec{
- Metrics: []MetricSpec{
- {
- Type: autoscalingv2.PodsMetricSourceType,
- Pods: &autoscalingv2.PodsMetricSource{
- Metric: autoscalingv2.MetricIdentifier{
- Name: "custom1",
- },
- Target: autoscalingv2.MetricTarget{
- Type: autoscalingv2.AverageValueMetricType,
- AverageValue: resource.NewQuantity(int64(0), resource.DecimalSI),
- },
- },
- },
- },
- },
- },
- },
- expectedErr: "the OpenTelemetry Spec autoscale configuration is incorrect, average value should be greater than 0",
- expectedWarnings: []string{"MaxReplicas is deprecated"},
- },
- {
- name: "utilization target is not valid with pod metrics",
- otelcol: OpenTelemetryCollector{
- Spec: OpenTelemetryCollectorSpec{
- MaxReplicas: &three,
- Autoscaler: &AutoscalerSpec{
- Metrics: []MetricSpec{
- {
- Type: autoscalingv2.PodsMetricSourceType,
- Pods: &autoscalingv2.PodsMetricSource{
- Metric: autoscalingv2.MetricIdentifier{
- Name: "custom1",
- },
- Target: autoscalingv2.MetricTarget{
- Type: autoscalingv2.UtilizationMetricType,
- AverageUtilization: &one,
- },
- },
- },
- },
- },
- },
- },
- expectedErr: "the OpenTelemetry Spec autoscale configuration is incorrect, invalid pods target type",
- expectedWarnings: []string{"MaxReplicas is deprecated"},
- },
- {
- name: "invalid deployment mode incompabible with ingress settings",
- otelcol: OpenTelemetryCollector{
- Spec: OpenTelemetryCollectorSpec{
- Mode: ModeSidecar,
- Ingress: Ingress{
- Type: IngressTypeNginx,
- },
- },
- },
- expectedErr: fmt.Sprintf("Ingress can only be used in combination with the modes: %s, %s, %s",
- ModeDeployment, ModeDaemonSet, ModeStatefulSet,
- ),
- },
- {
- name: "invalid mode with priorityClassName",
- otelcol: OpenTelemetryCollector{
- Spec: OpenTelemetryCollectorSpec{
- Mode: ModeSidecar,
- PriorityClassName: "test-class",
- },
- },
- expectedErr: "does not support the attribute 'priorityClassName'",
- },
- {
- name: "invalid mode with affinity",
- otelcol: OpenTelemetryCollector{
- Spec: OpenTelemetryCollectorSpec{
- Mode: ModeSidecar,
- Affinity: &v1.Affinity{
- NodeAffinity: &v1.NodeAffinity{
- RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{
- NodeSelectorTerms: []v1.NodeSelectorTerm{
- {
- MatchExpressions: []v1.NodeSelectorRequirement{
- {
- Key: "node",
- Operator: v1.NodeSelectorOpIn,
- Values: []string{"test-node"},
- },
- },
- },
- },
- },
- },
- },
- },
- },
- expectedErr: "does not support the attribute 'affinity'",
- },
- {
- name: "invalid InitialDelaySeconds",
- otelcol: OpenTelemetryCollector{
- Spec: OpenTelemetryCollectorSpec{
- LivenessProbe: &Probe{
- InitialDelaySeconds: &minusOne,
- },
- },
- },
- expectedErr: "the OpenTelemetry Spec LivenessProbe InitialDelaySeconds configuration is incorrect",
- },
- {
- name: "invalid PeriodSeconds",
- otelcol: OpenTelemetryCollector{
- Spec: OpenTelemetryCollectorSpec{
- LivenessProbe: &Probe{
- PeriodSeconds: &zero,
- },
- },
- },
- expectedErr: "the OpenTelemetry Spec LivenessProbe PeriodSeconds configuration is incorrect",
- },
- {
- name: "invalid TimeoutSeconds",
- otelcol: OpenTelemetryCollector{
- Spec: OpenTelemetryCollectorSpec{
- LivenessProbe: &Probe{
- TimeoutSeconds: &zero,
- },
- },
- },
- expectedErr: "the OpenTelemetry Spec LivenessProbe TimeoutSeconds configuration is incorrect",
- },
- {
- name: "invalid SuccessThreshold",
- otelcol: OpenTelemetryCollector{
- Spec: OpenTelemetryCollectorSpec{
- LivenessProbe: &Probe{
- SuccessThreshold: &zero,
- },
- },
- },
- expectedErr: "the OpenTelemetry Spec LivenessProbe SuccessThreshold configuration is incorrect",
- },
- {
- name: "invalid FailureThreshold",
- otelcol: OpenTelemetryCollector{
- Spec: OpenTelemetryCollectorSpec{
- LivenessProbe: &Probe{
- FailureThreshold: &zero,
- },
- },
- },
- expectedErr: "the OpenTelemetry Spec LivenessProbe FailureThreshold configuration is incorrect",
- },
- {
- name: "invalid TerminationGracePeriodSeconds",
- otelcol: OpenTelemetryCollector{
- Spec: OpenTelemetryCollectorSpec{
- LivenessProbe: &Probe{
- TerminationGracePeriodSeconds: &zero64,
- },
- },
- },
- expectedErr: "the OpenTelemetry Spec LivenessProbe TerminationGracePeriodSeconds configuration is incorrect",
- },
- {
- name: "invalid AdditionalContainers",
- otelcol: OpenTelemetryCollector{
- Spec: OpenTelemetryCollectorSpec{
- Mode: ModeSidecar,
- AdditionalContainers: []v1.Container{
- {
- Name: "test",
- },
- },
- },
- },
- expectedErr: "the OpenTelemetry Collector mode is set to sidecar, which does not support the attribute 'AdditionalContainers'",
- },
- {
- name: "missing ingress hostname for subdomain ruleType",
- otelcol: OpenTelemetryCollector{
- Spec: OpenTelemetryCollectorSpec{
- Ingress: Ingress{
- RuleType: IngressRuleTypeSubdomain,
- },
- },
- },
- expectedErr: "a valid Ingress hostname has to be defined for subdomain ruleType",
- },
- {
- name: "invalid updateStrategy for Deployment mode",
- otelcol: OpenTelemetryCollector{
- Spec: OpenTelemetryCollectorSpec{
- Mode: ModeDeployment,
- UpdateStrategy: appsv1.DaemonSetUpdateStrategy{
- Type: "RollingUpdate",
- RollingUpdate: &appsv1.RollingUpdateDaemonSet{
- MaxSurge: &intstr.IntOrString{Type: intstr.Int, IntVal: int32(1)},
- MaxUnavailable: &intstr.IntOrString{Type: intstr.Int, IntVal: int32(1)},
- },
- },
- },
- },
- expectedErr: "the OpenTelemetry Collector mode is set to deployment, which does not support the attribute 'updateStrategy'",
- },
- {
- name: "invalid updateStrategy for Statefulset mode",
- otelcol: OpenTelemetryCollector{
- Spec: OpenTelemetryCollectorSpec{
- Mode: ModeStatefulSet,
- DeploymentUpdateStrategy: appsv1.DeploymentStrategy{
- Type: "RollingUpdate",
- RollingUpdate: &appsv1.RollingUpdateDeployment{
- MaxSurge: &intstr.IntOrString{Type: intstr.Int, IntVal: int32(1)},
- MaxUnavailable: &intstr.IntOrString{Type: intstr.Int, IntVal: int32(1)},
- },
- },
- },
- },
- expectedErr: "the OpenTelemetry Collector mode is set to statefulset, which does not support the attribute 'deploymentUpdateStrategy'",
- },
- }
-
- for _, test := range tests {
- test := test
- t.Run(test.name, func(t *testing.T) {
- cvw := &CollectorWebhook{
- logger: logr.Discard(),
- scheme: testScheme,
- cfg: config.New(
- config.WithCollectorImage("collector:v0.0.0"),
- config.WithTargetAllocatorImage("ta:v0.0.0"),
- ),
- reviewer: getReviewer(test.shouldFailSar),
- }
- ctx := context.Background()
- warnings, err := cvw.ValidateCreate(ctx, &test.otelcol)
- if test.expectedErr == "" {
- assert.NoError(t, err)
- } else {
- assert.ErrorContains(t, err, test.expectedErr)
- }
- assert.Equal(t, len(test.expectedWarnings), len(warnings))
- assert.ElementsMatch(t, warnings, test.expectedWarnings)
- })
- }
-}
-
-func getReviewer(shouldFailSAR bool) *rbac.Reviewer {
- c := fake.NewSimpleClientset()
- c.PrependReactor("create", "subjectaccessreviews", func(action kubeTesting.Action) (handled bool, ret runtime.Object, err error) {
- // check our expectation here
- if !action.Matches("create", "subjectaccessreviews") {
- return false, nil, fmt.Errorf("must be a create for a SAR")
- }
- sar, ok := action.(kubeTesting.CreateAction).GetObject().DeepCopyObject().(*authv1.SubjectAccessReview)
- if !ok || sar == nil {
- return false, nil, fmt.Errorf("bad object")
- }
- sar.Status = authv1.SubjectAccessReviewStatus{
- Allowed: !shouldFailSAR,
- Denied: shouldFailSAR,
- }
- return true, sar, nil
- })
- return rbac.NewReviewer(c)
-}
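
The 1121 lines added to apis/v1beta1/collector_webhook_test.go (elided from this patch view) port these cases to v1beta1. A minimal sketch of one such defaulting check, assuming the field and constant names carry over from the v1alpha1 package shown above:

// Sketch only; imports (testing, context, logr, testify/assert) as in the deleted file.
func TestV1Beta1DefaultingSketch(t *testing.T) {
	cvw := &CollectorWebhook{logger: logr.Discard()}
	otelcol := OpenTelemetryCollector{}
	assert.NoError(t, cvw.Default(context.Background(), &otelcol))
	assert.Equal(t, ModeDeployment, otelcol.Spec.Mode)
	assert.Equal(t, ManagementStateManaged, otelcol.Spec.ManagementState)
}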
diff --git a/apis/v1alpha1/opampbridge_webhook_test.go b/apis/v1alpha1/opampbridge_webhook_test.go
index 8c0e6693a6..34d667b04d 100644
--- a/apis/v1alpha1/opampbridge_webhook_test.go
+++ b/apis/v1alpha1/opampbridge_webhook_test.go
@@ -24,10 +24,13 @@ import (
"github.com/stretchr/testify/assert"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/client-go/kubernetes/scheme"
"github.com/open-telemetry/opentelemetry-operator/internal/config"
)
+var testScheme = scheme.Scheme
+
func TestOpAMPBridgeDefaultingWebhook(t *testing.T) {
one := int32(1)
five := int32(5)
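
The testScheme declaration added in this hunk replaces the package-level one that lived in the now-deleted apis/v1alpha1/collector_webhook_test.go; without it, the remaining v1alpha1 tests that reference testScheme would no longer compile.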
diff --git a/apis/v1beta1/collector_webhook.go b/apis/v1beta1/collector_webhook.go
index e6c7ac4efa..693823f370 100644
--- a/apis/v1beta1/collector_webhook.go
+++ b/apis/v1beta1/collector_webhook.go
@@ -20,17 +20,51 @@ import (
"strings"
"github.com/go-logr/logr"
+ authorizationv1 "k8s.io/api/authorization/v1"
+ autoscalingv2 "k8s.io/api/autoscaling/v2"
+ rbacv1 "k8s.io/api/rbac/v1"
"k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/util/intstr"
+ "k8s.io/apimachinery/pkg/util/validation"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/webhook/admission"
"github.com/open-telemetry/opentelemetry-operator/internal/config"
+ ta "github.com/open-telemetry/opentelemetry-operator/internal/manifests/targetallocator/adapters"
"github.com/open-telemetry/opentelemetry-operator/internal/rbac"
)
var (
_ admission.CustomValidator = &CollectorWebhook{}
_ admission.CustomDefaulter = &CollectorWebhook{}
+
+ // targetAllocatorCRPolicyRules are the policy rules required for the CR functionality.
+ targetAllocatorCRPolicyRules = []*rbacv1.PolicyRule{
+ {
+ APIGroups: []string{"monitoring.coreos.com"},
+ Resources: []string{"servicemonitors", "podmonitors"},
+ Verbs: []string{"*"},
+ }, {
+ APIGroups: []string{""},
+ Resources: []string{"nodes", "nodes/metrics", "services", "endpoints", "pods", "namespaces"},
+ Verbs: []string{"get", "list", "watch"},
+ }, {
+ APIGroups: []string{""},
+ Resources: []string{"configmaps"},
+ Verbs: []string{"get"},
+ }, {
+ APIGroups: []string{"discovery.k8s.io"},
+ Resources: []string{"endpointslices"},
+ Verbs: []string{"get", "list", "watch"},
+ }, {
+ APIGroups: []string{"networking.k8s.io"},
+ Resources: []string{"ingresses"},
+ Verbs: []string{"get", "list", "watch"},
+ }, {
+ NonResourceURLs: []string{"/metrics"},
+ Verbs: []string{"get"},
+ },
+ }
)
// +kubebuilder:webhook:path=/mutate-opentelemetry-io-v1beta1-opentelemetrycollector,mutating=true,failurePolicy=fail,groups=opentelemetry.io,resources=opentelemetrycollectors,verbs=create;update,versions=v1beta1,name=mopentelemetrycollectorbeta.kb.io,sideEffects=none,admissionReviewVersions=v1
@@ -50,47 +84,351 @@ func (c CollectorWebhook) Default(_ context.Context, obj runtime.Object) error {
if !ok {
return fmt.Errorf("expected an OpenTelemetryCollector, received %T", obj)
}
- return c.defaulter(otelcol)
+ if len(otelcol.Spec.Mode) == 0 {
+ otelcol.Spec.Mode = ModeDeployment
+ }
+ if len(otelcol.Spec.UpgradeStrategy) == 0 {
+ otelcol.Spec.UpgradeStrategy = UpgradeStrategyAutomatic
+ }
+
+ if otelcol.Labels == nil {
+ otelcol.Labels = map[string]string{}
+ }
+ if otelcol.Labels["app.kubernetes.io/managed-by"] == "" {
+ otelcol.Labels["app.kubernetes.io/managed-by"] = "opentelemetry-operator"
+ }
+
+ // We can default to one because the dependent objects (Deployment and
+ // HorizontalPodAutoscaler) default to 1 as well.
+ one := int32(1)
+ if otelcol.Spec.Replicas == nil {
+ otelcol.Spec.Replicas = &one
+ }
+ if otelcol.Spec.TargetAllocator.Enabled && otelcol.Spec.TargetAllocator.Replicas == nil {
+ otelcol.Spec.TargetAllocator.Replicas = &one
+ }
+
+ if otelcol.Spec.Autoscaler != nil && otelcol.Spec.Autoscaler.MaxReplicas != nil {
+ if otelcol.Spec.Autoscaler.MinReplicas == nil {
+ otelcol.Spec.Autoscaler.MinReplicas = otelcol.Spec.Replicas
+ }
+
+ if otelcol.Spec.Autoscaler.TargetMemoryUtilization == nil && otelcol.Spec.Autoscaler.TargetCPUUtilization == nil {
+ defaultCPUTarget := int32(90)
+ otelcol.Spec.Autoscaler.TargetCPUUtilization = &defaultCPUTarget
+ }
+ }
+
+	// If a PDB isn't provided, we set MaxUnavailable to 1. This works even with
+	// a single replica: it doesn't block node drains, while protecting deployments
+	// with replicas > 1 from drain-induced disruption out of the box.
+ if otelcol.Spec.PodDisruptionBudget == nil {
+ otelcol.Spec.PodDisruptionBudget = &PodDisruptionBudgetSpec{
+ MaxUnavailable: &intstr.IntOrString{
+ Type: intstr.Int,
+ IntVal: 1,
+ },
+ }
+ }
+
+	// If the target allocator is enabled with a strategy that supports a PDB
+	// (consistent-hashing) and no PDB is provided, we likewise set MaxUnavailable
+	// to 1: node drains aren't blocked, while replicas > 1 are protected from
+	// drain-induced disruption out of the box.
+ if otelcol.Spec.TargetAllocator.Enabled &&
+ otelcol.Spec.TargetAllocator.AllocationStrategy == TargetAllocatorAllocationStrategyConsistentHashing &&
+ otelcol.Spec.TargetAllocator.PodDisruptionBudget == nil {
+ otelcol.Spec.TargetAllocator.PodDisruptionBudget = &PodDisruptionBudgetSpec{
+ MaxUnavailable: &intstr.IntOrString{
+ Type: intstr.Int,
+ IntVal: 1,
+ },
+ }
+ }
+
+ if otelcol.Spec.Ingress.Type == IngressTypeRoute && otelcol.Spec.Ingress.Route.Termination == "" {
+ otelcol.Spec.Ingress.Route.Termination = TLSRouteTerminationTypeEdge
+ }
+ if otelcol.Spec.Ingress.Type == IngressTypeNginx && otelcol.Spec.Ingress.RuleType == "" {
+ otelcol.Spec.Ingress.RuleType = IngressRuleTypePath
+ }
+	// If someone upgrades to a later version without upgrading their CRD, they will not have a management state set.
+	// This results in a default state of unmanaged, preventing reconciliation from continuing.
+ if len(otelcol.Spec.ManagementState) == 0 {
+ otelcol.Spec.ManagementState = ManagementStateManaged
+ }
+ return nil
}
-func (c CollectorWebhook) ValidateCreate(_ context.Context, obj runtime.Object) (admission.Warnings, error) {
+func (c CollectorWebhook) ValidateCreate(ctx context.Context, obj runtime.Object) (admission.Warnings, error) {
otelcol, ok := obj.(*OpenTelemetryCollector)
if !ok {
return nil, fmt.Errorf("expected an OpenTelemetryCollector, received %T", obj)
}
- return c.validate(otelcol)
+ return c.validate(ctx, otelcol)
}
-func (c CollectorWebhook) ValidateUpdate(_ context.Context, _, newObj runtime.Object) (admission.Warnings, error) {
+func (c CollectorWebhook) ValidateUpdate(ctx context.Context, _, newObj runtime.Object) (admission.Warnings, error) {
otelcol, ok := newObj.(*OpenTelemetryCollector)
if !ok {
return nil, fmt.Errorf("expected an OpenTelemetryCollector, received %T", newObj)
}
- return c.validate(otelcol)
+ return c.validate(ctx, otelcol)
}
-func (c CollectorWebhook) ValidateDelete(_ context.Context, obj runtime.Object) (admission.Warnings, error) {
+func (c CollectorWebhook) ValidateDelete(ctx context.Context, obj runtime.Object) (admission.Warnings, error) {
otelcol, ok := obj.(*OpenTelemetryCollector)
if !ok || otelcol == nil {
return nil, fmt.Errorf("expected an OpenTelemetryCollector, received %T", obj)
}
- return c.validate(otelcol)
+ return c.validate(ctx, otelcol)
}
-func (c CollectorWebhook) defaulter(r *OpenTelemetryCollector) error {
- return nil
-}
-
-func (c CollectorWebhook) validate(r *OpenTelemetryCollector) (admission.Warnings, error) {
+func (c CollectorWebhook) validate(ctx context.Context, r *OpenTelemetryCollector) (admission.Warnings, error) {
warnings := admission.Warnings{}
nullObjects := r.Spec.Config.nullObjects()
if len(nullObjects) > 0 {
		warnings = append(warnings, fmt.Sprintf("Collector config spec.config has null objects: %s. For compatibility tooling (kustomize and kubectl edit) it is recommended to use empty objects e.g. batch: {}.", strings.Join(nullObjects, ", ")))
}
+
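+	// Several attributes only apply to specific deployment modes; reject
+	// combinations that the generated manifests cannot honor.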
+ // validate volumeClaimTemplates
+ if r.Spec.Mode != ModeStatefulSet && len(r.Spec.VolumeClaimTemplates) > 0 {
+ return warnings, fmt.Errorf("the OpenTelemetry Collector mode is set to %s, which does not support the attribute 'volumeClaimTemplates'", r.Spec.Mode)
+ }
+
+ // validate tolerations
+ if r.Spec.Mode == ModeSidecar && len(r.Spec.Tolerations) > 0 {
+ return warnings, fmt.Errorf("the OpenTelemetry Collector mode is set to %s, which does not support the attribute 'tolerations'", r.Spec.Mode)
+ }
+
+ // validate priorityClassName
+ if r.Spec.Mode == ModeSidecar && r.Spec.PriorityClassName != "" {
+ return warnings, fmt.Errorf("the OpenTelemetry Collector mode is set to %s, which does not support the attribute 'priorityClassName'", r.Spec.Mode)
+ }
+
+ // validate affinity
+ if r.Spec.Mode == ModeSidecar && r.Spec.Affinity != nil {
+ return warnings, fmt.Errorf("the OpenTelemetry Collector mode is set to %s, which does not support the attribute 'affinity'", r.Spec.Mode)
+ }
+
+ if r.Spec.Mode == ModeSidecar && len(r.Spec.AdditionalContainers) > 0 {
+ return warnings, fmt.Errorf("the OpenTelemetry Collector mode is set to %s, which does not support the attribute 'AdditionalContainers'", r.Spec.Mode)
+ }
+
+ // validate target allocator configs
+ if r.Spec.TargetAllocator.Enabled {
+ taWarnings, err := c.validateTargetAllocatorConfig(ctx, r)
+ if taWarnings != nil {
+ warnings = append(warnings, taWarnings...)
+ }
+ if err != nil {
+ return warnings, err
+ }
+ }
+
+	// validate port config
+ for _, p := range r.Spec.Ports {
+ nameErrs := validation.IsValidPortName(p.Name)
+ numErrs := validation.IsValidPortNum(int(p.Port))
+ if len(nameErrs) > 0 || len(numErrs) > 0 {
+ return warnings, fmt.Errorf("the OpenTelemetry Spec Ports configuration is incorrect, port name '%s' errors: %s, num '%d' errors: %s",
+ p.Name, nameErrs, p.Port, numErrs)
+ }
+ }
+
+ var maxReplicas *int32
+ if r.Spec.Autoscaler != nil && r.Spec.Autoscaler.MaxReplicas != nil {
+ maxReplicas = r.Spec.Autoscaler.MaxReplicas
+ }
+ var minReplicas *int32
+ if r.Spec.Autoscaler != nil && r.Spec.Autoscaler.MinReplicas != nil {
+ minReplicas = r.Spec.Autoscaler.MinReplicas
+ }
+	// fall back to the replica count if minReplicas is not set
+ if minReplicas == nil {
+ minReplicas = r.Spec.Replicas
+ }
+
+ // validate autoscale with horizontal pod autoscaler
+ if maxReplicas != nil {
+ if *maxReplicas < int32(1) {
+ return warnings, fmt.Errorf("the OpenTelemetry Spec autoscale configuration is incorrect, maxReplicas should be defined and one or more")
+ }
+
+ if r.Spec.Replicas != nil && *r.Spec.Replicas > *maxReplicas {
+ return warnings, fmt.Errorf("the OpenTelemetry Spec autoscale configuration is incorrect, replicas must not be greater than maxReplicas")
+ }
+
+ if minReplicas != nil && *minReplicas > *maxReplicas {
+ return warnings, fmt.Errorf("the OpenTelemetry Spec autoscale configuration is incorrect, minReplicas must not be greater than maxReplicas")
+ }
+
+ if minReplicas != nil && *minReplicas < int32(1) {
+ return warnings, fmt.Errorf("the OpenTelemetry Spec autoscale configuration is incorrect, minReplicas should be one or more")
+ }
+
+ if r.Spec.Autoscaler != nil {
+ return warnings, checkAutoscalerSpec(r.Spec.Autoscaler)
+ }
+ }
+
+ if r.Spec.Ingress.Type == IngressTypeNginx && r.Spec.Mode == ModeSidecar {
+ return warnings, fmt.Errorf("the OpenTelemetry Spec Ingress configuration is incorrect. Ingress can only be used in combination with the modes: %s, %s, %s",
+ ModeDeployment, ModeDaemonSet, ModeStatefulSet,
+ )
+ }
+
+ if r.Spec.Ingress.RuleType == IngressRuleTypeSubdomain && (r.Spec.Ingress.Hostname == "" || r.Spec.Ingress.Hostname == "*") {
+ return warnings, fmt.Errorf("a valid Ingress hostname has to be defined for subdomain ruleType")
+ }
+
+ if r.Spec.LivenessProbe != nil {
+ if r.Spec.LivenessProbe.InitialDelaySeconds != nil && *r.Spec.LivenessProbe.InitialDelaySeconds < 0 {
+ return warnings, fmt.Errorf("the OpenTelemetry Spec LivenessProbe InitialDelaySeconds configuration is incorrect. InitialDelaySeconds should be greater than or equal to 0")
+ }
+ if r.Spec.LivenessProbe.PeriodSeconds != nil && *r.Spec.LivenessProbe.PeriodSeconds < 1 {
+ return warnings, fmt.Errorf("the OpenTelemetry Spec LivenessProbe PeriodSeconds configuration is incorrect. PeriodSeconds should be greater than or equal to 1")
+ }
+ if r.Spec.LivenessProbe.TimeoutSeconds != nil && *r.Spec.LivenessProbe.TimeoutSeconds < 1 {
+ return warnings, fmt.Errorf("the OpenTelemetry Spec LivenessProbe TimeoutSeconds configuration is incorrect. TimeoutSeconds should be greater than or equal to 1")
+ }
+ if r.Spec.LivenessProbe.SuccessThreshold != nil && *r.Spec.LivenessProbe.SuccessThreshold < 1 {
+ return warnings, fmt.Errorf("the OpenTelemetry Spec LivenessProbe SuccessThreshold configuration is incorrect. SuccessThreshold should be greater than or equal to 1")
+ }
+ if r.Spec.LivenessProbe.FailureThreshold != nil && *r.Spec.LivenessProbe.FailureThreshold < 1 {
+ return warnings, fmt.Errorf("the OpenTelemetry Spec LivenessProbe FailureThreshold configuration is incorrect. FailureThreshold should be greater than or equal to 1")
+ }
+ if r.Spec.LivenessProbe.TerminationGracePeriodSeconds != nil && *r.Spec.LivenessProbe.TerminationGracePeriodSeconds < 1 {
+ return warnings, fmt.Errorf("the OpenTelemetry Spec LivenessProbe TerminationGracePeriodSeconds configuration is incorrect. TerminationGracePeriodSeconds should be greater than or equal to 1")
+ }
+ }
+
+ // validate updateStrategy for DaemonSet
+ if r.Spec.Mode != ModeDaemonSet && len(r.Spec.DaemonSetUpdateStrategy.Type) > 0 {
+ return warnings, fmt.Errorf("the OpenTelemetry Collector mode is set to %s, which does not support the attribute 'updateStrategy'", r.Spec.Mode)
+ }
+
+ // validate updateStrategy for Deployment
+ if r.Spec.Mode != ModeDeployment && len(r.Spec.DeploymentUpdateStrategy.Type) > 0 {
+ return warnings, fmt.Errorf("the OpenTelemetry Collector mode is set to %s, which does not support the attribute 'deploymentUpdateStrategy'", r.Spec.Mode)
+ }
+
return warnings, nil
}
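+// validateTargetAllocatorConfig checks that the collector mode and allocation
+// strategy are compatible with target allocation, validates the Prometheus
+// receiver configuration, and, when the Prometheus CR is enabled, verifies the
+// target allocator's RBAC permissions.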
+func (c CollectorWebhook) validateTargetAllocatorConfig(ctx context.Context, r *OpenTelemetryCollector) (admission.Warnings, error) {
+ if r.Spec.Mode != ModeStatefulSet && r.Spec.Mode != ModeDaemonSet {
+ return nil, fmt.Errorf("the OpenTelemetry Collector mode is set to %s, which does not support the target allocation deployment", r.Spec.Mode)
+ }
+
+ if r.Spec.Mode == ModeDaemonSet && r.Spec.TargetAllocator.AllocationStrategy != TargetAllocatorAllocationStrategyPerNode {
+ return nil, fmt.Errorf("the OpenTelemetry Collector mode is set to %s, which must be used with target allocation strategy %s ", r.Spec.Mode, TargetAllocatorAllocationStrategyPerNode)
+ }
+
+ if r.Spec.TargetAllocator.AllocationStrategy == TargetAllocatorAllocationStrategyPerNode && r.Spec.Mode != ModeDaemonSet {
+ return nil, fmt.Errorf("target allocation strategy %s is only supported in OpenTelemetry Collector mode %s", TargetAllocatorAllocationStrategyPerNode, ModeDaemonSet)
+ }
+
+ cfgYaml, err := r.Spec.Config.Yaml()
+ if err != nil {
+ return nil, err
+ }
+ // validate Prometheus config for target allocation
+ promCfg, err := ta.ConfigToPromConfig(cfgYaml)
+ if err != nil {
+ return nil, fmt.Errorf("the OpenTelemetry Spec Prometheus configuration is incorrect, %w", err)
+ }
+ err = ta.ValidatePromConfig(promCfg, r.Spec.TargetAllocator.Enabled)
+ if err != nil {
+ return nil, fmt.Errorf("the OpenTelemetry Spec Prometheus configuration is incorrect, %w", err)
+ }
+ err = ta.ValidateTargetAllocatorConfig(r.Spec.TargetAllocator.PrometheusCR.Enabled, promCfg)
+ if err != nil {
+ return nil, fmt.Errorf("the OpenTelemetry Spec Prometheus configuration is incorrect, %w", err)
+ }
+ // if the prometheusCR is enabled, it needs a suite of permissions to function
+ if r.Spec.TargetAllocator.PrometheusCR.Enabled {
+ if subjectAccessReviews, err := c.reviewer.CheckPolicyRules(ctx, r.GetNamespace(), r.Spec.TargetAllocator.ServiceAccount, targetAllocatorCRPolicyRules...); err != nil {
+ return nil, fmt.Errorf("unable to check rbac rules %w", err)
+ } else if allowed, deniedReviews := rbac.AllSubjectAccessReviewsAllowed(subjectAccessReviews); !allowed {
+ return warningsGroupedByResource(deniedReviews), nil
+ }
+ }
+
+ return nil, nil
+}
+
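+// checkAutoscalerSpec validates the autoscaler's stabilization windows,
+// utilization targets (1-99), and pods-type custom metrics.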
+func checkAutoscalerSpec(autoscaler *AutoscalerSpec) error {
+ if autoscaler.Behavior != nil {
+ if autoscaler.Behavior.ScaleDown != nil && autoscaler.Behavior.ScaleDown.StabilizationWindowSeconds != nil &&
+ *autoscaler.Behavior.ScaleDown.StabilizationWindowSeconds < int32(1) {
+ return fmt.Errorf("the OpenTelemetry Spec autoscale configuration is incorrect, scaleDown should be one or more")
+ }
+
+ if autoscaler.Behavior.ScaleUp != nil && autoscaler.Behavior.ScaleUp.StabilizationWindowSeconds != nil &&
+ *autoscaler.Behavior.ScaleUp.StabilizationWindowSeconds < int32(1) {
+ return fmt.Errorf("the OpenTelemetry Spec autoscale configuration is incorrect, scaleUp should be one or more")
+ }
+ }
+ if autoscaler.TargetCPUUtilization != nil && (*autoscaler.TargetCPUUtilization < int32(1) || *autoscaler.TargetCPUUtilization > int32(99)) {
+ return fmt.Errorf("the OpenTelemetry Spec autoscale configuration is incorrect, targetCPUUtilization should be greater than 0 and less than 100")
+ }
+ if autoscaler.TargetMemoryUtilization != nil && (*autoscaler.TargetMemoryUtilization < int32(1) || *autoscaler.TargetMemoryUtilization > int32(99)) {
+ return fmt.Errorf("the OpenTelemetry Spec autoscale configuration is incorrect, targetMemoryUtilization should be greater than 0 and less than 100")
+ }
+
+ for _, metric := range autoscaler.Metrics {
+ if metric.Type != autoscalingv2.PodsMetricSourceType {
+ return fmt.Errorf("the OpenTelemetry Spec autoscale configuration is incorrect, metric type unsupported. Expected metric of source type Pod")
+ }
+
+		// Pods metric targets only support value and averageValue.
+ if metric.Pods.Target.Type == autoscalingv2.AverageValueMetricType {
+ if val, ok := metric.Pods.Target.AverageValue.AsInt64(); !ok || val < int64(1) {
+ return fmt.Errorf("the OpenTelemetry Spec autoscale configuration is incorrect, average value should be greater than 0")
+ }
+ } else if metric.Pods.Target.Type == autoscalingv2.ValueMetricType {
+ if val, ok := metric.Pods.Target.Value.AsInt64(); !ok || val < int64(1) {
+ return fmt.Errorf("the OpenTelemetry Spec autoscale configuration is incorrect, value should be greater than 0")
+ }
+ } else {
+ return fmt.Errorf("the OpenTelemetry Spec autoscale configuration is incorrect, invalid pods target type")
+ }
+ }
+
+ return nil
+}
+
+// warningsGroupedByResource formats the denied SubjectAccessReviews as one warning per missing resource rule.
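+// For example, a denied "get" on core-group pods produces
+// "missing the following rules for pods: [get]".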
+func warningsGroupedByResource(reviews []*authorizationv1.SubjectAccessReview) []string {
+ fullResourceToVerbs := make(map[string][]string)
+ for _, review := range reviews {
+ if review.Spec.ResourceAttributes != nil {
+ key := fmt.Sprintf("%s/%s", review.Spec.ResourceAttributes.Group, review.Spec.ResourceAttributes.Resource)
+ if len(review.Spec.ResourceAttributes.Group) == 0 {
+ key = review.Spec.ResourceAttributes.Resource
+ }
+ fullResourceToVerbs[key] = append(fullResourceToVerbs[key], review.Spec.ResourceAttributes.Verb)
+ } else if review.Spec.NonResourceAttributes != nil {
+ key := fmt.Sprintf("nonResourceURL: %s", review.Spec.NonResourceAttributes.Path)
+ fullResourceToVerbs[key] = append(fullResourceToVerbs[key], review.Spec.NonResourceAttributes.Verb)
+ }
+ }
+ var warnings []string
+ for fullResource, verbs := range fullResourceToVerbs {
+ warnings = append(warnings, fmt.Sprintf("missing the following rules for %s: [%s]", fullResource, strings.Join(verbs, ",")))
+ }
+ return warnings
+}
+
func SetupCollectorWebhook(mgr ctrl.Manager, cfg config.Config, reviewer *rbac.Reviewer) error {
cvw := &CollectorWebhook{
reviewer: reviewer,
diff --git a/apis/v1beta1/collector_webhook_test.go b/apis/v1beta1/collector_webhook_test.go
index 9b7647cca5..3489d035de 100644
--- a/apis/v1beta1/collector_webhook_test.go
+++ b/apis/v1beta1/collector_webhook_test.go
@@ -15,10 +15,33 @@
package v1beta1
import (
+ "context"
+ "fmt"
+ "os"
"testing"
+ "github.com/go-logr/logr"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
+ "gopkg.in/yaml.v3"
+ appsv1 "k8s.io/api/apps/v1"
+ authv1 "k8s.io/api/authorization/v1"
+ autoscalingv2 "k8s.io/api/autoscaling/v2"
+ v1 "k8s.io/api/core/v1"
+ "k8s.io/apimachinery/pkg/api/resource"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/util/intstr"
+ "k8s.io/client-go/kubernetes/fake"
+ "k8s.io/client-go/kubernetes/scheme"
+ kubeTesting "k8s.io/client-go/testing"
+
+ "github.com/open-telemetry/opentelemetry-operator/internal/config"
+ "github.com/open-telemetry/opentelemetry-operator/internal/rbac"
+)
+
+var (
+ testScheme = scheme.Scheme
)
func TestValidate(t *testing.T) {
@@ -57,7 +80,7 @@ func TestValidate(t *testing.T) {
webhook := CollectorWebhook{}
t.Run(tt.name, func(t *testing.T) {
tt := tt
- warnings, err := webhook.validate(&tt.collector)
+ warnings, err := webhook.validate(context.Background(), &tt.collector)
if tt.err == "" {
require.NoError(t, err)
} else {
@@ -67,3 +90,1099 @@ func TestValidate(t *testing.T) {
})
}
}
+
+func TestCollectorDefaultingWebhook(t *testing.T) {
+ one := int32(1)
+ five := int32(5)
+ defaultCPUTarget := int32(90)
+
+ if err := AddToScheme(testScheme); err != nil {
+ fmt.Printf("failed to register scheme: %v", err)
+ os.Exit(1)
+ }
+
+ tests := []struct {
+ name string
+ otelcol OpenTelemetryCollector
+ expected OpenTelemetryCollector
+ }{
+ {
+ name: "all fields default",
+ otelcol: OpenTelemetryCollector{},
+ expected: OpenTelemetryCollector{
+ ObjectMeta: metav1.ObjectMeta{
+ Labels: map[string]string{
+ "app.kubernetes.io/managed-by": "opentelemetry-operator",
+ },
+ },
+ Spec: OpenTelemetryCollectorSpec{
+ OpenTelemetryCommonFields: OpenTelemetryCommonFields{
+ ManagementState: ManagementStateManaged,
+ Replicas: &one,
+ PodDisruptionBudget: &PodDisruptionBudgetSpec{
+ MaxUnavailable: &intstr.IntOrString{
+ Type: intstr.Int,
+ IntVal: 1,
+ },
+ },
+ },
+ Mode: ModeDeployment,
+ UpgradeStrategy: UpgradeStrategyAutomatic,
+ },
+ },
+ },
+ {
+ name: "provided values in spec",
+ otelcol: OpenTelemetryCollector{
+ Spec: OpenTelemetryCollectorSpec{
+ Mode: ModeSidecar,
+ UpgradeStrategy: "adhoc",
+ OpenTelemetryCommonFields: OpenTelemetryCommonFields{
+ Replicas: &five,
+ },
+ },
+ },
+ expected: OpenTelemetryCollector{
+ ObjectMeta: metav1.ObjectMeta{
+ Labels: map[string]string{
+ "app.kubernetes.io/managed-by": "opentelemetry-operator",
+ },
+ },
+ Spec: OpenTelemetryCollectorSpec{
+ Mode: ModeSidecar,
+ UpgradeStrategy: "adhoc",
+ OpenTelemetryCommonFields: OpenTelemetryCommonFields{
+ Replicas: &five,
+ ManagementState: ManagementStateManaged,
+ PodDisruptionBudget: &PodDisruptionBudgetSpec{
+ MaxUnavailable: &intstr.IntOrString{
+ Type: intstr.Int,
+ IntVal: 1,
+ },
+ },
+ },
+ },
+ },
+ },
+ {
+ name: "doesn't override unmanaged",
+ otelcol: OpenTelemetryCollector{
+ Spec: OpenTelemetryCollectorSpec{
+ Mode: ModeSidecar,
+ UpgradeStrategy: "adhoc",
+ OpenTelemetryCommonFields: OpenTelemetryCommonFields{
+ Replicas: &five,
+ ManagementState: ManagementStateUnmanaged,
+ },
+ },
+ },
+ expected: OpenTelemetryCollector{
+ ObjectMeta: metav1.ObjectMeta{
+ Labels: map[string]string{
+ "app.kubernetes.io/managed-by": "opentelemetry-operator",
+ },
+ },
+ Spec: OpenTelemetryCollectorSpec{
+ Mode: ModeSidecar,
+ UpgradeStrategy: "adhoc",
+ OpenTelemetryCommonFields: OpenTelemetryCommonFields{
+ Replicas: &five,
+ ManagementState: ManagementStateUnmanaged,
+ PodDisruptionBudget: &PodDisruptionBudgetSpec{
+ MaxUnavailable: &intstr.IntOrString{
+ Type: intstr.Int,
+ IntVal: 1,
+ },
+ },
+ },
+ },
+ },
+ },
+ {
+ name: "Setting Autoscaler MaxReplicas",
+ otelcol: OpenTelemetryCollector{
+ Spec: OpenTelemetryCollectorSpec{
+ OpenTelemetryCommonFields: OpenTelemetryCommonFields{
+ Autoscaler: &AutoscalerSpec{
+ MaxReplicas: &five,
+ MinReplicas: &one,
+ },
+ },
+ },
+ },
+ expected: OpenTelemetryCollector{
+ ObjectMeta: metav1.ObjectMeta{
+ Labels: map[string]string{
+ "app.kubernetes.io/managed-by": "opentelemetry-operator",
+ },
+ },
+ Spec: OpenTelemetryCollectorSpec{
+ Mode: ModeDeployment,
+ UpgradeStrategy: UpgradeStrategyAutomatic,
+ OpenTelemetryCommonFields: OpenTelemetryCommonFields{
+ Replicas: &one,
+ ManagementState: ManagementStateManaged,
+ Autoscaler: &AutoscalerSpec{
+ TargetCPUUtilization: &defaultCPUTarget,
+ MaxReplicas: &five,
+ MinReplicas: &one,
+ },
+ PodDisruptionBudget: &PodDisruptionBudgetSpec{
+ MaxUnavailable: &intstr.IntOrString{
+ Type: intstr.Int,
+ IntVal: 1,
+ },
+ },
+ },
+ },
+ },
+ },
+ {
+ name: "Missing route termination",
+ otelcol: OpenTelemetryCollector{
+ Spec: OpenTelemetryCollectorSpec{
+ Mode: ModeDeployment,
+ Ingress: Ingress{
+ Type: IngressTypeRoute,
+ },
+ },
+ },
+ expected: OpenTelemetryCollector{
+ ObjectMeta: metav1.ObjectMeta{
+ Labels: map[string]string{
+ "app.kubernetes.io/managed-by": "opentelemetry-operator",
+ },
+ },
+ Spec: OpenTelemetryCollectorSpec{
+ Mode: ModeDeployment,
+ OpenTelemetryCommonFields: OpenTelemetryCommonFields{
+ ManagementState: ManagementStateManaged,
+ Replicas: &one,
+ PodDisruptionBudget: &PodDisruptionBudgetSpec{
+ MaxUnavailable: &intstr.IntOrString{
+ Type: intstr.Int,
+ IntVal: 1,
+ },
+ },
+ },
+ Ingress: Ingress{
+ Type: IngressTypeRoute,
+ Route: OpenShiftRoute{
+ Termination: TLSRouteTerminationTypeEdge,
+ },
+ },
+ UpgradeStrategy: UpgradeStrategyAutomatic,
+ },
+ },
+ },
+ {
+ name: "Defined PDB for collector",
+ otelcol: OpenTelemetryCollector{
+ Spec: OpenTelemetryCollectorSpec{
+ Mode: ModeDeployment,
+ OpenTelemetryCommonFields: OpenTelemetryCommonFields{
+ PodDisruptionBudget: &PodDisruptionBudgetSpec{
+ MinAvailable: &intstr.IntOrString{
+ Type: intstr.String,
+ StrVal: "10%",
+ },
+ },
+ },
+ },
+ },
+ expected: OpenTelemetryCollector{
+ ObjectMeta: metav1.ObjectMeta{
+ Labels: map[string]string{
+ "app.kubernetes.io/managed-by": "opentelemetry-operator",
+ },
+ },
+ Spec: OpenTelemetryCollectorSpec{
+ Mode: ModeDeployment,
+ OpenTelemetryCommonFields: OpenTelemetryCommonFields{
+ Replicas: &one,
+ ManagementState: ManagementStateManaged,
+ PodDisruptionBudget: &PodDisruptionBudgetSpec{
+ MinAvailable: &intstr.IntOrString{
+ Type: intstr.String,
+ StrVal: "10%",
+ },
+ },
+ },
+ UpgradeStrategy: UpgradeStrategyAutomatic,
+ },
+ },
+ },
+ {
+ name: "Defined PDB for target allocator",
+ otelcol: OpenTelemetryCollector{
+ Spec: OpenTelemetryCollectorSpec{
+ Mode: ModeDeployment,
+ TargetAllocator: TargetAllocatorEmbedded{
+ Enabled: true,
+ AllocationStrategy: TargetAllocatorAllocationStrategyConsistentHashing,
+ PodDisruptionBudget: &PodDisruptionBudgetSpec{
+ MinAvailable: &intstr.IntOrString{
+ Type: intstr.String,
+ StrVal: "10%",
+ },
+ },
+ },
+ },
+ },
+ expected: OpenTelemetryCollector{
+ ObjectMeta: metav1.ObjectMeta{
+ Labels: map[string]string{
+ "app.kubernetes.io/managed-by": "opentelemetry-operator",
+ },
+ },
+ Spec: OpenTelemetryCollectorSpec{
+ Mode: ModeDeployment,
+ OpenTelemetryCommonFields: OpenTelemetryCommonFields{
+ Replicas: &one,
+ ManagementState: ManagementStateManaged,
+ PodDisruptionBudget: &PodDisruptionBudgetSpec{
+ MaxUnavailable: &intstr.IntOrString{
+ Type: intstr.Int,
+ IntVal: 1,
+ },
+ },
+ },
+ UpgradeStrategy: UpgradeStrategyAutomatic,
+ TargetAllocator: TargetAllocatorEmbedded{
+ Enabled: true,
+ Replicas: &one,
+ AllocationStrategy: TargetAllocatorAllocationStrategyConsistentHashing,
+ PodDisruptionBudget: &PodDisruptionBudgetSpec{
+ MinAvailable: &intstr.IntOrString{
+ Type: intstr.String,
+ StrVal: "10%",
+ },
+ },
+ },
+ },
+ },
+ },
+ {
+ name: "Undefined PDB for target allocator and consistent-hashing strategy",
+ otelcol: OpenTelemetryCollector{
+ Spec: OpenTelemetryCollectorSpec{
+ Mode: ModeDeployment,
+ TargetAllocator: TargetAllocatorEmbedded{
+ Enabled: true,
+ Replicas: &one,
+ AllocationStrategy: TargetAllocatorAllocationStrategyConsistentHashing,
+ },
+ },
+ },
+ expected: OpenTelemetryCollector{
+ ObjectMeta: metav1.ObjectMeta{
+ Labels: map[string]string{
+ "app.kubernetes.io/managed-by": "opentelemetry-operator",
+ },
+ },
+ Spec: OpenTelemetryCollectorSpec{
+ Mode: ModeDeployment,
+ OpenTelemetryCommonFields: OpenTelemetryCommonFields{
+ Replicas: &one,
+ ManagementState: ManagementStateManaged,
+ PodDisruptionBudget: &PodDisruptionBudgetSpec{
+ MaxUnavailable: &intstr.IntOrString{
+ Type: intstr.Int,
+ IntVal: 1,
+ },
+ },
+ },
+ UpgradeStrategy: UpgradeStrategyAutomatic,
+ TargetAllocator: TargetAllocatorEmbedded{
+ Enabled: true,
+ Replicas: &one,
+ AllocationStrategy: TargetAllocatorAllocationStrategyConsistentHashing,
+ PodDisruptionBudget: &PodDisruptionBudgetSpec{
+ MaxUnavailable: &intstr.IntOrString{
+ Type: intstr.Int,
+ IntVal: 1,
+ },
+ },
+ },
+ },
+ },
+ },
+ {
+ name: "Undefined PDB for target allocator and not consistent-hashing strategy",
+ otelcol: OpenTelemetryCollector{
+ Spec: OpenTelemetryCollectorSpec{
+ Mode: ModeDeployment,
+ TargetAllocator: TargetAllocatorEmbedded{
+ Enabled: true,
+ AllocationStrategy: TargetAllocatorAllocationStrategyLeastWeighted,
+ },
+ },
+ },
+ expected: OpenTelemetryCollector{
+ ObjectMeta: metav1.ObjectMeta{
+ Labels: map[string]string{
+ "app.kubernetes.io/managed-by": "opentelemetry-operator",
+ },
+ },
+ Spec: OpenTelemetryCollectorSpec{
+ Mode: ModeDeployment,
+ OpenTelemetryCommonFields: OpenTelemetryCommonFields{
+ Replicas: &one,
+ ManagementState: ManagementStateManaged,
+ PodDisruptionBudget: &PodDisruptionBudgetSpec{
+ MaxUnavailable: &intstr.IntOrString{
+ Type: intstr.Int,
+ IntVal: 1,
+ },
+ },
+ },
+ UpgradeStrategy: UpgradeStrategyAutomatic,
+ TargetAllocator: TargetAllocatorEmbedded{
+ Enabled: true,
+ Replicas: &one,
+ AllocationStrategy: TargetAllocatorAllocationStrategyLeastWeighted,
+ },
+ },
+ },
+ },
+ }
+
+ for _, test := range tests {
+ test := test
+ t.Run(test.name, func(t *testing.T) {
+ cvw := &CollectorWebhook{
+ logger: logr.Discard(),
+ scheme: testScheme,
+ cfg: config.New(
+ config.WithCollectorImage("collector:v0.0.0"),
+ config.WithTargetAllocatorImage("ta:v0.0.0"),
+ ),
+ }
+ ctx := context.Background()
+ err := cvw.Default(ctx, &test.otelcol)
+ assert.NoError(t, err)
+ assert.Equal(t, test.expected, test.otelcol)
+ })
+ }
+}
+
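+// cfgYaml is a minimal collector configuration with a Prometheus scrape config,
+// used to exercise the target allocator validation paths.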
+var cfgYaml = `receivers:
+ examplereceiver:
+ endpoint: "0.0.0.0:12345"
+ examplereceiver/settings:
+ endpoint: "0.0.0.0:12346"
+ prometheus:
+ config:
+ scrape_configs:
+ - job_name: otel-collector
+ scrape_interval: 10s
+ jaeger/custom:
+ protocols:
+ thrift_http:
+ endpoint: 0.0.0.0:15268
+`
+
+func TestOTELColValidatingWebhook(t *testing.T) {
+ minusOne := int32(-1)
+ zero := int32(0)
+ zero64 := int64(0)
+ one := int32(1)
+ three := int32(3)
+ five := int32(5)
+
+ cfg := Config{}
+ err := yaml.Unmarshal([]byte(cfgYaml), &cfg)
+ require.NoError(t, err)
+
+ tests := []struct { //nolint:govet
+ name string
+ otelcol OpenTelemetryCollector
+ expectedErr string
+ expectedWarnings []string
+ shouldFailSar bool
+ }{
+ {
+ name: "valid empty spec",
+ otelcol: OpenTelemetryCollector{},
+ },
+ {
+ name: "valid full spec",
+ otelcol: OpenTelemetryCollector{
+ Spec: OpenTelemetryCollectorSpec{
+ Mode: ModeStatefulSet,
+ OpenTelemetryCommonFields: OpenTelemetryCommonFields{
+ Replicas: &three,
+ Autoscaler: &AutoscalerSpec{
+ MinReplicas: &one,
+ MaxReplicas: &five,
+ Behavior: &autoscalingv2.HorizontalPodAutoscalerBehavior{
+ ScaleDown: &autoscalingv2.HPAScalingRules{
+ StabilizationWindowSeconds: &three,
+ },
+ ScaleUp: &autoscalingv2.HPAScalingRules{
+ StabilizationWindowSeconds: &five,
+ },
+ },
+ TargetCPUUtilization: &five,
+ },
+ Ports: []PortsSpec{
+ {
+ ServicePort: v1.ServicePort{
+ Name: "port1",
+ Port: 5555,
+ },
+ },
+ {
+ ServicePort: v1.ServicePort{
+ Name: "port2",
+ Port: 5554,
+ Protocol: v1.ProtocolUDP,
+ },
+ },
+ },
+ },
+ UpgradeStrategy: "adhoc",
+ TargetAllocator: TargetAllocatorEmbedded{
+ Enabled: true,
+ },
+ Config: cfg,
+ },
+ },
+ },
+ {
+ name: "prom CR admissions warning",
+ shouldFailSar: true, // force failure
+ otelcol: OpenTelemetryCollector{
+ Spec: OpenTelemetryCollectorSpec{
+ Mode: ModeStatefulSet,
+ OpenTelemetryCommonFields: OpenTelemetryCommonFields{
+ Replicas: &three,
+ Autoscaler: &AutoscalerSpec{
+ MinReplicas: &one,
+ MaxReplicas: &five,
+ Behavior: &autoscalingv2.HorizontalPodAutoscalerBehavior{
+ ScaleDown: &autoscalingv2.HPAScalingRules{
+ StabilizationWindowSeconds: &three,
+ },
+ ScaleUp: &autoscalingv2.HPAScalingRules{
+ StabilizationWindowSeconds: &five,
+ },
+ },
+ TargetCPUUtilization: &five,
+ },
+ Ports: []PortsSpec{
+ {
+ ServicePort: v1.ServicePort{
+ Name: "port1",
+ Port: 5555,
+ },
+ },
+ {
+ ServicePort: v1.ServicePort{
+ Name: "port2",
+ Port: 5554,
+ Protocol: v1.ProtocolUDP,
+ },
+ },
+ },
+ },
+ UpgradeStrategy: "adhoc",
+ TargetAllocator: TargetAllocatorEmbedded{
+ Enabled: true,
+ PrometheusCR: TargetAllocatorPrometheusCR{Enabled: true},
+ },
+ Config: cfg,
+ },
+ },
+ expectedWarnings: []string{
+ "missing the following rules for monitoring.coreos.com/servicemonitors: [*]",
+ "missing the following rules for monitoring.coreos.com/podmonitors: [*]",
+ "missing the following rules for nodes/metrics: [get,list,watch]",
+ "missing the following rules for services: [get,list,watch]",
+ "missing the following rules for endpoints: [get,list,watch]",
+ "missing the following rules for namespaces: [get,list,watch]",
+ "missing the following rules for networking.k8s.io/ingresses: [get,list,watch]",
+ "missing the following rules for nodes: [get,list,watch]",
+ "missing the following rules for pods: [get,list,watch]",
+ "missing the following rules for configmaps: [get]",
+ "missing the following rules for discovery.k8s.io/endpointslices: [get,list,watch]",
+ "missing the following rules for nonResourceURL: /metrics: [get]",
+ },
+ },
+ {
+ name: "prom CR no admissions warning",
+ shouldFailSar: false, // force SAR okay
+ otelcol: OpenTelemetryCollector{
+ Spec: OpenTelemetryCollectorSpec{
+ Mode: ModeStatefulSet,
+ UpgradeStrategy: "adhoc",
+ OpenTelemetryCommonFields: OpenTelemetryCommonFields{
+ Replicas: &three,
+ Ports: []PortsSpec{
+ {
+ ServicePort: v1.ServicePort{
+ Name: "port1",
+ Port: 5555,
+ },
+ },
+ {
+ ServicePort: v1.ServicePort{
+ Name: "port2",
+ Port: 5554,
+ Protocol: v1.ProtocolUDP,
+ },
+ },
+ },
+ Autoscaler: &AutoscalerSpec{
+ Behavior: &autoscalingv2.HorizontalPodAutoscalerBehavior{
+ ScaleDown: &autoscalingv2.HPAScalingRules{
+ StabilizationWindowSeconds: &three,
+ },
+ ScaleUp: &autoscalingv2.HPAScalingRules{
+ StabilizationWindowSeconds: &five,
+ },
+ },
+ TargetCPUUtilization: &five,
+ },
+ },
+ TargetAllocator: TargetAllocatorEmbedded{
+ Enabled: true,
+ PrometheusCR: TargetAllocatorPrometheusCR{Enabled: true},
+ },
+ Config: cfg,
+ },
+ },
+ },
+ {
+ name: "invalid mode with volume claim templates",
+ otelcol: OpenTelemetryCollector{
+ Spec: OpenTelemetryCollectorSpec{
+ Mode: ModeSidecar,
+ OpenTelemetryCommonFields: OpenTelemetryCommonFields{
+ VolumeClaimTemplates: []v1.PersistentVolumeClaim{{}, {}},
+ },
+ },
+ },
+ expectedErr: "does not support the attribute 'volumeClaimTemplates'",
+ },
+ {
+ name: "invalid mode with tolerations",
+ otelcol: OpenTelemetryCollector{
+ Spec: OpenTelemetryCollectorSpec{
+ Mode: ModeSidecar,
+ OpenTelemetryCommonFields: OpenTelemetryCommonFields{
+ Tolerations: []v1.Toleration{{}, {}},
+ },
+ },
+ },
+ expectedErr: "does not support the attribute 'tolerations'",
+ },
+ {
+ name: "invalid mode with target allocator",
+ otelcol: OpenTelemetryCollector{
+ Spec: OpenTelemetryCollectorSpec{
+ Mode: ModeDeployment,
+ TargetAllocator: TargetAllocatorEmbedded{
+ Enabled: true,
+ },
+ },
+ },
+ expectedErr: "does not support the target allocation deployment",
+ },
+ {
+ name: "invalid target allocator config",
+ otelcol: OpenTelemetryCollector{
+ Spec: OpenTelemetryCollectorSpec{
+ Mode: ModeStatefulSet,
+ TargetAllocator: TargetAllocatorEmbedded{
+ Enabled: true,
+ },
+ },
+ },
+ expectedErr: "the OpenTelemetry Spec Prometheus configuration is incorrect",
+ },
+ {
+ name: "invalid target allocation strategy",
+ otelcol: OpenTelemetryCollector{
+ Spec: OpenTelemetryCollectorSpec{
+ Mode: ModeDaemonSet,
+ TargetAllocator: TargetAllocatorEmbedded{
+ Enabled: true,
+ AllocationStrategy: TargetAllocatorAllocationStrategyLeastWeighted,
+ },
+ },
+ },
+ expectedErr: "mode is set to daemonset, which must be used with target allocation strategy per-node",
+ },
+ {
+ name: "invalid port name",
+ otelcol: OpenTelemetryCollector{
+ Spec: OpenTelemetryCollectorSpec{
+ OpenTelemetryCommonFields: OpenTelemetryCommonFields{
+ Ports: []PortsSpec{
+ {
+ ServicePort: v1.ServicePort{
+							// this port name contains a non-alphanumeric character, which is invalid.
+							Name:     "-test🦄port",
+ Port: 12345,
+ Protocol: v1.ProtocolTCP,
+ },
+ },
+ },
+ },
+ },
+ },
+ expectedErr: "the OpenTelemetry Spec Ports configuration is incorrect",
+ },
+ {
+ name: "invalid port name, too long",
+ otelcol: OpenTelemetryCollector{
+ Spec: OpenTelemetryCollectorSpec{
+ OpenTelemetryCommonFields: OpenTelemetryCommonFields{
+ Ports: []PortsSpec{
+ {
+ ServicePort: v1.ServicePort{
+ Name: "aaaabbbbccccdddd", // len: 16, too long
+ Port: 5555,
+ },
+ },
+ },
+ },
+ },
+ },
+ expectedErr: "the OpenTelemetry Spec Ports configuration is incorrect",
+ },
+ {
+ name: "invalid port num",
+ otelcol: OpenTelemetryCollector{
+ Spec: OpenTelemetryCollectorSpec{
+ OpenTelemetryCommonFields: OpenTelemetryCommonFields{
+ Ports: []PortsSpec{
+ {
+ ServicePort: v1.ServicePort{
+ Name: "aaaabbbbccccddd", // len: 15
+ // no port set means it's 0, which is invalid
+ },
+ },
+ },
+ },
+ },
+ },
+ expectedErr: "the OpenTelemetry Spec Ports configuration is incorrect",
+ },
+ {
+ name: "invalid max replicas",
+ otelcol: OpenTelemetryCollector{
+ Spec: OpenTelemetryCollectorSpec{
+ OpenTelemetryCommonFields: OpenTelemetryCommonFields{
+ Autoscaler: &AutoscalerSpec{
+ MaxReplicas: &zero,
+ },
+ },
+ },
+ },
+ expectedErr: "maxReplicas should be defined and one or more",
+ },
+ {
+ name: "invalid replicas, greater than max",
+ otelcol: OpenTelemetryCollector{
+ Spec: OpenTelemetryCollectorSpec{
+ OpenTelemetryCommonFields: OpenTelemetryCommonFields{
+ Replicas: &five,
+ Autoscaler: &AutoscalerSpec{
+ MaxReplicas: &three,
+ },
+ },
+ },
+ },
+ expectedErr: "replicas must not be greater than maxReplicas",
+ },
+ {
+ name: "invalid min replicas, greater than max",
+ otelcol: OpenTelemetryCollector{
+ Spec: OpenTelemetryCollectorSpec{
+ OpenTelemetryCommonFields: OpenTelemetryCommonFields{
+ Autoscaler: &AutoscalerSpec{
+ MaxReplicas: &three,
+ MinReplicas: &five,
+ },
+ },
+ },
+ },
+ expectedErr: "minReplicas must not be greater than maxReplicas",
+ },
+ {
+ name: "invalid min replicas, lesser than 1",
+ otelcol: OpenTelemetryCollector{
+ Spec: OpenTelemetryCollectorSpec{
+ OpenTelemetryCommonFields: OpenTelemetryCommonFields{
+ Autoscaler: &AutoscalerSpec{
+ MaxReplicas: &three,
+ MinReplicas: &zero,
+ },
+ },
+ },
+ },
+ expectedErr: "minReplicas should be one or more",
+ },
+ {
+ name: "invalid autoscaler scale down",
+ otelcol: OpenTelemetryCollector{
+ Spec: OpenTelemetryCollectorSpec{
+ OpenTelemetryCommonFields: OpenTelemetryCommonFields{
+ Autoscaler: &AutoscalerSpec{
+ MaxReplicas: &three,
+ Behavior: &autoscalingv2.HorizontalPodAutoscalerBehavior{
+ ScaleDown: &autoscalingv2.HPAScalingRules{
+ StabilizationWindowSeconds: &zero,
+ },
+ },
+ },
+ },
+ },
+ },
+ expectedErr: "scaleDown should be one or more",
+ },
+ {
+ name: "invalid autoscaler scale up",
+ otelcol: OpenTelemetryCollector{
+ Spec: OpenTelemetryCollectorSpec{
+ OpenTelemetryCommonFields: OpenTelemetryCommonFields{
+ Autoscaler: &AutoscalerSpec{
+ MaxReplicas: &three,
+ Behavior: &autoscalingv2.HorizontalPodAutoscalerBehavior{
+ ScaleUp: &autoscalingv2.HPAScalingRules{
+ StabilizationWindowSeconds: &zero,
+ },
+ },
+ },
+ },
+ },
+ },
+ expectedErr: "scaleUp should be one or more",
+ },
+ {
+ name: "invalid autoscaler target cpu utilization",
+ otelcol: OpenTelemetryCollector{
+ Spec: OpenTelemetryCollectorSpec{
+ OpenTelemetryCommonFields: OpenTelemetryCommonFields{
+ Autoscaler: &AutoscalerSpec{
+ MaxReplicas: &three,
+ TargetCPUUtilization: &zero,
+ },
+ },
+ },
+ },
+ expectedErr: "targetCPUUtilization should be greater than 0 and less than 100",
+ },
+ {
+ name: "autoscaler minReplicas is less than maxReplicas",
+ otelcol: OpenTelemetryCollector{
+ Spec: OpenTelemetryCollectorSpec{
+ OpenTelemetryCommonFields: OpenTelemetryCommonFields{
+ Autoscaler: &AutoscalerSpec{
+ MaxReplicas: &one,
+ MinReplicas: &five,
+ },
+ },
+ },
+ },
+ expectedErr: "the OpenTelemetry Spec autoscale configuration is incorrect, minReplicas must not be greater than maxReplicas",
+ },
+ {
+ name: "invalid autoscaler metric type",
+ otelcol: OpenTelemetryCollector{
+ Spec: OpenTelemetryCollectorSpec{
+ OpenTelemetryCommonFields: OpenTelemetryCommonFields{
+ Autoscaler: &AutoscalerSpec{
+ MaxReplicas: &three,
+ Metrics: []MetricSpec{
+ {
+ Type: autoscalingv2.ResourceMetricSourceType,
+ },
+ },
+ },
+ },
+ },
+ },
+ expectedErr: "the OpenTelemetry Spec autoscale configuration is incorrect, metric type unsupported. Expected metric of source type Pod",
+ },
+ {
+ name: "invalid pod metric average value",
+ otelcol: OpenTelemetryCollector{
+ Spec: OpenTelemetryCollectorSpec{
+ OpenTelemetryCommonFields: OpenTelemetryCommonFields{
+ Autoscaler: &AutoscalerSpec{
+ MaxReplicas: &three,
+ Metrics: []MetricSpec{
+ {
+ Type: autoscalingv2.PodsMetricSourceType,
+ Pods: &autoscalingv2.PodsMetricSource{
+ Metric: autoscalingv2.MetricIdentifier{
+ Name: "custom1",
+ },
+ Target: autoscalingv2.MetricTarget{
+ Type: autoscalingv2.AverageValueMetricType,
+ AverageValue: resource.NewQuantity(int64(0), resource.DecimalSI),
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ expectedErr: "the OpenTelemetry Spec autoscale configuration is incorrect, average value should be greater than 0",
+ },
+ {
+ name: "utilization target is not valid with pod metrics",
+ otelcol: OpenTelemetryCollector{
+ Spec: OpenTelemetryCollectorSpec{
+ OpenTelemetryCommonFields: OpenTelemetryCommonFields{
+ Autoscaler: &AutoscalerSpec{
+ MaxReplicas: &three,
+ Metrics: []MetricSpec{
+ {
+ Type: autoscalingv2.PodsMetricSourceType,
+ Pods: &autoscalingv2.PodsMetricSource{
+ Metric: autoscalingv2.MetricIdentifier{
+ Name: "custom1",
+ },
+ Target: autoscalingv2.MetricTarget{
+ Type: autoscalingv2.UtilizationMetricType,
+ AverageUtilization: &one,
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ expectedErr: "the OpenTelemetry Spec autoscale configuration is incorrect, invalid pods target type",
+ },
+ {
+ name: "invalid deployment mode incompabible with ingress settings",
+ otelcol: OpenTelemetryCollector{
+ Spec: OpenTelemetryCollectorSpec{
+ Mode: ModeSidecar,
+ Ingress: Ingress{
+ Type: IngressTypeNginx,
+ },
+ },
+ },
+ expectedErr: fmt.Sprintf("Ingress can only be used in combination with the modes: %s, %s, %s",
+ ModeDeployment, ModeDaemonSet, ModeStatefulSet,
+ ),
+ },
+ {
+ name: "invalid mode with priorityClassName",
+ otelcol: OpenTelemetryCollector{
+ Spec: OpenTelemetryCollectorSpec{
+ Mode: ModeSidecar,
+ OpenTelemetryCommonFields: OpenTelemetryCommonFields{
+ PriorityClassName: "test-class",
+ },
+ },
+ },
+ expectedErr: "does not support the attribute 'priorityClassName'",
+ },
+ {
+ name: "invalid mode with affinity",
+ otelcol: OpenTelemetryCollector{
+ Spec: OpenTelemetryCollectorSpec{
+ Mode: ModeSidecar,
+ OpenTelemetryCommonFields: OpenTelemetryCommonFields{
+ Affinity: &v1.Affinity{
+ NodeAffinity: &v1.NodeAffinity{
+ RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{
+ NodeSelectorTerms: []v1.NodeSelectorTerm{
+ {
+ MatchExpressions: []v1.NodeSelectorRequirement{
+ {
+ Key: "node",
+ Operator: v1.NodeSelectorOpIn,
+ Values: []string{"test-node"},
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ expectedErr: "does not support the attribute 'affinity'",
+ },
+ {
+ name: "invalid InitialDelaySeconds",
+ otelcol: OpenTelemetryCollector{
+ Spec: OpenTelemetryCollectorSpec{
+ LivenessProbe: &Probe{
+ InitialDelaySeconds: &minusOne,
+ },
+ },
+ },
+ expectedErr: "the OpenTelemetry Spec LivenessProbe InitialDelaySeconds configuration is incorrect",
+ },
+ {
+ name: "invalid PeriodSeconds",
+ otelcol: OpenTelemetryCollector{
+ Spec: OpenTelemetryCollectorSpec{
+ LivenessProbe: &Probe{
+ PeriodSeconds: &zero,
+ },
+ },
+ },
+ expectedErr: "the OpenTelemetry Spec LivenessProbe PeriodSeconds configuration is incorrect",
+ },
+ {
+ name: "invalid TimeoutSeconds",
+ otelcol: OpenTelemetryCollector{
+ Spec: OpenTelemetryCollectorSpec{
+ LivenessProbe: &Probe{
+ TimeoutSeconds: &zero,
+ },
+ },
+ },
+ expectedErr: "the OpenTelemetry Spec LivenessProbe TimeoutSeconds configuration is incorrect",
+ },
+ {
+ name: "invalid SuccessThreshold",
+ otelcol: OpenTelemetryCollector{
+ Spec: OpenTelemetryCollectorSpec{
+ LivenessProbe: &Probe{
+ SuccessThreshold: &zero,
+ },
+ },
+ },
+ expectedErr: "the OpenTelemetry Spec LivenessProbe SuccessThreshold configuration is incorrect",
+ },
+ {
+ name: "invalid FailureThreshold",
+ otelcol: OpenTelemetryCollector{
+ Spec: OpenTelemetryCollectorSpec{
+ LivenessProbe: &Probe{
+ FailureThreshold: &zero,
+ },
+ },
+ },
+ expectedErr: "the OpenTelemetry Spec LivenessProbe FailureThreshold configuration is incorrect",
+ },
+ {
+ name: "invalid TerminationGracePeriodSeconds",
+ otelcol: OpenTelemetryCollector{
+ Spec: OpenTelemetryCollectorSpec{
+ LivenessProbe: &Probe{
+ TerminationGracePeriodSeconds: &zero64,
+ },
+ },
+ },
+ expectedErr: "the OpenTelemetry Spec LivenessProbe TerminationGracePeriodSeconds configuration is incorrect",
+ },
+ {
+ name: "invalid AdditionalContainers",
+ otelcol: OpenTelemetryCollector{
+ Spec: OpenTelemetryCollectorSpec{
+ Mode: ModeSidecar,
+ OpenTelemetryCommonFields: OpenTelemetryCommonFields{
+ AdditionalContainers: []v1.Container{
+ {
+ Name: "test",
+ },
+ },
+ },
+ },
+ },
+ expectedErr: "the OpenTelemetry Collector mode is set to sidecar, which does not support the attribute 'AdditionalContainers'",
+ },
+ {
+ name: "missing ingress hostname for subdomain ruleType",
+ otelcol: OpenTelemetryCollector{
+ Spec: OpenTelemetryCollectorSpec{
+ Ingress: Ingress{
+ RuleType: IngressRuleTypeSubdomain,
+ },
+ },
+ },
+ expectedErr: "a valid Ingress hostname has to be defined for subdomain ruleType",
+ },
+ {
+ name: "invalid updateStrategy for Deployment mode",
+ otelcol: OpenTelemetryCollector{
+ Spec: OpenTelemetryCollectorSpec{
+ Mode: ModeDeployment,
+ DaemonSetUpdateStrategy: appsv1.DaemonSetUpdateStrategy{
+ Type: "RollingUpdate",
+ RollingUpdate: &appsv1.RollingUpdateDaemonSet{
+ MaxSurge: &intstr.IntOrString{Type: intstr.Int, IntVal: int32(1)},
+ MaxUnavailable: &intstr.IntOrString{Type: intstr.Int, IntVal: int32(1)},
+ },
+ },
+ },
+ },
+ expectedErr: "the OpenTelemetry Collector mode is set to deployment, which does not support the attribute 'updateStrategy'",
+ },
+ {
+ name: "invalid updateStrategy for Statefulset mode",
+ otelcol: OpenTelemetryCollector{
+ Spec: OpenTelemetryCollectorSpec{
+ Mode: ModeStatefulSet,
+ DeploymentUpdateStrategy: appsv1.DeploymentStrategy{
+ Type: "RollingUpdate",
+ RollingUpdate: &appsv1.RollingUpdateDeployment{
+ MaxSurge: &intstr.IntOrString{Type: intstr.Int, IntVal: int32(1)},
+ MaxUnavailable: &intstr.IntOrString{Type: intstr.Int, IntVal: int32(1)},
+ },
+ },
+ },
+ },
+ expectedErr: "the OpenTelemetry Collector mode is set to statefulset, which does not support the attribute 'deploymentUpdateStrategy'",
+ },
+ }
+
+ for _, test := range tests {
+ test := test
+ t.Run(test.name, func(t *testing.T) {
+ cvw := &CollectorWebhook{
+ logger: logr.Discard(),
+ scheme: testScheme,
+ cfg: config.New(
+ config.WithCollectorImage("collector:v0.0.0"),
+ config.WithTargetAllocatorImage("ta:v0.0.0"),
+ ),
+ reviewer: getReviewer(test.shouldFailSar),
+ }
+ ctx := context.Background()
+ warnings, err := cvw.ValidateCreate(ctx, &test.otelcol)
+ if test.expectedErr == "" {
+ assert.NoError(t, err)
+ } else {
+ assert.ErrorContains(t, err, test.expectedErr)
+ }
+ assert.Equal(t, len(test.expectedWarnings), len(warnings))
+ assert.ElementsMatch(t, warnings, test.expectedWarnings)
+ })
+ }
+}
+
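+// getReviewer returns an rbac.Reviewer backed by a fake clientset whose
+// SubjectAccessReview responses are forced to be allowed or denied according
+// to shouldFailSAR.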
+func getReviewer(shouldFailSAR bool) *rbac.Reviewer {
+ c := fake.NewSimpleClientset()
+ c.PrependReactor("create", "subjectaccessreviews", func(action kubeTesting.Action) (handled bool, ret runtime.Object, err error) {
+		// sanity check: this reactor should only see SubjectAccessReview create actions
+ if !action.Matches("create", "subjectaccessreviews") {
+ return false, nil, fmt.Errorf("must be a create for a SAR")
+ }
+ sar, ok := action.(kubeTesting.CreateAction).GetObject().DeepCopyObject().(*authv1.SubjectAccessReview)
+ if !ok || sar == nil {
+ return false, nil, fmt.Errorf("bad object")
+ }
+ sar.Status = authv1.SubjectAccessReviewStatus{
+ Allowed: !shouldFailSAR,
+ Denied: shouldFailSAR,
+ }
+ return true, sar, nil
+ })
+ return rbac.NewReviewer(c)
+}
diff --git a/apis/v1beta1/config.go b/apis/v1beta1/config.go
index 04695cefa9..61f166b0f3 100644
--- a/apis/v1beta1/config.go
+++ b/apis/v1beta1/config.go
@@ -18,8 +18,11 @@ import (
"bytes"
"encoding/json"
"fmt"
+ "net"
"reflect"
"sort"
+ "strconv"
+ "strings"
"gopkg.in/yaml.v3"
)
@@ -135,6 +138,26 @@ type Service struct {
Pipelines AnyConfig `json:"pipelines" yaml:"pipelines"`
}
+// MetricsPort gets the port number for the metrics endpoint from the collector config if it has been set.
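+// For example, an address of "0.0.0.0:9090" yields 9090; a missing port, or no
+// telemetry section at all, falls back to the collector default of 8888.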
+func (s *Service) MetricsPort() (int32, error) {
+ if s.GetTelemetry() == nil {
+ // telemetry isn't set, use the default
+ return 8888, nil
+ }
+ _, port, netErr := net.SplitHostPort(s.GetTelemetry().Metrics.Address)
+ if netErr != nil && strings.Contains(netErr.Error(), "missing port in address") {
+ return 8888, nil
+ } else if netErr != nil {
+ return 0, netErr
+ }
+ i64, err := strconv.ParseInt(port, 10, 32)
+ if err != nil {
+ return 0, err
+ }
+
+ return int32(i64), nil
+}
+
// MetricsConfig comes from the collector.
type MetricsConfig struct {
// Level is the level of telemetry metrics, the possible values are:
diff --git a/apis/v1beta1/config_test.go b/apis/v1beta1/config_test.go
index 207a61133a..cf148c5d91 100644
--- a/apis/v1beta1/config_test.go
+++ b/apis/v1beta1/config_test.go
@@ -210,3 +210,71 @@ func TestGetTelemetryFromYAMLIsNil(t *testing.T) {
require.NoError(t, err)
assert.Nil(t, cfg.Service.GetTelemetry())
}
+
+func TestConfigToMetricsPort(t *testing.T) {
+
+ for _, tt := range []struct {
+ desc string
+ expectedPort int32
+ config Service
+ }{
+ {
+ "custom port",
+ 9090,
+ Service{
+ Telemetry: &AnyConfig{
+ Object: map[string]interface{}{
+ "metrics": map[string]interface{}{
+ "address": "0.0.0.0:9090",
+ },
+ },
+ },
+ },
+ },
+ {
+ "bad address",
+ 8888,
+ Service{
+ Telemetry: &AnyConfig{
+ Object: map[string]interface{}{
+ "metrics": map[string]interface{}{
+ "address": "0.0.0.0",
+ },
+ },
+ },
+ },
+ },
+ {
+ "missing address",
+ 8888,
+ Service{
+ Telemetry: &AnyConfig{
+ Object: map[string]interface{}{
+ "metrics": map[string]interface{}{
+ "level": "detailed",
+ },
+ },
+ },
+ },
+ },
+ {
+ "missing metrics",
+ 8888,
+ Service{
+ Telemetry: &AnyConfig{},
+ },
+ },
+ {
+ "missing telemetry",
+ 8888,
+ Service{},
+ },
+ } {
+ t.Run(tt.desc, func(t *testing.T) {
+			// these are acceptable failures; we fall back to the collector's default metrics port
+ port, err := tt.config.MetricsPort()
+ assert.NoError(t, err)
+ assert.Equal(t, tt.expectedPort, port)
+ })
+ }
+}
diff --git a/bundle/manifests/opentelemetry-operator.clusterserviceversion.yaml b/bundle/manifests/opentelemetry-operator.clusterserviceversion.yaml
index f752ffa6c9..7cfcb26c49 100644
--- a/bundle/manifests/opentelemetry-operator.clusterserviceversion.yaml
+++ b/bundle/manifests/opentelemetry-operator.clusterserviceversion.yaml
@@ -659,26 +659,6 @@ spec:
targetPort: 9443
type: MutatingAdmissionWebhook
webhookPath: /mutate-opentelemetry-io-v1alpha1-opampbridge
- - admissionReviewVersions:
- - v1
- containerPort: 443
- deploymentName: opentelemetry-operator-controller-manager
- failurePolicy: Fail
- generateName: mopentelemetrycollector.kb.io
- rules:
- - apiGroups:
- - opentelemetry.io
- apiVersions:
- - v1alpha1
- operations:
- - CREATE
- - UPDATE
- resources:
- - opentelemetrycollectors
- sideEffects: None
- targetPort: 9443
- type: MutatingAdmissionWebhook
- webhookPath: /mutate-opentelemetry-io-v1alpha1-opentelemetrycollector
- admissionReviewVersions:
- v1
containerPort: 443
@@ -796,26 +776,6 @@ spec:
targetPort: 9443
type: ValidatingAdmissionWebhook
webhookPath: /validate-opentelemetry-io-v1alpha1-opampbridge
- - admissionReviewVersions:
- - v1
- containerPort: 443
- deploymentName: opentelemetry-operator-controller-manager
- failurePolicy: Fail
- generateName: vopentelemetrycollectorcreateupdate.kb.io
- rules:
- - apiGroups:
- - opentelemetry.io
- apiVersions:
- - v1alpha1
- operations:
- - CREATE
- - UPDATE
- resources:
- - opentelemetrycollectors
- sideEffects: None
- targetPort: 9443
- type: ValidatingAdmissionWebhook
- webhookPath: /validate-opentelemetry-io-v1alpha1-opentelemetrycollector
- admissionReviewVersions:
- v1
containerPort: 443
@@ -836,25 +796,6 @@ spec:
targetPort: 9443
type: ValidatingAdmissionWebhook
webhookPath: /validate-opentelemetry-io-v1beta1-opentelemetrycollector
- - admissionReviewVersions:
- - v1
- containerPort: 443
- deploymentName: opentelemetry-operator-controller-manager
- failurePolicy: Ignore
- generateName: vopentelemetrycollectordelete.kb.io
- rules:
- - apiGroups:
- - opentelemetry.io
- apiVersions:
- - v1alpha1
- operations:
- - DELETE
- resources:
- - opentelemetrycollectors
- sideEffects: None
- targetPort: 9443
- type: ValidatingAdmissionWebhook
- webhookPath: /validate-opentelemetry-io-v1alpha1-opentelemetrycollector
- admissionReviewVersions:
- v1
containerPort: 443
diff --git a/config/webhook/manifests.yaml b/config/webhook/manifests.yaml
index 1361f76380..173e2f7420 100644
--- a/config/webhook/manifests.yaml
+++ b/config/webhook/manifests.yaml
@@ -64,26 +64,6 @@ webhooks:
resources:
- opampbridges
sideEffects: None
-- admissionReviewVersions:
- - v1
- clientConfig:
- service:
- name: webhook-service
- namespace: system
- path: /mutate-opentelemetry-io-v1alpha1-opentelemetrycollector
- failurePolicy: Fail
- name: mopentelemetrycollector.kb.io
- rules:
- - apiGroups:
- - opentelemetry.io
- apiVersions:
- - v1alpha1
- operations:
- - CREATE
- - UPDATE
- resources:
- - opentelemetrycollectors
- sideEffects: None
- admissionReviewVersions:
- v1
clientConfig:
@@ -226,42 +206,3 @@ webhooks:
resources:
- opampbridges
sideEffects: None
-- admissionReviewVersions:
- - v1
- clientConfig:
- service:
- name: webhook-service
- namespace: system
- path: /validate-opentelemetry-io-v1alpha1-opentelemetrycollector
- failurePolicy: Fail
- name: vopentelemetrycollectorcreateupdate.kb.io
- rules:
- - apiGroups:
- - opentelemetry.io
- apiVersions:
- - v1alpha1
- operations:
- - CREATE
- - UPDATE
- resources:
- - opentelemetrycollectors
- sideEffects: None
-- admissionReviewVersions:
- - v1
- clientConfig:
- service:
- name: webhook-service
- namespace: system
- path: /validate-opentelemetry-io-v1alpha1-opentelemetrycollector
- failurePolicy: Ignore
- name: vopentelemetrycollectordelete.kb.io
- rules:
- - apiGroups:
- - opentelemetry.io
- apiVersions:
- - v1alpha1
- operations:
- - DELETE
- resources:
- - opentelemetrycollectors
- sideEffects: None
diff --git a/controllers/suite_test.go b/controllers/suite_test.go
index b819e8a7e7..d20e7be8b2 100644
--- a/controllers/suite_test.go
+++ b/controllers/suite_test.go
@@ -166,10 +166,6 @@ func TestMain(m *testing.M) {
}
reviewer := rbac.NewReviewer(clientset)
- if err = v1alpha1.SetupCollectorWebhook(mgr, config.New(), reviewer); err != nil {
- fmt.Printf("failed to SetupWebhookWithManager: %v", err)
- os.Exit(1)
- }
if err = v1beta1.SetupCollectorWebhook(mgr, config.New(), reviewer); err != nil {
fmt.Printf("failed to SetupWebhookWithManager: %v", err)
os.Exit(1)
diff --git a/internal/manifests/collector/adapters/config_to_ports.go b/internal/manifests/collector/adapters/config_to_ports.go
index 0a8b183d4f..cb960c423b 100644
--- a/internal/manifests/collector/adapters/config_to_ports.go
+++ b/internal/manifests/collector/adapters/config_to_ports.go
@@ -16,15 +16,11 @@ package adapters
import (
"fmt"
- "net"
"sort"
- "strconv"
- "strings"
"github.com/go-logr/logr"
corev1 "k8s.io/api/core/v1"
- "github.com/open-telemetry/opentelemetry-operator/apis/v1beta1"
"github.com/open-telemetry/opentelemetry-operator/internal/manifests/collector/parser"
exporterParser "github.com/open-telemetry/opentelemetry-operator/internal/manifests/collector/parser/exporter"
receiverParser "github.com/open-telemetry/opentelemetry-operator/internal/manifests/collector/parser/receiver"
@@ -43,7 +39,7 @@ func (c ComponentType) String() string {
}
// ConfigToComponentPorts converts the incoming configuration object into a set of service ports required by the exporters.
-func ConfigToComponentPorts(logger logr.Logger, cType ComponentType, config map[interface{}]interface{}) ([]v1beta1.PortsSpec, error) {
+func ConfigToComponentPorts(logger logr.Logger, cType ComponentType, config map[interface{}]interface{}) ([]corev1.ServicePort, error) {
// now, we gather which ports we might need to open
// for that, we get all the exporters and check their `endpoint` properties,
// extracting the port from it. The port name has to be a "DNS_LABEL", so, we try to make it follow the pattern:
@@ -119,17 +115,10 @@ func ConfigToComponentPorts(logger logr.Logger, cType ComponentType, config map[
return ports[i].Name < ports[j].Name
})
- patchedPorts := []v1beta1.PortsSpec{}
- for _, p := range ports {
- patchedPorts = append(patchedPorts, v1beta1.PortsSpec{
- ServicePort: p,
- })
- }
-
- return patchedPorts, nil
+ return ports, nil
}
-func ConfigToPorts(logger logr.Logger, config map[interface{}]interface{}) ([]v1beta1.PortsSpec, error) {
+func ConfigToPorts(logger logr.Logger, config map[interface{}]interface{}) ([]corev1.ServicePort, error) {
ports, err := ConfigToComponentPorts(logger, ComponentTypeReceiver, config)
if err != nil {
logger.Error(err, "there was a problem while getting the ports from the receivers")
@@ -150,23 +139,3 @@ func ConfigToPorts(logger logr.Logger, config map[interface{}]interface{}) ([]v1
return ports, nil
}
-
-// ConfigToMetricsPort gets the port number for the metrics endpoint from the collector config if it has been set.
-func ConfigToMetricsPort(config v1beta1.Service) (int32, error) {
- if config.GetTelemetry() == nil {
- // telemetry isn't set, use the default
- return 8888, nil
- }
- _, port, netErr := net.SplitHostPort(config.GetTelemetry().Metrics.Address)
- if netErr != nil && strings.Contains(netErr.Error(), "missing port in address") {
- return 8888, nil
- } else if netErr != nil {
- return 0, netErr
- }
- i64, err := strconv.ParseInt(port, 10, 32)
- if err != nil {
- return 0, err
- }
-
- return int32(i64), nil
-}
diff --git a/internal/manifests/collector/adapters/config_to_ports_test.go b/internal/manifests/collector/adapters/config_to_ports_test.go
index 160d998487..a53a695e3b 100644
--- a/internal/manifests/collector/adapters/config_to_ports_test.go
+++ b/internal/manifests/collector/adapters/config_to_ports_test.go
@@ -25,7 +25,6 @@ import (
"k8s.io/apimachinery/pkg/util/intstr"
logf "sigs.k8s.io/controller-runtime/pkg/log"
- "github.com/open-telemetry/opentelemetry-operator/apis/v1beta1"
"github.com/open-telemetry/opentelemetry-operator/internal/manifests/collector/adapters"
"github.com/open-telemetry/opentelemetry-operator/internal/manifests/collector/parser"
"github.com/open-telemetry/opentelemetry-operator/internal/manifests/collector/parser/receiver"
@@ -96,11 +95,6 @@ func TestExtractPortsFromConfig(t *testing.T) {
targetPort4317 := intstr.IntOrString{Type: 0, IntVal: 4317, StrVal: ""}
targetPort4318 := intstr.IntOrString{Type: 0, IntVal: 4318, StrVal: ""}
- svcPorts := []corev1.ServicePort{}
- for _, p := range ports {
- svcPorts = append(svcPorts, p.ServicePort)
- }
-
expectedPorts := []corev1.ServicePort{
{Name: "examplereceiver", Port: 12345},
{Name: "port-12346", Port: 12346},
@@ -113,7 +107,7 @@ func TestExtractPortsFromConfig(t *testing.T) {
{Name: "otlp-http", AppProtocol: &httpAppProtocol, Port: 4318, TargetPort: targetPort4318},
{Name: "zipkin", AppProtocol: &httpAppProtocol, Protocol: "TCP", Port: 9411},
}
- assert.ElementsMatch(t, expectedPorts, svcPorts)
+ assert.ElementsMatch(t, expectedPorts, ports)
}
func TestNoPortsParsed(t *testing.T) {
@@ -211,73 +205,6 @@ func TestParserFailed(t *testing.T) {
assert.NoError(t, err)
assert.True(t, mockParserCalled)
}
-func TestConfigToMetricsPort(t *testing.T) {
-
- for _, tt := range []struct {
- desc string
- expectedPort int32
- config v1beta1.Service
- }{
- {
- "custom port",
- 9090,
- v1beta1.Service{
- Telemetry: &v1beta1.AnyConfig{
- Object: map[string]interface{}{
- "metrics": map[string]interface{}{
- "address": "0.0.0.0:9090",
- },
- },
- },
- },
- },
- {
- "bad address",
- 8888,
- v1beta1.Service{
- Telemetry: &v1beta1.AnyConfig{
- Object: map[string]interface{}{
- "metrics": map[string]interface{}{
- "address": "0.0.0.0",
- },
- },
- },
- },
- },
- {
- "missing address",
- 8888,
- v1beta1.Service{
- Telemetry: &v1beta1.AnyConfig{
- Object: map[string]interface{}{
- "metrics": map[string]interface{}{
- "level": "detailed",
- },
- },
- },
- },
- },
- {
- "missing metrics",
- 8888,
- v1beta1.Service{
- Telemetry: &v1beta1.AnyConfig{},
- },
- },
- {
- "missing telemetry",
- 8888,
- v1beta1.Service{},
- },
- } {
- t.Run(tt.desc, func(t *testing.T) {
- // these are acceptable failures, we return to the collector's default metric port
- port, err := adapters.ConfigToMetricsPort(tt.config)
- assert.NoError(t, err)
- assert.Equal(t, tt.expectedPort, port)
- })
- }
-}
type mockParser struct {
portsFunc func() ([]corev1.ServicePort, error)
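
Since the adapters now return []corev1.ServicePort directly, the tests compare against the expected ports without first unwrapping PortsSpec. A minimal standalone illustration of the simplified assertion style (the receiver name below is a made-up example, not one of the repo's fixtures):

package adapters_test

import (
	"testing"

	"github.com/stretchr/testify/assert"
	corev1 "k8s.io/api/core/v1"
)

// TestPortsCompareDirectly shows the new shape of the assertions: the
// parsed ports slice is matched element-wise against plain
// corev1.ServicePort values, with no intermediate conversion loop.
func TestPortsCompareDirectly(t *testing.T) {
	parsed := []corev1.ServicePort{{Name: "examplereceiver", Port: 12345}}
	expected := []corev1.ServicePort{{Name: "examplereceiver", Port: 12345}}
	assert.ElementsMatch(t, expected, parsed)
}
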
diff --git a/internal/manifests/collector/container.go b/internal/manifests/collector/container.go
index 4f5c1b5ef6..d0585ff9d8 100644
--- a/internal/manifests/collector/container.go
+++ b/internal/manifests/collector/container.go
@@ -202,7 +202,7 @@ func getConfigContainerPorts(logger logr.Logger, cfgYaml string, conf v1beta1.Co
}
}
- metricsPort, err := adapters.ConfigToMetricsPort(conf.Service)
+ metricsPort, err := conf.Service.MetricsPort()
if err != nil {
logger.Info("couldn't determine metrics port from configuration, using 8888 default value", "error", err)
metricsPort = 8888
diff --git a/internal/manifests/collector/ingress.go b/internal/manifests/collector/ingress.go
index f8a7530d3d..cfede9e1af 100644
--- a/internal/manifests/collector/ingress.go
+++ b/internal/manifests/collector/ingress.go
@@ -161,19 +161,24 @@ func servicePortsFromCfg(logger logr.Logger, otelcol v1beta1.OpenTelemetryCollec
// in the first case, we remove the port we inferred from the list
// in the second case, we rename our inferred port to something like "port-%d"
portNumbers, portNames := extractPortNumbersAndNames(otelcol.Spec.Ports)
- var resultingInferredPorts []v1beta1.PortsSpec
+ var resultingInferredPorts []corev1.ServicePort
for _, inferred := range ports {
if filtered := filterPort(logger, inferred, portNumbers, portNames); filtered != nil {
resultingInferredPorts = append(resultingInferredPorts, *filtered)
}
}
- ports = append(otelcol.Spec.Ports, resultingInferredPorts...)
+
+ ports = append(toServicePorts(otelcol.Spec.Ports), resultingInferredPorts...)
}
- svcPorts := []corev1.ServicePort{}
- for _, p := range ports {
- svcPorts = append(svcPorts, p.ServicePort)
+ return ports, nil
+}
+
+func toServicePorts(spec []v1beta1.PortsSpec) []corev1.ServicePort {
+ var ports []corev1.ServicePort
+ for _, p := range spec {
+ ports = append(ports, p.ServicePort)
}
- return svcPorts, err
+ return ports
}
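
v1beta1.PortsSpec embeds corev1.ServicePort, so the new toServicePorts helper is a plain unwrap applied before user-declared ports are merged with the inferred ones (service.go below reuses it the same way). A self-contained sketch with stubbed stand-ins for the two types:

package main

import "fmt"

type ServicePort struct { // stands in for corev1.ServicePort
	Name string
	Port int32
}

type PortsSpec struct { // stands in for v1beta1.PortsSpec
	ServicePort // embedded, as in the real API
}

// toServicePorts unwraps the embedded ServicePort from each PortsSpec.
func toServicePorts(spec []PortsSpec) []ServicePort {
	var ports []ServicePort
	for _, p := range spec {
		ports = append(ports, p.ServicePort)
	}
	return ports
}

func main() {
	declared := []PortsSpec{{ServicePort{Name: "web", Port: 8080}}}
	inferred := []ServicePort{{Name: "otlp-grpc", Port: 4317}}
	merged := append(toServicePorts(declared), inferred...)
	fmt.Println(merged) // prints [{web 8080} {otlp-grpc 4317}]
}
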
diff --git a/internal/manifests/collector/service.go b/internal/manifests/collector/service.go
index 7306943402..10d3ae15ab 100644
--- a/internal/manifests/collector/service.go
+++ b/internal/manifests/collector/service.go
@@ -64,7 +64,7 @@ func MonitoringService(params manifests.Params) (*corev1.Service, error) {
labels := manifestutils.Labels(params.OtelCol.ObjectMeta, name, params.OtelCol.Spec.Image, ComponentOpenTelemetryCollector, []string{})
labels[monitoringLabel] = valueExists
- metricsPort, err := adapters.ConfigToMetricsPort(params.OtelCol.Spec.Config.Service)
+ metricsPort, err := params.OtelCol.Spec.Config.Service.MetricsPort()
if err != nil {
return nil, err
}
@@ -125,14 +125,14 @@ func Service(params manifests.Params) (*corev1.Service, error) {
// in the first case, we remove the port we inferred from the list
// in the second case, we rename our inferred port to something like "port-%d"
portNumbers, portNames := extractPortNumbersAndNames(params.OtelCol.Spec.Ports)
- var resultingInferredPorts []v1beta1.PortsSpec
+ var resultingInferredPorts []corev1.ServicePort
for _, inferred := range ports {
if filtered := filterPort(params.Log, inferred, portNumbers, portNames); filtered != nil {
resultingInferredPorts = append(resultingInferredPorts, *filtered)
}
}
- ports = append(params.OtelCol.Spec.Ports, resultingInferredPorts...)
+ ports = append(toServicePorts(params.OtelCol.Spec.Ports), resultingInferredPorts...)
}
// if we have no ports, we don't need a service
@@ -147,11 +147,6 @@ func Service(params manifests.Params) (*corev1.Service, error) {
trafficPolicy = corev1.ServiceInternalTrafficPolicyLocal
}
- svcPorts := []corev1.ServicePort{}
- for _, p := range ports {
- svcPorts = append(svcPorts, p.ServicePort)
- }
-
return &corev1.Service{
ObjectMeta: metav1.ObjectMeta{
Name: naming.Service(params.OtelCol.Name),
@@ -163,7 +158,7 @@ func Service(params manifests.Params) (*corev1.Service, error) {
InternalTrafficPolicy: &trafficPolicy,
Selector: manifestutils.SelectorLabels(params.OtelCol.ObjectMeta, ComponentOpenTelemetryCollector),
ClusterIP: "",
- Ports: svcPorts,
+ Ports: ports,
},
}, nil
}
@@ -185,7 +180,7 @@ func newPortNumberKey(port int32, protocol corev1.Protocol) PortNumberKey {
return PortNumberKey{Port: port, Protocol: protocol}
}
-func filterPort(logger logr.Logger, candidate v1beta1.PortsSpec, portNumbers map[PortNumberKey]bool, portNames map[string]bool) *v1beta1.PortsSpec {
+func filterPort(logger logr.Logger, candidate corev1.ServicePort, portNumbers map[PortNumberKey]bool, portNames map[string]bool) *corev1.ServicePort {
if portNumbers[newPortNumberKey(candidate.Port, candidate.Protocol)] {
return nil
}
diff --git a/internal/manifests/collector/service_test.go b/internal/manifests/collector/service_test.go
index f73b88d1a9..0e3c125be5 100644
--- a/internal/manifests/collector/service_test.go
+++ b/internal/manifests/collector/service_test.go
@@ -54,14 +54,14 @@ func TestFilterPort(t *testing.T) {
tests := []struct {
name string
- candidate v1beta1.PortsSpec
+ candidate v1.ServicePort
portNumbers map[PortNumberKey]bool
portNames map[string]bool
- expected v1beta1.PortsSpec
+ expected v1.ServicePort
}{
{
name: "should filter out duplicate port",
- candidate: v1beta1.PortsSpec{ServicePort: v1.ServicePort{Name: "web", Port: 8080}},
+ candidate: v1.ServicePort{Name: "web", Port: 8080},
portNumbers: map[PortNumberKey]bool{
newPortNumberKeyByPort(8080): true, newPortNumberKeyByPort(9200): true},
portNames: map[string]bool{"test": true, "metrics": true},
@@ -69,7 +69,7 @@ func TestFilterPort(t *testing.T) {
{
name: "should filter out duplicate port, protocol specified (TCP)",
- candidate: v1beta1.PortsSpec{ServicePort: v1.ServicePort{Name: "web", Port: 8080, Protocol: v1.ProtocolTCP}},
+ candidate: v1.ServicePort{Name: "web", Port: 8080, Protocol: v1.ProtocolTCP},
portNumbers: map[PortNumberKey]bool{
newPortNumberKeyByPort(8080): true, newPortNumberKeyByPort(9200): true},
portNames: map[string]bool{"test": true, "metrics": true},
@@ -77,7 +77,7 @@ func TestFilterPort(t *testing.T) {
{
name: "should filter out duplicate port, protocol specified (UDP)",
- candidate: v1beta1.PortsSpec{ServicePort: v1.ServicePort{Name: "web", Port: 8080, Protocol: v1.ProtocolUDP}},
+ candidate: v1.ServicePort{Name: "web", Port: 8080, Protocol: v1.ProtocolUDP},
portNumbers: map[PortNumberKey]bool{
newPortNumberKey(8080, v1.ProtocolUDP): true, newPortNumberKeyByPort(9200): true},
portNames: map[string]bool{"test": true, "metrics": true},
@@ -85,52 +85,52 @@ func TestFilterPort(t *testing.T) {
{
name: "should not filter unique port",
- candidate: v1beta1.PortsSpec{ServicePort: v1.ServicePort{Name: "web", Port: 8090}},
+ candidate: v1.ServicePort{Name: "web", Port: 8090},
portNumbers: map[PortNumberKey]bool{
newPortNumberKeyByPort(8080): true, newPortNumberKeyByPort(9200): true},
portNames: map[string]bool{"test": true, "metrics": true},
- expected: v1beta1.PortsSpec{ServicePort: v1.ServicePort{Name: "web", Port: 8090}},
+ expected: v1.ServicePort{Name: "web", Port: 8090},
},
{
name: "should not filter same port with different protocols",
- candidate: v1beta1.PortsSpec{ServicePort: v1.ServicePort{Name: "web", Port: 8080}},
+ candidate: v1.ServicePort{Name: "web", Port: 8080},
portNumbers: map[PortNumberKey]bool{
newPortNumberKey(8080, v1.ProtocolUDP): true, newPortNumberKeyByPort(9200): true},
portNames: map[string]bool{"test": true, "metrics": true},
- expected: v1beta1.PortsSpec{ServicePort: v1.ServicePort{Name: "web", Port: 8080}},
+ expected: v1.ServicePort{Name: "web", Port: 8080},
},
{
name: "should not filter same port with different protocols, candidate has specified port (TCP vs UDP)",
- candidate: v1beta1.PortsSpec{ServicePort: v1.ServicePort{Name: "web", Port: 8080, Protocol: v1.ProtocolTCP}},
+ candidate: v1.ServicePort{Name: "web", Port: 8080, Protocol: v1.ProtocolTCP},
portNumbers: map[PortNumberKey]bool{
newPortNumberKey(8080, v1.ProtocolUDP): true, newPortNumberKeyByPort(9200): true},
portNames: map[string]bool{"test": true, "metrics": true},
- expected: v1beta1.PortsSpec{ServicePort: v1.ServicePort{Name: "web", Port: 8080, Protocol: v1.ProtocolTCP}},
+ expected: v1.ServicePort{Name: "web", Port: 8080, Protocol: v1.ProtocolTCP},
},
{
name: "should not filter same port with different protocols, candidate has specified port (UDP vs TCP)",
- candidate: v1beta1.PortsSpec{ServicePort: v1.ServicePort{Name: "web", Port: 8080, Protocol: v1.ProtocolUDP}},
+ candidate: v1.ServicePort{Name: "web", Port: 8080, Protocol: v1.ProtocolUDP},
portNumbers: map[PortNumberKey]bool{
newPortNumberKeyByPort(8080): true, newPortNumberKeyByPort(9200): true},
portNames: map[string]bool{"test": true, "metrics": true},
- expected: v1beta1.PortsSpec{ServicePort: v1.ServicePort{Name: "web", Port: 8080, Protocol: v1.ProtocolUDP}},
+ expected: v1.ServicePort{Name: "web", Port: 8080, Protocol: v1.ProtocolUDP},
},
{
name: "should change the duplicate portName",
- candidate: v1beta1.PortsSpec{ServicePort: v1.ServicePort{Name: "web", Port: 8090}},
+ candidate: v1.ServicePort{Name: "web", Port: 8090},
portNumbers: map[PortNumberKey]bool{
newPortNumberKeyByPort(8080): true, newPortNumberKeyByPort(9200): true},
portNames: map[string]bool{"web": true, "metrics": true},
- expected: v1beta1.PortsSpec{ServicePort: v1.ServicePort{Name: "port-8090", Port: 8090}},
+ expected: v1.ServicePort{Name: "port-8090", Port: 8090},
},
{
name: "should return nil if fallback name clashes with existing portName",
- candidate: v1beta1.PortsSpec{ServicePort: v1.ServicePort{Name: "web", Port: 8090}},
+ candidate: v1.ServicePort{Name: "web", Port: 8090},
portNumbers: map[PortNumberKey]bool{
newPortNumberKeyByPort(8080): true, newPortNumberKeyByPort(9200): true},
portNames: map[string]bool{"web": true, "port-8090": true},
@@ -139,7 +139,7 @@ func TestFilterPort(t *testing.T) {
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
actual := filterPort(logger, test.candidate, test.portNumbers, test.portNames)
- if test.expected != (v1beta1.PortsSpec{}) {
+ if test.expected != (v1.ServicePort{}) {
assert.Equal(t, test.expected, *actual)
return
}
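
Taken together, the cases above pin down filterPort's contract over plain corev1.ServicePort values: drop a candidate whose port number (per protocol) is already declared, rename a candidate whose name is already taken to "port-<number>", and drop it entirely when that fallback name clashes too. A hedged, self-contained reconstruction of that contract — the empty-protocol-defaults-to-TCP normalization is an assumption inferred from the duplicate-port cases, and the stub types stand in for the Kubernetes ones:

package main

import "fmt"

type Protocol string

type ServicePort struct { // stands in for corev1.ServicePort
	Name     string
	Port     int32
	Protocol Protocol
}

type PortNumberKey struct {
	Port     int32
	Protocol Protocol
}

// normalizedKey assumes an unset protocol means TCP; without this, the
// "should filter out duplicate port" case (a candidate without a
// protocol against a TCP entry) would not be filtered.
func normalizedKey(port int32, protocol Protocol) PortNumberKey {
	if protocol == "" {
		protocol = "TCP"
	}
	return PortNumberKey{Port: port, Protocol: protocol}
}

// filterPortSketch reconstructs the behavior exercised by TestFilterPort.
func filterPortSketch(candidate ServicePort, portNumbers map[PortNumberKey]bool, portNames map[string]bool) *ServicePort {
	if portNumbers[normalizedKey(candidate.Port, candidate.Protocol)] {
		return nil // duplicate port number for the same protocol
	}
	if portNames[candidate.Name] {
		fallback := fmt.Sprintf("port-%d", candidate.Port)
		if portNames[fallback] {
			return nil // fallback name clashes as well
		}
		candidate.Name = fallback // rename the duplicate
	}
	return &candidate
}

func main() {
	names := map[string]bool{"web": true, "metrics": true}
	if p := filterPortSketch(ServicePort{Name: "web", Port: 8090}, nil, names); p != nil {
		fmt.Println(p.Name) // prints "port-8090"
	}
}
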
diff --git a/internal/webhook/podmutation/webhookhandler_suite_test.go b/internal/webhook/podmutation/webhookhandler_suite_test.go
index 4e83532873..464649f489 100644
--- a/internal/webhook/podmutation/webhookhandler_suite_test.go
+++ b/internal/webhook/podmutation/webhookhandler_suite_test.go
@@ -105,10 +105,6 @@ func TestMain(m *testing.M) {
}
reviewer := rbac.NewReviewer(clientset)
- if err = v1alpha1.SetupCollectorWebhook(mgr, config.New(), reviewer); err != nil {
- fmt.Printf("failed to SetupWebhookWithManager: %v", err)
- os.Exit(1)
- }
if err = v1beta1.SetupCollectorWebhook(mgr, config.New(), reviewer); err != nil {
fmt.Printf("failed to SetupWebhookWithManager: %v", err)
os.Exit(1)
diff --git a/main.go b/main.go
index 9c1ddbc7fd..0d89d09b22 100644
--- a/main.go
+++ b/main.go
@@ -325,10 +325,6 @@ func main() {
}
if os.Getenv("ENABLE_WEBHOOKS") != "false" {
- if err = otelv1alpha1.SetupCollectorWebhook(mgr, cfg, reviewer); err != nil {
- setupLog.Error(err, "unable to create webhook", "webhook", "OpenTelemetryCollector")
- os.Exit(1)
- }
if err = otelv1beta1.SetupCollectorWebhook(mgr, cfg, reviewer); err != nil {
setupLog.Error(err, "unable to create webhook", "webhook", "OpenTelemetryCollector")
os.Exit(1)
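
With the v1alpha1 registration gone, the manager wires up only the v1beta1 webhook. For orientation, a hedged sketch of what a SetupCollectorWebhook-style registration typically looks like with controller-runtime's webhook builder — the collectorWebhook type is a hypothetical stand-in, and the validator signatures assume controller-runtime v0.15+, where ValidateCreate/ValidateUpdate/ValidateDelete return admission.Warnings:

package webhooksketch

import (
	"context"

	"k8s.io/apimachinery/pkg/runtime"
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/webhook/admission"
)

// collectorWebhook is a hypothetical stand-in for the operator's v1beta1
// CollectorWebhook: a single type serving as both defaulter and validator.
type collectorWebhook struct{}

func (w *collectorWebhook) Default(ctx context.Context, obj runtime.Object) error {
	return nil // defaulting logic elided
}

func (w *collectorWebhook) ValidateCreate(ctx context.Context, obj runtime.Object) (admission.Warnings, error) {
	return nil, nil // validation logic elided
}

func (w *collectorWebhook) ValidateUpdate(ctx context.Context, oldObj, newObj runtime.Object) (admission.Warnings, error) {
	return nil, nil
}

func (w *collectorWebhook) ValidateDelete(ctx context.Context, obj runtime.Object) (admission.Warnings, error) {
	return nil, nil
}

// setupWebhookSketch registers defaulting and validating webhooks for obj
// on the manager; a real setup would pass &v1beta1.OpenTelemetryCollector{}.
func setupWebhookSketch(mgr ctrl.Manager, obj runtime.Object) error {
	wh := &collectorWebhook{}
	return ctrl.NewWebhookManagedBy(mgr).
		For(obj).
		WithDefaulter(wh).
		WithValidator(wh).
		Complete()
}

The same single-registration pattern applies to the test suites above and below, which previously had to set up both API versions.
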
diff --git a/pkg/collector/upgrade/suite_test.go b/pkg/collector/upgrade/suite_test.go
index 8f5ec63c01..e6e505b760 100644
--- a/pkg/collector/upgrade/suite_test.go
+++ b/pkg/collector/upgrade/suite_test.go
@@ -105,10 +105,6 @@ func TestMain(m *testing.M) {
}
reviewer := rbac.NewReviewer(clientset)
- if err = v1alpha1.SetupCollectorWebhook(mgr, config.New(), reviewer); err != nil {
- fmt.Printf("failed to SetupWebhookWithManager: %v", err)
- os.Exit(1)
- }
if err = v1beta1.SetupCollectorWebhook(mgr, config.New(), reviewer); err != nil {
fmt.Printf("failed to SetupWebhookWithManager: %v", err)
os.Exit(1)