From 1ea8e7f174fed8ec951b1e8a129280d9e76fe609 Mon Sep 17 00:00:00 2001 From: Dejan Zele Pejchev Date: Mon, 30 Sep 2024 16:52:52 +0200 Subject: [PATCH] add support for profiling service and ingress in armada components Signed-off-by: Dejan Zele Pejchev --- api/install/v1alpha1/armadaserver_types.go | 4 +- api/install/v1alpha1/binoculars_types.go | 2 + api/install/v1alpha1/common.go | 52 +-- api/install/v1alpha1/common_test.go | 104 ------ api/install/v1alpha1/eventingester_types.go | 2 + api/install/v1alpha1/executor_types.go | 2 + api/install/v1alpha1/lookout_types.go | 2 + api/install/v1alpha1/lookoutingester_types.go | 2 + api/install/v1alpha1/scheduler_types.go | 2 + .../v1alpha1/scheduleringester_types.go | 2 + api/install/v1alpha1/zz_generated.deepcopy.go | 61 +++- .../crds/armadaserver-crd.yaml | 62 ++-- .../armada-operator/crds/binoculars-crd.yaml | 59 ++-- .../crds/eventingester-crd.yaml | 48 +-- charts/armada-operator/crds/executor-crd.yaml | 52 +-- charts/armada-operator/crds/lookout-crd.yaml | 59 ++-- .../crds/lookoutingester-crd.yaml | 48 +-- .../armada-operator/crds/scheduler-crd.yaml | 59 ++-- .../crds/scheduleringester-crd.yaml | 48 +-- ...nstall.armadaproject.io_armadaservers.yaml | 62 ++-- .../install.armadaproject.io_binoculars.yaml | 59 ++-- ...stall.armadaproject.io_eventingesters.yaml | 48 +-- .../install.armadaproject.io_executors.yaml | 52 +-- ...all.armadaproject.io_lookoutingesters.yaml | 48 +-- .../install.armadaproject.io_lookouts.yaml | 59 ++-- ...l.armadaproject.io_scheduleringesters.yaml | 48 +-- .../install.armadaproject.io_schedulers.yaml | 59 ++-- dev/crd/out.md | 38 +-- internal/controller/builders/config.go | 69 ++++ internal/controller/builders/config_test.go | 115 +++++++ .../controller/builders/generate_config.go | 25 -- .../builders/generate_config_test.go | 48 --- internal/controller/builders/ingress.go | 58 ++++ internal/controller/builders/ingress_test.go | 180 ++++++++++ internal/controller/builders/secret.go | 7 +- internal/controller/builders/service.go | 75 +++- .../controller/builders/service_account.go | 2 +- .../builders/service_account_test.go | 2 +- internal/controller/builders/service_test.go | 87 +++-- .../install/armadaserver_controller.go | 236 ++++--------- .../install/armadaserver_controller_test.go | 24 +- .../install/binoculars_controller.go | 220 ++++-------- .../install/binoculars_controller_test.go | 29 +- internal/controller/install/common_helpers.go | 323 +++++++++++++++--- .../controller/install/common_helpers_test.go | 2 +- .../install/eventingester_controller.go | 83 ++--- .../controller/install/executor_controller.go | 69 ++-- .../controller/install/lookout_controller.go | 199 ++++------- .../install/lookout_controller_test.go | 18 +- .../install/lookoutingester_controller.go | 58 ++-- .../install/scheduler_controller.go | 268 ++++++--------- .../install/scheduler_controller_test.go | 36 +- .../install/scheduleringester_controller.go | 117 +++---- 53 files changed, 1935 insertions(+), 1558 deletions(-) create mode 100644 internal/controller/builders/config.go create mode 100644 internal/controller/builders/config_test.go delete mode 100644 internal/controller/builders/generate_config.go delete mode 100644 internal/controller/builders/generate_config_test.go create mode 100644 internal/controller/builders/ingress.go create mode 100644 internal/controller/builders/ingress_test.go diff --git a/api/install/v1alpha1/armadaserver_types.go b/api/install/v1alpha1/armadaserver_types.go index 4eb32446..03e7ca1b 100644 --- 
a/api/install/v1alpha1/armadaserver_types.go +++ b/api/install/v1alpha1/armadaserver_types.go @@ -29,8 +29,10 @@ type ArmadaServerSpec struct { Replicas *int32 `json:"replicas,omitempty"` // NodeSelector restricts the ArmadaServer pod to run on nodes matching the configured selectors NodeSelector map[string]string `json:"nodeSelector,omitempty"` - // Ingress defines labels and annotations for the Ingress controller of ArmadaServer + // Ingress defines configuration for the Ingress resource Ingress *IngressConfig `json:"ingress,omitempty"` + // ProfilingIngressConfig defines configuration for the profiling Ingress resource + ProfilingIngressConfig *IngressConfig `json:"profilingIngressConfig,omitempty"` // An array of host names to build ingress rules for HostNames []string `json:"hostNames,omitempty"` // Who is issuing certificates for CA diff --git a/api/install/v1alpha1/binoculars_types.go b/api/install/v1alpha1/binoculars_types.go index 0f5c1e99..e893e356 100644 --- a/api/install/v1alpha1/binoculars_types.go +++ b/api/install/v1alpha1/binoculars_types.go @@ -55,6 +55,8 @@ type BinocularsSpec struct { NodeSelector map[string]string `json:"nodeSelector,omitempty"` // Ingress for this component. Used to inject labels/annotations into ingress Ingress *IngressConfig `json:"ingress,omitempty"` + // ProfilingIngressConfig defines configuration for the profiling Ingress resource + ProfilingIngressConfig *IngressConfig `json:"profilingIngressConfig,omitempty"` // An array of host names to build ingress rules for HostNames []string `json:"hostNames,omitempty"` // Who is issuing certificates for CA diff --git a/api/install/v1alpha1/common.go b/api/install/v1alpha1/common.go index d71bef84..e2020209 100644 --- a/api/install/v1alpha1/common.go +++ b/api/install/v1alpha1/common.go @@ -20,13 +20,6 @@ import ( corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" - "sigs.k8s.io/yaml" -) - -const ( - defaultHTTPPort = 8080 - defaultGRPCPort = 50051 - defaultMetricsPort = 9000 ) type Image struct { @@ -62,8 +55,10 @@ type IngressConfig struct { Annotations map[string]string `json:"annotations,omitempty"` // The type of ingress that is used IngressClass string `json:"ingressClass,omitempty"` - // Overide name for ingress - NameOverride string `json:"nameOverride,omitempty"` + // An array of host names to build ingress rules for + Hostnames []string `json:"hostNames,omitempty"` + // Who is issuing certificates for CA + ClusterIssuer string `json:"clusterIssuer,omitempty"` } type AdditionalClusterRoleBinding struct { @@ -71,14 +66,6 @@ type AdditionalClusterRoleBinding struct { ClusterRoleName string `json:"clusterRoleName"` } -type PortConfig struct { - HttpPort int32 `json:"httpPort"` - HttpNodePort int32 `json:"httpNodePort,omitempty"` - GrpcPort int32 `json:"grpcPort"` - GrpcNodePort int32 `json:"grpcNodePort,omitempty"` - MetricsPort int32 `json:"metricsPort"` -} - // CommonSpecBase is the common configuration for all services. // NOTE(Clif): You must label this with `json:""` when using it as an embedded // struct in order for controller-gen to use the promoted fields as expected. @@ -109,37 +96,6 @@ type CommonSpecBase struct { AdditionalVolumes []corev1.Volume `json:"additionalVolumes,omitempty"` // Additional volume mounts that are added as volumes AdditionalVolumeMounts []corev1.VolumeMount `json:"additionalVolumeMounts,omitempty"` - // PortConfig is automatically populated with defaults and overlaid by values in ApplicationConfig. 
- PortConfig PortConfig `json:"portConfig,omitempty"` -} - -// BuildPortConfig extracts ports from the ApplicationConfig and returns a PortConfig -func BuildPortConfig(rawAppConfig runtime.RawExtension) (PortConfig, error) { - appConfig, err := ConvertRawExtensionToYaml(rawAppConfig) - if err != nil { - return PortConfig{}, err - } - // defaults - portConfig := PortConfig{ - HttpPort: defaultHTTPPort, - GrpcPort: defaultGRPCPort, - MetricsPort: defaultMetricsPort, - } - err = yaml.Unmarshal([]byte(appConfig), &portConfig) - if err != nil { - return PortConfig{}, err - } - return portConfig, nil -} - -// ConvertRawExtensionToYaml converts a RawExtension input to Yaml -func ConvertRawExtensionToYaml(config runtime.RawExtension) (string, error) { - yamlConfig, err := yaml.JSONToYAML(config.Raw) - if err != nil { - return "", err - } - - return string(yamlConfig), nil } func GetDefaultSecurityContext() *corev1.SecurityContext { diff --git a/api/install/v1alpha1/common_test.go b/api/install/v1alpha1/common_test.go index 5b7b04a4..1c267880 100644 --- a/api/install/v1alpha1/common_test.go +++ b/api/install/v1alpha1/common_test.go @@ -1,105 +1 @@ package v1alpha1 - -import ( - "testing" - - "github.com/stretchr/testify/assert" - - "k8s.io/apimachinery/pkg/runtime" -) - -func TestBuildPortConfig(t *testing.T) { - tests := []struct { - name string - input runtime.RawExtension - expected PortConfig - wantErr bool - }{ - { - name: "it provides some reasonable defaults", - input: runtime.RawExtension{Raw: []byte(`{ }`)}, - expected: PortConfig{ - HttpPort: 8080, - GrpcPort: 50051, - MetricsPort: 9000, - }, - }, - { - name: "it errors with bad json (so does everything else in the app)", - input: runtime.RawExtension{Raw: []byte(`{"httpPort": 8081`)}, - expected: PortConfig{}, - wantErr: true, - }, - { - name: "it accepts partial overrides from the config", - input: runtime.RawExtension{Raw: []byte(`{"httpPort": 8081}`)}, - expected: PortConfig{ - HttpPort: 8081, - GrpcPort: 50051, - MetricsPort: 9000, - }, - }, - { - name: "it accepts complete override from the config", - input: runtime.RawExtension{ - Raw: []byte(`{"httpPort": 8081, "grpcPort": 50052, "metricsPort": 9001 }`), - }, - expected: PortConfig{ - HttpPort: 8081, - GrpcPort: 50052, - MetricsPort: 9001, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - pc, err := BuildPortConfig(tt.input) - if tt.wantErr { - assert.Error(t, err) - } else { - assert.Nil(t, err) - } - assert.Equal(t, tt.expected, pc) - }) - } -} - -func TestConvertRawExtensionToYaml(t *testing.T) { - - tests := []struct { - name string - input runtime.RawExtension - expected string - wantErr bool - }{ - { - name: "it converts runtime.RawExtension json to yaml", - input: runtime.RawExtension{Raw: []byte(`{ "test": { "foo": "bar" }}`)}, - expected: "test:\n foo: bar\n", - }, - { - name: "it converts complex runtime.RawExtension json to yaml", - input: runtime.RawExtension{Raw: []byte(`{ "test": {"foo": "bar"}, "test1": {"foo1": { "foo2": "bar2" }}}`)}, - expected: "test:\n foo: bar\ntest1:\n foo1:\n foo2: bar2\n", - }, - { - name: "it errors if runtime.RawExtension raw is malformed json", - input: runtime.RawExtension{Raw: []byte(`{ "foo": "bar" `)}, - expected: "", - wantErr: true, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - output, err := ConvertRawExtensionToYaml(tt.input) - if tt.wantErr { - assert.Error(t, err) - } else { - assert.Nil(t, err) - } - assert.Equal(t, tt.expected, output) - }) - } -} diff --git 
a/api/install/v1alpha1/eventingester_types.go b/api/install/v1alpha1/eventingester_types.go index 1e06ca78..f5a02753 100644 --- a/api/install/v1alpha1/eventingester_types.go +++ b/api/install/v1alpha1/eventingester_types.go @@ -33,6 +33,8 @@ type EventIngesterSpec struct { SecurityContext *corev1.SecurityContext `json:"securityContext,omitempty"` // PodSecurityContext defines the security options the pod should be run with PodSecurityContext *corev1.PodSecurityContext `json:"podSecurityContext,omitempty"` + // ProfilingIngressConfig defines configuration for the profiling Ingress resource + ProfilingIngressConfig *IngressConfig `json:"profilingIngressConfig,omitempty"` } // EventIngesterStatus defines the observed state of EventIngester diff --git a/api/install/v1alpha1/executor_types.go b/api/install/v1alpha1/executor_types.go index a3e9f249..708be0e2 100644 --- a/api/install/v1alpha1/executor_types.go +++ b/api/install/v1alpha1/executor_types.go @@ -62,6 +62,8 @@ type ExecutorSpec struct { SecurityContext *corev1.SecurityContext `json:"securityContext,omitempty"` // PodSecurityContext defines the security options the pod should be run with PodSecurityContext *corev1.PodSecurityContext `json:"podSecurityContext,omitempty"` + // ProfilingIngressConfig defines configuration for the profiling Ingress resource + ProfilingIngressConfig *IngressConfig `json:"profilingIngressConfig,omitempty"` } // ExecutorStatus defines the observed state of Executor diff --git a/api/install/v1alpha1/lookout_types.go b/api/install/v1alpha1/lookout_types.go index 586389d7..518e3cf0 100644 --- a/api/install/v1alpha1/lookout_types.go +++ b/api/install/v1alpha1/lookout_types.go @@ -69,6 +69,8 @@ type LookoutSpec struct { SecurityContext *corev1.SecurityContext `json:"securityContext,omitempty"` // PodSecurityContext defines the security options the pod should be run with PodSecurityContext *corev1.PodSecurityContext `json:"podSecurityContext,omitempty"` + // ProfilingIngressConfig defines configuration for the profiling Ingress resource + ProfilingIngressConfig *IngressConfig `json:"profilingIngressConfig,omitempty"` } // LookoutStatus defines the observed state of lookout diff --git a/api/install/v1alpha1/lookoutingester_types.go b/api/install/v1alpha1/lookoutingester_types.go index 288c5256..0c8229b1 100644 --- a/api/install/v1alpha1/lookoutingester_types.go +++ b/api/install/v1alpha1/lookoutingester_types.go @@ -31,6 +31,8 @@ type LookoutIngesterSpec struct { SecurityContext *corev1.SecurityContext `json:"securityContext,omitempty"` // PodSecurityContext defines the security options the pod should be run with PodSecurityContext *corev1.PodSecurityContext `json:"podSecurityContext,omitempty"` + // ProfilingIngressConfig defines configuration for the profiling Ingress resource + ProfilingIngressConfig *IngressConfig `json:"profilingIngressConfig,omitempty"` } // LookoutIngesterStatus defines the observed state of LookoutIngester diff --git a/api/install/v1alpha1/scheduler_types.go b/api/install/v1alpha1/scheduler_types.go index 9a020e82..fe5a678f 100644 --- a/api/install/v1alpha1/scheduler_types.go +++ b/api/install/v1alpha1/scheduler_types.go @@ -53,6 +53,8 @@ type SchedulerSpec struct { Replicas *int32 `json:"replicas,omitempty"` // Ingress defines labels and annotations for the Ingress controller of Scheduler Ingress *IngressConfig `json:"ingress,omitempty"` + // ProfilingIngressConfig defines configuration for the profiling Ingress resource + ProfilingIngressConfig *IngressConfig 
`json:"profilingIngressConfig,omitempty"` // An array of host names to build ingress rules for HostNames []string `json:"hostNames,omitempty"` // Who is issuing certificates for CA diff --git a/api/install/v1alpha1/scheduleringester_types.go b/api/install/v1alpha1/scheduleringester_types.go index 7d4039c1..e5ad18ef 100644 --- a/api/install/v1alpha1/scheduleringester_types.go +++ b/api/install/v1alpha1/scheduleringester_types.go @@ -31,6 +31,8 @@ type SchedulerIngesterSpec struct { SecurityContext *corev1.SecurityContext `json:"securityContext,omitempty"` // PodSecurityContext defines the security options the pod should be run with PodSecurityContext *corev1.PodSecurityContext `json:"podSecurityContext,omitempty"` + // ProfilingIngressConfig defines configuration for the profiling Ingress resource + ProfilingIngressConfig *IngressConfig `json:"profilingIngressConfig,omitempty"` } // SchedulerIngesterStatus defines the observed state of SchedulerIngester diff --git a/api/install/v1alpha1/zz_generated.deepcopy.go b/api/install/v1alpha1/zz_generated.deepcopy.go index b49f7614..b2f1868b 100644 --- a/api/install/v1alpha1/zz_generated.deepcopy.go +++ b/api/install/v1alpha1/zz_generated.deepcopy.go @@ -122,6 +122,11 @@ func (in *ArmadaServerSpec) DeepCopyInto(out *ArmadaServerSpec) { *out = new(IngressConfig) (*in).DeepCopyInto(*out) } + if in.ProfilingIngressConfig != nil { + in, out := &in.ProfilingIngressConfig, &out.ProfilingIngressConfig + *out = new(IngressConfig) + (*in).DeepCopyInto(*out) + } if in.HostNames != nil { in, out := &in.HostNames, &out.HostNames *out = make([]string, len(*in)) @@ -244,6 +249,11 @@ func (in *BinocularsSpec) DeepCopyInto(out *BinocularsSpec) { *out = new(IngressConfig) (*in).DeepCopyInto(*out) } + if in.ProfilingIngressConfig != nil { + in, out := &in.ProfilingIngressConfig, &out.ProfilingIngressConfig + *out = new(IngressConfig) + (*in).DeepCopyInto(*out) + } if in.HostNames != nil { in, out := &in.HostNames, &out.HostNames *out = make([]string, len(*in)) @@ -346,7 +356,6 @@ func (in *CommonSpecBase) DeepCopyInto(out *CommonSpecBase) { (*in)[i].DeepCopyInto(&(*out)[i]) } } - out.PortConfig = in.PortConfig } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CommonSpecBase. @@ -444,6 +453,11 @@ func (in *EventIngesterSpec) DeepCopyInto(out *EventIngesterSpec) { *out = new(v1.PodSecurityContext) (*in).DeepCopyInto(*out) } + if in.ProfilingIngressConfig != nil { + in, out := &in.ProfilingIngressConfig, &out.ProfilingIngressConfig + *out = new(IngressConfig) + (*in).DeepCopyInto(*out) + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EventIngesterSpec. @@ -572,6 +586,11 @@ func (in *ExecutorSpec) DeepCopyInto(out *ExecutorSpec) { *out = new(v1.PodSecurityContext) (*in).DeepCopyInto(*out) } + if in.ProfilingIngressConfig != nil { + in, out := &in.ProfilingIngressConfig, &out.ProfilingIngressConfig + *out = new(IngressConfig) + (*in).DeepCopyInto(*out) + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExecutorSpec. @@ -631,6 +650,11 @@ func (in *IngressConfig) DeepCopyInto(out *IngressConfig) { (*out)[key] = val } } + if in.Hostnames != nil { + in, out := &in.Hostnames, &out.Hostnames + *out = make([]string, len(*in)) + copy(*out, *in) + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressConfig. 
@@ -748,6 +772,11 @@ func (in *LookoutIngesterSpec) DeepCopyInto(out *LookoutIngesterSpec) { *out = new(v1.PodSecurityContext) (*in).DeepCopyInto(*out) } + if in.ProfilingIngressConfig != nil { + in, out := &in.ProfilingIngressConfig, &out.ProfilingIngressConfig + *out = new(IngressConfig) + (*in).DeepCopyInto(*out) + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LookoutIngesterSpec. @@ -858,6 +887,11 @@ func (in *LookoutSpec) DeepCopyInto(out *LookoutSpec) { *out = new(v1.PodSecurityContext) (*in).DeepCopyInto(*out) } + if in.ProfilingIngressConfig != nil { + in, out := &in.ProfilingIngressConfig, &out.ProfilingIngressConfig + *out = new(IngressConfig) + (*in).DeepCopyInto(*out) + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LookoutSpec. @@ -885,21 +919,6 @@ func (in *LookoutStatus) DeepCopy() *LookoutStatus { return out } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PortConfig) DeepCopyInto(out *PortConfig) { - *out = *in -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PortConfig. -func (in *PortConfig) DeepCopy() *PortConfig { - if in == nil { - return nil - } - out := new(PortConfig) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *PrometheusConfig) DeepCopyInto(out *PrometheusConfig) { *out = *in @@ -1068,6 +1087,11 @@ func (in *SchedulerIngesterSpec) DeepCopyInto(out *SchedulerIngesterSpec) { *out = new(v1.PodSecurityContext) (*in).DeepCopyInto(*out) } + if in.ProfilingIngressConfig != nil { + in, out := &in.ProfilingIngressConfig, &out.ProfilingIngressConfig + *out = new(IngressConfig) + (*in).DeepCopyInto(*out) + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SchedulerIngesterSpec. 
@@ -1141,6 +1165,11 @@ func (in *SchedulerSpec) DeepCopyInto(out *SchedulerSpec) { *out = new(IngressConfig) (*in).DeepCopyInto(*out) } + if in.ProfilingIngressConfig != nil { + in, out := &in.ProfilingIngressConfig, &out.ProfilingIngressConfig + *out = new(IngressConfig) + (*in).DeepCopyInto(*out) + } if in.HostNames != nil { in, out := &in.HostNames, &out.HostNames *out = make([]string, len(*in)) diff --git a/charts/armada-operator/crds/armadaserver-crd.yaml b/charts/armada-operator/crds/armadaserver-crd.yaml index 438b9cf9..8efa2bec 100644 --- a/charts/armada-operator/crds/armadaserver-crd.yaml +++ b/charts/armada-operator/crds/armadaserver-crd.yaml @@ -2030,8 +2030,7 @@ spec: - tag type: object ingress: - description: Ingress defines labels and annotations for the Ingress - controller of ArmadaServer + description: Ingress defines configuration for the Ingress resource properties: annotations: additionalProperties: @@ -2039,6 +2038,14 @@ spec: description: Annotations is a map of annotations which will be added to all ingress rules type: object + clusterIssuer: + description: Who is issuing certificates for CA + type: string + hostNames: + description: An array of host names to build ingress rules for + items: + type: string + type: array ingressClass: description: The type of ingress that is used type: string @@ -2048,9 +2055,6 @@ spec: description: Labels is the map of labels which wil be added to all objects type: object - nameOverride: - description: Overide name for ingress - type: string type: object labels: additionalProperties: @@ -2264,29 +2268,33 @@ spec: type: string type: object type: object - portConfig: - description: PortConfig is automatically populated with defaults and - overlaid by values in ApplicationConfig. + profilingIngressConfig: + description: ProfilingIngressConfig defines configuration for the + profiling Ingress resource properties: - grpcNodePort: - format: int32 - type: integer - grpcPort: - format: int32 - type: integer - httpNodePort: - format: int32 - type: integer - httpPort: - format: int32 - type: integer - metricsPort: - format: int32 - type: integer - required: - - grpcPort - - httpPort - - metricsPort + annotations: + additionalProperties: + type: string + description: Annotations is a map of annotations which will be + added to all ingress rules + type: object + clusterIssuer: + description: Who is issuing certificates for CA + type: string + hostNames: + description: An array of host names to build ingress rules for + items: + type: string + type: array + ingressClass: + description: The type of ingress that is used + type: string + labels: + additionalProperties: + type: string + description: Labels is the map of labels which wil be added to + all objects + type: object type: object prometheus: description: PrometheusConfig is the configuration block for Prometheus diff --git a/charts/armada-operator/crds/binoculars-crd.yaml b/charts/armada-operator/crds/binoculars-crd.yaml index 415904b6..a3ccba8e 100644 --- a/charts/armada-operator/crds/binoculars-crd.yaml +++ b/charts/armada-operator/crds/binoculars-crd.yaml @@ -2039,6 +2039,14 @@ spec: description: Annotations is a map of annotations which will be added to all ingress rules type: object + clusterIssuer: + description: Who is issuing certificates for CA + type: string + hostNames: + description: An array of host names to build ingress rules for + items: + type: string + type: array ingressClass: description: The type of ingress that is used type: string @@ -2048,9 +2056,6 @@ spec: 
description: Labels is the map of labels which wil be added to all objects type: object - nameOverride: - description: Overide name for ingress - type: string type: object labels: additionalProperties: @@ -2264,29 +2269,33 @@ spec: type: string type: object type: object - portConfig: - description: PortConfig is automatically populated with defaults and - overlaid by values in ApplicationConfig. + profilingIngressConfig: + description: ProfilingIngressConfig defines configuration for the + profiling Ingress resource properties: - grpcNodePort: - format: int32 - type: integer - grpcPort: - format: int32 - type: integer - httpNodePort: - format: int32 - type: integer - httpPort: - format: int32 - type: integer - metricsPort: - format: int32 - type: integer - required: - - grpcPort - - httpPort - - metricsPort + annotations: + additionalProperties: + type: string + description: Annotations is a map of annotations which will be + added to all ingress rules + type: object + clusterIssuer: + description: Who is issuing certificates for CA + type: string + hostNames: + description: An array of host names to build ingress rules for + items: + type: string + type: array + ingressClass: + description: The type of ingress that is used + type: string + labels: + additionalProperties: + type: string + description: Labels is the map of labels which wil be added to + all objects + type: object type: object prometheus: description: PrometheusConfig is the configuration block for Prometheus diff --git a/charts/armada-operator/crds/eventingester-crd.yaml b/charts/armada-operator/crds/eventingester-crd.yaml index b92ede2d..0f802494 100644 --- a/charts/armada-operator/crds/eventingester-crd.yaml +++ b/charts/armada-operator/crds/eventingester-crd.yaml @@ -2233,29 +2233,33 @@ spec: type: string type: object type: object - portConfig: - description: PortConfig is automatically populated with defaults and - overlaid by values in ApplicationConfig. + profilingIngressConfig: + description: ProfilingIngressConfig defines configuration for the + profiling Ingress resource properties: - grpcNodePort: - format: int32 - type: integer - grpcPort: - format: int32 - type: integer - httpNodePort: - format: int32 - type: integer - httpPort: - format: int32 - type: integer - metricsPort: - format: int32 - type: integer - required: - - grpcPort - - httpPort - - metricsPort + annotations: + additionalProperties: + type: string + description: Annotations is a map of annotations which will be + added to all ingress rules + type: object + clusterIssuer: + description: Who is issuing certificates for CA + type: string + hostNames: + description: An array of host names to build ingress rules for + items: + type: string + type: array + ingressClass: + description: The type of ingress that is used + type: string + labels: + additionalProperties: + type: string + description: Labels is the map of labels which wil be added to + all objects + type: object type: object prometheus: description: PrometheusConfig is the configuration block for Prometheus diff --git a/charts/armada-operator/crds/executor-crd.yaml b/charts/armada-operator/crds/executor-crd.yaml index 0c239a60..a89937ee 100644 --- a/charts/armada-operator/crds/executor-crd.yaml +++ b/charts/armada-operator/crds/executor-crd.yaml @@ -2246,30 +2246,6 @@ spec: type: string type: object type: object - portConfig: - description: PortConfig is automatically populated with defaults and - overlaid by values in ApplicationConfig. 
- properties: - grpcNodePort: - format: int32 - type: integer - grpcPort: - format: int32 - type: integer - httpNodePort: - format: int32 - type: integer - httpPort: - format: int32 - type: integer - metricsPort: - format: int32 - type: integer - required: - - grpcPort - - httpPort - - metricsPort - type: object priorityClasses: description: List of PriorityClasses which will be created items: @@ -2326,6 +2302,34 @@ spec: - value type: object type: array + profilingIngressConfig: + description: ProfilingIngressConfig defines configuration for the + profiling Ingress resource + properties: + annotations: + additionalProperties: + type: string + description: Annotations is a map of annotations which will be + added to all ingress rules + type: object + clusterIssuer: + description: Who is issuing certificates for CA + type: string + hostNames: + description: An array of host names to build ingress rules for + items: + type: string + type: array + ingressClass: + description: The type of ingress that is used + type: string + labels: + additionalProperties: + type: string + description: Labels is the map of labels which wil be added to + all objects + type: object + type: object prometheus: description: PrometheusConfig is the configuration block for Prometheus monitoring diff --git a/charts/armada-operator/crds/lookout-crd.yaml b/charts/armada-operator/crds/lookout-crd.yaml index d8b611f4..df956219 100644 --- a/charts/armada-operator/crds/lookout-crd.yaml +++ b/charts/armada-operator/crds/lookout-crd.yaml @@ -2045,6 +2045,14 @@ spec: description: Annotations is a map of annotations which will be added to all ingress rules type: object + clusterIssuer: + description: Who is issuing certificates for CA + type: string + hostNames: + description: An array of host names to build ingress rules for + items: + type: string + type: array ingressClass: description: The type of ingress that is used type: string @@ -2054,9 +2062,6 @@ spec: description: Labels is the map of labels which wil be added to all objects type: object - nameOverride: - description: Overide name for ingress - type: string type: object labels: additionalProperties: @@ -2273,29 +2278,33 @@ spec: type: string type: object type: object - portConfig: - description: PortConfig is automatically populated with defaults and - overlaid by values in ApplicationConfig. 
+ profilingIngressConfig: + description: ProfilingIngressConfig defines configuration for the + profiling Ingress resource properties: - grpcNodePort: - format: int32 - type: integer - grpcPort: - format: int32 - type: integer - httpNodePort: - format: int32 - type: integer - httpPort: - format: int32 - type: integer - metricsPort: - format: int32 - type: integer - required: - - grpcPort - - httpPort - - metricsPort + annotations: + additionalProperties: + type: string + description: Annotations is a map of annotations which will be + added to all ingress rules + type: object + clusterIssuer: + description: Who is issuing certificates for CA + type: string + hostNames: + description: An array of host names to build ingress rules for + items: + type: string + type: array + ingressClass: + description: The type of ingress that is used + type: string + labels: + additionalProperties: + type: string + description: Labels is the map of labels which wil be added to + all objects + type: object type: object prometheus: description: PrometheusConfig is the configuration block for Prometheus diff --git a/charts/armada-operator/crds/lookoutingester-crd.yaml b/charts/armada-operator/crds/lookoutingester-crd.yaml index 79ed147c..4fb9e2c1 100644 --- a/charts/armada-operator/crds/lookoutingester-crd.yaml +++ b/charts/armada-operator/crds/lookoutingester-crd.yaml @@ -2227,29 +2227,33 @@ spec: type: string type: object type: object - portConfig: - description: PortConfig is automatically populated with defaults and - overlaid by values in ApplicationConfig. + profilingIngressConfig: + description: ProfilingIngressConfig defines configuration for the + profiling Ingress resource properties: - grpcNodePort: - format: int32 - type: integer - grpcPort: - format: int32 - type: integer - httpNodePort: - format: int32 - type: integer - httpPort: - format: int32 - type: integer - metricsPort: - format: int32 - type: integer - required: - - grpcPort - - httpPort - - metricsPort + annotations: + additionalProperties: + type: string + description: Annotations is a map of annotations which will be + added to all ingress rules + type: object + clusterIssuer: + description: Who is issuing certificates for CA + type: string + hostNames: + description: An array of host names to build ingress rules for + items: + type: string + type: array + ingressClass: + description: The type of ingress that is used + type: string + labels: + additionalProperties: + type: string + description: Labels is the map of labels which wil be added to + all objects + type: object type: object prometheus: description: PrometheusConfig is the configuration block for Prometheus diff --git a/charts/armada-operator/crds/scheduler-crd.yaml b/charts/armada-operator/crds/scheduler-crd.yaml index b5c68b38..701068ef 100644 --- a/charts/armada-operator/crds/scheduler-crd.yaml +++ b/charts/armada-operator/crds/scheduler-crd.yaml @@ -2039,6 +2039,14 @@ spec: description: Annotations is a map of annotations which will be added to all ingress rules type: object + clusterIssuer: + description: Who is issuing certificates for CA + type: string + hostNames: + description: An array of host names to build ingress rules for + items: + type: string + type: array ingressClass: description: The type of ingress that is used type: string @@ -2048,9 +2056,6 @@ spec: description: Labels is the map of labels which wil be added to all objects type: object - nameOverride: - description: Overide name for ingress - type: string type: object labels: additionalProperties: @@ 
-2261,29 +2266,33 @@ spec: type: string type: object type: object - portConfig: - description: PortConfig is automatically populated with defaults and - overlaid by values in ApplicationConfig. + profilingIngressConfig: + description: ProfilingIngressConfig defines configuration for the + profiling Ingress resource properties: - grpcNodePort: - format: int32 - type: integer - grpcPort: - format: int32 - type: integer - httpNodePort: - format: int32 - type: integer - httpPort: - format: int32 - type: integer - metricsPort: - format: int32 - type: integer - required: - - grpcPort - - httpPort - - metricsPort + annotations: + additionalProperties: + type: string + description: Annotations is a map of annotations which will be + added to all ingress rules + type: object + clusterIssuer: + description: Who is issuing certificates for CA + type: string + hostNames: + description: An array of host names to build ingress rules for + items: + type: string + type: array + ingressClass: + description: The type of ingress that is used + type: string + labels: + additionalProperties: + type: string + description: Labels is the map of labels which wil be added to + all objects + type: object type: object prometheus: description: PrometheusConfig is the configuration block for Prometheus diff --git a/charts/armada-operator/crds/scheduleringester-crd.yaml b/charts/armada-operator/crds/scheduleringester-crd.yaml index 0d515f5f..2df8454e 100644 --- a/charts/armada-operator/crds/scheduleringester-crd.yaml +++ b/charts/armada-operator/crds/scheduleringester-crd.yaml @@ -2227,29 +2227,33 @@ spec: type: string type: object type: object - portConfig: - description: PortConfig is automatically populated with defaults and - overlaid by values in ApplicationConfig. + profilingIngressConfig: + description: ProfilingIngressConfig defines configuration for the + profiling Ingress resource properties: - grpcNodePort: - format: int32 - type: integer - grpcPort: - format: int32 - type: integer - httpNodePort: - format: int32 - type: integer - httpPort: - format: int32 - type: integer - metricsPort: - format: int32 - type: integer - required: - - grpcPort - - httpPort - - metricsPort + annotations: + additionalProperties: + type: string + description: Annotations is a map of annotations which will be + added to all ingress rules + type: object + clusterIssuer: + description: Who is issuing certificates for CA + type: string + hostNames: + description: An array of host names to build ingress rules for + items: + type: string + type: array + ingressClass: + description: The type of ingress that is used + type: string + labels: + additionalProperties: + type: string + description: Labels is the map of labels which wil be added to + all objects + type: object type: object prometheus: description: PrometheusConfig is the configuration block for Prometheus diff --git a/config/crd/bases/install.armadaproject.io_armadaservers.yaml b/config/crd/bases/install.armadaproject.io_armadaservers.yaml index 1b1246fd..ac4a34d6 100644 --- a/config/crd/bases/install.armadaproject.io_armadaservers.yaml +++ b/config/crd/bases/install.armadaproject.io_armadaservers.yaml @@ -2020,8 +2020,7 @@ spec: - tag type: object ingress: - description: Ingress defines labels and annotations for the Ingress - controller of ArmadaServer + description: Ingress defines configuration for the Ingress resource properties: annotations: additionalProperties: @@ -2029,6 +2028,14 @@ spec: description: Annotations is a map of annotations which will be added to all 
ingress rules type: object + clusterIssuer: + description: Who is issuing certificates for CA + type: string + hostNames: + description: An array of host names to build ingress rules for + items: + type: string + type: array ingressClass: description: The type of ingress that is used type: string @@ -2038,9 +2045,6 @@ spec: description: Labels is the map of labels which wil be added to all objects type: object - nameOverride: - description: Overide name for ingress - type: string type: object labels: additionalProperties: @@ -2254,29 +2258,33 @@ spec: type: string type: object type: object - portConfig: - description: PortConfig is automatically populated with defaults and - overlaid by values in ApplicationConfig. + profilingIngressConfig: + description: ProfilingIngressConfig defines configuration for the + profiling Ingress resource properties: - grpcNodePort: - format: int32 - type: integer - grpcPort: - format: int32 - type: integer - httpNodePort: - format: int32 - type: integer - httpPort: - format: int32 - type: integer - metricsPort: - format: int32 - type: integer - required: - - grpcPort - - httpPort - - metricsPort + annotations: + additionalProperties: + type: string + description: Annotations is a map of annotations which will be + added to all ingress rules + type: object + clusterIssuer: + description: Who is issuing certificates for CA + type: string + hostNames: + description: An array of host names to build ingress rules for + items: + type: string + type: array + ingressClass: + description: The type of ingress that is used + type: string + labels: + additionalProperties: + type: string + description: Labels is the map of labels which wil be added to + all objects + type: object type: object prometheus: description: PrometheusConfig is the configuration block for Prometheus diff --git a/config/crd/bases/install.armadaproject.io_binoculars.yaml b/config/crd/bases/install.armadaproject.io_binoculars.yaml index 1223ad16..434995fa 100644 --- a/config/crd/bases/install.armadaproject.io_binoculars.yaml +++ b/config/crd/bases/install.armadaproject.io_binoculars.yaml @@ -2029,6 +2029,14 @@ spec: description: Annotations is a map of annotations which will be added to all ingress rules type: object + clusterIssuer: + description: Who is issuing certificates for CA + type: string + hostNames: + description: An array of host names to build ingress rules for + items: + type: string + type: array ingressClass: description: The type of ingress that is used type: string @@ -2038,9 +2046,6 @@ spec: description: Labels is the map of labels which wil be added to all objects type: object - nameOverride: - description: Overide name for ingress - type: string type: object labels: additionalProperties: @@ -2254,29 +2259,33 @@ spec: type: string type: object type: object - portConfig: - description: PortConfig is automatically populated with defaults and - overlaid by values in ApplicationConfig. 
+ profilingIngressConfig: + description: ProfilingIngressConfig defines configuration for the + profiling Ingress resource properties: - grpcNodePort: - format: int32 - type: integer - grpcPort: - format: int32 - type: integer - httpNodePort: - format: int32 - type: integer - httpPort: - format: int32 - type: integer - metricsPort: - format: int32 - type: integer - required: - - grpcPort - - httpPort - - metricsPort + annotations: + additionalProperties: + type: string + description: Annotations is a map of annotations which will be + added to all ingress rules + type: object + clusterIssuer: + description: Who is issuing certificates for CA + type: string + hostNames: + description: An array of host names to build ingress rules for + items: + type: string + type: array + ingressClass: + description: The type of ingress that is used + type: string + labels: + additionalProperties: + type: string + description: Labels is the map of labels which wil be added to + all objects + type: object type: object prometheus: description: PrometheusConfig is the configuration block for Prometheus diff --git a/config/crd/bases/install.armadaproject.io_eventingesters.yaml b/config/crd/bases/install.armadaproject.io_eventingesters.yaml index b427f048..71955a9c 100644 --- a/config/crd/bases/install.armadaproject.io_eventingesters.yaml +++ b/config/crd/bases/install.armadaproject.io_eventingesters.yaml @@ -2223,29 +2223,33 @@ spec: type: string type: object type: object - portConfig: - description: PortConfig is automatically populated with defaults and - overlaid by values in ApplicationConfig. + profilingIngressConfig: + description: ProfilingIngressConfig defines configuration for the + profiling Ingress resource properties: - grpcNodePort: - format: int32 - type: integer - grpcPort: - format: int32 - type: integer - httpNodePort: - format: int32 - type: integer - httpPort: - format: int32 - type: integer - metricsPort: - format: int32 - type: integer - required: - - grpcPort - - httpPort - - metricsPort + annotations: + additionalProperties: + type: string + description: Annotations is a map of annotations which will be + added to all ingress rules + type: object + clusterIssuer: + description: Who is issuing certificates for CA + type: string + hostNames: + description: An array of host names to build ingress rules for + items: + type: string + type: array + ingressClass: + description: The type of ingress that is used + type: string + labels: + additionalProperties: + type: string + description: Labels is the map of labels which wil be added to + all objects + type: object type: object prometheus: description: PrometheusConfig is the configuration block for Prometheus diff --git a/config/crd/bases/install.armadaproject.io_executors.yaml b/config/crd/bases/install.armadaproject.io_executors.yaml index 0d5e0bcb..822bb190 100644 --- a/config/crd/bases/install.armadaproject.io_executors.yaml +++ b/config/crd/bases/install.armadaproject.io_executors.yaml @@ -2236,30 +2236,6 @@ spec: type: string type: object type: object - portConfig: - description: PortConfig is automatically populated with defaults and - overlaid by values in ApplicationConfig. 
- properties: - grpcNodePort: - format: int32 - type: integer - grpcPort: - format: int32 - type: integer - httpNodePort: - format: int32 - type: integer - httpPort: - format: int32 - type: integer - metricsPort: - format: int32 - type: integer - required: - - grpcPort - - httpPort - - metricsPort - type: object priorityClasses: description: List of PriorityClasses which will be created items: @@ -2316,6 +2292,34 @@ spec: - value type: object type: array + profilingIngressConfig: + description: ProfilingIngressConfig defines configuration for the + profiling Ingress resource + properties: + annotations: + additionalProperties: + type: string + description: Annotations is a map of annotations which will be + added to all ingress rules + type: object + clusterIssuer: + description: Who is issuing certificates for CA + type: string + hostNames: + description: An array of host names to build ingress rules for + items: + type: string + type: array + ingressClass: + description: The type of ingress that is used + type: string + labels: + additionalProperties: + type: string + description: Labels is the map of labels which wil be added to + all objects + type: object + type: object prometheus: description: PrometheusConfig is the configuration block for Prometheus monitoring diff --git a/config/crd/bases/install.armadaproject.io_lookoutingesters.yaml b/config/crd/bases/install.armadaproject.io_lookoutingesters.yaml index 35e9f15e..3940d4cf 100644 --- a/config/crd/bases/install.armadaproject.io_lookoutingesters.yaml +++ b/config/crd/bases/install.armadaproject.io_lookoutingesters.yaml @@ -2217,29 +2217,33 @@ spec: type: string type: object type: object - portConfig: - description: PortConfig is automatically populated with defaults and - overlaid by values in ApplicationConfig. 
+ profilingIngressConfig: + description: ProfilingIngressConfig defines configuration for the + profiling Ingress resource properties: - grpcNodePort: - format: int32 - type: integer - grpcPort: - format: int32 - type: integer - httpNodePort: - format: int32 - type: integer - httpPort: - format: int32 - type: integer - metricsPort: - format: int32 - type: integer - required: - - grpcPort - - httpPort - - metricsPort + annotations: + additionalProperties: + type: string + description: Annotations is a map of annotations which will be + added to all ingress rules + type: object + clusterIssuer: + description: Who is issuing certificates for CA + type: string + hostNames: + description: An array of host names to build ingress rules for + items: + type: string + type: array + ingressClass: + description: The type of ingress that is used + type: string + labels: + additionalProperties: + type: string + description: Labels is the map of labels which wil be added to + all objects + type: object type: object prometheus: description: PrometheusConfig is the configuration block for Prometheus diff --git a/config/crd/bases/install.armadaproject.io_lookouts.yaml b/config/crd/bases/install.armadaproject.io_lookouts.yaml index 8aa59401..241757ba 100644 --- a/config/crd/bases/install.armadaproject.io_lookouts.yaml +++ b/config/crd/bases/install.armadaproject.io_lookouts.yaml @@ -2035,6 +2035,14 @@ spec: description: Annotations is a map of annotations which will be added to all ingress rules type: object + clusterIssuer: + description: Who is issuing certificates for CA + type: string + hostNames: + description: An array of host names to build ingress rules for + items: + type: string + type: array ingressClass: description: The type of ingress that is used type: string @@ -2044,9 +2052,6 @@ spec: description: Labels is the map of labels which wil be added to all objects type: object - nameOverride: - description: Overide name for ingress - type: string type: object labels: additionalProperties: @@ -2263,29 +2268,33 @@ spec: type: string type: object type: object - portConfig: - description: PortConfig is automatically populated with defaults and - overlaid by values in ApplicationConfig. 
+ profilingIngressConfig: + description: ProfilingIngressConfig defines configuration for the + profiling Ingress resource properties: - grpcNodePort: - format: int32 - type: integer - grpcPort: - format: int32 - type: integer - httpNodePort: - format: int32 - type: integer - httpPort: - format: int32 - type: integer - metricsPort: - format: int32 - type: integer - required: - - grpcPort - - httpPort - - metricsPort + annotations: + additionalProperties: + type: string + description: Annotations is a map of annotations which will be + added to all ingress rules + type: object + clusterIssuer: + description: Who is issuing certificates for CA + type: string + hostNames: + description: An array of host names to build ingress rules for + items: + type: string + type: array + ingressClass: + description: The type of ingress that is used + type: string + labels: + additionalProperties: + type: string + description: Labels is the map of labels which wil be added to + all objects + type: object type: object prometheus: description: PrometheusConfig is the configuration block for Prometheus diff --git a/config/crd/bases/install.armadaproject.io_scheduleringesters.yaml b/config/crd/bases/install.armadaproject.io_scheduleringesters.yaml index 1b5b90f0..bbe743a2 100644 --- a/config/crd/bases/install.armadaproject.io_scheduleringesters.yaml +++ b/config/crd/bases/install.armadaproject.io_scheduleringesters.yaml @@ -2217,29 +2217,33 @@ spec: type: string type: object type: object - portConfig: - description: PortConfig is automatically populated with defaults and - overlaid by values in ApplicationConfig. + profilingIngressConfig: + description: ProfilingIngressConfig defines configuration for the + profiling Ingress resource properties: - grpcNodePort: - format: int32 - type: integer - grpcPort: - format: int32 - type: integer - httpNodePort: - format: int32 - type: integer - httpPort: - format: int32 - type: integer - metricsPort: - format: int32 - type: integer - required: - - grpcPort - - httpPort - - metricsPort + annotations: + additionalProperties: + type: string + description: Annotations is a map of annotations which will be + added to all ingress rules + type: object + clusterIssuer: + description: Who is issuing certificates for CA + type: string + hostNames: + description: An array of host names to build ingress rules for + items: + type: string + type: array + ingressClass: + description: The type of ingress that is used + type: string + labels: + additionalProperties: + type: string + description: Labels is the map of labels which wil be added to + all objects + type: object type: object prometheus: description: PrometheusConfig is the configuration block for Prometheus diff --git a/config/crd/bases/install.armadaproject.io_schedulers.yaml b/config/crd/bases/install.armadaproject.io_schedulers.yaml index 2e3968a3..904ce170 100644 --- a/config/crd/bases/install.armadaproject.io_schedulers.yaml +++ b/config/crd/bases/install.armadaproject.io_schedulers.yaml @@ -2029,6 +2029,14 @@ spec: description: Annotations is a map of annotations which will be added to all ingress rules type: object + clusterIssuer: + description: Who is issuing certificates for CA + type: string + hostNames: + description: An array of host names to build ingress rules for + items: + type: string + type: array ingressClass: description: The type of ingress that is used type: string @@ -2038,9 +2046,6 @@ spec: description: Labels is the map of labels which wil be added to all objects type: object - nameOverride: - 
description: Overide name for ingress - type: string type: object labels: additionalProperties: @@ -2251,29 +2256,33 @@ spec: type: string type: object type: object - portConfig: - description: PortConfig is automatically populated with defaults and - overlaid by values in ApplicationConfig. + profilingIngressConfig: + description: ProfilingIngressConfig defines configuration for the + profiling Ingress resource properties: - grpcNodePort: - format: int32 - type: integer - grpcPort: - format: int32 - type: integer - httpNodePort: - format: int32 - type: integer - httpPort: - format: int32 - type: integer - metricsPort: - format: int32 - type: integer - required: - - grpcPort - - httpPort - - metricsPort + annotations: + additionalProperties: + type: string + description: Annotations is a map of annotations which will be + added to all ingress rules + type: object + clusterIssuer: + description: Who is issuing certificates for CA + type: string + hostNames: + description: An array of host names to build ingress rules for + items: + type: string + type: array + ingressClass: + description: The type of ingress that is used + type: string + labels: + additionalProperties: + type: string + description: Labels is the map of labels which wil be added to + all objects + type: object type: object prometheus: description: PrometheusConfig is the configuration block for Prometheus diff --git a/dev/crd/out.md b/dev/crd/out.md index ce56405b..537b0667 100644 --- a/dev/crd/out.md +++ b/dev/crd/out.md @@ -186,7 +186,8 @@ _Appears in:_ | `CommonSpecBase` _[CommonSpecBase](#commonspecbase)_ | | | | | `replicas` _integer_ | Replicas is the number of replicated instances for ArmadaServer | | | | `nodeSelector` _object (keys:string, values:string)_ | NodeSelector restricts the ArmadaServer pod to run on nodes matching the configured selectors | | | -| `ingress` _[IngressConfig](#ingressconfig)_ | Ingress defines labels and annotations for the Ingress controller of ArmadaServer | | | +| `ingress` _[IngressConfig](#ingressconfig)_ | Ingress defines configuration for the Ingress resource | | | +| `profilingIngressConfig` _[IngressConfig](#ingressconfig)_ | ProfilingIngressConfig defines configuration for the profiling Ingress resource | | | | `hostNames` _string array_ | An array of host names to build ingress rules for | | | | `clusterIssuer` _string_ | Who is issuing certificates for CA | | | | `pulsarInit` _boolean_ | Run Pulsar Init Jobs On Startup | | | @@ -266,6 +267,7 @@ _Appears in:_ | `replicas` _integer_ | Replicas is the number of replicated instances for Binoculars | | | | `nodeSelector` _object (keys:string, values:string)_ | NodeSelector restricts the pod to run on nodes matching the configured selectors | | | | `ingress` _[IngressConfig](#ingressconfig)_ | Ingress for this component. 
Used to inject labels/annotations into ingress | | | +| `profilingIngressConfig` _[IngressConfig](#ingressconfig)_ | ProfilingIngressConfig defines configuration for the profiling Ingress resource | | | | `hostNames` _string array_ | An array of host names to build ingress rules for | | | | `clusterIssuer` _string_ | Who is issuing certificates for CA | | | | `securityContext` _[SecurityContext](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#securitycontext-v1-core)_ | SecurityContext defines the security options the container should be run with | | | @@ -319,7 +321,6 @@ _Appears in:_ | `environment` _[EnvVar](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#envvar-v1-core) array_ | Extra environment variables that get added to deployment | | | | `additionalVolumes` _[Volume](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#volume-v1-core) array_ | Additional volumes that are mounted into deployments | | | | `additionalVolumeMounts` _[VolumeMount](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#volumemount-v1-core) array_ | Additional volume mounts that are added as volumes | | | -| `portConfig` _[PortConfig](#portconfig)_ | PortConfig is automatically populated with defaults and overlaid by values in ApplicationConfig. | | | #### EventIngester @@ -382,6 +383,7 @@ _Appears in:_ | `nodeSelector` _object (keys:string, values:string)_ | NodeSelector restricts the Executor pod to run on nodes matching the configured selectors | | | | `securityContext` _[SecurityContext](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#securitycontext-v1-core)_ | SecurityContext defines the security options the container should be run with | | | | `podSecurityContext` _[PodSecurityContext](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#podsecuritycontext-v1-core)_ | PodSecurityContext defines the security options the pod should be run with | | | +| `profilingIngressConfig` _[IngressConfig](#ingressconfig)_ | ProfilingIngressConfig defines configuration for the profiling Ingress resource | | | #### EventIngesterStatus @@ -459,6 +461,7 @@ _Appears in:_ | `priorityClasses` _[PriorityClass](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#priorityclass-v1-scheduling) array_ | List of PriorityClasses which will be created | | | | `securityContext` _[SecurityContext](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#securitycontext-v1-core)_ | SecurityContext defines the security options the container should be run with | | | | `podSecurityContext` _[PodSecurityContext](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#podsecuritycontext-v1-core)_ | PodSecurityContext defines the security options the pod should be run with | | | +| `profilingIngressConfig` _[IngressConfig](#ingressconfig)_ | ProfilingIngressConfig defines configuration for the profiling Ingress resource | | | #### ExecutorStatus @@ -502,7 +505,11 @@ _Appears in:_ _Appears in:_ - [ArmadaServerSpec](#armadaserverspec) - [BinocularsSpec](#binocularsspec) +- [EventIngesterSpec](#eventingesterspec) +- [ExecutorSpec](#executorspec) +- [LookoutIngesterSpec](#lookoutingesterspec) - [LookoutSpec](#lookoutspec) +- [SchedulerIngesterSpec](#scheduleringesterspec) - [SchedulerSpec](#schedulerspec) | Field | Description | Default | Validation | @@ -510,7 +517,8 @@ _Appears in:_ | `labels` _object (keys:string, values:string)_ | Labels is the map of labels which wil be added to all 
objects | | | | `annotations` _object (keys:string, values:string)_ | Annotations is a map of annotations which will be added to all ingress rules | | | | `ingressClass` _string_ | The type of ingress that is used | | | -| `nameOverride` _string_ | Overide name for ingress | | | +| `hostNames` _string array_ | An array of host names to build ingress rules for | | | +| `clusterIssuer` _string_ | Who is issuing certificates for CA | | | #### Lookout @@ -594,6 +602,7 @@ _Appears in:_ | `replicas` _integer_ | Replicas is the number of replicated instances for LookoutIngester | | | | `securityContext` _[SecurityContext](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#securitycontext-v1-core)_ | SecurityContext defines the security options the container should be run with | | | | `podSecurityContext` _[PodSecurityContext](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#podsecuritycontext-v1-core)_ | PodSecurityContext defines the security options the pod should be run with | | | +| `profilingIngressConfig` _[IngressConfig](#ingressconfig)_ | ProfilingIngressConfig defines configuration for the profiling Ingress resource | | | #### LookoutIngesterStatus @@ -653,6 +662,7 @@ _Appears in:_ | `dbPruningSchedule` _string_ | DbPruningSchedule schedule to use for db pruning CronJob | | | | `securityContext` _[SecurityContext](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#securitycontext-v1-core)_ | SecurityContext defines the security options the container should be run with | | | | `podSecurityContext` _[PodSecurityContext](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#podsecuritycontext-v1-core)_ | PodSecurityContext defines the security options the pod should be run with | | | +| `profilingIngressConfig` _[IngressConfig](#ingressconfig)_ | ProfilingIngressConfig defines configuration for the profiling Ingress resource | | | #### LookoutStatus @@ -668,26 +678,6 @@ _Appears in:_ -#### PortConfig - - - - - - - -_Appears in:_ -- [CommonSpecBase](#commonspecbase) - -| Field | Description | Default | Validation | -| --- | --- | --- | --- | -| `httpPort` _integer_ | | | | -| `httpNodePort` _integer_ | | | | -| `grpcPort` _integer_ | | | | -| `grpcNodePort` _integer_ | | | | -| `metricsPort` _integer_ | | | | - - #### PrometheusConfig @@ -824,6 +814,7 @@ _Appears in:_ | `replicas` _integer_ | Replicas is the number of replicated instances for SchedulerIngester | | | | `securityContext` _[SecurityContext](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#securitycontext-v1-core)_ | SecurityContext defines the security options the container should be run with | | | | `podSecurityContext` _[PodSecurityContext](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#podsecuritycontext-v1-core)_ | PodSecurityContext defines the security options the pod should be run with | | | +| `profilingIngressConfig` _[IngressConfig](#ingressconfig)_ | ProfilingIngressConfig defines configuration for the profiling Ingress resource | | | #### SchedulerIngesterStatus @@ -875,6 +866,7 @@ _Appears in:_ | `CommonSpecBase` _[CommonSpecBase](#commonspecbase)_ | | | | | `replicas` _integer_ | Replicas is the number of replicated instances for Scheduler | | | | `ingress` _[IngressConfig](#ingressconfig)_ | Ingress defines labels and annotations for the Ingress controller of Scheduler | | | +| `profilingIngressConfig` _[IngressConfig](#ingressconfig)_ | ProfilingIngressConfig defines configuration for the profiling 
Ingress resource | | | | `hostNames` _string array_ | An array of host names to build ingress rules for | | | | `clusterIssuer` _string_ | Who is issuing certificates for CA | | | | `migrate` _boolean_ | Migrate toggles whether to run migrations when installed | | | diff --git a/internal/controller/builders/config.go b/internal/controller/builders/config.go new file mode 100644 index 00000000..e801305f --- /dev/null +++ b/internal/controller/builders/config.go @@ -0,0 +1,69 @@ +package builders + +import ( + "github.com/pkg/errors" + "k8s.io/apimachinery/pkg/runtime" + "sigs.k8s.io/yaml" +) + +const ( + defaultHTTPPort = 8080 + defaultGRPCPort = 50051 + defaultMetricsPort = 9000 + defaultProfilingPort = 1337 +) + +type CommonApplicationConfig struct { + HTTPPort int32 `json:"httpPort"` + HTTPNodePort int32 `json:"httpNodePort,omitempty"` + GRPCPort int32 `json:"grpcPort"` + GRPCNodePort int32 `json:"grpcNodePort,omitempty"` + MetricsPort int32 `json:"metricsPort"` + Profiling ProfilingConfig `json:"profiling"` + GRPC GRPCConfig `json:"grpc"` +} + +type GRPCConfig struct { + Enabled bool `json:"enabled"` + TLS TLSConfig `json:"tls"` +} + +type TLSConfig struct { + Enabled bool `json:"enabled"` +} + +type ProfilingConfig struct { + Port int32 `json:"port"` +} + +// ParseCommonApplicationConfig parses the raw application config into a CommonApplicationConfig. +func ParseCommonApplicationConfig(rawAppConfig runtime.RawExtension) (*CommonApplicationConfig, error) { + appConfig, err := ConvertRawExtensionToYaml(rawAppConfig) + if err != nil { + return nil, err + } + + config := CommonApplicationConfig{ + HTTPPort: defaultHTTPPort, + GRPCPort: defaultGRPCPort, + MetricsPort: defaultMetricsPort, + Profiling: ProfilingConfig{ + Port: defaultProfilingPort, + }, + } + if err = yaml.Unmarshal([]byte(appConfig), &config); err != nil { + return nil, errors.WithStack(err) + } + + return &config, nil +} + +// ConvertRawExtensionToYaml converts a RawExtension input to Yaml +func ConvertRawExtensionToYaml(config runtime.RawExtension) (string, error) { + yamlConfig, err := yaml.JSONToYAML(config.Raw) + if err != nil { + return "", err + } + + return string(yamlConfig), nil +} diff --git a/internal/controller/builders/config_test.go b/internal/controller/builders/config_test.go new file mode 100644 index 00000000..754ba19e --- /dev/null +++ b/internal/controller/builders/config_test.go @@ -0,0 +1,115 @@ +package builders + +import ( + "testing" + + "github.com/stretchr/testify/assert" + + "k8s.io/apimachinery/pkg/runtime" +) + +func TestBuildCommonApplicationConfig(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + input runtime.RawExtension + expected *CommonApplicationConfig + wantErr bool + }{ + { + name: "default empty application config", + input: runtime.RawExtension{Raw: []byte(`{ }`)}, + expected: &CommonApplicationConfig{ + HTTPPort: defaultHTTPPort, + GRPCPort: defaultGRPCPort, + MetricsPort: defaultMetricsPort, + Profiling: ProfilingConfig{ + Port: defaultProfilingPort, + }, + }, + }, + { + name: "invalid application config", + input: runtime.RawExtension{Raw: []byte(`{"httpPort": 8081`)}, + expected: nil, + wantErr: true, + }, + { + name: "partially override default application config", + input: runtime.RawExtension{Raw: []byte(`{"httpPort": 1212, "profiling": { "port": 1111}}`)}, + expected: &CommonApplicationConfig{ + HTTPPort: 1212, + GRPCPort: 50051, + MetricsPort: 9000, + Profiling: ProfilingConfig{ + Port: 1111, + }, + }, + }, + { + name: "valid config", + input: 
runtime.RawExtension{ + Raw: []byte(`{"httpPort": 8081, "grpcPort": 50052, "metricsPort": 9001, "profiling": { "port": 1337} }`), + }, + expected: &CommonApplicationConfig{ + HTTPPort: 8081, + GRPCPort: 50052, + MetricsPort: 9001, + Profiling: ProfilingConfig{ + Port: 1337, + }, + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + pc, err := ParseCommonApplicationConfig(tt.input) + if tt.wantErr { + assert.Error(t, err) + } else { + assert.Nil(t, err) + } + assert.Equal(t, tt.expected, pc) + }) + } +} + +func TestConvertRawExtensionToYaml(t *testing.T) { + tests := []struct { + name string + input runtime.RawExtension + expected string + wantErr bool + }{ + { + name: "it converts runtime.RawExtension json to yaml", + input: runtime.RawExtension{Raw: []byte(`{ "test": { "foo": "bar" }}`)}, + expected: "test:\n foo: bar\n", + }, + { + name: "it converts complex runtime.RawExtension json to yaml", + input: runtime.RawExtension{Raw: []byte(`{ "test": {"foo": "bar"}, "test1": {"foo1": { "foo2": "bar2" }}}`)}, + expected: "test:\n foo: bar\ntest1:\n foo1:\n foo2: bar2\n", + }, + { + name: "it errors if runtime.RawExtension raw is malformed json", + input: runtime.RawExtension{Raw: []byte(`{ "foo": "bar" `)}, + expected: "", + wantErr: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + output, err := ConvertRawExtensionToYaml(tt.input) + if tt.wantErr { + assert.Error(t, err) + } else { + assert.Nil(t, err) + } + assert.Equal(t, tt.expected, output) + }) + } +} diff --git a/internal/controller/builders/generate_config.go b/internal/controller/builders/generate_config.go deleted file mode 100644 index 921048b2..00000000 --- a/internal/controller/builders/generate_config.go +++ /dev/null @@ -1,25 +0,0 @@ -package builders - -import ( - "k8s.io/apimachinery/pkg/runtime" - "sigs.k8s.io/yaml" -) - -// GenerateArmadaConfig generates armada config from the provided raw data and stores it into a map under the provided key. 
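The new ParseCommonApplicationConfig above replaces this removed GenerateArmadaConfig/BuildPortConfig path: it seeds a CommonApplicationConfig with the default ports, then unmarshals the CR's raw applicationConfig over it, so only fields the user actually sets override the defaults. A minimal, hypothetical usage sketch from inside the module (the RawExtension literal and printed values are illustrative, not part of this patch):

	package main

	import (
		"fmt"

		"k8s.io/apimachinery/pkg/runtime"

		"github.com/armadaproject/armada-operator/internal/controller/builders"
	)

	func main() {
		// Only grpcPort and profiling.port are set; all other fields keep their defaults.
		raw := runtime.RawExtension{Raw: []byte(`{"grpcPort": 50052, "profiling": {"port": 6060}}`)}
		cfg, err := builders.ParseCommonApplicationConfig(raw)
		if err != nil {
			panic(err)
		}
		// Prints: http=8080 grpc=50052 metrics=9000 profiling=6060
		fmt.Printf("http=%d grpc=%d metrics=%d profiling=%d\n",
			cfg.HTTPPort, cfg.GRPCPort, cfg.MetricsPort, cfg.Profiling.Port)
	}

Because sigs.k8s.io/yaml honors the struct's json tags, the same CommonApplicationConfig type serves both the JSON RawExtension and the YAML that CreateSecret writes into the config Secret.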
-func GenerateArmadaConfig(config runtime.RawExtension, key string) (map[string][]byte, error) { - yml, err := ConvertRawExtensionToYaml(config) - if err != nil { - return nil, err - } - return map[string][]byte{key: []byte(yml)}, nil -} - -// ConvertRawExtensionToYaml converts a RawExtension input to Yaml -func ConvertRawExtensionToYaml(config runtime.RawExtension) (string, error) { - yamlConfig, err := yaml.JSONToYAML(config.Raw) - if err != nil { - return "", err - } - - return string(yamlConfig), nil -} diff --git a/internal/controller/builders/generate_config_test.go b/internal/controller/builders/generate_config_test.go deleted file mode 100644 index b84d5980..00000000 --- a/internal/controller/builders/generate_config_test.go +++ /dev/null @@ -1,48 +0,0 @@ -package builders - -import ( - "testing" - - "github.com/stretchr/testify/assert" - - "k8s.io/apimachinery/pkg/runtime" -) - -func Test_convertRawExtensionToYaml(t *testing.T) { - - tests := []struct { - name string - input runtime.RawExtension - expected string - wantErr bool - }{ - { - name: "it converts runtime.RawExtension json to yaml", - input: runtime.RawExtension{Raw: []byte(`{ "test": { "foo": "bar" }}`)}, - expected: "test:\n foo: bar\n", - }, - { - name: "it converts complex runtime.RawExtension json to yaml", - input: runtime.RawExtension{Raw: []byte(`{ "test": {"foo": "bar"}, "test1": {"foo1": { "foo2": "bar2" }}}`)}, - expected: "test:\n foo: bar\ntest1:\n foo1:\n foo2: bar2\n", - }, - { - name: "it errors if runtime.RawExtension raw is malformed json", - input: runtime.RawExtension{Raw: []byte(`{ "foo": "bar" `)}, - expected: "", - wantErr: true, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - output, err := ConvertRawExtensionToYaml(tt.input) - if tt.wantErr { - assert.Error(t, err) - } else { - assert.Nil(t, err) - } - assert.Equal(t, tt.expected, output) - }) - } -} diff --git a/internal/controller/builders/ingress.go b/internal/controller/builders/ingress.go new file mode 100644 index 00000000..842efb17 --- /dev/null +++ b/internal/controller/builders/ingress.go @@ -0,0 +1,58 @@ +package builders + +import ( + "github.com/pkg/errors" + networkingv1 "k8s.io/api/networking/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/utils/ptr" +) + +func Ingress( + name, namespace string, + labels, annotations map[string]string, + hostnames []string, + serviceName, secret, path string, + servicePort int32, +) (*networkingv1.Ingress, error) { + if len(hostnames) == 0 { + // if no hostnames are provided, no ingress can be configured + return nil, errors.New("no hostnames provided") + } + if servicePort <= 0 { + return nil, errors.New("port must be greater than 0") + } + ingress := &networkingv1.Ingress{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + Labels: labels, + Annotations: annotations, + }, + } + + ingress.Spec.TLS = []networkingv1.IngressTLS{{Hosts: hostnames, SecretName: secret}} + var ingressRules []networkingv1.IngressRule + for _, val := range hostnames { + backend := networkingv1.IngressBackend{ + Service: &networkingv1.IngressServiceBackend{ + Name: serviceName, + Port: networkingv1.ServiceBackendPort{ + Number: servicePort, + }, + }, + } + rule := networkingv1.IngressRule{Host: val, IngressRuleValue: networkingv1.IngressRuleValue{ + HTTP: &networkingv1.HTTPIngressRuleValue{ + Paths: []networkingv1.HTTPIngressPath{{ + Path: path, + PathType: ptr.To(networkingv1.PathTypePrefix), + Backend: backend, + }}, + }, + }} + ingressRules = 
append(ingressRules, rule) + } + ingress.Spec.Rules = ingressRules + + return ingress, nil +} diff --git a/internal/controller/builders/ingress_test.go b/internal/controller/builders/ingress_test.go new file mode 100644 index 00000000..86a292e6 --- /dev/null +++ b/internal/controller/builders/ingress_test.go @@ -0,0 +1,180 @@ +package builders + +import ( + "testing" + + "k8s.io/utils/ptr" + + "github.com/stretchr/testify/assert" + networkingv1 "k8s.io/api/networking/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +func TestIngress(t *testing.T) { + tests := []struct { + test string + labels map[string]string + annotations map[string]string + hostnames []string + service string + secret string + path string + port int32 + expectedErr bool + expected *networkingv1.Ingress + }{ + { + test: "valid ingress", + labels: map[string]string{"app": "my-app"}, + annotations: map[string]string{"ingress.kubernetes.io/rewrite-target": "/"}, + hostnames: []string{"example.com"}, + service: "my-service", + secret: "my-secret", + path: "/", + port: 80, + expectedErr: false, + expected: &networkingv1.Ingress{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-ingress", + Namespace: "default", + Labels: map[string]string{"app": "my-app"}, + Annotations: map[string]string{ + "ingress.kubernetes.io/rewrite-target": "/", + }, + }, + Spec: networkingv1.IngressSpec{ + TLS: []networkingv1.IngressTLS{ + {Hosts: []string{"example.com"}, SecretName: "my-secret"}, + }, + Rules: []networkingv1.IngressRule{ + { + Host: "example.com", + IngressRuleValue: networkingv1.IngressRuleValue{ + HTTP: &networkingv1.HTTPIngressRuleValue{ + Paths: []networkingv1.HTTPIngressPath{ + { + Path: "/", + PathType: ptr.To(networkingv1.PathTypePrefix), + Backend: networkingv1.IngressBackend{ + Service: &networkingv1.IngressServiceBackend{ + Name: "my-service", + Port: networkingv1.ServiceBackendPort{ + Number: 80, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + { + test: "no hostnames", + labels: map[string]string{"app": "my-app"}, + annotations: map[string]string{"ingress.kubernetes.io/rewrite-target": "/"}, + hostnames: []string{}, + service: "my-service", + secret: "my-secret", + path: "/", + port: 80, + expectedErr: true, + expected: nil, // No hostnames, so should return nil + }, + { + test: "multiple hostnames", + labels: map[string]string{"app": "my-app"}, + annotations: map[string]string{"ingress.kubernetes.io/rewrite-target": "/"}, + hostnames: []string{"example.com", "example.org"}, + service: "my-service", + secret: "my-secret", + path: "/", + port: 443, + expectedErr: false, + expected: &networkingv1.Ingress{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-ingress", + Namespace: "default", + Labels: map[string]string{"app": "my-app"}, + Annotations: map[string]string{ + "ingress.kubernetes.io/rewrite-target": "/", + }, + }, + Spec: networkingv1.IngressSpec{ + TLS: []networkingv1.IngressTLS{ + {Hosts: []string{"example.com", "example.org"}, SecretName: "my-secret"}, + }, + Rules: []networkingv1.IngressRule{ + { + Host: "example.com", + IngressRuleValue: networkingv1.IngressRuleValue{ + HTTP: &networkingv1.HTTPIngressRuleValue{ + Paths: []networkingv1.HTTPIngressPath{ + { + Path: "/", + PathType: ptr.To(networkingv1.PathTypePrefix), + Backend: networkingv1.IngressBackend{ + Service: &networkingv1.IngressServiceBackend{ + Name: "my-service", + Port: networkingv1.ServiceBackendPort{ + Number: 443, + }, + }, + }, + }, + }, + }, + }, + }, + { + Host: "example.org", + IngressRuleValue: networkingv1.IngressRuleValue{ + 
HTTP: &networkingv1.HTTPIngressRuleValue{ + Paths: []networkingv1.HTTPIngressPath{ + { + Path: "/", + PathType: ptr.To(networkingv1.PathTypePrefix), + Backend: networkingv1.IngressBackend{ + Service: &networkingv1.IngressServiceBackend{ + Name: "my-service", + Port: networkingv1.ServiceBackendPort{ + Number: 443, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + } + + for _, tt := range tests { + t.Run(tt.test, func(t *testing.T) { + ingress, err := Ingress( + "test-ingress", + "default", + tt.labels, + tt.annotations, + tt.hostnames, + tt.service, + tt.secret, + tt.path, + tt.port, + ) + if tt.expectedErr { + assert.Error(t, err) + } else { + assert.NoError(t, err) + } + assert.EqualValues(t, tt.expected, ingress) + }) + } +} diff --git a/internal/controller/builders/secret.go b/internal/controller/builders/secret.go index 74bd842e..115130ec 100644 --- a/internal/controller/builders/secret.go +++ b/internal/controller/builders/secret.go @@ -1,19 +1,20 @@ package builders import ( + "github.com/pkg/errors" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" ) func CreateSecret(appConfig runtime.RawExtension, secretName, secretNamespace, filename string) (*corev1.Secret, error) { - armadaConfig, err := GenerateArmadaConfig(appConfig, filename) + armadaConfig, err := ConvertRawExtensionToYaml(appConfig) if err != nil { - return nil, err + return nil, errors.WithStack(err) } secret := corev1.Secret{ ObjectMeta: metav1.ObjectMeta{Name: secretName, Namespace: secretNamespace}, - Data: armadaConfig, + Data: map[string][]byte{filename: []byte(armadaConfig)}, } return &secret, nil } diff --git a/internal/controller/builders/service.go b/internal/controller/builders/service.go index 12c41e83..843a82c3 100644 --- a/internal/controller/builders/service.go +++ b/internal/controller/builders/service.go @@ -3,30 +3,76 @@ package builders import ( corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - - "github.com/armadaproject/armada-operator/api/install/v1alpha1" ) -func Service(name string, namespace string, labels, identityLabel map[string]string, portConfig v1alpha1.PortConfig) *corev1.Service { +// PortConfig specifies which ports should be exposed by the service +type PortConfig struct { + ExposeHTTP bool + ExposeGRPC bool + ExposeMetrics bool + ExposeProfiling bool +} + +var ServiceEnableApplicationPortsOnly = PortConfig{ + ExposeHTTP: true, + ExposeGRPC: true, + ExposeMetrics: true, +} + +var ServiceEnableHTTPWithMetrics = PortConfig{ + ExposeHTTP: true, + ExposeMetrics: true, +} + +var ServiceEnableGRPCWithMetrics = PortConfig{ + ExposeGRPC: true, + ExposeMetrics: true, +} + +var ServiceEnableProfilingPortOnly = PortConfig{ + ExposeProfiling: true, +} + +var ServiceEnableMetricsPortOnly = PortConfig{ + ExposeMetrics: true, +} + +func Service( + name string, + namespace string, + labels, identityLabel map[string]string, + appConfig *CommonApplicationConfig, + portConfig PortConfig, +) *corev1.Service { var ports []corev1.ServicePort - if portConfig.HttpPort > 0 { + if portConfig.ExposeHTTP { ports = append(ports, corev1.ServicePort{ Name: "web", - Port: portConfig.HttpPort, - NodePort: portConfig.HttpNodePort, + Port: appConfig.HTTPPort, + NodePort: appConfig.HTTPNodePort, + Protocol: corev1.ProtocolTCP, }) } - if portConfig.GrpcPort > 0 { + if portConfig.ExposeGRPC { ports = append(ports, corev1.ServicePort{ Name: "grpc", - Port: portConfig.GrpcPort, - NodePort: portConfig.GrpcNodePort, + Port: 
appConfig.GRPCPort, + NodePort: appConfig.GRPCNodePort, + Protocol: corev1.ProtocolTCP, + }) + } + if portConfig.ExposeMetrics { + ports = append(ports, corev1.ServicePort{ + Name: "metrics", + Port: appConfig.MetricsPort, + Protocol: corev1.ProtocolTCP, + }) } - if portConfig.MetricsPort > 0 { + if port := appConfig.Profiling.Port; portConfig.ExposeProfiling { ports = append(ports, corev1.ServicePort{ - Name: "metrics", - Port: portConfig.MetricsPort, + Name: "profiling", + Port: port, + Protocol: corev1.ProtocolTCP, }) } service := corev1.Service{ @@ -34,10 +80,11 @@ func Service(name string, namespace string, labels, identityLabel map[string]str Spec: corev1.ServiceSpec{ Selector: identityLabel, Ports: ports, + Type: corev1.ServiceTypeClusterIP, }, } - if portConfig.HttpNodePort > 0 || portConfig.GrpcNodePort > 0 { - service.Spec.Type = "NodePort" + if appConfig.HTTPNodePort > 0 || appConfig.GRPCNodePort > 0 { + service.Spec.Type = corev1.ServiceTypeNodePort } return &service diff --git a/internal/controller/builders/service_account.go b/internal/controller/builders/service_account.go index c67ecaa3..5f6b1a64 100644 --- a/internal/controller/builders/service_account.go +++ b/internal/controller/builders/service_account.go @@ -7,7 +7,7 @@ import ( installv1alpha1 "github.com/armadaproject/armada-operator/api/install/v1alpha1" ) -func CreateServiceAccount(name, namespace string, labels map[string]string, serviceAccountConfig *installv1alpha1.ServiceAccountConfig) *corev1.ServiceAccount { +func ServiceAccount(name, namespace string, labels map[string]string, serviceAccountConfig *installv1alpha1.ServiceAccountConfig) *corev1.ServiceAccount { serviceAccount := &corev1.ServiceAccount{ ObjectMeta: metav1.ObjectMeta{Name: name, Namespace: namespace, Labels: labels}, } diff --git a/internal/controller/builders/service_account_test.go b/internal/controller/builders/service_account_test.go index bb2a5216..2c81c44b 100644 --- a/internal/controller/builders/service_account_test.go +++ b/internal/controller/builders/service_account_test.go @@ -33,7 +33,7 @@ func Test_ServiceAccount(t *testing.T) { for name, tt := range tests { t.Run(name, func(t *testing.T) { - output := CreateServiceAccount(tt.name, tt.namespace, tt.labels, tt.serviceAccountConfig) + output := ServiceAccount(tt.name, tt.namespace, tt.labels, tt.serviceAccountConfig) assert.Equal(t, "test", output.Name) assert.Equal(t, "default", output.Namespace) assert.Equal(t, tt.labels, output.Labels) diff --git a/internal/controller/builders/service_test.go b/internal/controller/builders/service_test.go index 75ff3546..1966b14c 100644 --- a/internal/controller/builders/service_test.go +++ b/internal/controller/builders/service_test.go @@ -5,41 +5,50 @@ import ( "github.com/stretchr/testify/assert" corev1 "k8s.io/api/core/v1" - - "github.com/armadaproject/armada-operator/api/install/v1alpha1" ) func TestService(t *testing.T) { - testcases := map[string]struct { + t.Parallel() + + tests := map[string]struct { name string namespace string labels map[string]string identityLabel map[string]string - portConfig v1alpha1.PortConfig - ports []corev1.ServicePort + config CommonApplicationConfig + portConfig PortConfig + expectedPorts []corev1.ServicePort }{ - "PortConfig values are translated to ServicePort": { + "All ports generated correctly": { name: "lookout", namespace: "lookout", labels: map[string]string{"app": "lookout", "hello": "world"}, identityLabel: map[string]string{"app": "binoculars"}, - portConfig: v1alpha1.PortConfig{ - GrpcPort: 50059, - 
HttpPort: 8080, + config: CommonApplicationConfig{ + GRPCPort: 50059, + HTTPPort: 8080, MetricsPort: 9000, }, - ports: []corev1.ServicePort{ + portConfig: PortConfig{ + ExposeHTTP: true, + ExposeGRPC: true, + ExposeMetrics: true, + }, + expectedPorts: []corev1.ServicePort{ { - Name: "grpc", - Port: 50059, + Name: "grpc", + Port: 50059, + Protocol: corev1.ProtocolTCP, }, { - Name: "web", - Port: 8080, + Name: "web", + Port: 8080, + Protocol: corev1.ProtocolTCP, }, { - Name: "metrics", - Port: 9000, + Name: "metrics", + Port: 9000, + Protocol: corev1.ProtocolTCP, }, }, }, @@ -48,44 +57,60 @@ func TestService(t *testing.T) { namespace: "lookout", labels: map[string]string{"app": "lookout", "hello": "world"}, identityLabel: map[string]string{"app": "binoculars"}, - portConfig: v1alpha1.PortConfig{ - GrpcPort: 50059, - GrpcNodePort: 32000, - HttpPort: 8080, - HttpNodePort: 32001, + config: CommonApplicationConfig{ + GRPCPort: 50059, + GRPCNodePort: 32000, + HTTPPort: 8080, + HTTPNodePort: 32001, MetricsPort: 9000, + Profiling: ProfilingConfig{ + Port: 1337, + }, + }, + portConfig: PortConfig{ + ExposeHTTP: true, + ExposeGRPC: true, + ExposeMetrics: true, + ExposeProfiling: true, }, - ports: []corev1.ServicePort{ + expectedPorts: []corev1.ServicePort{ { Name: "grpc", Port: 50059, NodePort: 32000, + Protocol: corev1.ProtocolTCP, }, { Name: "web", Port: 8080, NodePort: 32001, + Protocol: corev1.ProtocolTCP, }, { - Name: "metrics", - Port: 9000, + Name: "metrics", + Port: 9000, + Protocol: corev1.ProtocolTCP, + }, + { + Name: "profiling", + Port: 1337, + Protocol: corev1.ProtocolTCP, }, }, }, } - for name, tc := range testcases { + for name, tc := range tests { t.Run(name, func(t *testing.T) { - got := Service(tc.name, tc.namespace, tc.labels, tc.identityLabel, tc.portConfig) + got := Service(tc.name, tc.namespace, tc.labels, tc.identityLabel, &tc.config, tc.portConfig) assert.Equal(t, tc.name, got.Name) assert.Equal(t, tc.namespace, got.Namespace) - assert.ElementsMatch(t, tc.ports, got.Spec.Ports) - if tc.portConfig.GrpcNodePort > 0 { - assert.Equal(t, corev1.ServiceType("NodePort"), got.Spec.Type) + assert.ElementsMatch(t, tc.expectedPorts, got.Spec.Ports) + if tc.config.GRPCNodePort > 0 || tc.config.HTTPNodePort > 0 { + assert.Equal(t, corev1.ServiceTypeNodePort, got.Spec.Type) } else { - assert.Equal(t, corev1.ServiceType(""), got.Spec.Type) + assert.Equal(t, corev1.ServiceTypeClusterIP, got.Spec.Type) } - }) } } diff --git a/internal/controller/install/armadaserver_controller.go b/internal/controller/install/armadaserver_controller.go index 8e48d394..c6222280 100644 --- a/internal/controller/install/armadaserver_controller.go +++ b/internal/controller/install/armadaserver_controller.go @@ -33,7 +33,6 @@ import ( appsv1 "k8s.io/api/apps/v1" batchv1 "k8s.io/api/batch/v1" corev1 "k8s.io/api/core/v1" - networking "k8s.io/api/networking/v1" networkingv1 "k8s.io/api/networking/v1" policyv1 "k8s.io/api/policy/v1" k8serrors "k8s.io/apimachinery/pkg/api/errors" @@ -74,14 +73,13 @@ func (r *ArmadaServerReconciler) Reconcile(ctx context.Context, req ctrl.Request return ctrl.Result{}, err } - pc, err := installv1alpha1.BuildPortConfig(server.Spec.ApplicationConfig) + commonConfig, err := builders.ParseCommonApplicationConfig(server.Spec.ApplicationConfig) if err != nil { return ctrl.Result{}, err } - server.Spec.PortConfig = pc var components *CommonComponents - components, err = generateArmadaServerInstallComponents(&server, r.Scheme) + components, err = generateArmadaServerInstallComponents(&server, 
r.Scheme, commonConfig) if err != nil { return ctrl.Result{}, err } @@ -160,7 +158,7 @@ func (r *ArmadaServerReconciler) Reconcile(ctx context.Context, req ctrl.Request return ctrl.Result{}, nil } -func generateArmadaServerInstallComponents(as *installv1alpha1.ArmadaServer, scheme *runtime.Scheme) (*CommonComponents, error) { +func generateArmadaServerInstallComponents(as *installv1alpha1.ArmadaServer, scheme *runtime.Scheme, config *builders.CommonApplicationConfig) (*CommonComponents, error) { secret, err := builders.CreateSecret(as.Spec.ApplicationConfig, as.Name, as.Namespace, GetConfigFilename(as.Name)) if err != nil { return nil, err @@ -172,14 +170,14 @@ func generateArmadaServerInstallComponents(as *installv1alpha1.ArmadaServer, sch var serviceAccount *corev1.ServiceAccount serviceAccountName := as.Spec.CustomServiceAccount if serviceAccountName == "" { - serviceAccount = builders.CreateServiceAccount(as.Name, as.Namespace, AllLabels(as.Name, as.Labels), as.Spec.ServiceAccount) + serviceAccount = builders.ServiceAccount(as.Name, as.Namespace, AllLabels(as.Name, as.Labels), as.Spec.ServiceAccount) if err = controllerutil.SetOwnerReference(as, serviceAccount, scheme); err != nil { return nil, errors.WithStack(err) } serviceAccountName = serviceAccount.Name } - deployment, err := createArmadaServerDeployment(as, serviceAccountName) + deployment, err := createArmadaServerDeployment(as, serviceAccountName, config) if err != nil { return nil, err } @@ -187,7 +185,7 @@ func generateArmadaServerInstallComponents(as *installv1alpha1.ArmadaServer, sch return nil, err } - ingressGrpc, err := createIngressGrpc(as) + ingressGrpc, err := createServerIngressGRPC(as, config) if err != nil { return nil, err } @@ -197,7 +195,7 @@ func generateArmadaServerInstallComponents(as *installv1alpha1.ArmadaServer, sch } } - ingressHttp, err := createIngressHttp(as) + ingressHttp, err := createServerIngressHTTP(as, config) if err != nil { return nil, err } @@ -207,14 +205,30 @@ func generateArmadaServerInstallComponents(as *installv1alpha1.ArmadaServer, sch } } - service := builders.Service(as.Name, as.Namespace, AllLabels(as.Name, as.Labels), IdentityLabel(as.Name), as.Spec.PortConfig) + service := builders.Service( + as.Name, + as.Namespace, + AllLabels(as.Name, as.Labels), + IdentityLabel(as.Name), + config, + builders.ServiceEnableApplicationPortsOnly, + ) if err := controllerutil.SetOwnerReference(as, service, scheme); err != nil { return nil, err } + profilingService, profilingIngress, err := newProfilingComponents( + as, + scheme, + config, + as.Spec.ProfilingIngressConfig, + ) + if err != nil { + return nil, errors.WithStack(err) + } pdb := createServerPodDisruptionBudget(as) if err := controllerutil.SetOwnerReference(as, pdb, scheme); err != nil { - return nil, err + return nil, errors.WithStack(err) } var prometheusRule *monitoringv1.PrometheusRule @@ -233,7 +247,7 @@ func generateArmadaServerInstallComponents(as *installv1alpha1.ArmadaServer, sch jobs := []*batchv1.Job{{}} if as.Spec.PulsarInit { - jobs, err = createArmadaServerMigrationJobs(as) + jobs, err = createArmadaServerMigrationJobs(as, config) if err != nil { return nil, err } @@ -248,7 +262,9 @@ func generateArmadaServerInstallComponents(as *installv1alpha1.ArmadaServer, sch Deployment: deployment, IngressGrpc: ingressGrpc, IngressHttp: ingressHttp, + IngressProfiling: profilingIngress, Service: service, + ServiceProfiling: profilingService, ServiceAccount: serviceAccount, Secret: secret, PodDisruptionBudget: pdb, @@ -259,11 +275,8 @@ 
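Each component builder now receives the parsed *builders.CommonApplicationConfig instead of reading ports off the CRD spec, and profiling gets a dedicated Service/Ingress pair from newProfilingComponents (its definition is not shown in this hunk; it lands with the common_helpers.go changes). A hedged sketch of what the profiling Service side amounts to, assuming the helper reuses the generic Service builder; the wrapper function name and "-profiling" suffix below are illustrative, not taken from the patch:

	// buildProfilingService is a hypothetical wrapper showing the builder call.
	func buildProfilingService(name, namespace string, cfg *builders.CommonApplicationConfig) *corev1.Service {
		return builders.Service(
			name+"-profiling", // assumed naming convention, for illustration only
			namespace,
			AllLabels(name, nil),
			IdentityLabel(name),
			cfg,
			builders.ServiceEnableProfilingPortOnly, // exposes only cfg.Profiling.Port (default 1337)
		)
	}

Keeping pprof on its own Service means the profiling port never rides along on the application-facing Service, and its exposure can be driven separately through the new ProfilingIngressConfig field.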
func generateArmadaServerInstallComponents(as *installv1alpha1.ArmadaServer, sch } -func createArmadaServerMigrationJobs(as *installv1alpha1.ArmadaServer) ([]*batchv1.Job, error) { - runAsUser := int64(1000) - runAsGroup := int64(2000) +func createArmadaServerMigrationJobs(as *installv1alpha1.ArmadaServer, commonConfig *builders.CommonApplicationConfig) ([]*batchv1.Job, error) { terminationGracePeriodSeconds := as.Spec.TerminationGracePeriodSeconds - allowPrivilegeEscalation := false parallelism := int32(1) completions := int32(1) backoffLimit := int32(0) @@ -299,13 +312,10 @@ func createArmadaServerMigrationJobs(as *installv1alpha1.ArmadaServer) ([]*batch Spec: corev1.PodSpec{ RestartPolicy: "Never", TerminationGracePeriodSeconds: terminationGracePeriodSeconds, - SecurityContext: &corev1.PodSecurityContext{ - RunAsUser: &runAsUser, - RunAsGroup: &runAsGroup, - }, + SecurityContext: as.Spec.PodSecurityContext, Containers: []corev1.Container{{ Name: "wait-for-pulsar", - ImagePullPolicy: "IfNotPresent", + ImagePullPolicy: corev1.PullIfNotPresent, Image: "alpine:3.16", Args: []string{ "/bin/sh", @@ -316,7 +326,7 @@ func createArmadaServerMigrationJobs(as *installv1alpha1.ArmadaServer) ([]*batch }, Ports: []corev1.ContainerPort{{ Name: "metrics", - ContainerPort: as.Spec.PortConfig.MetricsPort, + ContainerPort: commonConfig.MetricsPort, Protocol: "TCP", }}, Env: []corev1.EnvVar{ @@ -331,7 +341,7 @@ func createArmadaServerMigrationJobs(as *installv1alpha1.ArmadaServer) ([]*batch SubPath: as.Name, }, }, - SecurityContext: &corev1.SecurityContext{AllowPrivilegeEscalation: &allowPrivilegeEscalation}, + SecurityContext: as.Spec.SecurityContext, }}, NodeSelector: as.Spec.NodeSelector, Tolerations: as.Spec.Tolerations, @@ -372,7 +382,7 @@ func createArmadaServerMigrationJobs(as *installv1alpha1.ArmadaServer) ([]*batch SecurityContext: &corev1.PodSecurityContext{}, Containers: []corev1.Container{{ Name: "init-pulsar", - ImagePullPolicy: "IfNotPresent", + ImagePullPolicy: corev1.PullIfNotPresent, Image: fmt.Sprintf("%v:%v", asConfig.Pulsar.ArmadaInit.Image.Repository, asConfig.Pulsar.ArmadaInit.Image.Tag), Args: []string{ "/bin/sh", @@ -388,11 +398,6 @@ func createArmadaServerMigrationJobs(as *installv1alpha1.ArmadaServer) ([]*batch bin/pulsar-admin --admin-url $PULSARADMINURL namespaces set-auto-topic-creation public/default --disable bin/pulsar-admin --admin-url $PULSARADMINURL namespaces set-auto-topic-creation armada/armada --disable`, }, - Ports: []corev1.ContainerPort{{ - Name: "metrics", - ContainerPort: as.Spec.PortConfig.MetricsPort, - Protocol: "TCP", - }}, Env: []corev1.EnvVar{ { Name: "PULSARADMINURL", @@ -408,7 +413,7 @@ func createArmadaServerMigrationJobs(as *installv1alpha1.ArmadaServer) ([]*batch SubPath: as.Name, }, }, - SecurityContext: &corev1.SecurityContext{AllowPrivilegeEscalation: &allowPrivilegeEscalation}, + SecurityContext: as.Spec.SecurityContext, }}, NodeSelector: as.Spec.NodeSelector, Tolerations: as.Spec.Tolerations, @@ -428,11 +433,12 @@ func createArmadaServerMigrationJobs(as *installv1alpha1.ArmadaServer) ([]*batch return []*batchv1.Job{&pulsarWaitJob, &initPulsarJob}, nil } -func createArmadaServerDeployment(as *installv1alpha1.ArmadaServer, serviceAccountName string) (*appsv1.Deployment, error) { +func createArmadaServerDeployment( + as *installv1alpha1.ArmadaServer, + serviceAccountName string, + commonConfig *builders.CommonApplicationConfig, +) (*appsv1.Deployment, error) { var replicas int32 = 1 - var runAsUser int64 = 1000 - var runAsGroup int64 = 2000 - 
allowPrivilegeEscalation := false env := createEnv(as.Spec.Environment) pulsarConfig, err := ExtractPulsarConfig(as.Spec.ApplicationConfig) if err != nil { @@ -466,61 +472,22 @@ func createArmadaServerDeployment(as *installv1alpha1.ArmadaServer, serviceAccou Spec: corev1.PodSpec{ ServiceAccountName: serviceAccountName, TerminationGracePeriodSeconds: as.DeletionGracePeriodSeconds, - SecurityContext: &corev1.PodSecurityContext{ - RunAsUser: &runAsUser, - RunAsGroup: &runAsGroup, - }, - Affinity: &corev1.Affinity{ - PodAffinity: &corev1.PodAffinity{ - PreferredDuringSchedulingIgnoredDuringExecution: []corev1.WeightedPodAffinityTerm{{ - Weight: 100, - PodAffinityTerm: corev1.PodAffinityTerm{ - TopologyKey: "kubernetes.io/hostname", - LabelSelector: &metav1.LabelSelector{ - MatchExpressions: []metav1.LabelSelectorRequirement{{ - Key: "app", - Operator: metav1.LabelSelectorOpIn, - Values: []string{as.Name}, - }}, - }, - }, - }}, - }, - }, + SecurityContext: as.Spec.PodSecurityContext, + Affinity: defaultAffinity(as.Name, 100), Containers: []corev1.Container{{ Name: "armadaserver", - ImagePullPolicy: "IfNotPresent", + ImagePullPolicy: corev1.PullIfNotPresent, Image: ImageString(as.Spec.Image), Args: []string{appConfigFlag, appConfigFilepath}, - Ports: []corev1.ContainerPort{ - { - Name: "metrics", - ContainerPort: as.Spec.PortConfig.MetricsPort, - Protocol: "TCP", - }, - { - Name: "grpc", - ContainerPort: as.Spec.PortConfig.GrpcPort, - Protocol: "TCP", - }, - { - Name: "http", - ContainerPort: as.Spec.PortConfig.HttpPort, - Protocol: "TCP", - }, - }, + Ports: newContainerPortsAll(commonConfig), Env: env, VolumeMounts: volumeMounts, - SecurityContext: &corev1.SecurityContext{AllowPrivilegeEscalation: &allowPrivilegeEscalation}, + SecurityContext: as.Spec.SecurityContext, }}, Volumes: volumes, }, }, - Strategy: appsv1.DeploymentStrategy{}, - MinReadySeconds: 0, - RevisionHistoryLimit: nil, - Paused: false, - ProgressDeadlineSeconds: nil, + Strategy: defaultDeploymentStrategy(1), }, } if as.Spec.Resources != nil { @@ -531,115 +498,46 @@ func createArmadaServerDeployment(as *installv1alpha1.ArmadaServer, serviceAccou return &deployment, nil } -func createIngressGrpc(as *installv1alpha1.ArmadaServer) (*networkingv1.Ingress, error) { +func createServerIngressGRPC(as *installv1alpha1.ArmadaServer, config *builders.CommonApplicationConfig) (*networkingv1.Ingress, error) { if len(as.Spec.HostNames) == 0 { // if no hostnames, no ingress can be configured return nil, nil } - ingressGRPCName := as.Name + "-grpc" - grpcIngress := &networkingv1.Ingress{ - ObjectMeta: metav1.ObjectMeta{Name: ingressGRPCName, Namespace: as.Namespace, Labels: AllLabels(as.Name, as.Labels), - Annotations: map[string]string{ - "kubernetes.io/ingress.class": as.Spec.Ingress.IngressClass, - "nginx.ingress.kubernetes.io/ssl-redirect": "true", - "nginx.ingress.kubernetes.io/backend-protocol": "GRPC", - }, - }, - } - - if as.Spec.ClusterIssuer != "" { - grpcIngress.ObjectMeta.Annotations["certmanager.k8s.io/cluster-issuer"] = as.Spec.ClusterIssuer - grpcIngress.ObjectMeta.Annotations["cert-manager.io/cluster-issuer"] = as.Spec.ClusterIssuer - } - if as.Spec.Ingress.Annotations != nil { - for key, value := range as.Spec.Ingress.Annotations { - grpcIngress.ObjectMeta.Annotations[key] = value - } + name := as.Name + "-grpc" + labels := AllLabels(as.Name, as.Spec.Labels, as.Spec.Ingress.Labels) + baseAnnotations := map[string]string{ + "nginx.ingress.kubernetes.io/ssl-redirect": "true", } - grpcIngress.ObjectMeta.Labels = AllLabels(as.Name, 
as.Spec.Labels, as.Spec.Ingress.Labels) + annotations := buildIngressAnnotations(as.Spec.Ingress, baseAnnotations, BackendProtocolGRPC, config.GRPC.Enabled) secretName := as.Name + "-service-tls" - grpcIngress.Spec.TLS = []networking.IngressTLS{{Hosts: as.Spec.HostNames, SecretName: secretName}} - var ingressRules []networking.IngressRule serviceName := as.Name - for _, val := range as.Spec.HostNames { - ingressRules = append(ingressRules, networking.IngressRule{Host: val, IngressRuleValue: networking.IngressRuleValue{ - HTTP: &networking.HTTPIngressRuleValue{ - Paths: []networking.HTTPIngressPath{{ - Path: "/", - PathType: (*networking.PathType)(ptr.To[string]("ImplementationSpecific")), - Backend: networking.IngressBackend{ - Service: &networking.IngressServiceBackend{ - Name: serviceName, - Port: networking.ServiceBackendPort{ - Number: as.Spec.PortConfig.GrpcPort, - }, - }, - }, - }}, - }, - }}) - } - grpcIngress.Spec.Rules = ingressRules - - return grpcIngress, nil + servicePort := config.GRPCPort + path := "/" + ingress, err := builders.Ingress(name, as.Namespace, labels, annotations, as.Spec.HostNames, serviceName, secretName, path, servicePort) + return ingress, errors.WithStack(err) } -func createIngressHttp(as *installv1alpha1.ArmadaServer) (*networkingv1.Ingress, error) { +func createServerIngressHTTP(as *installv1alpha1.ArmadaServer, config *builders.CommonApplicationConfig) (*networkingv1.Ingress, error) { if len(as.Spec.HostNames) == 0 { // when no hostnames, no ingress can be configured return nil, nil } - restIngressName := as.Name + "-rest" - restIngress := &networkingv1.Ingress{ - ObjectMeta: metav1.ObjectMeta{ - Name: restIngressName, Namespace: as.Namespace, Labels: AllLabels(as.Name, as.Labels), - Annotations: map[string]string{ - "kubernetes.io/ingress.class": as.Spec.Ingress.IngressClass, - "nginx.ingress.kubernetes.io/rewrite-target": "/$2", - "nginx.ingress.kubernetes.io/ssl-redirect": "true", - }, - }, - } - - if as.Spec.ClusterIssuer != "" { - restIngress.ObjectMeta.Annotations["certmanager.k8s.io/cluster-issuer"] = as.Spec.ClusterIssuer - restIngress.ObjectMeta.Annotations["cert-manager.io/cluster-issuer"] = as.Spec.ClusterIssuer - } - - if as.Spec.Ingress.Annotations != nil { - for key, value := range as.Spec.Ingress.Annotations { - restIngress.ObjectMeta.Annotations[key] = value - } + name := as.Name + "-rest" + labels := AllLabels(as.Name, as.Spec.Labels, as.Spec.Ingress.Labels) + baseAnnotations := map[string]string{ + "nginx.ingress.kubernetes.io/rewrite-target": "/$2", + "nginx.ingress.kubernetes.io/ssl-redirect": "true", } - restIngress.ObjectMeta.Labels = AllLabels(as.Name, as.Spec.Labels, as.Spec.Ingress.Labels) + annotations := buildIngressAnnotations(as.Spec.Ingress, baseAnnotations, BackendProtocolHTTP, config.GRPC.Enabled) secretName := as.Name + "-service-tls" - restIngress.Spec.TLS = []networking.IngressTLS{{Hosts: as.Spec.HostNames, SecretName: secretName}} - var ingressRules []networking.IngressRule serviceName := as.Name - for _, val := range as.Spec.HostNames { - ingressRules = append(ingressRules, networking.IngressRule{Host: val, IngressRuleValue: networking.IngressRuleValue{ - HTTP: &networking.HTTPIngressRuleValue{ - Paths: []networking.HTTPIngressPath{{ - Path: "/api(/|$)(.*)", - PathType: (*networking.PathType)(ptr.To[string]("ImplementationSpecific")), - Backend: networking.IngressBackend{ - Service: &networking.IngressServiceBackend{ - Name: serviceName, - Port: networking.ServiceBackendPort{ - Number: as.Spec.PortConfig.HttpPort, - }, 
- }, - }, - }}, - }, - }}) - } - restIngress.Spec.Rules = ingressRules - - return restIngress, nil + servicePort := config.HTTPPort + path := "/api(/|$)(.*)" + ingress, err := builders.Ingress(name, as.Namespace, labels, annotations, as.Spec.HostNames, serviceName, secretName, path, servicePort) + return ingress, errors.WithStack(err) } func createServerPodDisruptionBudget(as *installv1alpha1.ArmadaServer) *policyv1.PodDisruptionBudget { diff --git a/internal/controller/install/armadaserver_controller_test.go b/internal/controller/install/armadaserver_controller_test.go index 4c2ddbdc..e94f12ee 100644 --- a/internal/controller/install/armadaserver_controller_test.go +++ b/internal/controller/install/armadaserver_controller_test.go @@ -5,6 +5,8 @@ import ( "testing" "time" + "github.com/armadaproject/armada-operator/internal/controller/builders" + "k8s.io/utils/ptr" "github.com/golang/mock/gomock" @@ -72,7 +74,11 @@ func TestArmadaServerReconciler_Reconcile(t *testing.T) { }, } - expectedComponents, err := generateArmadaServerInstallComponents(&expectedAS, scheme) + commonConfig, err := builders.ParseCommonApplicationConfig(expectedAS.Spec.ApplicationConfig) + if err != nil { + t.Fatalf("should not return error when parsing common application config") + } + expectedComponents, err := generateArmadaServerInstallComponents(&expectedAS, scheme, commonConfig) require.NoError(t, err) mockK8sClient := k8sclient.NewMockClient(mockCtrl) @@ -403,12 +409,16 @@ func TestSchedulerReconciler_createIngress_EmptyHosts(t *testing.T) { t.Parallel() input := v1alpha1.ArmadaServer{} - ingress, err := createIngressHttp(&input) + commonConfig, err := builders.ParseCommonApplicationConfig(input.Spec.ApplicationConfig) + if err != nil { + t.Fatalf("should not return error when parsing common application config") + } + ingress, err := createServerIngressHTTP(&input, commonConfig) // expect no error and nil ingress with empty hosts slice assert.NoError(t, err) assert.Nil(t, ingress) - ingress, err = createIngressGrpc(&input) + ingress, err = createServerIngressGRPC(&input, commonConfig) // expect no error and nil ingress with empty hosts slice assert.NoError(t, err) assert.Nil(t, ingress) @@ -435,12 +445,16 @@ func TestSchedulerReconciler_createIngress(t *testing.T) { HostNames: []string{"localhost"}, }, } - ingress, err := createIngressHttp(&input) + commonConfig, err := builders.ParseCommonApplicationConfig(input.Spec.ApplicationConfig) + if err != nil { + t.Fatalf("should not return error when parsing common application config") + } + ingress, err := createServerIngressHTTP(&input, commonConfig) // expect no error and not-nil ingress assert.NoError(t, err) assert.NotNil(t, ingress) - ingress, err = createIngressGrpc(&input) + ingress, err = createServerIngressGRPC(&input, commonConfig) // expect no error and not-nil ingress assert.NoError(t, err) assert.NotNil(t, ingress) diff --git a/internal/controller/install/binoculars_controller.go b/internal/controller/install/binoculars_controller.go index 382b4c75..d7c80a0d 100644 --- a/internal/controller/install/binoculars_controller.go +++ b/internal/controller/install/binoculars_controller.go @@ -20,8 +20,6 @@ import ( "context" "time" - "k8s.io/utils/ptr" - "github.com/pkg/errors" installv1alpha1 "github.com/armadaproject/armada-operator/api/install/v1alpha1" @@ -71,14 +69,13 @@ func (r *BinocularsReconciler) Reconcile(ctx context.Context, req ctrl.Request) return ctrl.Result{}, err } - pc, err := installv1alpha1.BuildPortConfig(binoculars.Spec.ApplicationConfig) + 
commonConfig, err := builders.ParseCommonApplicationConfig(binoculars.Spec.ApplicationConfig) if err != nil { return ctrl.Result{}, err } - binoculars.Spec.PortConfig = pc var components *CommonComponents - components, err = generateBinocularsInstallComponents(&binoculars, r.Scheme) + components, err = generateBinocularsInstallComponents(&binoculars, r.Scheme, commonConfig) if err != nil { return ctrl.Result{}, err } @@ -139,7 +136,7 @@ func (r *BinocularsReconciler) Reconcile(ctx context.Context, req ctrl.Request) return ctrl.Result{}, nil } -func generateBinocularsInstallComponents(binoculars *installv1alpha1.Binoculars, scheme *runtime.Scheme) (*CommonComponents, error) { +func generateBinocularsInstallComponents(binoculars *installv1alpha1.Binoculars, scheme *runtime.Scheme, config *builders.CommonApplicationConfig) (*CommonComponents, error) { secret, err := builders.CreateSecret(binoculars.Spec.ApplicationConfig, binoculars.Name, binoculars.Namespace, GetConfigFilename(binoculars.Name)) if err != nil { return nil, errors.WithStack(err) @@ -150,84 +147,92 @@ func generateBinocularsInstallComponents(binoculars *installv1alpha1.Binoculars, var serviceAccount *corev1.ServiceAccount serviceAccountName := binoculars.Spec.CustomServiceAccount if serviceAccountName == "" { - serviceAccount = builders.CreateServiceAccount(binoculars.Name, binoculars.Namespace, AllLabels(binoculars.Name, binoculars.Labels), binoculars.Spec.ServiceAccount) + serviceAccount = builders.ServiceAccount( + binoculars.Name, binoculars.Namespace, AllLabels(binoculars.Name, binoculars.Labels), binoculars.Spec.ServiceAccount, + ) if err = controllerutil.SetOwnerReference(binoculars, serviceAccount, scheme); err != nil { return nil, errors.WithStack(err) } serviceAccountName = serviceAccount.Name } - deployment, err := createBinocularsDeployment(binoculars, secret, serviceAccountName) + deployment, err := createBinocularsDeployment(binoculars, secret, serviceAccountName, config) if err != nil { return nil, errors.WithStack(err) } if err = controllerutil.SetOwnerReference(binoculars, deployment, scheme); err != nil { return nil, errors.WithStack(err) } - service := builders.Service(binoculars.Name, binoculars.Namespace, AllLabels(binoculars.Name, binoculars.Labels), IdentityLabel(binoculars.Name), binoculars.Spec.PortConfig) + service := builders.Service( + binoculars.Name, + binoculars.Namespace, + AllLabels(binoculars.Name, binoculars.Labels), + IdentityLabel(binoculars.Name), + config, + builders.ServiceEnableApplicationPortsOnly, + ) if err = controllerutil.SetOwnerReference(binoculars, service, scheme); err != nil { return nil, errors.WithStack(err) } + profilingService, ingressProfiling, err := newProfilingComponents( + binoculars, + scheme, + config, + binoculars.Spec.ProfilingIngressConfig, + ) + if err != nil { + return nil, errors.WithStack(err) + } - ingress, err := createBinocularsIngressHttp(binoculars) + ingressHTTP, err := createBinocularsIngressHttp(binoculars, config) if err != nil { return nil, errors.WithStack(err) } - if err = controllerutil.SetOwnerReference(binoculars, ingress, scheme); err != nil { + if err = controllerutil.SetOwnerReference(binoculars, ingressHTTP, scheme); err != nil { return nil, errors.WithStack(err) } - ingressGrpc, err := createBinocularsIngressGrpc(binoculars) + ingressGRPC, err := createBinocularsIngressGrpc(binoculars, config) if err != nil { return nil, errors.WithStack(err) } - if err = controllerutil.SetOwnerReference(binoculars, ingressGrpc, scheme); err != nil { + if 
err = controllerutil.SetOwnerReference(binoculars, ingressGRPC, scheme); err != nil { return nil, errors.WithStack(err) } clusterRole := createBinocularsClusterRole(binoculars) - clusterRoleBinding := generateBinocularsClusterRoleBinding(*binoculars) + clusterRoleBinding := generateBinocularsClusterRoleBinding(binoculars) return &CommonComponents{ Deployment: deployment, Service: service, + ServiceProfiling: profilingService, ServiceAccount: serviceAccount, Secret: secret, ClusterRole: clusterRole, ClusterRoleBindings: []*rbacv1.ClusterRoleBinding{clusterRoleBinding}, - IngressGrpc: ingressGrpc, - IngressHttp: ingress, + IngressGrpc: ingressGRPC, + IngressHttp: ingressHTTP, + IngressProfiling: ingressProfiling, }, nil } // Function to build the deployment object for Binoculars. // This should be changing from CRD to CRD. Not sure if generalizing this helps much -func createBinocularsDeployment(binoculars *installv1alpha1.Binoculars, secret *corev1.Secret, serviceAccountName string) (*appsv1.Deployment, error) { +func createBinocularsDeployment( + binoculars *installv1alpha1.Binoculars, + secret *corev1.Secret, + serviceAccountName string, + commonConfig *builders.CommonApplicationConfig, +) (*appsv1.Deployment, error) { env := createEnv(binoculars.Spec.Environment) volumes := createVolumes(binoculars.Name, binoculars.Spec.AdditionalVolumes) volumeMounts := createVolumeMounts(GetConfigFilename(secret.Name), binoculars.Spec.AdditionalVolumeMounts) - ports := []corev1.ContainerPort{ - { - Name: "metrics", - ContainerPort: binoculars.Spec.PortConfig.MetricsPort, - Protocol: "TCP", - }, - { - Name: "http", - ContainerPort: binoculars.Spec.PortConfig.HttpPort, - Protocol: "TCP", - }, - { - Name: "grpc", - ContainerPort: binoculars.Spec.PortConfig.GrpcPort, - Protocol: "TCP", - }, - } containers := []corev1.Container{{ Name: "binoculars", - ImagePullPolicy: "IfNotPresent", + ImagePullPolicy: corev1.PullIfNotPresent, Image: ImageString(binoculars.Spec.Image), Args: []string{appConfigFlag, appConfigFilepath}, - Ports: ports, + Ports: newContainerPortsAll(commonConfig), Env: env, VolumeMounts: volumeMounts, SecurityContext: binoculars.Spec.SecurityContext, @@ -250,25 +255,9 @@ func createBinocularsDeployment(binoculars *installv1alpha1.Binoculars, secret * ServiceAccountName: serviceAccountName, TerminationGracePeriodSeconds: binoculars.DeletionGracePeriodSeconds, SecurityContext: binoculars.Spec.PodSecurityContext, - Affinity: &corev1.Affinity{ - PodAffinity: &corev1.PodAffinity{ - PreferredDuringSchedulingIgnoredDuringExecution: []corev1.WeightedPodAffinityTerm{{ - Weight: 100, - PodAffinityTerm: corev1.PodAffinityTerm{ - TopologyKey: "kubernetes.io/hostname", - LabelSelector: &metav1.LabelSelector{ - MatchExpressions: []metav1.LabelSelectorRequirement{{ - Key: "app", - Operator: metav1.LabelSelectorOpIn, - Values: []string{binoculars.Name}, - }}, - }, - }, - }}, - }, - }, - Containers: containers, - Volumes: volumes, + Affinity: defaultAffinity(binoculars.Name, 100), + Containers: containers, + Volumes: volumes, }, }, }, @@ -301,7 +290,7 @@ func createBinocularsClusterRole(binoculars *installv1alpha1.Binoculars) *rbacv1 return &clusterRole } -func generateBinocularsClusterRoleBinding(binoculars installv1alpha1.Binoculars) *rbacv1.ClusterRoleBinding { +func generateBinocularsClusterRoleBinding(binoculars *installv1alpha1.Binoculars) *rbacv1.ClusterRoleBinding { clusterRoleBinding := rbacv1.ClusterRoleBinding{ ObjectMeta: metav1.ObjectMeta{ Name: binoculars.Name, @@ -331,116 +320,45 @@ func (r 
*BinocularsReconciler) deleteExternalResources(ctx context.Context, comp return nil } -func createBinocularsIngressGrpc(binoculars *installv1alpha1.Binoculars) (*networking.Ingress, error) { +func createBinocularsIngressGrpc(binoculars *installv1alpha1.Binoculars, config *builders.CommonApplicationConfig) (*networking.Ingress, error) { if len(binoculars.Spec.HostNames) == 0 { - // when no hostnames provided, no ingress can be configured + // when no hostnames, no ingress can be configured return nil, nil } - - grpcIngressName := binoculars.Name + "-grpc" - - grpcIngress := &networking.Ingress{ - ObjectMeta: metav1.ObjectMeta{Name: grpcIngressName, Namespace: binoculars.Namespace, Labels: AllLabels(binoculars.Name, binoculars.Labels), - Annotations: map[string]string{ - "kubernetes.io/ingress.class": binoculars.Spec.Ingress.IngressClass, - "nginx.ingress.kubernetes.io/ssl-redirect": "true", - "nginx.ingress.kubernetes.io/backend-protocol": "GRPC", - }, - }, - } - - if binoculars.Spec.ClusterIssuer != "" { - grpcIngress.ObjectMeta.Annotations["certmanager.k8s.io/cluster-issuer"] = binoculars.Spec.ClusterIssuer - grpcIngress.ObjectMeta.Annotations["cert-manager.io/cluster-issuer"] = binoculars.Spec.ClusterIssuer - } - - if binoculars.Spec.Ingress.Annotations != nil { - for key, value := range binoculars.Spec.Ingress.Annotations { - grpcIngress.ObjectMeta.Annotations[key] = value - } + name := binoculars.Name + "-grpc" + labels := AllLabels(binoculars.Name, binoculars.Spec.Labels, binoculars.Spec.Ingress.Labels) + baseAnnotations := map[string]string{ + "nginx.ingress.kubernetes.io/ssl-redirect": "true", } - grpcIngress.ObjectMeta.Labels = AllLabels(binoculars.Name, binoculars.Spec.Labels, binoculars.Spec.Ingress.Labels) + annotations := buildIngressAnnotations(binoculars.Spec.Ingress, baseAnnotations, BackendProtocolGRPC, config.GRPC.Enabled) secretName := binoculars.Name + "-service-tls" - grpcIngress.Spec.TLS = []networking.IngressTLS{{Hosts: binoculars.Spec.HostNames, SecretName: secretName}} - var ingressRules []networking.IngressRule - serviceName := "armada" + "-" + binoculars.Name - for _, val := range binoculars.Spec.HostNames { - ingressRules = append(ingressRules, networking.IngressRule{Host: val, IngressRuleValue: networking.IngressRuleValue{ - HTTP: &networking.HTTPIngressRuleValue{ - Paths: []networking.HTTPIngressPath{{ - Path: "/", - PathType: (*networking.PathType)(ptr.To[string]("ImplementationSpecific")), - Backend: networking.IngressBackend{ - Service: &networking.IngressServiceBackend{ - Name: serviceName, - Port: networking.ServiceBackendPort{ - Number: binoculars.Spec.PortConfig.GrpcPort, - }, - }, - }, - }}, - }, - }}) - } - grpcIngress.Spec.Rules = ingressRules - - return grpcIngress, nil + serviceName := binoculars.Name + servicePort := config.GRPCPort + path := "/" + ingress, err := builders.Ingress(name, binoculars.Namespace, labels, annotations, binoculars.Spec.HostNames, serviceName, secretName, path, servicePort) + return ingress, errors.WithStack(err) } -func createBinocularsIngressHttp(binoculars *installv1alpha1.Binoculars) (*networking.Ingress, error) { +func createBinocularsIngressHttp(binoculars *installv1alpha1.Binoculars, config *builders.CommonApplicationConfig) (*networking.Ingress, error) { if len(binoculars.Spec.HostNames) == 0 { - // when no hostnames provided, no ingress can be configured + // when no hostnames, no ingress can be configured return nil, nil } - restIngressName := binoculars.Name + "-rest" - restIngress := &networking.Ingress{ - 
ObjectMeta: metav1.ObjectMeta{Name: restIngressName, Namespace: binoculars.Namespace, Labels: AllLabels(binoculars.Name, binoculars.Labels), - Annotations: map[string]string{ - "kubernetes.io/ingress.class": binoculars.Spec.Ingress.IngressClass, - "nginx.ingress.kubernetes.io/rewrite-target": "/$2", - "nginx.ingress.kubernetes.io/ssl-redirect": "true", - }, - }, + name := binoculars.Name + "-rest" + labels := AllLabels(binoculars.Name, binoculars.Spec.Labels, binoculars.Spec.Ingress.Labels) + baseAnnotations := map[string]string{ + "nginx.ingress.kubernetes.io/rewrite-target": "/$2", + "nginx.ingress.kubernetes.io/ssl-redirect": "true", } - - if binoculars.Spec.ClusterIssuer != "" { - restIngress.ObjectMeta.Annotations["certmanager.k8s.io/cluster-issuer"] = binoculars.Spec.ClusterIssuer - restIngress.ObjectMeta.Annotations["cert-manager.io/cluster-issuer"] = binoculars.Spec.ClusterIssuer - } - - if binoculars.Spec.Ingress.Annotations != nil { - for key, value := range binoculars.Spec.Ingress.Annotations { - restIngress.ObjectMeta.Annotations[key] = value - } - } - restIngress.ObjectMeta.Labels = AllLabels(binoculars.Name, binoculars.Spec.Labels, binoculars.Spec.Ingress.Labels) + annotations := buildIngressAnnotations(binoculars.Spec.Ingress, baseAnnotations, BackendProtocolHTTP, config.GRPC.Enabled) secretName := binoculars.Name + "-service-tls" - restIngress.Spec.TLS = []networking.IngressTLS{{Hosts: binoculars.Spec.HostNames, SecretName: secretName}} - var ingressRules []networking.IngressRule serviceName := binoculars.Name - for _, val := range binoculars.Spec.HostNames { - ingressRules = append(ingressRules, networking.IngressRule{Host: val, IngressRuleValue: networking.IngressRuleValue{ - HTTP: &networking.HTTPIngressRuleValue{ - Paths: []networking.HTTPIngressPath{{ - Path: "/api(/|$)(.*)", - PathType: (*networking.PathType)(ptr.To[string]("ImplementationSpecific")), - Backend: networking.IngressBackend{ - Service: &networking.IngressServiceBackend{ - Name: serviceName, - Port: networking.ServiceBackendPort{ - Number: binoculars.Spec.PortConfig.HttpPort, - }, - }, - }, - }}, - }, - }}) - } - restIngress.Spec.Rules = ingressRules - - return restIngress, nil + servicePort := config.HTTPPort + path := "/api(/|$)(.*)" + ingress, err := builders.Ingress(name, binoculars.Namespace, labels, annotations, binoculars.Spec.HostNames, serviceName, secretName, path, servicePort) + return ingress, errors.WithStack(err) } // SetupWithManager sets up the controller with the Manager. 
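Both the server and Binoculars controllers now delegate the previously copy-pasted Ingress assembly to the shared builders.Ingress shown earlier, which fans a single path and backend out across every hostname under one TLS entry. A hypothetical direct call, with all values illustrative:

	ing, err := builders.Ingress(
		"binoculars-rest",        // Ingress name
		"armada",                 // namespace
		map[string]string{"app": "binoculars"},
		map[string]string{"nginx.ingress.kubernetes.io/ssl-redirect": "true"},
		[]string{"binoculars.example.com", "binoculars.example.org"},
		"binoculars",             // backing Service
		"binoculars-service-tls", // TLS secret covering both hosts
		"/",                      // path served on every host
		8080,                     // Service port to route to
	)
	if err != nil {
		return err // empty hostname list or non-positive port
	}
	_ = ing // two rules (one per host) sharing a single TLS block

Note that the builder itself errors on an empty hostname list, so each caller keeps its len(HostNames) == 0 guard and returns nil, nil first to preserve the ingress-is-optional behavior.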
diff --git a/internal/controller/install/binoculars_controller_test.go b/internal/controller/install/binoculars_controller_test.go index e0fa87fa..1049608b 100644 --- a/internal/controller/install/binoculars_controller_test.go +++ b/internal/controller/install/binoculars_controller_test.go @@ -5,6 +5,8 @@ import ( "testing" "time" + "github.com/armadaproject/armada-operator/internal/controller/builders" + "k8s.io/utils/ptr" "github.com/stretchr/testify/assert" @@ -105,12 +107,15 @@ func TestBinoculars_GenerateBinocularsInstallComponents(t *testing.T) { t.Run(name, func(t *testing.T) { t.Parallel() - _, err := generateBinocularsInstallComponents(tt.binoculars, scheme) + commonConfig, err := builders.ParseCommonApplicationConfig(tt.binoculars.Spec.ApplicationConfig) if tt.expectedError { assert.Error(t, err) + return } else { assert.NoError(t, err) } + _, err = generateBinocularsInstallComponents(tt.binoculars, scheme, commonConfig) + assert.NoError(t, err) }) } } @@ -156,7 +161,11 @@ func TestBinocularsReconciler_Reconcile(t *testing.T) { }, } - binoculars, err := generateBinocularsInstallComponents(&expectedBinoculars, scheme) + commonConfig, err := builders.ParseCommonApplicationConfig(expectedBinoculars.Spec.ApplicationConfig) + if err != nil { + t.Fatalf("should not return error when parsing common application config") + } + binoculars, err := generateBinocularsInstallComponents(&expectedBinoculars, scheme, commonConfig) if err != nil { t.Fatal("We should not fail on generating binoculars") } @@ -447,12 +456,16 @@ func TestSchedulerReconciler_createBinolcularsIngress_EmptyHosts(t *testing.T) { t.Parallel() input := v1alpha1.Binoculars{} - ingress, err := createBinocularsIngressHttp(&input) + commonConfig, err := builders.ParseCommonApplicationConfig(input.Spec.ApplicationConfig) + if err != nil { + t.Fatalf("should not return error when parsing common application config") + } + ingress, err := createBinocularsIngressHttp(&input, commonConfig) // expect no error and nil ingress with empty hosts slice assert.NoError(t, err) assert.Nil(t, ingress) - ingress, err = createBinocularsIngressGrpc(&input) + ingress, err = createBinocularsIngressGrpc(&input, commonConfig) // expect no error and nil ingress with empty hosts slice assert.NoError(t, err) assert.Nil(t, ingress) @@ -479,12 +492,16 @@ func TestSchedulerReconciler_createBinocularsIngress(t *testing.T) { HostNames: []string{"localhost"}, }, } - ingress, err := createBinocularsIngressHttp(&input) + commonConfig, err := builders.ParseCommonApplicationConfig(input.Spec.ApplicationConfig) + if err != nil { + t.Fatalf("should not return error when parsing common application config") + } + ingress, err := createBinocularsIngressHttp(&input, commonConfig) // expect no error and not-nil ingress assert.NoError(t, err) assert.NotNil(t, ingress) - ingress, err = createBinocularsIngressGrpc(&input) + ingress, err = createBinocularsIngressGrpc(&input, commonConfig) // expect no error and not-nil ingress assert.NoError(t, err) assert.NotNil(t, ingress) diff --git a/internal/controller/install/common_helpers.go b/internal/controller/install/common_helpers.go index 3664c6fa..6d5f3288 100644 --- a/internal/controller/install/common_helpers.go +++ b/internal/controller/install/common_helpers.go @@ -8,6 +8,9 @@ import ( "reflect" "time" + "github.com/pkg/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/intstr" "k8s.io/apimachinery/pkg/util/wait" "github.com/go-logr/logr" @@ -51,7 +54,9 @@ type CommonComponents struct 
{ Deployment *appsv1.Deployment IngressGrpc *networkingv1.Ingress IngressHttp *networkingv1.Ingress + IngressProfiling *networkingv1.Ingress Service *corev1.Service + ServiceProfiling *corev1.Service ServiceAccount *corev1.ServiceAccount Secret *corev1.Secret ClusterRole *rbacv1.ClusterRole @@ -64,50 +69,6 @@ type CommonComponents struct { CronJob *batchv1.CronJob } -// PostgresConfig is used for scanning postgres section of application config -type PostgresConfig struct { - Connection ConnectionConfig -} - -// ConnectionConfig is used for scanning connection section of postgres config -type ConnectionConfig struct { - Host string - Port string - User string - Password string - Dbname string -} - -// PulsarConfig is used for scanning pulsar section of application config -type PulsarConfig struct { - ArmadaInit ArmadaInit - AuthenticationEnabled bool - TlsEnabled bool - AuthenticationSecret string - Cacert string -} - -// ArmadaInit used to initialize pulsar -type ArmadaInit struct { - Enabled bool - Image Image - BrokerHost string - Protocol string - AdminPort int - Port int -} - -// Image represents a docker image -type Image struct { - Repository string - Tag string -} - -// AppConfig is used for scanning the appconfig to find particular values -type AppConfig struct { - Pulsar PulsarConfig -} - // CleanupFunc is a function that will clean up additional resources which are not deleted by owner references. type CleanupFunc func(context.Context) error @@ -154,6 +115,7 @@ func (cc *CommonComponents) ReconcileComponents(newComponents *CommonComponents) cc.Deployment.Spec = newComponents.Deployment.Spec cc.Deployment.Labels = newComponents.Deployment.Labels cc.Deployment.Annotations = newComponents.Deployment.Annotations + if newComponents.Service != nil { cc.Service.Spec = newComponents.Service.Spec cc.Service.Labels = newComponents.Service.Labels @@ -162,6 +124,14 @@ func (cc *CommonComponents) ReconcileComponents(newComponents *CommonComponents) cc.Service = nil } + if newComponents.ServiceProfiling != nil { + cc.ServiceProfiling.Spec = newComponents.ServiceProfiling.Spec + cc.ServiceProfiling.Labels = newComponents.ServiceProfiling.Labels + cc.ServiceProfiling.Annotations = newComponents.ServiceProfiling.Annotations + } else { + cc.ServiceProfiling = nil + } + if newComponents.ServiceAccount != nil { cc.ServiceAccount.Labels = newComponents.ServiceAccount.Labels cc.ServiceAccount.Annotations = newComponents.ServiceAccount.Annotations @@ -196,6 +166,14 @@ func (cc *CommonComponents) ReconcileComponents(newComponents *CommonComponents) cc.IngressHttp = nil } + if newComponents.IngressProfiling != nil { + cc.IngressProfiling.Spec = newComponents.IngressProfiling.Spec + cc.IngressProfiling.Labels = newComponents.IngressProfiling.Labels + cc.IngressProfiling.Annotations = newComponents.IngressProfiling.Annotations + } else { + cc.IngressProfiling = nil + } + if newComponents.PodDisruptionBudget != nil { cc.PodDisruptionBudget.Spec = newComponents.PodDisruptionBudget.Spec cc.PodDisruptionBudget.Labels = newComponents.PodDisruptionBudget.Labels @@ -220,6 +198,50 @@ func (cc *CommonComponents) ReconcileComponents(newComponents *CommonComponents) } } +// PostgresConfig is used for scanning postgres section of application config +type PostgresConfig struct { + Connection ConnectionConfig +} + +// ConnectionConfig is used for scanning connection section of postgres config +type ConnectionConfig struct { + Host string + Port string + User string + Password string + Dbname string +} + +// PulsarConfig 
is used for scanning pulsar section of application config
+type PulsarConfig struct {
+	ArmadaInit            ArmadaInit
+	AuthenticationEnabled bool
+	TlsEnabled            bool
+	AuthenticationSecret  string
+	Cacert                string
+}
+
+// ArmadaInit is used to initialize pulsar
+type ArmadaInit struct {
+	Enabled    bool
+	Image      Image
+	BrokerHost string
+	Protocol   string
+	AdminPort  int
+	Port       int
+}
+
+// Image represents a docker image
+type Image struct {
+	Repository string
+	Tag        string
+}
+
+// AppConfig is used for scanning the appconfig to find particular values
+type AppConfig struct {
+	Pulsar PulsarConfig
+}
+
 // ImageString generates a docker image.
 func ImageString(image installv1alpha1.Image) string {
 	return fmt.Sprintf("%s:%s", image.Repository, image.Tag)
@@ -441,6 +463,41 @@ func addGoMemLimit(env []corev1.EnvVar, resources corev1.ResourceRequirements) [
 	return env
 }
 
+type BackendProtocol string
+
+const (
+	BackendProtocolGRPC BackendProtocol = "GRPC"
+	BackendProtocolHTTP BackendProtocol = "HTTP"
+)
+
+// buildIngressAnnotations assembles the annotation set for an Ingress: the ingress class and nginx
+// backend-protocol annotations, the given base annotations, the cert-manager cluster-issuer
+// annotations, and finally any user-provided annotations, which take precedence.
+func buildIngressAnnotations(
+	ingressConfig *installv1alpha1.IngressConfig,
+	baseAnnotations map[string]string,
+	protocol BackendProtocol,
+	useTLS bool,
+) map[string]string {
+	annotations := map[string]string{
+		"kubernetes.io/ingress.class": ingressConfig.IngressClass,
+	}
+	if useTLS {
+		annotations["nginx.ingress.kubernetes.io/backend-protocol"] = string(protocol) + "S"
+		annotations["nginx.ingress.kubernetes.io/ssl-passthrough"] = "true"
+	} else {
+		annotations["nginx.ingress.kubernetes.io/backend-protocol"] = string(protocol)
+	}
+	for key, value := range baseAnnotations {
+		annotations[key] = value
+	}
+	if ingressConfig.ClusterIssuer != "" {
+		annotations["certmanager.k8s.io/cluster-issuer"] = ingressConfig.ClusterIssuer
+		annotations["cert-manager.io/cluster-issuer"] = ingressConfig.ClusterIssuer
+	}
+	for key, value := range ingressConfig.Annotations {
+		annotations[key] = value
+	}
+	return annotations
+}
+
 // checkAndHandleObjectDeletion handles the deletion of the resource by adding/removing the finalizer.
 // If the resource is being deleted, it will remove the finalizer.
 // If the resource is not being deleted, it will add the finalizer.
@@ -572,3 +629,181 @@ func getObject(
 	}
 	return false, nil
 }
+
+// newProfilingComponents creates the profiling Service and, when an ingress config is provided, the
+// profiling Ingress for the given component.
+func newProfilingComponents(
+	object metav1.Object,
+	scheme *runtime.Scheme,
+	commonConfig *builders.CommonApplicationConfig,
+	ingressConfig *installv1alpha1.IngressConfig,
+) (*corev1.Service, *networkingv1.Ingress, error) {
+	profilingService, err := newProfilingService(object, commonConfig, scheme)
+	if err != nil {
+		return nil, nil, errors.Wrap(err, "error creating profiling service")
+	}
+	profilingIngress, err := newProfilingIngress(object, commonConfig, ingressConfig, scheme)
+	if err != nil {
+		return nil, nil, errors.Wrap(err, "error creating profiling ingress")
+	}
+
+	return profilingService, profilingIngress, nil
+}
+
+// newProfilingService creates a new Kubernetes Service for the profiling server and sets the owner reference to the parent object.
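+// The Service selects the component's pods via IdentityLabel and exposes only the profiling port
+// (builders.ServiceEnableProfilingPortOnly).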
+func newProfilingService( + object metav1.Object, + commonConfig *builders.CommonApplicationConfig, + scheme *runtime.Scheme, +) (*corev1.Service, error) { + profilingService := builders.Service( + object.GetName()+"-profiling", + object.GetNamespace(), + AllLabels(object.GetName(), object.GetLabels()), + IdentityLabel(object.GetName()), + commonConfig, + builders.ServiceEnableProfilingPortOnly, + ) + if err := controllerutil.SetOwnerReference(object, profilingService, scheme); err != nil { + return nil, err + } + + return profilingService, nil +} + +// newProfilingIngress creates a new Kubernetes Ingress for the profiling server and sets the owner reference to the parent object. +func newProfilingIngress( + object metav1.Object, + commonConfig *builders.CommonApplicationConfig, + ingressConfig *installv1alpha1.IngressConfig, + scheme *runtime.Scheme, +) (*networkingv1.Ingress, error) { + if ingressConfig == nil { + return nil, nil + } + baseAnnotations := map[string]string{ + "nginx.ingress.kubernetes.io/ssl-redirect": "true", + } + annotations := buildIngressAnnotations(ingressConfig, baseAnnotations, BackendProtocolHTTP, false) + secretName := object.GetName() + "-service-tls" + serviceName := object.GetName() + servicePort := commonConfig.HTTPPort + path := "/" + profilingIngress, err := builders.Ingress( + object.GetName()+"-profiling", + object.GetNamespace(), + AllLabels(object.GetName(), object.GetLabels(), ingressConfig.Labels), + annotations, + ingressConfig.Hostnames, + serviceName, + secretName, + path, + servicePort, + ) + if err != nil { + return nil, errors.WithStack(err) + } + if err := controllerutil.SetOwnerReference(object, profilingIngress, scheme); err != nil { + return nil, err + } + + return profilingIngress, nil +} + +// newContainerPortsAll creates container ports for grpc, http and metrics server and optional port for profiling server. +func newContainerPortsAll(config *builders.CommonApplicationConfig) []corev1.ContainerPort { + ports := []corev1.ContainerPort{newContainerPortGRPC(config), newContainerPortHTTP(config), newContainerPortMetrics(config)} + if config.Profiling.Port > 0 { + ports = append(ports, newContainerPortProfiling(config)) + } + return ports +} + +func newContainerPortsHTTPWithMetrics(config *builders.CommonApplicationConfig) []corev1.ContainerPort { + ports := []corev1.ContainerPort{newContainerPortHTTP(config), newContainerPortMetrics(config)} + if config.Profiling.Port > 0 { + ports = append(ports, newContainerPortProfiling(config)) + } + return ports +} + +// newContainerPortsGRPCWithMetrics creates container ports for grpc and metrics server and optional port for profiling server. +func newContainerPortsGRPCWithMetrics(config *builders.CommonApplicationConfig) []corev1.ContainerPort { + ports := []corev1.ContainerPort{newContainerPortGRPC(config), newContainerPortMetrics(config)} + if config.Profiling.Port > 0 { + ports = append(ports, newContainerPortProfiling(config)) + } + return ports +} + +// newContainerPortsMetrics creates container ports for metrics server and optional port for profiling server. +func newContainerPortsMetrics(config *builders.CommonApplicationConfig) []corev1.ContainerPort { + ports := []corev1.ContainerPort{newContainerPortMetrics(config)} + if config.Profiling.Port > 0 { + ports = append(ports, newContainerPortProfiling(config)) + } + return ports +} + +// newContainerPortGRPC creates a container port for grpc server from settings defined in builders.CommonApplicationConfig. 
+func newContainerPortGRPC(config *builders.CommonApplicationConfig) corev1.ContainerPort {
+	return corev1.ContainerPort{
+		Name:          "grpc",
+		ContainerPort: config.GRPCPort,
+		Protocol:      corev1.ProtocolTCP,
+	}
+}
+
+// newContainerPortHTTP creates a container port for http server from settings defined in builders.CommonApplicationConfig.
+func newContainerPortHTTP(config *builders.CommonApplicationConfig) corev1.ContainerPort {
+	return corev1.ContainerPort{
+		Name:          "http",
+		ContainerPort: config.HTTPPort,
+		Protocol:      corev1.ProtocolTCP,
+	}
+}
+
+// newContainerPortMetrics creates a container port for metrics server from settings defined in builders.CommonApplicationConfig.
+func newContainerPortMetrics(config *builders.CommonApplicationConfig) corev1.ContainerPort {
+	return corev1.ContainerPort{
+		Name:          "metrics",
+		ContainerPort: config.MetricsPort,
+		Protocol:      corev1.ProtocolTCP,
+	}
+}
+
+// newContainerPortProfiling creates a container port for the profiling server from settings defined in builders.CommonApplicationConfig.
+func newContainerPortProfiling(config *builders.CommonApplicationConfig) corev1.ContainerPort {
+	return corev1.ContainerPort{
+		Name:          "profiling",
+		ContainerPort: config.Profiling.Port,
+		Protocol:      corev1.ProtocolTCP,
+	}
+}
+
+// defaultAffinity returns a preferred pod affinity which co-locates pods of the given app on the same node.
+func defaultAffinity(app string, weight int32) *corev1.Affinity {
+	return &corev1.Affinity{
+		PodAffinity: &corev1.PodAffinity{
+			PreferredDuringSchedulingIgnoredDuringExecution: []corev1.WeightedPodAffinityTerm{{
+				Weight: weight,
+				PodAffinityTerm: corev1.PodAffinityTerm{
+					TopologyKey: "kubernetes.io/hostname",
+					LabelSelector: &metav1.LabelSelector{
+						MatchExpressions: []metav1.LabelSelectorRequirement{{
+							Key:      "app",
+							Operator: metav1.LabelSelectorOpIn,
+							Values:   []string{app},
+						}},
+					},
+				},
+			}},
+		},
+	}
+}
+
+// defaultDeploymentStrategy returns a rolling-update deployment strategy with the given maxUnavailable.
+func defaultDeploymentStrategy(maxUnavailable int32) appsv1.DeploymentStrategy {
+	return appsv1.DeploymentStrategy{
+		Type: appsv1.RollingUpdateDeploymentStrategyType,
+		RollingUpdate: &appsv1.RollingUpdateDeployment{
+			MaxUnavailable: &intstr.IntOrString{IntVal: maxUnavailable},
+		},
+	}
+}
diff --git a/internal/controller/install/common_helpers_test.go b/internal/controller/install/common_helpers_test.go
index 80ab867e..29833943 100644
--- a/internal/controller/install/common_helpers_test.go
+++ b/internal/controller/install/common_helpers_test.go
@@ -756,7 +756,7 @@ func makeCommonComponents() CommonComponents {
 			Spec: corev1.PodSpec{
 				Containers: []corev1.Container{{
 					Name:            "armadaserver",
-					ImagePullPolicy: "IfNotPresent",
+					ImagePullPolicy: corev1.PullIfNotPresent,
 					Image:           "gresearch/someimage",
 					Args:            []string{appConfigFlag, appConfigFilepath},
 					Ports: []corev1.ContainerPort{{
diff --git a/internal/controller/install/eventingester_controller.go b/internal/controller/install/eventingester_controller.go
index 1ba3e9bb..3e15e439 100644
--- a/internal/controller/install/eventingester_controller.go
+++ b/internal/controller/install/eventingester_controller.go
@@ -26,7 +26,6 @@ import (
 	corev1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/runtime"
-	"k8s.io/apimachinery/pkg/util/intstr"
 	ctrl "sigs.k8s.io/controller-runtime"
 	"sigs.k8s.io/controller-runtime/pkg/client"
 	"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
@@ -64,13 +63,12 @@ func (r *EventIngesterReconciler) Reconcile(ctx context.Context, req ctrl.Reques
 		return ctrl.Result{}, err
 	}
 
-	pc, err := installv1alpha1.BuildPortConfig(eventIngester.Spec.ApplicationConfig)
+	commonConfig, err := builders.ParseCommonApplicationConfig(eventIngester.Spec.ApplicationConfig)
 	if err != nil {
 		return ctrl.Result{}, err
 	}
-	eventIngester.Spec.PortConfig = pc
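+	// commonConfig carries the shared port and profiling settings consumed by the component builders.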
- components, err := r.generateEventIngesterComponents(&eventIngester, r.Scheme) + components, err := r.generateEventIngesterComponents(&eventIngester, r.Scheme, commonConfig) if err != nil { return ctrl.Result{}, err } @@ -111,7 +109,11 @@ func (r *EventIngesterReconciler) SetupWithManager(mgr ctrl.Manager) error { Complete(r) } -func (r *EventIngesterReconciler) generateEventIngesterComponents(eventIngester *installv1alpha1.EventIngester, scheme *runtime.Scheme) (*CommonComponents, error) { +func (r *EventIngesterReconciler) generateEventIngesterComponents( + eventIngester *installv1alpha1.EventIngester, + scheme *runtime.Scheme, + config *builders.CommonApplicationConfig, +) (*CommonComponents, error) { secret, err := builders.CreateSecret(eventIngester.Spec.ApplicationConfig, eventIngester.Name, eventIngester.Namespace, GetConfigFilename(eventIngester.Name)) if err != nil { return nil, err @@ -123,14 +125,14 @@ func (r *EventIngesterReconciler) generateEventIngesterComponents(eventIngester var serviceAccount *corev1.ServiceAccount serviceAccountName := eventIngester.Spec.CustomServiceAccount if serviceAccountName == "" { - serviceAccount = builders.CreateServiceAccount(eventIngester.Name, eventIngester.Namespace, AllLabels(eventIngester.Name, eventIngester.Labels), eventIngester.Spec.ServiceAccount) + serviceAccount = builders.ServiceAccount(eventIngester.Name, eventIngester.Namespace, AllLabels(eventIngester.Name, eventIngester.Labels), eventIngester.Spec.ServiceAccount) if err = controllerutil.SetOwnerReference(eventIngester, serviceAccount, scheme); err != nil { return nil, errors.WithStack(err) } serviceAccountName = serviceAccount.Name } - deployment, err := r.createDeployment(eventIngester, serviceAccountName) + deployment, err := r.createDeployment(eventIngester, serviceAccountName, config) if err != nil { return nil, err } @@ -138,17 +140,30 @@ func (r *EventIngesterReconciler) generateEventIngesterComponents(eventIngester return nil, err } + profilingService, profilingIngress, err := newProfilingComponents( + eventIngester, + scheme, + config, + eventIngester.Spec.ProfilingIngressConfig, + ) + if err != nil { + return nil, errors.WithStack(err) + } + return &CommonComponents{ - Deployment: deployment, - ServiceAccount: serviceAccount, - Secret: secret, + Deployment: deployment, + ServiceAccount: serviceAccount, + Secret: secret, + ServiceProfiling: profilingService, + IngressProfiling: profilingIngress, }, nil } -func (r *EventIngesterReconciler) createDeployment(eventIngester *installv1alpha1.EventIngester, serviceAccountName string) (*appsv1.Deployment, error) { - var runAsUser int64 = 1000 - var runAsGroup int64 = 2000 - allowPrivilegeEscalation := false +func (r *EventIngesterReconciler) createDeployment( + eventIngester *installv1alpha1.EventIngester, + serviceAccountName string, + config *builders.CommonApplicationConfig, +) (*appsv1.Deployment, error) { env := createEnv(eventIngester.Spec.Environment) pulsarConfig, err := ExtractPulsarConfig(eventIngester.Spec.ApplicationConfig) if err != nil { @@ -166,12 +181,7 @@ func (r *EventIngesterReconciler) createDeployment(eventIngester *installv1alpha Selector: &metav1.LabelSelector{ MatchLabels: IdentityLabel(eventIngester.Name), }, - Strategy: appsv1.DeploymentStrategy{ - Type: appsv1.RollingUpdateDeploymentStrategyType, - RollingUpdate: &appsv1.RollingUpdateDeployment{ - MaxUnavailable: &intstr.IntOrString{IntVal: int32(1)}, - }, - }, + Strategy: defaultDeploymentStrategy(1), Template: corev1.PodTemplateSpec{ ObjectMeta: 
metav1.ObjectMeta{ Name: eventIngester.Name, @@ -182,40 +192,17 @@ func (r *EventIngesterReconciler) createDeployment(eventIngester *installv1alpha Spec: corev1.PodSpec{ ServiceAccountName: serviceAccountName, TerminationGracePeriodSeconds: eventIngester.Spec.TerminationGracePeriodSeconds, - SecurityContext: &corev1.PodSecurityContext{ - RunAsUser: &runAsUser, - RunAsGroup: &runAsGroup, - }, - Affinity: &corev1.Affinity{ - PodAffinity: &corev1.PodAffinity{ - PreferredDuringSchedulingIgnoredDuringExecution: []corev1.WeightedPodAffinityTerm{{ - Weight: 100, - PodAffinityTerm: corev1.PodAffinityTerm{ - TopologyKey: "kubernetes.io/hostname", - LabelSelector: &metav1.LabelSelector{ - MatchExpressions: []metav1.LabelSelectorRequirement{{ - Key: "app", - Operator: metav1.LabelSelectorOpIn, - Values: []string{eventIngester.Name}, - }}, - }, - }, - }}, - }, - }, + SecurityContext: eventIngester.Spec.PodSecurityContext, + Affinity: defaultAffinity(eventIngester.Name, 100), Containers: []corev1.Container{{ Name: "eventingester", - ImagePullPolicy: "IfNotPresent", + ImagePullPolicy: corev1.PullIfNotPresent, Image: ImageString(eventIngester.Spec.Image), Args: []string{appConfigFlag, appConfigFilepath}, - Ports: []corev1.ContainerPort{{ - Name: "metrics", - ContainerPort: eventIngester.Spec.PortConfig.MetricsPort, - Protocol: "TCP", - }}, + Ports: newContainerPortsMetrics(config), Env: env, VolumeMounts: volumeMounts, - SecurityContext: &corev1.SecurityContext{AllowPrivilegeEscalation: &allowPrivilegeEscalation}, + SecurityContext: eventIngester.Spec.SecurityContext, }}, NodeSelector: eventIngester.Spec.NodeSelector, Tolerations: eventIngester.Spec.Tolerations, diff --git a/internal/controller/install/executor_controller.go b/internal/controller/install/executor_controller.go index a845cd5c..26d0644f 100644 --- a/internal/controller/install/executor_controller.go +++ b/internal/controller/install/executor_controller.go @@ -89,13 +89,12 @@ func (r *ExecutorReconciler) Reconcile(ctx context.Context, req ctrl.Request) (c return ctrl.Result{}, err } - pc, err := installv1alpha1.BuildPortConfig(executor.Spec.ApplicationConfig) + commonConfig, err := builders.ParseCommonApplicationConfig(executor.Spec.ApplicationConfig) if err != nil { return ctrl.Result{}, err } - executor.Spec.PortConfig = pc - components, err := r.generateExecutorInstallComponents(&executor, r.Scheme) + components, err := r.generateExecutorInstallComponents(&executor, r.Scheme, commonConfig) if err != nil { return ctrl.Result{}, err } @@ -160,7 +159,11 @@ func (r *ExecutorReconciler) Reconcile(ctx context.Context, req ctrl.Request) (c return ctrl.Result{}, nil } -func (r *ExecutorReconciler) generateExecutorInstallComponents(executor *installv1alpha1.Executor, scheme *runtime.Scheme) (*CommonComponents, error) { +func (r *ExecutorReconciler) generateExecutorInstallComponents( + executor *installv1alpha1.Executor, + scheme *runtime.Scheme, + config *builders.CommonApplicationConfig, +) (*CommonComponents, error) { secret, err := builders.CreateSecret(executor.Spec.ApplicationConfig, executor.Name, executor.Namespace, GetConfigFilename(executor.Name)) if err != nil { return nil, errors.WithStack(err) @@ -171,17 +174,24 @@ func (r *ExecutorReconciler) generateExecutorInstallComponents(executor *install var serviceAccount *corev1.ServiceAccount serviceAccountName := executor.Spec.CustomServiceAccount if serviceAccountName == "" { - serviceAccount = builders.CreateServiceAccount(executor.Name, executor.Namespace, AllLabels(executor.Name, 
executor.Labels), executor.Spec.ServiceAccount) + serviceAccount = builders.ServiceAccount(executor.Name, executor.Namespace, AllLabels(executor.Name, executor.Labels), executor.Spec.ServiceAccount) if err = controllerutil.SetOwnerReference(executor, serviceAccount, scheme); err != nil { return nil, errors.WithStack(err) } serviceAccountName = serviceAccount.Name } - deployment := r.createDeployment(executor, serviceAccountName) + deployment := r.createDeployment(executor, serviceAccountName, config) if err = controllerutil.SetOwnerReference(executor, deployment, scheme); err != nil { return nil, errors.WithStack(err) } - service := builders.Service(executor.Name, executor.Namespace, AllLabels(executor.Name, executor.Labels), IdentityLabel(executor.Name), executor.Spec.PortConfig) + service := builders.Service( + executor.Name, + executor.Namespace, + AllLabels(executor.Name, executor.Labels), + IdentityLabel(executor.Name), + config, + builders.ServiceEnableMetricsPortOnly, + ) if err = controllerutil.SetOwnerReference(executor, service, scheme); err != nil { return nil, errors.WithStack(err) } @@ -194,6 +204,16 @@ func (r *ExecutorReconciler) generateExecutorInstallComponents(executor *install clusterRoleBindings = append(clusterRoleBindings, r.createAdditionalClusterRoleBindings(executor, serviceAccountName)...) } + profilingService, profilingIngress, err := newProfilingComponents( + executor, + scheme, + config, + executor.Spec.ProfilingIngressConfig, + ) + if err != nil { + return nil, errors.WithStack(err) + } + components := &CommonComponents{ Deployment: deployment, Service: service, @@ -202,6 +222,8 @@ func (r *ExecutorReconciler) generateExecutorInstallComponents(executor *install ClusterRoleBindings: clusterRoleBindings, PriorityClasses: executor.Spec.PriorityClasses, ClusterRole: clusterRole, + ServiceProfiling: profilingService, + IngressProfiling: profilingIngress, } if executor.Spec.Prometheus != nil && executor.Spec.Prometheus.Enabled { @@ -219,19 +241,15 @@ func (r *ExecutorReconciler) generateExecutorInstallComponents(executor *install return components, nil } -func (r *ExecutorReconciler) createDeployment(executor *installv1alpha1.Executor, serviceAccountName string) *appsv1.Deployment { +func (r *ExecutorReconciler) createDeployment( + executor *installv1alpha1.Executor, + serviceAccountName string, + config *builders.CommonApplicationConfig, +) *appsv1.Deployment { var replicas int32 = 1 - var runAsUser int64 = 1000 - var runAsGroup int64 = 2000 volumes := createVolumes(executor.Name, executor.Spec.AdditionalVolumes) volumeMounts := createVolumeMounts(GetConfigFilename(executor.Name), executor.Spec.AdditionalVolumeMounts) - allowPrivilegeEscalation := false - ports := []corev1.ContainerPort{{ - Name: "metrics", - ContainerPort: executor.Spec.PortConfig.MetricsPort, - Protocol: "TCP", - }} env := []corev1.EnvVar{ { Name: "SERVICE_ACCOUNT", @@ -253,13 +271,13 @@ func (r *ExecutorReconciler) createDeployment(executor *installv1alpha1.Executor env = append(env, executor.Spec.Environment...) 
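+	// Build the executor container with ports derived from the common application config.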
containers := []corev1.Container{{ Name: "executor", - ImagePullPolicy: "IfNotPresent", + ImagePullPolicy: corev1.PullIfNotPresent, Image: ImageString(executor.Spec.Image), Args: []string{appConfigFlag, appConfigFilepath}, - Ports: ports, + Ports: newContainerPortsMetrics(config), Env: env, VolumeMounts: volumeMounts, - SecurityContext: &corev1.SecurityContext{AllowPrivilegeEscalation: &allowPrivilegeEscalation}, + SecurityContext: executor.Spec.SecurityContext, }} deployment := appsv1.Deployment{ ObjectMeta: metav1.ObjectMeta{Name: executor.Name, Namespace: executor.Namespace, Labels: AllLabels(executor.Name, executor.Labels)}, @@ -278,14 +296,11 @@ func (r *ExecutorReconciler) createDeployment(executor *installv1alpha1.Executor Spec: corev1.PodSpec{ ServiceAccountName: serviceAccountName, TerminationGracePeriodSeconds: executor.Spec.TerminationGracePeriodSeconds, - SecurityContext: &corev1.PodSecurityContext{ - RunAsUser: &runAsUser, - RunAsGroup: &runAsGroup, - }, - Containers: containers, - NodeSelector: executor.Spec.NodeSelector, - Tolerations: executor.Spec.Tolerations, - Volumes: volumes, + SecurityContext: executor.Spec.PodSecurityContext, + Containers: containers, + NodeSelector: executor.Spec.NodeSelector, + Tolerations: executor.Spec.Tolerations, + Volumes: volumes, }, }, }, diff --git a/internal/controller/install/lookout_controller.go b/internal/controller/install/lookout_controller.go index 48c0e33d..ef0926e6 100644 --- a/internal/controller/install/lookout_controller.go +++ b/internal/controller/install/lookout_controller.go @@ -19,8 +19,6 @@ import ( "github.com/pkg/errors" - "k8s.io/utils/ptr" - monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1" installv1alpha1 "github.com/armadaproject/armada-operator/api/install/v1alpha1" @@ -71,14 +69,13 @@ func (r *LookoutReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ct return ctrl.Result{}, err } - pc, err := installv1alpha1.BuildPortConfig(lookout.Spec.ApplicationConfig) + commonConfig, err := builders.ParseCommonApplicationConfig(lookout.Spec.ApplicationConfig) if err != nil { return ctrl.Result{}, err } - lookout.Spec.PortConfig = pc var components *CommonComponents - components, err = generateLookoutInstallComponents(&lookout, r.Scheme) + components, err = generateLookoutInstallComponents(&lookout, r.Scheme, commonConfig) if err != nil { return ctrl.Result{}, err } @@ -144,7 +141,11 @@ type LookoutConfig struct { Postgres PostgresConfig } -func generateLookoutInstallComponents(lookout *installv1alpha1.Lookout, scheme *runtime.Scheme) (*CommonComponents, error) { +func generateLookoutInstallComponents( + lookout *installv1alpha1.Lookout, + scheme *runtime.Scheme, + config *builders.CommonApplicationConfig, +) (*CommonComponents, error) { secret, err := builders.CreateSecret(lookout.Spec.ApplicationConfig, lookout.Name, lookout.Namespace, GetConfigFilename(lookout.Name)) if err != nil { return nil, err @@ -156,14 +157,14 @@ func generateLookoutInstallComponents(lookout *installv1alpha1.Lookout, scheme * var serviceAccount *corev1.ServiceAccount serviceAccountName := lookout.Spec.CustomServiceAccount if serviceAccountName == "" { - serviceAccount = builders.CreateServiceAccount(lookout.Name, lookout.Namespace, AllLabels(lookout.Name, lookout.Labels), lookout.Spec.ServiceAccount) + serviceAccount = builders.ServiceAccount(lookout.Name, lookout.Namespace, AllLabels(lookout.Name, lookout.Labels), lookout.Spec.ServiceAccount) if err = controllerutil.SetOwnerReference(lookout, 
serviceAccount, scheme); err != nil { return nil, errors.WithStack(err) } serviceAccountName = serviceAccount.Name } - deployment, err := createLookoutDeployment(lookout, serviceAccountName) + deployment, err := createLookoutDeployment(lookout, serviceAccountName, config) if err != nil { return nil, err } @@ -171,11 +172,28 @@ func generateLookoutInstallComponents(lookout *installv1alpha1.Lookout, scheme * return nil, err } - service := builders.Service(lookout.Name, lookout.Namespace, AllLabels(lookout.Name, lookout.Labels), IdentityLabel(lookout.Name), lookout.Spec.PortConfig) + service := builders.Service( + lookout.Name, + lookout.Namespace, + AllLabels(lookout.Name, lookout.Labels), + IdentityLabel(lookout.Name), + config, + builders.ServiceEnableHTTPWithMetrics, + ) if err := controllerutil.SetOwnerReference(lookout, service, scheme); err != nil { return nil, err } + profilingService, profilingIngress, err := newProfilingComponents( + lookout, + scheme, + config, + lookout.Spec.ProfilingIngressConfig, + ) + if err != nil { + return nil, errors.WithStack(err) + } + var serviceMonitor *monitoringv1.ServiceMonitor if lookout.Spec.Prometheus != nil && lookout.Spec.Prometheus.Enabled { serviceMonitor = createLookoutServiceMonitor(lookout) @@ -203,25 +221,27 @@ func generateLookoutInstallComponents(lookout *installv1alpha1.Lookout, scheme * } } - ingressHttp, err := createLookoutIngressHttp(lookout) + ingressHTTP, err := createLookoutIngressHttp(lookout, config) if err != nil { return nil, err } - if ingressHttp != nil { - if err := controllerutil.SetOwnerReference(lookout, ingressHttp, scheme); err != nil { + if ingressHTTP != nil { + if err := controllerutil.SetOwnerReference(lookout, ingressHTTP, scheme); err != nil { return nil, err } } return &CommonComponents{ - Deployment: deployment, - Service: service, - ServiceAccount: serviceAccount, - Secret: secret, - IngressHttp: ingressHttp, - Jobs: []*batchv1.Job{job}, - ServiceMonitor: serviceMonitor, - CronJob: cronJob, + Deployment: deployment, + Service: service, + ServiceProfiling: profilingService, + ServiceAccount: serviceAccount, + Secret: secret, + IngressHttp: ingressHTTP, + IngressProfiling: profilingIngress, + Jobs: []*batchv1.Job{job}, + ServiceMonitor: serviceMonitor, + CronJob: cronJob, }, nil } @@ -246,10 +266,7 @@ func createLookoutServiceMonitor(lookout *installv1alpha1.Lookout) *monitoringv1 // Function to build the deployment object for Lookout. // This should be changing from CRD to CRD. 
Not sure if generailize this helps much -func createLookoutDeployment(lookout *installv1alpha1.Lookout, serviceAccountName string) (*appsv1.Deployment, error) { - var runAsUser int64 = 1000 - var runAsGroup int64 = 2000 - allowPrivilegeEscalation := false +func createLookoutDeployment(lookout *installv1alpha1.Lookout, serviceAccountName string, config *builders.CommonApplicationConfig) (*appsv1.Deployment, error) { env := createEnv(lookout.Spec.Environment) volumes := createVolumes(lookout.Name, lookout.Spec.AdditionalVolumes) volumeMounts := createVolumeMounts(GetConfigFilename(lookout.Name), lookout.Spec.AdditionalVolumeMounts) @@ -271,52 +288,17 @@ func createLookoutDeployment(lookout *installv1alpha1.Lookout, serviceAccountNam Spec: corev1.PodSpec{ ServiceAccountName: serviceAccountName, TerminationGracePeriodSeconds: lookout.DeletionGracePeriodSeconds, - SecurityContext: &corev1.PodSecurityContext{ - RunAsUser: &runAsUser, - RunAsGroup: &runAsGroup, - }, - Affinity: &corev1.Affinity{ - PodAffinity: &corev1.PodAffinity{ - PreferredDuringSchedulingIgnoredDuringExecution: []corev1.WeightedPodAffinityTerm{{ - Weight: 100, - PodAffinityTerm: corev1.PodAffinityTerm{ - TopologyKey: "kubernetes.io/hostname", - LabelSelector: &metav1.LabelSelector{ - MatchExpressions: []metav1.LabelSelectorRequirement{{ - Key: "app", - Operator: metav1.LabelSelectorOpIn, - Values: []string{lookout.Name}, - }}, - }, - }, - }}, - }, - }, + SecurityContext: lookout.Spec.PodSecurityContext, + Affinity: defaultAffinity(lookout.Name, 100), Containers: []corev1.Container{{ Name: "lookout", - ImagePullPolicy: "IfNotPresent", + ImagePullPolicy: corev1.PullIfNotPresent, Image: ImageString(lookout.Spec.Image), Args: []string{appConfigFlag, appConfigFilepath}, - Ports: []corev1.ContainerPort{ - { - Name: "metrics", - ContainerPort: lookout.Spec.PortConfig.MetricsPort, - Protocol: "TCP", - }, - { - Name: "http", - ContainerPort: lookout.Spec.PortConfig.HttpPort, - Protocol: "TCP", - }, - { - Name: "grpc", - ContainerPort: lookout.Spec.PortConfig.GrpcPort, - Protocol: "TCP", - }, - }, + Ports: newContainerPortsHTTPWithMetrics(config), Env: env, VolumeMounts: volumeMounts, - SecurityContext: &corev1.SecurityContext{AllowPrivilegeEscalation: &allowPrivilegeEscalation}, + SecurityContext: lookout.Spec.SecurityContext, }}, Volumes: volumes, }, @@ -331,70 +313,32 @@ func createLookoutDeployment(lookout *installv1alpha1.Lookout, serviceAccountNam return &deployment, nil } -func createLookoutIngressHttp(lookout *installv1alpha1.Lookout) (*networking.Ingress, error) { +func createLookoutIngressHttp(lookout *installv1alpha1.Lookout, config *builders.CommonApplicationConfig) (*networking.Ingress, error) { if len(lookout.Spec.HostNames) == 0 { // when no hostnames, no ingress can be configured return nil, nil } - ingressName := lookout.Name + "-rest" - ingressHttp := &networking.Ingress{ - ObjectMeta: metav1.ObjectMeta{ - Name: ingressName, Namespace: lookout.Namespace, Labels: AllLabels(lookout.Name, lookout.Labels), - Annotations: map[string]string{ - "kubernetes.io/ingress.class": lookout.Spec.Ingress.IngressClass, - "nginx.ingress.kubernetes.io/ssl-redirect": "true", - }, - }, - } - - if lookout.Spec.ClusterIssuer != "" { - ingressHttp.ObjectMeta.Annotations["certmanager.k8s.io/cluster-issuer"] = lookout.Spec.ClusterIssuer - ingressHttp.ObjectMeta.Annotations["cert-manager.io/cluster-issuer"] = lookout.Spec.ClusterIssuer - } - - if lookout.Spec.Ingress.Annotations != nil { - for key, value := range lookout.Spec.Ingress.Annotations 
{ - ingressHttp.ObjectMeta.Annotations[key] = value - } + name := lookout.Name + "-rest" + labels := AllLabels(lookout.Name, lookout.Spec.Labels, lookout.Spec.Ingress.Labels) + baseAnnotations := map[string]string{ + "nginx.ingress.kubernetes.io/ssl-redirect": "true", } - ingressHttp.ObjectMeta.Labels = AllLabels(lookout.Name, lookout.Spec.Labels, lookout.Spec.Ingress.Labels) + annotations := buildIngressAnnotations(lookout.Spec.Ingress, baseAnnotations, BackendProtocolHTTP, config.GRPC.Enabled) secretName := lookout.Name + "-service-tls" - ingressHttp.Spec.TLS = []networking.IngressTLS{{Hosts: lookout.Spec.HostNames, SecretName: secretName}} - var ingressRules []networking.IngressRule serviceName := lookout.Name - for _, val := range lookout.Spec.HostNames { - ingressRules = append(ingressRules, networking.IngressRule{Host: val, IngressRuleValue: networking.IngressRuleValue{ - HTTP: &networking.HTTPIngressRuleValue{ - Paths: []networking.HTTPIngressPath{{ - Path: "/", - PathType: (*networking.PathType)(ptr.To[string]("Prefix")), - Backend: networking.IngressBackend{ - Service: &networking.IngressServiceBackend{ - Name: serviceName, - Port: networking.ServiceBackendPort{ - Number: lookout.Spec.PortConfig.HttpPort, - }, - }, - }, - }}, - }, - }}) - } - ingressHttp.Spec.Rules = ingressRules - - return ingressHttp, nil + servicePort := config.HTTPPort + path := "/" + ingress, err := builders.Ingress(name, lookout.Namespace, labels, annotations, lookout.Spec.HostNames, serviceName, secretName, path, servicePort) + return ingress, errors.WithStack(err) } // createLookoutMigrationJob returns a batch Job or an error if the app config is not correct func createLookoutMigrationJob(lookout *installv1alpha1.Lookout, serviceAccountName string) (*batchv1.Job, error) { - runAsUser := int64(1000) - runAsGroup := int64(2000) var terminationGracePeriodSeconds int64 if lookout.Spec.TerminationGracePeriodSeconds != nil { terminationGracePeriodSeconds = *lookout.Spec.TerminationGracePeriodSeconds } - allowPrivilegeEscalation := false parallelism := int32(1) completions := int32(1) backoffLimit := int32(0) @@ -433,10 +377,7 @@ func createLookoutMigrationJob(lookout *installv1alpha1.Lookout, serviceAccountN ServiceAccountName: serviceAccountName, RestartPolicy: "Never", TerminationGracePeriodSeconds: &terminationGracePeriodSeconds, - SecurityContext: &corev1.PodSecurityContext{ - RunAsUser: &runAsUser, - RunAsGroup: &runAsGroup, - }, + SecurityContext: lookout.Spec.PodSecurityContext, InitContainers: []corev1.Container{{ Name: "lookout-migration-db-wait", Image: "postgres:15.2-alpine", @@ -478,21 +419,16 @@ func createLookoutMigrationJob(lookout *installv1alpha1.Lookout, serviceAccountN }}, Containers: []corev1.Container{{ Name: "lookout-migration", - ImagePullPolicy: "IfNotPresent", + ImagePullPolicy: corev1.PullIfNotPresent, Image: ImageString(lookout.Spec.Image), Args: []string{ "--migrateDatabase", appConfigFlag, appConfigFilepath, }, - Ports: []corev1.ContainerPort{{ - Name: "metrics", - ContainerPort: lookout.Spec.PortConfig.MetricsPort, - Protocol: "TCP", - }}, Env: env, VolumeMounts: volumeMounts, - SecurityContext: &corev1.SecurityContext{AllowPrivilegeEscalation: &allowPrivilegeEscalation}, + SecurityContext: lookout.Spec.SecurityContext, }}, NodeSelector: lookout.Spec.NodeSelector, Tolerations: lookout.Spec.Tolerations, @@ -507,13 +443,10 @@ func createLookoutMigrationJob(lookout *installv1alpha1.Lookout, serviceAccountN // createLookoutCronJob returns a batch CronJob or an error if the app config is 
not correct func createLookoutCronJob(lookout *installv1alpha1.Lookout) (*batchv1.CronJob, error) { - runAsUser := int64(1000) - runAsGroup := int64(2000) terminationGracePeriodSeconds := int64(0) if lookout.Spec.TerminationGracePeriodSeconds != nil { terminationGracePeriodSeconds = *lookout.Spec.TerminationGracePeriodSeconds } - allowPrivilegeEscalation := false parallelism := int32(1) completions := int32(1) backoffLimit := int32(0) @@ -563,10 +496,7 @@ func createLookoutCronJob(lookout *installv1alpha1.Lookout) (*batchv1.CronJob, e Spec: corev1.PodSpec{ RestartPolicy: "Never", TerminationGracePeriodSeconds: &terminationGracePeriodSeconds, - SecurityContext: &corev1.PodSecurityContext{ - RunAsUser: &runAsUser, - RunAsGroup: &runAsGroup, - }, + SecurityContext: lookout.Spec.PodSecurityContext, InitContainers: []corev1.Container{{ Name: "lookout-db-pruner-db-wait", Image: "alpine:3.10", @@ -592,21 +522,16 @@ func createLookoutCronJob(lookout *installv1alpha1.Lookout) (*batchv1.CronJob, e }}, Containers: []corev1.Container{{ Name: "lookout-db-pruner", - ImagePullPolicy: "IfNotPresent", + ImagePullPolicy: corev1.PullIfNotPresent, Image: ImageString(lookout.Spec.Image), Args: []string{ "--pruneDatabase", appConfigFlag, appConfigFilepath, }, - Ports: []corev1.ContainerPort{{ - Name: "metrics", - ContainerPort: lookout.Spec.PortConfig.MetricsPort, - Protocol: "TCP", - }}, Env: env, VolumeMounts: volumeMounts, - SecurityContext: &corev1.SecurityContext{AllowPrivilegeEscalation: &allowPrivilegeEscalation}, + SecurityContext: lookout.Spec.SecurityContext, }}, NodeSelector: lookout.Spec.NodeSelector, Tolerations: lookout.Spec.Tolerations, diff --git a/internal/controller/install/lookout_controller_test.go b/internal/controller/install/lookout_controller_test.go index 23a81351..72ba1dc2 100644 --- a/internal/controller/install/lookout_controller_test.go +++ b/internal/controller/install/lookout_controller_test.go @@ -5,6 +5,8 @@ import ( "testing" "time" + "github.com/armadaproject/armada-operator/internal/controller/builders" + "k8s.io/utils/ptr" monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1" @@ -572,8 +574,12 @@ func Test_createLookoutMigrationJob(t *testing.T) { func TestSchedulerReconciler_createIngressHttp_EmptyHosts(t *testing.T) { t.Parallel() - lookoutInput := v1alpha1.Lookout{} - ingress, err := createLookoutIngressHttp(&lookoutInput) + input := v1alpha1.Lookout{} + commonConfig, err := builders.ParseCommonApplicationConfig(input.Spec.ApplicationConfig) + if err != nil { + t.Fatalf("should not return error when parsing common application config") + } + ingress, err := createLookoutIngressHttp(&input, commonConfig) // expect no error and nil ingress with empty hosts slice assert.NoError(t, err) assert.Nil(t, ingress) @@ -582,7 +588,7 @@ func TestSchedulerReconciler_createIngressHttp_EmptyHosts(t *testing.T) { func TestSchedulerReconciler_createLookoutIngressHttp(t *testing.T) { t.Parallel() - lookoutInput := v1alpha1.Lookout{ + input := v1alpha1.Lookout{ TypeMeta: metav1.TypeMeta{ Kind: "Lookout", APIVersion: "install.armadaproject.io/v1alpha1", @@ -600,7 +606,11 @@ func TestSchedulerReconciler_createLookoutIngressHttp(t *testing.T) { HostNames: []string{"localhost"}, }, } - ingress, err := createLookoutIngressHttp(&lookoutInput) + commonConfig, err := builders.ParseCommonApplicationConfig(input.Spec.ApplicationConfig) + if err != nil { + t.Fatalf("should not return error when parsing common application config") + } + ingress, err := 
createLookoutIngressHttp(&input, commonConfig) // expect no error and not-nil ingress assert.NoError(t, err) assert.NotNil(t, ingress) diff --git a/internal/controller/install/lookoutingester_controller.go b/internal/controller/install/lookoutingester_controller.go index 5d26c46c..b7973ed3 100644 --- a/internal/controller/install/lookoutingester_controller.go +++ b/internal/controller/install/lookoutingester_controller.go @@ -65,13 +65,12 @@ func (r *LookoutIngesterReconciler) Reconcile(ctx context.Context, req ctrl.Requ return ctrl.Result{}, err } - pc, err := installv1alpha1.BuildPortConfig(lookoutIngester.Spec.ApplicationConfig) + commonConfig, err := builders.ParseCommonApplicationConfig(lookoutIngester.Spec.ApplicationConfig) if err != nil { return ctrl.Result{}, err } - lookoutIngester.Spec.PortConfig = pc - components, err := r.generateInstallComponents(&lookoutIngester, r.Scheme) + components, err := r.generateInstallComponents(&lookoutIngester, r.Scheme, commonConfig) if err != nil { return ctrl.Result{}, err } @@ -107,7 +106,11 @@ func (r *LookoutIngesterReconciler) SetupWithManager(mgr ctrl.Manager) error { Complete(r) } -func (r *LookoutIngesterReconciler) generateInstallComponents(lookoutIngester *installv1alpha1.LookoutIngester, scheme *runtime.Scheme) (*CommonComponents, error) { +func (r *LookoutIngesterReconciler) generateInstallComponents( + lookoutIngester *installv1alpha1.LookoutIngester, + scheme *runtime.Scheme, + config *builders.CommonApplicationConfig, +) (*CommonComponents, error) { secret, err := builders.CreateSecret(lookoutIngester.Spec.ApplicationConfig, lookoutIngester.Name, lookoutIngester.Namespace, GetConfigFilename(lookoutIngester.Name)) if err != nil { return nil, err @@ -119,14 +122,14 @@ func (r *LookoutIngesterReconciler) generateInstallComponents(lookoutIngester *i var serviceAccount *corev1.ServiceAccount serviceAccountName := lookoutIngester.Spec.CustomServiceAccount if serviceAccountName == "" { - serviceAccount = builders.CreateServiceAccount(lookoutIngester.Name, lookoutIngester.Namespace, AllLabels(lookoutIngester.Name, lookoutIngester.Labels), lookoutIngester.Spec.ServiceAccount) + serviceAccount = builders.ServiceAccount(lookoutIngester.Name, lookoutIngester.Namespace, AllLabels(lookoutIngester.Name, lookoutIngester.Labels), lookoutIngester.Spec.ServiceAccount) if err = controllerutil.SetOwnerReference(lookoutIngester, serviceAccount, scheme); err != nil { return nil, errors.WithStack(err) } serviceAccountName = serviceAccount.Name } - deployment, err := r.createDeployment(lookoutIngester, serviceAccountName) + deployment, err := r.createDeployment(lookoutIngester, serviceAccountName, config) if err != nil { return nil, err } @@ -134,19 +137,32 @@ func (r *LookoutIngesterReconciler) generateInstallComponents(lookoutIngester *i return nil, err } + profilingService, profilingIngress, err := newProfilingComponents( + lookoutIngester, + scheme, + config, + lookoutIngester.Spec.ProfilingIngressConfig, + ) + if err != nil { + return nil, errors.WithStack(err) + } + return &CommonComponents{ - Deployment: deployment, - ServiceAccount: serviceAccount, - Secret: secret, + Deployment: deployment, + ServiceAccount: serviceAccount, + Secret: secret, + ServiceProfiling: profilingService, + IngressProfiling: profilingIngress, }, nil } // TODO: Flesh this out for lookoutingester -func (r *LookoutIngesterReconciler) createDeployment(lookoutIngester *installv1alpha1.LookoutIngester, serviceAccountName string) (*appsv1.Deployment, error) { +func (r 
*LookoutIngesterReconciler) createDeployment( + lookoutIngester *installv1alpha1.LookoutIngester, + serviceAccountName string, + config *builders.CommonApplicationConfig, +) (*appsv1.Deployment, error) { var replicas int32 = 1 - var runAsUser int64 = 1000 - var runAsGroup int64 = 2000 - allowPrivilegeEscalation := false env := createEnv(lookoutIngester.Spec.Environment) pulsarConfig, err := ExtractPulsarConfig(lookoutIngester.Spec.ApplicationConfig) @@ -175,24 +191,16 @@ func (r *LookoutIngesterReconciler) createDeployment(lookoutIngester *installv1a Spec: corev1.PodSpec{ ServiceAccountName: serviceAccountName, TerminationGracePeriodSeconds: lookoutIngester.Spec.TerminationGracePeriodSeconds, - SecurityContext: &corev1.PodSecurityContext{ - RunAsUser: &runAsUser, - RunAsGroup: &runAsGroup, - }, + SecurityContext: lookoutIngester.Spec.PodSecurityContext, Containers: []corev1.Container{{ Name: "lookoutingester", - ImagePullPolicy: "IfNotPresent", + ImagePullPolicy: corev1.PullIfNotPresent, Image: ImageString(lookoutIngester.Spec.Image), Args: []string{appConfigFlag, appConfigFilepath}, - // FIXME(Clif): Needs to change - Ports: []corev1.ContainerPort{{ - Name: "metrics", - ContainerPort: lookoutIngester.Spec.PortConfig.MetricsPort, - Protocol: "TCP", - }}, + Ports: newContainerPortsMetrics(config), Env: env, VolumeMounts: volumeMounts, - SecurityContext: &corev1.SecurityContext{AllowPrivilegeEscalation: &allowPrivilegeEscalation}, + SecurityContext: lookoutIngester.Spec.SecurityContext, }}, Tolerations: lookoutIngester.Spec.Tolerations, Volumes: volumes, diff --git a/internal/controller/install/scheduler_controller.go b/internal/controller/install/scheduler_controller.go index daa8e5df..0609c407 100644 --- a/internal/controller/install/scheduler_controller.go +++ b/internal/controller/install/scheduler_controller.go @@ -18,13 +18,13 @@ import ( "fmt" "time" + "k8s.io/utils/ptr" + "k8s.io/apimachinery/pkg/util/duration" "k8s.io/apimachinery/pkg/util/intstr" "github.com/pkg/errors" - "k8s.io/utils/ptr" - monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1" installv1alpha1 "github.com/armadaproject/armada-operator/api/install/v1alpha1" @@ -79,13 +79,13 @@ func (r *SchedulerReconciler) Reconcile(ctx context.Context, req ctrl.Request) ( return ctrl.Result{}, err } - scheduler.Spec.PortConfig, err = installv1alpha1.BuildPortConfig(scheduler.Spec.ApplicationConfig) + commonConfig, err := builders.ParseCommonApplicationConfig(scheduler.Spec.ApplicationConfig) if err != nil { return ctrl.Result{}, err } var components *CommonComponents - components, err = generateSchedulerInstallComponents(&scheduler, r.Scheme) + components, err = generateSchedulerInstallComponents(&scheduler, r.Scheme, commonConfig) if err != nil { return ctrl.Result{}, err } @@ -144,7 +144,11 @@ type SchedulerConfig struct { Postgres PostgresConfig } -func generateSchedulerInstallComponents(scheduler *installv1alpha1.Scheduler, scheme *runtime.Scheme) (*CommonComponents, error) { +func generateSchedulerInstallComponents( + scheduler *installv1alpha1.Scheduler, + scheme *runtime.Scheme, + config *builders.CommonApplicationConfig, +) (*CommonComponents, error) { secret, err := builders.CreateSecret(scheduler.Spec.ApplicationConfig, scheduler.Name, scheduler.Namespace, GetConfigFilename(scheduler.Name)) if err != nil { return nil, err @@ -156,14 +160,14 @@ func generateSchedulerInstallComponents(scheduler *installv1alpha1.Scheduler, sc var serviceAccount *corev1.ServiceAccount serviceAccountName := 
scheduler.Spec.CustomServiceAccount if serviceAccountName == "" { - serviceAccount = builders.CreateServiceAccount(scheduler.Name, scheduler.Namespace, AllLabels(scheduler.Name, scheduler.Labels), scheduler.Spec.ServiceAccount) + serviceAccount = builders.ServiceAccount(scheduler.Name, scheduler.Namespace, AllLabels(scheduler.Name, scheduler.Labels), scheduler.Spec.ServiceAccount) if err = controllerutil.SetOwnerReference(scheduler, serviceAccount, scheme); err != nil { return nil, errors.WithStack(err) } serviceAccountName = serviceAccount.Name } - deployment, err := createSchedulerDeployment(scheduler, serviceAccountName) + deployment, err := newSchedulerDeployment(scheduler, serviceAccountName, config) if err != nil { return nil, err } @@ -171,25 +175,42 @@ func generateSchedulerInstallComponents(scheduler *installv1alpha1.Scheduler, sc return nil, err } - service := builders.Service(scheduler.Name, scheduler.Namespace, AllLabels(scheduler.Name, scheduler.Labels), IdentityLabel(scheduler.Name), scheduler.Spec.PortConfig) + service := builders.Service( + scheduler.Name, + scheduler.Namespace, + AllLabels(scheduler.Name, scheduler.Labels), + IdentityLabel(scheduler.Name), + config, + builders.ServiceEnableApplicationPortsOnly, + ) if err := controllerutil.SetOwnerReference(scheduler, service, scheme); err != nil { return nil, err } + profilingService, profilingIngress, err := newProfilingComponents( + scheduler, + scheme, + config, + scheduler.Spec.ProfilingIngressConfig, + ) + if err != nil { + return nil, errors.WithStack(err) + } + var serviceMonitor *monitoringv1.ServiceMonitor var prometheusRule *monitoringv1.PrometheusRule if scheduler.Spec.Prometheus != nil && scheduler.Spec.Prometheus.Enabled { - serviceMonitor = createSchedulerServiceMonitor(scheduler) + serviceMonitor = newSchedulerServiceMonitor(scheduler) if err := controllerutil.SetOwnerReference(scheduler, serviceMonitor, scheme); err != nil { return nil, err } - prometheusRule = createSchedulerPrometheusRule(scheduler) + prometheusRule = newSchedulerPrometheusRule(scheduler) if err := controllerutil.SetOwnerReference(scheduler, prometheusRule, scheme); err != nil { return nil, err } } - job, err := createSchedulerMigrationJob(scheduler, serviceAccountName) + job, err := newSchedulerMigrationJob(scheduler, serviceAccountName) if err != nil { return nil, err } @@ -199,7 +220,7 @@ func generateSchedulerInstallComponents(scheduler *installv1alpha1.Scheduler, sc var cronJob *batchv1.CronJob if scheduler.Spec.Pruner != nil && scheduler.Spec.Pruner.Enabled { - cronJob, err := createSchedulerCronJob(scheduler) + cronJob, err := newSchedulerCronJob(scheduler) if err != nil { return nil, err } @@ -208,35 +229,57 @@ func generateSchedulerInstallComponents(scheduler *installv1alpha1.Scheduler, sc } } - ingressGrpc, err := createSchedulerIngressGrpc(scheduler) + ingressGRPC, err := newSchedulerIngressGRPC(scheduler, config) if err != nil { return nil, err } - if ingressGrpc != nil { - if err := controllerutil.SetOwnerReference(scheduler, ingressGrpc, scheme); err != nil { + if ingressGRPC != nil { + if err := controllerutil.SetOwnerReference(scheduler, ingressGRPC, scheme); err != nil { return nil, err } } return &CommonComponents{ - Deployment: deployment, - Service: service, - ServiceAccount: serviceAccount, - Secret: secret, - IngressGrpc: ingressGrpc, - Jobs: []*batchv1.Job{job}, - ServiceMonitor: serviceMonitor, - PrometheusRule: prometheusRule, - CronJob: cronJob, + Deployment: deployment, + Service: service, + ServiceProfiling: 
profilingService, + ServiceAccount: serviceAccount, + Secret: secret, + IngressGrpc: ingressGRPC, + IngressProfiling: profilingIngress, + Jobs: []*batchv1.Job{job}, + ServiceMonitor: serviceMonitor, + PrometheusRule: prometheusRule, + CronJob: cronJob, }, nil } +// newSchedulerServiceMonitor will return a ServiceMonitor for this +func newSchedulerServiceMonitor(scheduler *installv1alpha1.Scheduler) *monitoringv1.ServiceMonitor { + return &monitoringv1.ServiceMonitor{ + TypeMeta: metav1.TypeMeta{ + Kind: "ServiceMonitor", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: scheduler.Name, + Namespace: scheduler.Namespace, + Labels: AllLabels(scheduler.Name, scheduler.Spec.Labels, scheduler.Spec.Prometheus.Labels), + }, + Spec: monitoringv1.ServiceMonitorSpec{ + Endpoints: []monitoringv1.Endpoint{ + {Port: "metrics", Interval: "15s"}, + }, + }, + } +} + // Function to build the deployment object for Scheduler. -// This should be changing from CRD to CRD. Not sure if generailize this helps much -func createSchedulerDeployment(scheduler *installv1alpha1.Scheduler, serviceAccountName string) (*appsv1.Deployment, error) { - var runAsUser int64 = 1000 - var runAsGroup int64 = 2000 - allowPrivilegeEscalation := false +// This should be changing from CRD to CRD. Not sure if generalize this helps much +func newSchedulerDeployment( + scheduler *installv1alpha1.Scheduler, + serviceAccountName string, + config *builders.CommonApplicationConfig, +) (*appsv1.Deployment, error) { env := createEnv(scheduler.Spec.Environment) pulsarConfig, err := ExtractPulsarConfig(scheduler.Spec.ApplicationConfig) if err != nil { @@ -264,47 +307,17 @@ func createSchedulerDeployment(scheduler *installv1alpha1.Scheduler, serviceAcco Spec: corev1.PodSpec{ ServiceAccountName: serviceAccountName, TerminationGracePeriodSeconds: scheduler.DeletionGracePeriodSeconds, - SecurityContext: &corev1.PodSecurityContext{ - RunAsUser: &runAsUser, - RunAsGroup: &runAsGroup, - }, - Affinity: &corev1.Affinity{ - PodAntiAffinity: &corev1.PodAntiAffinity{ - PreferredDuringSchedulingIgnoredDuringExecution: []corev1.WeightedPodAffinityTerm{{ - Weight: 100, - PodAffinityTerm: corev1.PodAffinityTerm{ - TopologyKey: "kubernetes.io/hostname", - LabelSelector: &metav1.LabelSelector{ - MatchExpressions: []metav1.LabelSelectorRequirement{{ - Key: "app", - Operator: metav1.LabelSelectorOpIn, - Values: []string{scheduler.Name}, - }}, - }, - }, - }}, - }, - }, + SecurityContext: scheduler.Spec.PodSecurityContext, + Affinity: defaultAffinity(scheduler.Name, 100), Containers: []corev1.Container{{ Name: "scheduler", - ImagePullPolicy: "IfNotPresent", + ImagePullPolicy: corev1.PullIfNotPresent, Image: ImageString(scheduler.Spec.Image), Args: []string{"run", appConfigFlag, appConfigFilepath}, - Ports: []corev1.ContainerPort{ - { - Name: "metrics", - ContainerPort: scheduler.Spec.PortConfig.MetricsPort, - Protocol: "TCP", - }, - { - Name: "grpc", - ContainerPort: scheduler.Spec.PortConfig.GrpcPort, - Protocol: "TCP", - }, - }, + Ports: newContainerPortsGRPCWithMetrics(config), Env: env, VolumeMounts: volumeMounts, - SecurityContext: &corev1.SecurityContext{AllowPrivilegeEscalation: &allowPrivilegeEscalation}, + SecurityContext: scheduler.Spec.SecurityContext, }}, Volumes: volumes, }, @@ -320,71 +333,33 @@ func createSchedulerDeployment(scheduler *installv1alpha1.Scheduler, serviceAcco return &deployment, nil } -func createSchedulerIngressGrpc(scheduler *installv1alpha1.Scheduler) (*networking.Ingress, error) { +func newSchedulerIngressGRPC(scheduler 
*installv1alpha1.Scheduler, config *builders.CommonApplicationConfig) (*networking.Ingress, error) { if len(scheduler.Spec.HostNames) == 0 { - // when no hostnames provided, no ingress can be configured + // if no hostnames, no ingress can be configured return nil, nil } - ingressName := scheduler.Name + "-grpc" - ingressHttp := &networking.Ingress{ - ObjectMeta: metav1.ObjectMeta{ - Name: ingressName, Namespace: scheduler.Namespace, Labels: AllLabels(scheduler.Name, scheduler.Labels), - Annotations: map[string]string{ - "kubernetes.io/ingress.class": scheduler.Spec.Ingress.IngressClass, - "nginx.ingress.kubernetes.io/ssl-redirect": "true", - "nginx.ingress.kubernetes.io/backend-protocol": "GRPC", - }, - }, - } - if scheduler.Spec.ClusterIssuer != "" { - ingressHttp.ObjectMeta.Annotations["certmanager.k8s.io/cluster-issuer"] = scheduler.Spec.ClusterIssuer - ingressHttp.ObjectMeta.Annotations["cert-manager.io/cluster-issuer"] = scheduler.Spec.ClusterIssuer + name := scheduler.Name + "-grpc" + labels := AllLabels(scheduler.Name, scheduler.Spec.Labels, scheduler.Spec.Ingress.Labels) + baseAnnotations := map[string]string{ + "nginx.ingress.kubernetes.io/ssl-redirect": "true", } - - if scheduler.Spec.Ingress.Annotations != nil { - for key, value := range scheduler.Spec.Ingress.Annotations { - ingressHttp.ObjectMeta.Annotations[key] = value - } - } - ingressHttp.ObjectMeta.Labels = AllLabels(scheduler.Name, scheduler.Spec.Labels, scheduler.Spec.Ingress.Labels) + annotations := buildIngressAnnotations(scheduler.Spec.Ingress, baseAnnotations, BackendProtocolGRPC, config.GRPC.Enabled) secretName := scheduler.Name + "-service-tls" - ingressHttp.Spec.TLS = []networking.IngressTLS{{Hosts: scheduler.Spec.HostNames, SecretName: secretName}} - var ingressRules []networking.IngressRule serviceName := scheduler.Name - for _, val := range scheduler.Spec.HostNames { - ingressRules = append(ingressRules, networking.IngressRule{Host: val, IngressRuleValue: networking.IngressRuleValue{ - HTTP: &networking.HTTPIngressRuleValue{ - Paths: []networking.HTTPIngressPath{{ - Path: "/", - PathType: (*networking.PathType)(ptr.To[string]("Prefix")), - Backend: networking.IngressBackend{ - Service: &networking.IngressServiceBackend{ - Name: serviceName, - Port: networking.ServiceBackendPort{ - Number: scheduler.Spec.PortConfig.GrpcPort, - }, - }, - }, - }}, - }, - }}) - } - ingressHttp.Spec.Rules = ingressRules - - return ingressHttp, nil + servicePort := config.GRPCPort + path := "/" + ingress, err := builders.Ingress(name, scheduler.Namespace, labels, annotations, scheduler.Spec.HostNames, serviceName, secretName, path, servicePort) + return ingress, errors.WithStack(err) } -// createSchedulerMigrationJob returns a batch Job or an error if the app config is not correct -func createSchedulerMigrationJob(scheduler *installv1alpha1.Scheduler, serviceAccountName string) (*batchv1.Job, error) { - runAsUser := int64(1000) - runAsGroup := int64(2000) +// newSchedulerMigrationJob returns a batch Job or an error if the app config is not correct +func newSchedulerMigrationJob(scheduler *installv1alpha1.Scheduler, serviceAccountName string) (*batchv1.Job, error) { var terminationGracePeriodSeconds int64 if scheduler.Spec.TerminationGracePeriodSeconds != nil { terminationGracePeriodSeconds = *scheduler.Spec.TerminationGracePeriodSeconds } - allowPrivilegeEscalation := false parallelism := int32(1) completions := int32(1) backoffLimit := int32(0) @@ -423,10 +398,7 @@ func createSchedulerMigrationJob(scheduler 
-// createSchedulerMigrationJob returns a batch Job or an error if the app config is not correct
-func createSchedulerMigrationJob(scheduler *installv1alpha1.Scheduler, serviceAccountName string) (*batchv1.Job, error) {
-	runAsUser := int64(1000)
-	runAsGroup := int64(2000)
+// newSchedulerMigrationJob returns a batch Job or an error if the app config is not correct
+func newSchedulerMigrationJob(scheduler *installv1alpha1.Scheduler, serviceAccountName string) (*batchv1.Job, error) {
 	var terminationGracePeriodSeconds int64
 	if scheduler.Spec.TerminationGracePeriodSeconds != nil {
 		terminationGracePeriodSeconds = *scheduler.Spec.TerminationGracePeriodSeconds
 	}
-	allowPrivilegeEscalation := false
 	parallelism := int32(1)
 	completions := int32(1)
 	backoffLimit := int32(0)
@@ -423,10 +398,7 @@ func createSchedulerMigrationJob(scheduler *installv1alpha1.Scheduler, serviceAc
 					ServiceAccountName:            serviceAccountName,
 					RestartPolicy:                 "Never",
 					TerminationGracePeriodSeconds: &terminationGracePeriodSeconds,
-					SecurityContext: &corev1.PodSecurityContext{
-						RunAsUser:  &runAsUser,
-						RunAsGroup: &runAsGroup,
-					},
+					SecurityContext:               scheduler.Spec.PodSecurityContext,
 					InitContainers: []corev1.Container{{
 						Name:  "scheduler-migration-db-wait",
 						Image: "postgres:15.2-alpine",
@@ -468,21 +440,16 @@ func createSchedulerMigrationJob(scheduler *installv1alpha1.Scheduler, serviceAc
 					}},
 					Containers: []corev1.Container{{
 						Name:            "scheduler-migration",
-						ImagePullPolicy: "IfNotPresent",
+						ImagePullPolicy: corev1.PullIfNotPresent,
 						Image:           ImageString(scheduler.Spec.Image),
 						Args: []string{
 							"migrateDatabase",
 							appConfigFlag,
 							appConfigFilepath,
 						},
-						Ports: []corev1.ContainerPort{{
-							Name:          "metrics",
-							ContainerPort: scheduler.Spec.PortConfig.MetricsPort,
-							Protocol:      "TCP",
-						}},
 						Env:             env,
 						VolumeMounts:    volumeMounts,
-						SecurityContext: &corev1.SecurityContext{AllowPrivilegeEscalation: &allowPrivilegeEscalation},
+						SecurityContext: scheduler.Spec.SecurityContext,
 					}},
 					Tolerations: scheduler.Spec.Tolerations,
 					Volumes:     volumes,
@@ -494,15 +461,12 @@ func createSchedulerMigrationJob(scheduler *installv1alpha1.Scheduler, serviceAc
 	return &job, nil
 }
 
-// createSchedulerCronJob returns a batch CronJob or an error if the app config is not correct
-func createSchedulerCronJob(scheduler *installv1alpha1.Scheduler) (*batchv1.CronJob, error) {
-	runAsUser := int64(1000)
-	runAsGroup := int64(2000)
+// newSchedulerCronJob returns a batch CronJob or an error if the app config is not correct
+func newSchedulerCronJob(scheduler *installv1alpha1.Scheduler) (*batchv1.CronJob, error) {
 	terminationGracePeriodSeconds := int64(0)
 	if scheduler.Spec.TerminationGracePeriodSeconds != nil {
 		terminationGracePeriodSeconds = *scheduler.Spec.TerminationGracePeriodSeconds
 	}
-	allowPrivilegeEscalation := false
 	parallelism := int32(1)
 	completions := int32(1)
 	backoffLimit := int32(0)
@@ -568,10 +532,7 @@ func createSchedulerCronJob(scheduler *installv1alpha1.Scheduler) (*batchv1.Cron
 						Spec: corev1.PodSpec{
 							RestartPolicy:                 "Never",
 							TerminationGracePeriodSeconds: &terminationGracePeriodSeconds,
-							SecurityContext: &corev1.PodSecurityContext{
-								RunAsUser:  &runAsUser,
-								RunAsGroup: &runAsGroup,
-							},
+							SecurityContext:               scheduler.Spec.PodSecurityContext,
 							InitContainers: []corev1.Container{{
 								Name:  "scheduler-db-pruner-db-wait",
 								Image: "alpine:3.10",
@@ -597,17 +558,12 @@ func createSchedulerCronJob(scheduler *installv1alpha1.Scheduler) (*batchv1.Cron
 							}},
 							Containers: []corev1.Container{{
 								Name:            "scheduler-db-pruner",
-								ImagePullPolicy: "IfNotPresent",
+								ImagePullPolicy: corev1.PullIfNotPresent,
 								Image:           ImageString(scheduler.Spec.Image),
 								Args:            prunerArgs,
-								Ports: []corev1.ContainerPort{{
-									Name:          "metrics",
-									ContainerPort: scheduler.Spec.PortConfig.MetricsPort,
-									Protocol:      "TCP",
-								}},
 								Env:             env,
 								VolumeMounts:    volumeMounts,
-								SecurityContext: &corev1.SecurityContext{AllowPrivilegeEscalation: &allowPrivilegeEscalation},
+								SecurityContext: scheduler.Spec.SecurityContext,
 								Resources:       prunerResources,
 							}},
 							Tolerations: scheduler.Spec.Tolerations,
@@ -622,32 +578,8 @@ func createSchedulerCronJob(scheduler *installv1alpha1.Scheduler) (*batchv1.Cron
 	return &job, nil
 }
 
-// createSchedulerServiceMonitor will return a ServiceMonitor for this
-func createSchedulerServiceMonitor(scheduler *installv1alpha1.Scheduler) *monitoringv1.ServiceMonitor {
-	scrapeInterval := &metav1.Duration{Duration: defaultPrometheusInterval}
-	if scheduler.Spec.Prometheus.ScrapeInterval == nil {
-		scrapeInterval = &metav1.Duration{Duration: defaultPrometheusInterval}
-	}
-	durationString := duration.ShortHumanDuration(scrapeInterval.Duration)
-	return &monitoringv1.ServiceMonitor{
-		TypeMeta: metav1.TypeMeta{
-			Kind: "ServiceMonitor",
-		},
-		ObjectMeta: metav1.ObjectMeta{
-			Name:      scheduler.Name,
-			Namespace: scheduler.Namespace,
-			Labels:    AllLabels(scheduler.Name, scheduler.Spec.Labels, scheduler.Spec.Prometheus.Labels),
-		},
-		Spec: monitoringv1.ServiceMonitorSpec{
-			Endpoints: []monitoringv1.Endpoint{
-				{Port: "metrics", Interval: monitoringv1.Duration(durationString)},
-			},
-		},
-	}
-}
-
-// createSchedulerPrometheusRule creates a PrometheusRule for monitoring Armada scheduler.
-func createSchedulerPrometheusRule(scheduler *installv1alpha1.Scheduler) *monitoringv1.PrometheusRule {
+// newSchedulerPrometheusRule creates a PrometheusRule for monitoring Armada scheduler.
+func newSchedulerPrometheusRule(scheduler *installv1alpha1.Scheduler) *monitoringv1.PrometheusRule {
 	rules := []monitoringv1.Rule{
 		{
 			Record: "node:armada_scheduler_failed_jobs",
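The tests below switch to builders.ParseCommonApplicationConfig, which supersedes the removed BuildPortConfig. Its definition is not visible in this section; a sketch of a plausible shape, where the GRPCPort and GRPC.Enabled fields are taken from call sites in this patch and everything else (remaining fields, YAML unmarshalling, absence of defaulting logic) is an assumption:

// Sketch only: field names beyond GRPCPort and GRPC.Enabled are assumptions.
package builders

import (
	"k8s.io/apimachinery/pkg/runtime"
	"sigs.k8s.io/yaml"
)

type GRPCConfig struct {
	Enabled bool `json:"enabled"`
}

type CommonApplicationConfig struct {
	HTTPPort    int32      `json:"httpPort"`
	GRPCPort    int32      `json:"grpcPort"`
	MetricsPort int32      `json:"metricsPort"`
	GRPC        GRPCConfig `json:"grpc"`
}

// ParseCommonApplicationConfig reads the ports shared by all Armada components
// out of the raw application config carried on each CRD.
func ParseCommonApplicationConfig(rawAppConfig runtime.RawExtension) (*CommonApplicationConfig, error) {
	config := CommonApplicationConfig{}
	if len(rawAppConfig.Raw) > 0 {
		if err := yaml.Unmarshal(rawAppConfig.Raw, &config); err != nil {
			return nil, err
		}
	}
	return &config, nil
}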
diff --git a/internal/controller/install/scheduler_controller_test.go b/internal/controller/install/scheduler_controller_test.go
index 3f6344a2..0001638a 100644
--- a/internal/controller/install/scheduler_controller_test.go
+++ b/internal/controller/install/scheduler_controller_test.go
@@ -5,6 +5,8 @@ import (
 	"testing"
 	"time"
 
+	"github.com/armadaproject/armada-operator/internal/controller/builders"
+
 	"k8s.io/utils/ptr"
 
 	monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1"
@@ -76,7 +78,11 @@ func TestSchedulerReconciler_Reconcile(t *testing.T) {
 		},
 	}
 
-	scheduler, err := generateSchedulerInstallComponents(&expectedScheduler, scheme)
+	commonConfig, err := builders.ParseCommonApplicationConfig(expectedScheduler.Spec.ApplicationConfig)
+	if err != nil {
+		t.Fatalf("should not return error when parsing common application config")
+	}
+	scheduler, err := generateSchedulerInstallComponents(&expectedScheduler, scheme, commonConfig)
 	if err != nil {
 		t.Fatal("We should not fail on generating scheduler")
 	}
@@ -345,7 +351,11 @@ func TestSchedulerReconciler_ReconcileMissingResources(t *testing.T) {
 		},
 	}
 
-	scheduler, err := generateSchedulerInstallComponents(&expectedScheduler, scheme)
+	commonConfig, err := builders.ParseCommonApplicationConfig(expectedScheduler.Spec.ApplicationConfig)
+	if err != nil {
+		t.Fatalf("should not return error when parsing common application config")
+	}
+	scheduler, err := generateSchedulerInstallComponents(&expectedScheduler, scheme, commonConfig)
 	if err != nil {
 		t.Fatal("We should not fail on generating scheduler")
 	}
@@ -517,7 +527,7 @@ func TestSchedulerReconciler_createSchedulerCronJob(t *testing.T) {
 		},
 	}
-	cronJob, err := createSchedulerCronJob(&schedulerInput)
+	cronJob, err := newSchedulerCronJob(&schedulerInput)
 
 	expectedArgs := []string{"--pruneDatabase", appConfigFlag, appConfigFilepath, "--timeout", "10m", "--batchsize", "1000", "--expireAfter", "1d"}
 	expectedResources := *schedulerInput.Spec.Pruner.Resources
@@ -529,8 +539,12 @@ func TestSchedulerReconciler_createSchedulerIngressGrpc_EmptyHosts(t *testing.T)
 	t.Parallel()
 
-	schedulerInput := v1alpha1.Scheduler{}
-	ingress, err := createSchedulerIngressGrpc(&schedulerInput)
+	input := v1alpha1.Scheduler{}
+	commonConfig, err := builders.ParseCommonApplicationConfig(input.Spec.ApplicationConfig)
+	if err != nil {
+		t.Fatalf("should not return error when parsing common application config")
+	}
+	ingress, err := newSchedulerIngressGRPC(&input, commonConfig)
 
 	// expect no error and nil ingress with empty hosts slice
 	assert.NoError(t, err)
 	assert.Nil(t, ingress)
@@ -539,7 +553,7 @@ func TestSchedulerReconciler_createSchedulerIngressGrpc(t *testing.T) {
 	t.Parallel()
 
-	schedulerInput := v1alpha1.Scheduler{
+	input := v1alpha1.Scheduler{
 		TypeMeta: metav1.TypeMeta{
 			Kind:       "Scheduler",
 			APIVersion: "install.armadaproject.io/v1alpha1",
@@ -557,7 +571,11 @@ func TestSchedulerReconciler_createSchedulerIngressGrpc(t *testing.T) {
 			HostNames: []string{"localhost"},
 		},
 	}
-	ingress, err := createSchedulerIngressGrpc(&schedulerInput)
+	commonConfig, err := builders.ParseCommonApplicationConfig(input.Spec.ApplicationConfig)
+	if err != nil {
+		t.Fatalf("should not return error when parsing common application config")
+	}
+	ingress, err := newSchedulerIngressGRPC(&input, commonConfig)
 	// expect no error and not-nil ingress
 	assert.NoError(t, err)
 	assert.NotNil(t, ingress)
@@ -594,7 +612,7 @@ func TestSchedulerReconciler_createSchedulerCronJobError(t *testing.T) {
 		},
 	}
-	_, err := createSchedulerCronJob(&expectedScheduler)
+	_, err := newSchedulerCronJob(&expectedScheduler)
 	assert.Error(t, err)
 	assert.Equal(t, "yaml: line 1: did not find expected ',' or '}'", err.Error())
 }
@@ -774,7 +792,7 @@ func Test_createSchedulerMigrationJob(t *testing.T) {
 		if tt.modifyInput != nil {
 			tt.modifyInput(&cr)
 		}
-		rslt, err := createSchedulerMigrationJob(&cr, "sa")
+		rslt, err := newSchedulerMigrationJob(&cr, "sa")
 
 		if tt.wantErr {
 			assert.Error(t, err)
diff --git a/internal/controller/install/scheduleringester_controller.go b/internal/controller/install/scheduleringester_controller.go
index 11ee1cff..d2f88fbc 100644
--- a/internal/controller/install/scheduleringester_controller.go
+++ b/internal/controller/install/scheduleringester_controller.go
@@ -26,7 +26,6 @@ import (
 	corev1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/runtime"
-	"k8s.io/apimachinery/pkg/util/intstr"
 	ctrl "sigs.k8s.io/controller-runtime"
 	"sigs.k8s.io/controller-runtime/pkg/client"
 	"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
@@ -69,13 +68,12 @@ func (r *SchedulerIngesterReconciler) Reconcile(ctx context.Context, req ctrl.Re
 		return ctrl.Result{}, err
 	}
 
-	pc, err := installv1alpha1.BuildPortConfig(schedulerIngester.Spec.ApplicationConfig)
+	commonConfig, err := builders.ParseCommonApplicationConfig(schedulerIngester.Spec.ApplicationConfig)
 	if err != nil {
 		return ctrl.Result{}, err
 	}
-	schedulerIngester.Spec.PortConfig = pc
 
-	components, err := r.generateSchedulerIngesterComponents(&schedulerIngester, r.Scheme)
+	components, err := r.generateSchedulerIngesterComponents(&schedulerIngester, r.Scheme, commonConfig)
 	if err != nil {
 		return ctrl.Result{}, err
 	}
@@ -111,7 +109,11 @@ func (r *SchedulerIngesterReconciler) SetupWithManager(mgr ctrl.Manager) error {
 		Complete(r)
 }
 
-func (r *SchedulerIngesterReconciler) generateSchedulerIngesterComponents(schedulerIngester *installv1alpha1.SchedulerIngester, scheme *runtime.Scheme) (*CommonComponents, error) {
+func (r *SchedulerIngesterReconciler) generateSchedulerIngesterComponents(
+	schedulerIngester *installv1alpha1.SchedulerIngester,
+	scheme *runtime.Scheme,
+	config *builders.CommonApplicationConfig,
+) (*CommonComponents, error) {
 	secret, err := builders.CreateSecret(schedulerIngester.Spec.ApplicationConfig, schedulerIngester.Name, schedulerIngester.Namespace, GetConfigFilename(schedulerIngester.Name))
 	if err != nil {
 		return nil, err
@@ -123,14 +125,14 @@ func (r *SchedulerIngesterReconciler) generateSchedulerIngesterComponents(schedu
 	var serviceAccount *corev1.ServiceAccount
 	serviceAccountName := schedulerIngester.Spec.CustomServiceAccount
 	if serviceAccountName == "" {
-		serviceAccount = builders.CreateServiceAccount(schedulerIngester.Name, schedulerIngester.Namespace, AllLabels(schedulerIngester.Name, schedulerIngester.Labels), schedulerIngester.Spec.ServiceAccount)
+		serviceAccount = builders.ServiceAccount(schedulerIngester.Name, schedulerIngester.Namespace, AllLabels(schedulerIngester.Name, schedulerIngester.Labels), schedulerIngester.Spec.ServiceAccount)
 		if err = controllerutil.SetOwnerReference(schedulerIngester, serviceAccount, scheme); err != nil {
 			return nil, errors.WithStack(err)
 		}
 		serviceAccountName = serviceAccount.Name
 	}
 
-	deployment, err := r.createDeployment(schedulerIngester, serviceAccountName)
+	deployment, err := r.createDeployment(schedulerIngester, serviceAccountName, config)
 	if err != nil {
 		return nil, err
 	}
@@ -138,94 +140,79 @@ func (r *SchedulerIngesterReconciler) generateSchedulerIngesterComponents(schedu
 		return nil, err
 	}
 
+	profilingService, profilingIngress, err := newProfilingComponents(
+		schedulerIngester,
+		scheme,
+		config,
+		schedulerIngester.Spec.ProfilingIngressConfig,
+	)
+	if err != nil {
+		return nil, errors.WithStack(err)
+	}
+
 	return &CommonComponents{
-		Deployment:     deployment,
-		ServiceAccount: serviceAccount,
-		Secret:         secret,
+		Deployment:       deployment,
+		ServiceAccount:   serviceAccount,
+		Secret:           secret,
+		ServiceProfiling: profilingService,
+		IngressProfiling: profilingIngress,
 	}, nil
 }
 
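newProfilingComponents is the piece that actually delivers the profiling support this patch is about: one call yields both the profiling Service and, when ProfilingIngressConfig is populated, the profiling Ingress. Its definition is not in this section; an outline under stated assumptions (the client.Object owner parameter, the ProfilingPort field, the "-profiling" and "-tls" naming, and the Hostnames gate are all guesses from the call site):

// Sketch only: signature inferred from the call site above; the port field,
// naming convention, and ingress wiring are assumptions.
package install

import (
	corev1 "k8s.io/api/core/v1"
	networking "k8s.io/api/networking/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"

	installv1alpha1 "github.com/armadaproject/armada-operator/api/install/v1alpha1"
	"github.com/armadaproject/armada-operator/internal/controller/builders"
)

func newProfilingComponents(
	owner client.Object,
	scheme *runtime.Scheme,
	config *builders.CommonApplicationConfig,
	ingressConfig *installv1alpha1.IngressConfig,
) (*corev1.Service, *networking.Ingress, error) {
	name := owner.GetName() + "-profiling"
	// Dedicated Service exposing the pprof endpoint.
	// config.ProfilingPort is a hypothetical field, not confirmed by this patch.
	service := &corev1.Service{
		ObjectMeta: metav1.ObjectMeta{Name: name, Namespace: owner.GetNamespace()},
		Spec: corev1.ServiceSpec{
			Ports: []corev1.ServicePort{{Name: "profiling", Port: config.ProfilingPort}},
		},
	}
	if err := controllerutil.SetOwnerReference(owner, service, scheme); err != nil {
		return nil, nil, err
	}
	// The Ingress is optional: only built when profiling ingress config with hostnames is present.
	var ingress *networking.Ingress
	if ingressConfig != nil && len(ingressConfig.Hostnames) > 0 {
		var err error
		ingress, err = builders.Ingress(name, owner.GetNamespace(), ingressConfig.Labels,
			ingressConfig.Annotations, ingressConfig.Hostnames, name, name+"-tls", "/", config.ProfilingPort)
		if err != nil {
			return nil, nil, err
		}
		if err := controllerutil.SetOwnerReference(owner, ingress, scheme); err != nil {
			return nil, nil, err
		}
	}
	return service, ingress, nil
}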
-func (r *SchedulerIngesterReconciler) createDeployment(scheduleringester *installv1alpha1.SchedulerIngester, serviceAccountName string) (*appsv1.Deployment, error) {
-	var runAsUser int64 = 1000
-	var runAsGroup int64 = 2000
-	allowPrivilegeEscalation := false
-	env := createEnv(scheduleringester.Spec.Environment)
-	pulsarConfig, err := ExtractPulsarConfig(scheduleringester.Spec.ApplicationConfig)
+func (r *SchedulerIngesterReconciler) createDeployment(
+	schedulerIngester *installv1alpha1.SchedulerIngester,
+	serviceAccountName string,
+	config *builders.CommonApplicationConfig,
+) (*appsv1.Deployment, error) {
+	env := createEnv(schedulerIngester.Spec.Environment)
+	pulsarConfig, err := ExtractPulsarConfig(schedulerIngester.Spec.ApplicationConfig)
 	if err != nil {
 		return nil, err
 	}
-	volumes := createVolumes(scheduleringester.Name, scheduleringester.Spec.AdditionalVolumes)
+	volumes := createVolumes(schedulerIngester.Name, schedulerIngester.Spec.AdditionalVolumes)
 	volumes = append(volumes, createPulsarVolumes(pulsarConfig)...)
-	volumeMounts := createVolumeMounts(GetConfigFilename(scheduleringester.Name), scheduleringester.Spec.AdditionalVolumeMounts)
+	volumeMounts := createVolumeMounts(GetConfigFilename(schedulerIngester.Name), schedulerIngester.Spec.AdditionalVolumeMounts)
 	volumeMounts = append(volumeMounts, createPulsarVolumeMounts(pulsarConfig)...)
 
 	deployment := appsv1.Deployment{
-		ObjectMeta: metav1.ObjectMeta{Name: scheduleringester.Name, Namespace: scheduleringester.Namespace, Labels: AllLabels(scheduleringester.Name, scheduleringester.Labels)},
+		ObjectMeta: metav1.ObjectMeta{Name: schedulerIngester.Name, Namespace: schedulerIngester.Namespace, Labels: AllLabels(schedulerIngester.Name, schedulerIngester.Labels)},
 		Spec: appsv1.DeploymentSpec{
-			Replicas: scheduleringester.Spec.Replicas,
+			Replicas: schedulerIngester.Spec.Replicas,
 			Selector: &metav1.LabelSelector{
-				MatchLabels: IdentityLabel(scheduleringester.Name),
-			},
-			Strategy: appsv1.DeploymentStrategy{
-				Type: appsv1.RollingUpdateDeploymentStrategyType,
-				RollingUpdate: &appsv1.RollingUpdateDeployment{
-					MaxUnavailable: &intstr.IntOrString{IntVal: int32(1)},
-				},
+				MatchLabels: IdentityLabel(schedulerIngester.Name),
 			},
+			Strategy: defaultDeploymentStrategy(1),
 			Template: corev1.PodTemplateSpec{
 				ObjectMeta: metav1.ObjectMeta{
-					Name:        scheduleringester.Name,
-					Namespace:   scheduleringester.Namespace,
-					Labels:      AllLabels(scheduleringester.Name, scheduleringester.Labels),
-					Annotations: map[string]string{"checksum/config": GenerateChecksumConfig(scheduleringester.Spec.ApplicationConfig.Raw)},
+					Name:        schedulerIngester.Name,
+					Namespace:   schedulerIngester.Namespace,
+					Labels:      AllLabels(schedulerIngester.Name, schedulerIngester.Labels),
+					Annotations: map[string]string{"checksum/config": GenerateChecksumConfig(schedulerIngester.Spec.ApplicationConfig.Raw)},
 				},
 				Spec: corev1.PodSpec{
 					ServiceAccountName:            serviceAccountName,
-					TerminationGracePeriodSeconds: scheduleringester.Spec.TerminationGracePeriodSeconds,
-					SecurityContext: &corev1.PodSecurityContext{
-						RunAsUser:  &runAsUser,
-						RunAsGroup: &runAsGroup,
-					},
-					Affinity: &corev1.Affinity{
-						PodAffinity: &corev1.PodAffinity{
-							PreferredDuringSchedulingIgnoredDuringExecution: []corev1.WeightedPodAffinityTerm{{
-								Weight: 100,
-								PodAffinityTerm: corev1.PodAffinityTerm{
-									TopologyKey: "kubernetes.io/hostname",
-									LabelSelector: &metav1.LabelSelector{
-										MatchExpressions: []metav1.LabelSelectorRequirement{{
-											Key:      "app",
-											Operator: metav1.LabelSelectorOpIn,
-											Values:   []string{scheduleringester.Name},
-										}},
-									},
-								},
-							}},
-						},
-					},
+					TerminationGracePeriodSeconds: schedulerIngester.Spec.TerminationGracePeriodSeconds,
+					SecurityContext:               schedulerIngester.Spec.PodSecurityContext,
+					Affinity:                      defaultAffinity(schedulerIngester.Name, 100),
 					Containers: []corev1.Container{{
 						Name:            "scheduleringester",
-						ImagePullPolicy: "IfNotPresent",
-						Image:           ImageString(scheduleringester.Spec.Image),
+						ImagePullPolicy: corev1.PullIfNotPresent,
+						Image:           ImageString(schedulerIngester.Spec.Image),
 						Args:            []string{appConfigFlag, appConfigFilepath},
-						Ports: []corev1.ContainerPort{{
-							Name:          "metrics",
-							ContainerPort: scheduleringester.Spec.PortConfig.MetricsPort,
-							Protocol:      "TCP",
-						}},
+						Ports:           newContainerPortsMetrics(config),
 						Env:             env,
 						VolumeMounts:    volumeMounts,
-						SecurityContext: &corev1.SecurityContext{AllowPrivilegeEscalation: &allowPrivilegeEscalation},
+						SecurityContext: schedulerIngester.Spec.SecurityContext,
 					}},
-					Tolerations: scheduleringester.Spec.Tolerations,
+					Tolerations: schedulerIngester.Spec.Tolerations,
 					Volumes:     volumes,
 				},
 			},
 		},
 	}
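defaultDeploymentStrategy(1), used above, replaces the inline rolling-update strategy deleted in this hunk, which also explains the dropped intstr import at the top of the file. Reconstructed from that deleted block, it presumably looks like the following sketch; the helper name and parameter come from the call site:

// Sketch only: rebuilt from the inline strategy removed above.
package install

import (
	appsv1 "k8s.io/api/apps/v1"
	"k8s.io/apimachinery/pkg/util/intstr"
)

func defaultDeploymentStrategy(maxUnavailable int32) appsv1.DeploymentStrategy {
	return appsv1.DeploymentStrategy{
		Type: appsv1.RollingUpdateDeploymentStrategyType,
		RollingUpdate: &appsv1.RollingUpdateDeployment{
			// Limit how many pods a rolling update may take down at once.
			MaxUnavailable: &intstr.IntOrString{IntVal: maxUnavailable},
		},
	}
}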
-	if scheduleringester.Spec.Resources != nil {
-		deployment.Spec.Template.Spec.Containers[0].Resources = *scheduleringester.Spec.Resources
-		deployment.Spec.Template.Spec.Containers[0].Env = addGoMemLimit(deployment.Spec.Template.Spec.Containers[0].Env, *scheduleringester.Spec.Resources)
+	if schedulerIngester.Spec.Resources != nil {
+		deployment.Spec.Template.Spec.Containers[0].Resources = *schedulerIngester.Spec.Resources
+		deployment.Spec.Template.Spec.Containers[0].Env = addGoMemLimit(deployment.Spec.Template.Spec.Containers[0].Env, *schedulerIngester.Spec.Resources)
 	}
 
 	return &deployment, nil
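Both container-port helpers introduced by this patch (newContainerPortsMetrics here, newContainerPortsGRPCWithMetrics in the scheduler deployment) replace inline Ports blocks that read from the old PortConfig. A sketch consistent with those deleted blocks, assuming the MetricsPort field name on CommonApplicationConfig:

// Sketch only: inferred from the inline Ports blocks this patch deletes.
package install

import (
	corev1 "k8s.io/api/core/v1"

	"github.com/armadaproject/armada-operator/internal/controller/builders"
)

// newContainerPortsMetrics exposes just the metrics port.
func newContainerPortsMetrics(config *builders.CommonApplicationConfig) []corev1.ContainerPort {
	return []corev1.ContainerPort{{
		Name:          "metrics",
		ContainerPort: config.MetricsPort,
		Protocol:      corev1.ProtocolTCP,
	}}
}

// newContainerPortsGRPCWithMetrics exposes the gRPC port alongside metrics.
func newContainerPortsGRPCWithMetrics(config *builders.CommonApplicationConfig) []corev1.ContainerPort {
	return append(newContainerPortsMetrics(config), corev1.ContainerPort{
		Name:          "grpc",
		ContainerPort: config.GRPCPort,
		Protocol:      corev1.ProtocolTCP,
	})
}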