-
Notifications
You must be signed in to change notification settings - Fork 33
OTA-1014: controllers: Add metadata container and Route #176
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
Changes from all commits
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
| Original file line number | Diff line number | Diff line change |
|---|---|---|
|
|
@@ -81,10 +81,13 @@ type kubeResources struct { | |
| graphBuilderContainer *corev1.Container | ||
| graphDataInitContainer *corev1.Container | ||
| policyEngineContainer *corev1.Container | ||
| metadataContainer *corev1.Container | ||
| graphBuilderService *corev1.Service | ||
| policyEngineService *corev1.Service | ||
| metadataService *corev1.Service | ||
| policyEngineRoute *routev1.Route | ||
| policyEngineOldRoute *routev1.Route | ||
| metadataRoute *routev1.Route | ||
| trustedCAConfig *corev1.ConfigMap | ||
| trustedClusterCAConfig *corev1.ConfigMap | ||
| pullSecret *corev1.Secret | ||
|
|
@@ -123,11 +126,14 @@ func newKubeResources(instance *cv1.UpdateService, image string, pullSecret *cor | |
| k.graphBuilderContainer = k.newGraphBuilderContainer(instance, image) | ||
| k.graphDataInitContainer = k.newGraphDataInitContainer(instance) | ||
| k.policyEngineContainer = k.newPolicyEngineContainer(instance, image) | ||
| k.metadataContainer = k.newMetadataContainer(instance, image) | ||
| k.deployment = k.newDeployment(instance) | ||
| k.graphBuilderService = k.newGraphBuilderService(instance) | ||
| k.policyEngineService = k.newPolicyEngineService(instance) | ||
| k.metadataService = k.newMetadataService(instance) | ||
| k.policyEngineRoute = k.newPolicyEngineRoute(instance) | ||
| k.policyEngineOldRoute = k.oldPolicyEngineRoute(instance) | ||
| k.metadataRoute = k.newMetadataRoute(instance) | ||
| return &k, nil | ||
| } | ||
|
|
||
|
|
@@ -217,6 +223,40 @@ func (k *kubeResources) newPolicyEngineService(instance *cv1.UpdateService) *cor | |
| } | ||
| } | ||
|
|
||
| func (k *kubeResources) newMetadataService(instance *cv1.UpdateService) *corev1.Service { | ||
| name := nameMetadataService(instance) | ||
| return &corev1.Service{ | ||
| ObjectMeta: metav1.ObjectMeta{ | ||
| Name: name, | ||
| Namespace: instance.Namespace, | ||
| Labels: map[string]string{ | ||
| "app": name, | ||
|
Member
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. nit: I would expect the
Member
Author
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more.
There are some existing patterns that don't make sense to me, like why the Services expose the status ports that I'd expect only the kubelet to need access to (and the kubelet gets at the containers without passing through the Service). But I've left that kind of refactoring to follow-up work and just matched existing patterns for this new feature. |
||
| }, | ||
| }, | ||
| Spec: corev1.ServiceSpec{ | ||
| Type: corev1.ServiceTypeClusterIP, | ||
| Ports: []corev1.ServicePort{ | ||
| { | ||
| Name: "metadata", | ||
| Port: 80, | ||
| TargetPort: intstr.FromInt(8082), | ||
| Protocol: corev1.ProtocolTCP, | ||
| }, | ||
| { | ||
| Name: "status-m", | ||
| Port: 9082, | ||
| TargetPort: intstr.FromInt(9082), | ||
| Protocol: corev1.ProtocolTCP, | ||
| }, | ||
| }, | ||
| Selector: map[string]string{ | ||
| "deployment": nameDeployment(instance), | ||
| }, | ||
| SessionAffinity: corev1.ServiceAffinityNone, | ||
| }, | ||
| } | ||
| } | ||
|
|
||
| func (k *kubeResources) newPolicyEngineRoute(instance *cv1.UpdateService) *routev1.Route { | ||
| name := namePolicyEngineRoute(instance) | ||
| return &routev1.Route{ | ||
|
|
@@ -269,6 +309,32 @@ func (k *kubeResources) oldPolicyEngineRoute(instance *cv1.UpdateService) *route | |
| } | ||
| } | ||
|
|
||
| func (k *kubeResources) newMetadataRoute(instance *cv1.UpdateService) *routev1.Route { | ||
| name := nameMetadataRoute(instance) | ||
| return &routev1.Route{ | ||
| ObjectMeta: metav1.ObjectMeta{ | ||
| Name: name, | ||
| Namespace: instance.Namespace, | ||
| Labels: map[string]string{ | ||
| "app": nameDeployment(instance), | ||
| }, | ||
| }, | ||
| Spec: routev1.RouteSpec{ | ||
| Port: &routev1.RoutePort{ | ||
| TargetPort: intstr.FromString("metadata"), | ||
| }, | ||
| To: routev1.RouteTargetReference{ | ||
| Kind: "Service", | ||
| Name: nameMetadataService(instance), | ||
| }, | ||
| TLS: &routev1.TLSConfig{ | ||
| Termination: routev1.TLSTerminationEdge, | ||
| InsecureEdgeTerminationPolicy: routev1.InsecureEdgeTerminationPolicyNone, | ||
| }, | ||
| }, | ||
| } | ||
| } | ||
|
|
||
| func (k *kubeResources) newEnvConfig(instance *cv1.UpdateService) *corev1.ConfigMap { | ||
| return &corev1.ConfigMap{ | ||
| ObjectMeta: metav1.ObjectMeta{ | ||
|
|
@@ -283,6 +349,7 @@ func (k *kubeResources) newEnvConfig(instance *cv1.UpdateService) *corev1.Config | |
| "pe.rust_backtrace": "0", | ||
| "pe.status.address": "::", | ||
| "pe.upstream": "http://localhost:8080/v1/graph", | ||
| "m.rust_backtrace": "0", | ||
| }, | ||
| } | ||
| } | ||
|
|
@@ -357,6 +424,7 @@ func (k *kubeResources) newDeployment(instance *cv1.UpdateService) *appsv1.Deplo | |
| Containers: []corev1.Container{ | ||
| *k.graphBuilderContainer, | ||
| *k.policyEngineContainer, | ||
| *k.metadataContainer, | ||
| }, | ||
| }, | ||
| }, | ||
|
|
@@ -734,6 +802,92 @@ func (k *kubeResources) newPolicyEngineContainer(instance *cv1.UpdateService, im | |
| } | ||
| } | ||
|
|
||
| func (k *kubeResources) newMetadataContainer(instance *cv1.UpdateService, image string) *corev1.Container { | ||
| envConfigName := nameEnvConfig(instance) | ||
| return &corev1.Container{ | ||
| Name: NameContainerMetadata, | ||
| Image: image, | ||
| ImagePullPolicy: corev1.PullIfNotPresent, | ||
| Command: []string{ | ||
| "/usr/bin/metadata-helper", | ||
| }, | ||
| Args: []string{ | ||
| "-vvv", | ||
| "--signatures.dir", | ||
wking marked this conversation as resolved.
Show resolved
Hide resolved
|
||
| "/var/lib/cincinnati/graph-data/signatures", | ||
| "--service.address", | ||
| "::", | ||
| "--service.port", | ||
| "8082", | ||
| "--service.path_prefix", | ||
| "/api/upgrades_info", | ||
| "--status.address", | ||
| "::", | ||
| "--status.port", | ||
| "9082", | ||
| }, | ||
| Ports: []corev1.ContainerPort{ | ||
| { | ||
| Name: "metadata", | ||
| ContainerPort: 8082, | ||
| Protocol: corev1.ProtocolTCP, | ||
| }, | ||
| { | ||
| Name: "status-m", | ||
| ContainerPort: 9082, | ||
| Protocol: corev1.ProtocolTCP, | ||
| }, | ||
| }, | ||
| Env: []corev1.EnvVar{ | ||
| newCMEnvVar("RUST_BACKTRACE", "m.rust_backtrace", envConfigName), | ||
| }, | ||
| Resources: corev1.ResourceRequirements{ | ||
| Limits: corev1.ResourceList{ | ||
| corev1.ResourceCPU: *resource.NewMilliQuantity(750, resource.DecimalSI), | ||
| corev1.ResourceMemory: *resource.NewQuantity(768*1024*1024, resource.BinarySI), | ||
| }, | ||
| Requests: corev1.ResourceList{ | ||
| corev1.ResourceCPU: *resource.NewMilliQuantity(350, resource.DecimalSI), | ||
| corev1.ResourceMemory: *resource.NewQuantity(128*1024*1024, resource.BinarySI), | ||
| }, | ||
| }, | ||
| LivenessProbe: &corev1.Probe{ | ||
| FailureThreshold: 3, | ||
| SuccessThreshold: 1, | ||
| InitialDelaySeconds: 150, | ||
| PeriodSeconds: 30, | ||
| TimeoutSeconds: 3, | ||
| ProbeHandler: corev1.ProbeHandler{ | ||
| HTTPGet: &corev1.HTTPGetAction{ | ||
| Path: "/livez", | ||
| Port: intstr.FromInt(9082), | ||
| Scheme: corev1.URISchemeHTTP, | ||
| }, | ||
| }, | ||
| }, | ||
| ReadinessProbe: &corev1.Probe{ | ||
| FailureThreshold: 3, | ||
| SuccessThreshold: 1, | ||
| InitialDelaySeconds: 150, | ||
| PeriodSeconds: 30, | ||
| TimeoutSeconds: 3, | ||
| ProbeHandler: corev1.ProbeHandler{ | ||
| HTTPGet: &corev1.HTTPGetAction{ | ||
| Path: "/readyz", | ||
| Port: intstr.FromInt(9082), | ||
| Scheme: corev1.URISchemeHTTP, | ||
| }, | ||
| }, | ||
| }, | ||
| VolumeMounts: []corev1.VolumeMount{ | ||
| { | ||
| Name: "cincinnati-graph-data", | ||
| MountPath: "/var/lib/cincinnati/graph-data", | ||
| }, | ||
| }, | ||
| } | ||
| } | ||
|
|
||
| func newCMEnvVar(name, key, cmName string) corev1.EnvVar { | ||
| return corev1.EnvVar{ | ||
| Name: name, | ||
|
|
||
There was a problem hiding this comment.
Choose a reason for hiding this comment.
The reason will be displayed to describe this comment to others. Learn more.
Could you add a comment on why the clause is here?
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
It's one of two lines touched by f376675, which explains the addition. I'm fine inlining that commit message in a Makefile comment for folks who prefer to not use `git blame`, if folks want. I'm also fine dropping the commit from the pull once I get CI greened up.