Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
22 commits
Select commit Hold shift + click to select a range
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 1 addition & 0 deletions .github/bundles/.gitignore
Original file line number Diff line number Diff line change
@@ -0,0 +1 @@
**/uds-config.yaml
14 changes: 0 additions & 14 deletions .github/bundles/aks/uds-config.yaml

This file was deleted.

27 changes: 0 additions & 27 deletions .github/bundles/eks/uds-config.yaml

This file was deleted.

21 changes: 0 additions & 21 deletions .github/bundles/rke2/uds-config.yaml

This file was deleted.

154 changes: 154 additions & 0 deletions .github/test-infra/aws/eks/cluster.tf
Original file line number Diff line number Diff line change
@@ -0,0 +1,154 @@
# Copyright 2025 Defense Unicorns
# SPDX-License-Identifier: AGPL-3.0-or-later OR LicenseRef-Defense-Unicorns-Commercial


# Create a custom launch template with public IP association
resource "aws_launch_template" "eks_node_group" {
  # Prefix-named (not fixed-name) so create_before_destroy below can build the
  # replacement template before the old one is destroyed.
  name_prefix = "${var.name}-lt-"

  # Give each node ENI a public IP (per the stated intent of this template) and
  # clean the ENI up when the instance terminates.
  network_interfaces {
    associate_public_ip_address = true
    delete_on_termination       = true
  }

  # Propagate the common tag set, plus a per-node Name tag, onto the launched
  # EC2 instances themselves (template tags do not reach instances otherwise).
  tag_specifications {
    resource_type = "instance"
    tags = merge(local.tags, {
      Name = "${var.name}-node"
    })
  }

  lifecycle {
    create_before_destroy = true
  }
}

# Create EKS Cluster
# EKS cluster with a single Bottlerocket-FIPS managed node group, built on the
# community terraform-aws-modules/eks module. Nodes use the custom launch
# template above (public IPs); addons include a CoreDNS Corefile override that
# rewrites *.uds.dev / *.admin.uds.dev to the in-cluster Istio gateways.
module "eks" {
  source  = "terraform-aws-modules/eks/aws"
  version = "~> 20.35.0"

  cluster_name                    = var.name
  cluster_version                 = var.kubernetes_version
  cluster_endpoint_public_access  = true
  cluster_endpoint_private_access = false

  vpc_id     = data.aws_vpc.vpc.id
  subnet_ids = local.subnet_ids

  # IAM: every role the module creates must carry the account's permissions boundary.
  iam_role_permissions_boundary = "arn:${data.aws_partition.current.partition}:iam::${data.aws_caller_identity.current.account_id}:policy/${var.permissions_boundary_name}"

  # Control-plane CloudWatch logging is intentionally disabled for this cluster
  # (no log types enabled, no log-group retention).
  cluster_enabled_log_types              = []
  cloudwatch_log_group_retention_in_days = 0

  # Accept both EKS access entries (API) and the legacy aws-auth ConfigMap.
  authentication_mode = "API_AND_CONFIG_MAP"

  # Grant the identity running this Terraform admin access to the cluster.
  enable_cluster_creator_admin_permissions = true

  # Security groups: module-managed cluster and node SGs with the module's
  # recommended node rules, plus intra-cluster ingress on non-privileged ports.
  create_cluster_security_group                = true
  create_node_security_group                   = true
  node_security_group_enable_recommended_rules = true
  node_security_group_additional_rules = {
    clusterapi_ingress = {
      description                   = "Cluster API Ingress on non-privileged ports"
      protocol                      = "tcp"
      from_port                     = 1025
      to_port                       = 65535
      type                          = "ingress"
      source_cluster_security_group = true
    }
  }

  enable_security_groups_for_pods = false

  # Add tags to all resources
  tags = local.tags

  # Node groups
  eks_managed_node_groups = {
    main = {
      name           = var.name
      instance_types = [var.instance_type]
      ami_type       = "BOTTLEROCKET_x86_64_FIPS"

      min_size     = var.node_group_min_size
      max_size     = var.node_group_max_size
      desired_size = var.node_group_desired_size

      disk_size = var.node_disk_size

      # Let the module create the IAM role with permissions boundary.
      # Role name is capped (substr) to stay within the IAM 64-char name limit.
      create_iam_role               = true
      iam_role_use_name_prefix      = false
      iam_role_name                 = "${substr(var.name, 0, 30)}-eks-node-role"
      iam_role_permissions_boundary = "arn:${data.aws_partition.current.partition}:iam::${data.aws_caller_identity.current.account_id}:policy/${var.permissions_boundary_name}"

      # Use our custom launch template that has public IP association
      create_launch_template  = false
      launch_template_id      = aws_launch_template.eks_node_group.id
      launch_template_version = aws_launch_template.eks_node_group.latest_version

      # SSM access plus the EBS CSI driver policy needed by the addon below.
      iam_role_additional_policies = {
        AmazonSSMManagedInstanceCore = "arn:${data.aws_partition.current.partition}:iam::aws:policy/AmazonSSMManagedInstanceCore"
        AmazonEBSCSIDriverPolicy     = "arn:${data.aws_partition.current.partition}:iam::aws:policy/service-role/AmazonEBSCSIDriverPolicy"
      }

      tags = merge(local.tags, {
        PermissionsBoundary = var.permissions_boundary_name
      })
    }
  }

  # EKS Addons
  cluster_addons = {
    vpc-cni = {
      most_recent = true
      configuration_values = jsonencode({
        enableNetworkPolicy = "true"
      })
    }
    aws-ebs-csi-driver = {
      most_recent = true
    }
    kube-proxy = {
      most_recent = true
    }
    coredns = {
      most_recent = true
      # Custom Corefile: standard plugin chain plus rewrite rules that resolve
      # *.admin.uds.dev and *.uds.dev to the Istio admin/tenant gateways in-cluster.
      configuration_values = jsonencode({
        corefile = <<-EOT
        .:53 {
            errors
            health {
                lameduck 5s
            }
            ready
            kubernetes cluster.local cluster.local in-addr.arpa ip6.arpa {
                pods insecure
                fallthrough in-addr.arpa ip6.arpa
                ttl 30
            }
            prometheus 0.0.0.0:9153
            forward . /etc/resolv.conf
            cache 30
            loop
            reload
            loadbalance
            rewrite stop {
                name regex (.*\.admin\.uds\.dev) admin-ingressgateway.istio-admin-gateway.svc.cluster.local answer auto
            }
            rewrite stop {
                name regex (.*\.uds\.dev) tenant-ingressgateway.istio-tenant-gateway.svc.cluster.local answer auto
            }
        }
        EOT
      })
    }
  }
}
# NOTE: a PR review comment thread here was marked resolved (mjnagel).
35 changes: 35 additions & 0 deletions .github/test-infra/aws/eks/data.tf
Original file line number Diff line number Diff line change
@@ -0,0 +1,35 @@
# Copyright 2024 Defense Unicorns
# SPDX-License-Identifier: AGPL-3.0-or-later OR LicenseRef-Defense-Unicorns-Commercial

# Common data sources
# Common data sources: caller account, AWS partition (aws / aws-us-gov / aws-cn),
# and the current region — used to build ARNs elsewhere in this stack.
data "aws_caller_identity" "current" {}
data "aws_partition" "current" {}
data "aws_region" "current" {}

# Use existing VPC and subnets
# Look up the pre-existing VPC by its Name tag; the cluster and subnets are
# resolved inside this VPC rather than creating new networking.
data "aws_vpc" "vpc" {
  filter {
    name   = "tag:Name"
    values = [var.vpc_name]
  }
}

# Existing subnet in AZ "<region>b" matching the shared subnet Name tag.
# NOTE(review): assumes exactly one subnet per AZ carries this tag — lookup
# errors if the tag matches zero or multiple subnets in the AZ.
data "aws_subnet" "eks_ci_subnet_b" {
  vpc_id            = data.aws_vpc.vpc.id
  availability_zone = "${var.region}b"

  filter {
    name   = "tag:Name"
    values = [var.subnet_name]
  }
}

# Existing subnet in AZ "<region>c", same Name-tag convention as subnet_b;
# the two subnets together satisfy EKS's multi-AZ subnet requirement.
data "aws_subnet" "eks_ci_subnet_c" {
  vpc_id            = data.aws_vpc.vpc.id
  availability_zone = "${var.region}c"

  filter {
    name   = "tag:Name"
    values = [var.subnet_name]
  }
}
62 changes: 35 additions & 27 deletions .github/test-infra/aws/eks/main.tf
Original file line number Diff line number Diff line change
@@ -1,25 +1,17 @@
# Copyright 2024 Defense Unicorns
# SPDX-License-Identifier: AGPL-3.0-or-later OR LicenseRef-Defense-Unicorns-Commercial

resource "random_id" "default" {
byte_length = 2
}

data "aws_eks_cluster" "existing" {
name = var.name
}

data "aws_caller_identity" "current" {}

data "aws_partition" "current" {}

data "aws_region" "current" {}

locals {
oidc_url_without_protocol = substr(data.aws_eks_cluster.existing.identity[0].oidc[0].issuer, 8, -1)
oidc_arn = "arn:${data.aws_partition.current.partition}:iam::${data.aws_caller_identity.current.account_id}:oidc-provider/${local.oidc_url_without_protocol}"
iam_role_permissions_boundary = var.use_permissions_boundary ? "arn:${data.aws_partition.current.partition}:iam::${data.aws_caller_identity.current.account_id}:policy/${var.permissions_boundary_name}" : null
# Combine subnet IDs for EKS
subnet_ids = [data.aws_subnet.eks_ci_subnet_b.id, data.aws_subnet.eks_ci_subnet_c.id]

# Tags for resources
tags = {
Name = var.name
Environment = "ci"
PermissionsBoundary = var.permissions_boundary_name
}

# Bucket configurations for IRSA
bucket_configurations = {
for instance in var.bucket_configurations :
instance.name => {
Expand All @@ -29,18 +21,13 @@ locals {
}
}

kms_key_arns = module.generate_kms

# IAM policies for IRSA
iam_policies = {
"loki" = resource.aws_iam_policy.loki_policy.arn
"velero" = resource.aws_iam_policy.velero_policy.arn
}
}

resource "random_id" "unique_id" {
byte_length = 4
}

module "generate_kms" {
for_each = local.bucket_configurations
source = "../modules/kms"
Expand All @@ -51,6 +38,15 @@ module "generate_kms" {
tags = {
Deployment = "UDS Core ${each.value.name}"
}

# Explicit dependency on EKS cluster
depends_on = [
module.eks
]
}

resource "random_id" "unique_id" {
byte_length = 4
}

module "S3" {
Expand All @@ -59,24 +55,36 @@ module "S3" {
bucket_prefix = "${each.value.name}-"
kms_key_arn = module.generate_kms[each.key].kms_key_arn
irsa_role_arn = module.irsa[each.key].role_arn

# Explicit dependency on KMS
depends_on = [
module.generate_kms
]
}

module "irsa" {
for_each = local.bucket_configurations
source = "../modules/irsa"
name = each.value.name
kubernetes_service_account = each.value.service_account
role_permissions_boundary_arn = local.iam_role_permissions_boundary
role_permissions_boundary_arn = "arn:${data.aws_partition.current.partition}:iam::${data.aws_caller_identity.current.account_id}:policy/${var.permissions_boundary_name}"
account_id = data.aws_caller_identity.current.account_id
current_partition = data.aws_partition.current.partition

oidc_providers = {
main = {
provider_arn = local.oidc_arn
provider_arn = module.eks.oidc_provider_arn
namespace_service_accounts = [format("%s:%s", each.value.namespace, each.value.service_account)]
}
}
role_policy_arns = tomap({
"${each.key}" = local.iam_policies[each.key]
(each.key) = local.iam_policies[each.key]
})

# Explicit dependency on EKS cluster
depends_on = [
module.eks,
aws_iam_policy.loki_policy,
aws_iam_policy.velero_policy
]
}
Loading