Skip to content
This repository was archived by the owner on Jun 9, 2022. It is now read-only.
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
4 changes: 3 additions & 1 deletion README.md
Original file line number Diff line number Diff line change
Expand Up @@ -54,8 +54,10 @@ https://craignewtondev.medium.com/how-to-fix-kubernetes-namespace-deleting-stuck

### Teleport
To get access, you will need to configure teleport.
- Add yourself as a user: `kubectl exec -it deployment.apps/teleport-cluster -n teleport -- tctl users add yourusername --roles=editor,access,admin --logins=root,ubuntu,ec2-user`
- Create the kubernetes role: `kubectl exec -it deployment.apps/teleport-cluster -n teleport -- tctl create -f < terraform-k8s/teleport-k8s-admin-role.yaml`
- Add yourself as a user: `kubectl exec -it deployment.apps/teleport-cluster -n teleport -- tctl users add <yourusername> --roles=editor,access,admin,k8s-admin --logins=root,ubuntu,ec2-user`
- Go to the signup URL printed by the `tctl users add` command and set up your 2FA
- You can use kubernetes if you use tsh to log in: `tsh login --proxy teleport-<clustername>.<domain>:443 --user <yourusername>`
- You should then be able to go to the applications section and pull up gitlab.
- Longer term, we hope to configure more of this through code.

Expand Down
34 changes: 34 additions & 0 deletions clusters/gitlab-cluster/tshnode/tshnode.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,34 @@
---
# DaemonSet that runs a Teleport node agent on every cluster node so each
# host can be reached through the Teleport proxy.
apiVersion: apps/v1
kind: DaemonSet
metadata:
  labels:
    k8s-app: tshnode
  name: tshnode
  namespace: teleport
spec:
  selector:
    matchLabels:
      k8s-app: tshnode
  template:
    metadata:
      labels:
        k8s-app: tshnode
    spec:
      containers:
        - image: amazonlinux
          imagePullPolicy: Always
          name: ssm
          command: ["/bin/bash"]
          args:
            - -c
            - |
              yum-config-manager --add-repo https://rpm.releases.teleport.dev/teleport.repo ;
              # -y is required: without it yum prompts for confirmation and,
              # with no TTY attached, aborts -- the container would never
              # reach "teleport start".
              yum install -y teleport ;
              # NOTE(review): --token=XXX is a placeholder -- inject the real
              # join token (e.g. from the teleport-kube-agent-join-token
              # secret) before deploying; confirm against the cluster setup.
              teleport start --roles=node --token=XXX --auth-server=teleport-cluster.teleport:443
          terminationMessagePath: /dev/termination-log
          terminationMessagePolicy: File
      dnsPolicy: ClusterFirst
      restartPolicy: Always
      schedulerName: default-scheduler
      terminationGracePeriodSeconds: 2
10 changes: 10 additions & 0 deletions terraform-k8s/teleport-k8s-admin-role.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,10 @@
# Teleport role granting Kubernetes administration via the
# "system:masters" group.
kind: role
version: v3
metadata:
  name: k8s-admin
spec:
  allow:
    # 'logins' is the SSH-login field; Teleport currently requires it to be
    # a non-empty value even when the role is only used for Kubernetes.
    logins: ["keep-this-value-here"]
    kubernetes_groups: ["system:masters"]
187 changes: 142 additions & 45 deletions terraform-k8s/teleport.tf
Original file line number Diff line number Diff line change
Expand Up @@ -35,21 +35,46 @@ resource "aws_route53_record" "teleport" {
records = [data.kubernetes_service.teleport.status.0.load_balancer.0.ingress.0.hostname]
}

resource "aws_route53_record" "teleport-gitlab" {
resource "aws_route53_record" "teleport-wildcard" {
zone_id = data.aws_route53_zone.gitlab.zone_id
name = "gitlab.teleport-${var.cluster_name}"
name = "*.teleport-${var.cluster_name}"
type = "CNAME"
ttl = "300"
records = [data.kubernetes_service.teleport.status.0.load_balancer.0.ingress.0.hostname]
}

# Join token that agents present when joining the Teleport cluster;
# published to Kubernetes via the secret below.
resource "random_password" "join-token" {
length = 26
special = true
# NOTE(review): '£' is non-ASCII -- confirm every consumer of the join
# token accepts it before relying on this character set.
override_special = "/@£$"
}

# Stores the generated join token in the teleport namespace, under the
# key "auth-token", where the kube-agent release below can read it.
resource "kubernetes_secret" "teleport-kube-agent-join-token" {
depends_on = [kubernetes_namespace.teleport]
metadata {
name = "teleport-kube-agent-join-token"
namespace = "teleport"
}

data = {
auth-token = random_password.join-token.result
}
}

# Ideally this chart would be managed through Flux, but it must exist at
# apply time so we can read the service's ELB hostname for the CNAME records.
resource "helm_release" "teleport-cluster" {
name = "teleport-cluster"
repository = "https://charts.releases.teleport.dev"
# XXX remove the tspencer repo and add teleport back once these PRs get in:
# https://github.com/gravitational/teleport/pull/6586
# https://github.com/gravitational/teleport/pull/6619
# repository = "https://charts.releases.teleport.dev"
repository = "https://timothy-spencer.github.io/helm-charts"
chart = "teleport-cluster"
version = "6.0.0"
namespace = "teleport"
depends_on = [kubernetes_namespace.teleport]
depends_on = [kubernetes_secret.teleport-kube-agent-join-token]

set {
name = "namespace"
Expand All @@ -61,59 +86,131 @@ resource "helm_release" "teleport-cluster" {
value = "true"
}

# # XXX temporary
# set {
# name = "logLevel"
# value = "DEBUG"
# }

set {
name = "acmeEmail"
value = "security@login.gov"
value = var.certmanager-issuer
}

set {
name = "clusterName"
value = "teleport-${var.cluster_name}.${var.domain}"
}

set {
name = "customConfig"
value = "true"
name = "kubeClusterName"
value = "teleport-${var.cluster_name}"
}
}

# This is where the customConfig lives (same name as the helm release)
resource "kubernetes_config_map" "teleport-cluster" {
# depends_on = [helm_release.teleport-cluster]
depends_on = [kubernetes_namespace.teleport]
metadata {
name = "teleport-cluster"
namespace = "teleport"
set {
name = "serviceAccountAnnotations.eks\\.amazonaws\\.com/role-arn"
value = aws_iam_role.teleport.arn
}
}

data = {
"teleport.yaml" = <<CUSTOMCONFIG
teleport:
log:
severity: ERROR
output: stdout
format: [level, component, caller]
auth_service:
enabled: true
cluster_name: teleport-${var.cluster_name}.${var.domain}
app_service:
enabled: true
apps:
- name: gitlab
uri: "http://gitlab-webservice-default.gitlab:8181"
kubernetes_service:
enabled: true
listen_addr: 0.0.0.0:3027
proxy_service:
enabled: true
public_addr: 'teleport-${var.cluster_name}.${var.domain}:443'
kube_listen_addr: 0.0.0.0:3026
acme:
enabled: true
email: ${var.certmanager-issuer}
ssh_service:
enabled: false

CUSTOMCONFIG
# Teleport kube-agent release: runs with the "app" role and exposes GitLab
# (apps[0]) through the Teleport proxy at teleport-<cluster>.<domain>:443.
# Depends on the join-token secret created above; presumably the chart
# reads that secret by its conventional name -- TODO confirm.
resource "helm_release" "teleport-kube-agent" {
name = "teleport-kube-agent"
# XXX remove the tspencer repo and add teleport back once these PRs get in:
# https://github.com/gravitational/teleport/pull/6586
# https://github.com/gravitational/teleport/pull/6619
# repository = "https://charts.releases.teleport.dev"
repository = "https://timothy-spencer.github.io/helm-charts"
chart = "teleport-kube-agent"
version = "0.0.4"
namespace = "teleport"
# XXX temporary: don't block the apply on the release becoming ready
wait = false
depends_on = [kubernetes_secret.teleport-kube-agent-join-token]

set {
name = "namespace"
value = "teleport"
}

set {
name = "roles"
value = "app"
}

# # XXX temporary
# set {
# name = "logLevel"
# value = "DEBUG"
# }

set {
name = "proxyAddr"
value = "teleport-${var.cluster_name}.${var.domain}:443"
}

set {
name = "apps[0].name"
value = "gitlab"
}

set {
name = "apps[0].uri"
value = "http://gitlab-webservice-default.gitlab:8181"
}

# IRSA: annotate the chart's service account so pods can assume the
# AWS role defined below.
set {
name = "serviceAccountAnnotations.eks\\.amazonaws\\.com/role-arn"
value = aws_iam_role.teleport.arn
}
}


# IAM role assumable (via the cluster's OIDC provider) by the
# teleport-kube-agent and teleport-cluster service accounts, so their pods
# get AWS permissions through IRSA.
resource "aws_iam_role" "teleport" {
name = "${var.cluster_name}-teleport"
assume_role_policy = <<POLICY
{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Principal": {
"Federated": "${var.oidc_arn}"
},
"Action": "sts:AssumeRoleWithWebIdentity",
"Condition": {
"ForAnyValue:StringEquals": {
"${var.oidc_url}:sub": [
"system:serviceaccount:teleport:teleport-kube-agent",
"system:serviceaccount:teleport:teleport-cluster"
]
}
}
}
]
}
POLICY
}

# Inline policy on the role above: lets Teleport list and describe EKS
# clusters (used for EKS auto-discovery).
resource "aws_iam_role_policy" "teleport" {
name = "${var.cluster_name}-teleport-policy"
role = aws_iam_role.teleport.id

# This came from https://goteleport.com/docs/aws-oss-guide/#create-iam-policy-granting-list-clusters-and-describe-cluster-permissions-optional
policy = <<EOF
{
"Version": "2012-10-17",
"Statement": [
{
"Sid": "ListDescribeClusters",
"Effect": "Allow",
"Action": [
"eks:DescribeCluster",
"eks:ListClusters"
],
"Resource": "*"
}
]
}
EOF
}