2 changes: 1 addition & 1 deletion .github/workflows/lint.yml
@@ -56,7 +56,7 @@ jobs:
    name: Minimum version check
    runs-on: ubuntu-latest
    container:
-     image: hashicorp/terraform:0.12.2
+     image: hashicorp/terraform:0.12.6
    steps:
      - uses: actions/checkout@master
      - name: Validate Code
2 changes: 2 additions & 0 deletions CHANGELOG.md
@@ -12,6 +12,7 @@ project adheres to [Semantic Versioning](http://semver.org/).
### Added

- Test against minimum versions specified in `versions.tf` (by @dpiddockcmp)
+- Support for AWS EKS Managed Node Groups (by @wmorgan6796)

### Changed

@@ -20,6 +21,7 @@ project adheres to [Semantic Versioning](http://semver.org/).
- Exit with error code when `aws-auth` configmap is unable to be updated (by @knittingdev)
- Fix deprecated interpolation-only expression (by @angelabad)
- Fix broken terraform plan/apply on a cluster < 1.14 (by @hodduc)
+- Update required version of the AWS provider to >= 2.38.0 for Managed Node Groups (by @wmorgan6796)

# History

1 change: 1 addition & 0 deletions README.md
@@ -148,6 +148,7 @@ MIT Licensed. See [LICENSE](https://github.com/terraform-aws-modules/terraform-a
| worker\_ami\_owner\_id\_windows | The ID of the owner for the AMI to use for the AWS EKS Windows workers. Valid values are an AWS account ID, 'self' (the current account), or an AWS owner alias (e.g. 'amazon', 'aws-marketplace', 'microsoft'). | string | `"801119661308"` | no |
| worker\_create\_initial\_lifecycle\_hooks | Whether to create initial lifecycle hooks provided in worker groups. | bool | `"false"` | no |
| worker\_create\_security\_group | Whether to create a security group for the workers or attach the workers to `worker_security_group_id`. | bool | `"true"` | no |
+| worker\_group\_managed\_node\_groups | A list of maps defining worker group configurations to be defined using AWS EKS Managed Node Groups. See workers_group_defaults for valid keys. | list(any) | `[]` | no |
| worker\_groups | A list of maps defining worker group configurations to be defined using AWS Launch Configurations. See workers_group_defaults for valid keys. | any | `[]` | no |
| worker\_groups\_launch\_template | A list of maps defining worker group configurations to be defined using AWS Launch Templates. See workers_group_defaults for valid keys. | any | `[]` | no |
| worker\_security\_group\_id | If provided, all workers will be attached to this security group. If not given, a security group will be created with necessary ingress/egress to work with the EKS cluster. | string | `""` | no |
104 changes: 104 additions & 0 deletions examples/managed_node_groups/main.tf
@@ -0,0 +1,104 @@
terraform {
  required_version = ">= 0.12.6"
}

provider "aws" {
  version = ">= 2.38.0"
  region  = var.region
}

provider "random" {
  version = "~> 2.1"
}

provider "local" {
  version = "~> 1.2"
}

provider "null" {
  version = "~> 2.1"
}

provider "template" {
  version = "~> 2.1"
}

data "aws_availability_zones" "available" {
}

locals {
  cluster_name = "test-eks-${random_string.suffix.result}"
}

resource "random_string" "suffix" {
  length  = 8
  special = false
}

module "vpc" {
  source  = "terraform-aws-modules/vpc/aws"
  version = "~> 2.6"

  name                 = "test-vpc"
  cidr                 = "172.16.0.0/16"
  azs                  = data.aws_availability_zones.available.names
  private_subnets      = ["172.16.1.0/24", "172.16.2.0/24", "172.16.3.0/24"]
  public_subnets       = ["172.16.4.0/24", "172.16.5.0/24", "172.16.6.0/24"]
  enable_nat_gateway   = true
  single_nat_gateway   = true
  enable_dns_hostnames = true

  tags = {
    "kubernetes.io/cluster/${local.cluster_name}" = "shared"
  }

  public_subnet_tags = {
    "kubernetes.io/cluster/${local.cluster_name}" = "shared"
    "kubernetes.io/role/elb"                      = "1"
  }

  private_subnet_tags = {
    "kubernetes.io/cluster/${local.cluster_name}" = "shared"
    "kubernetes.io/role/internal-elb"             = "1"
  }
}

module "eks" {
  source       = "../.."
  cluster_name = local.cluster_name
  subnets      = module.vpc.private_subnets

  tags = {
    Environment = "test"
    GithubRepo  = "terraform-aws-eks"
    GithubOrg   = "terraform-aws-modules"
  }

  vpc_id = module.vpc.vpc_id

  worker_group_managed_node_groups = [
    {
      name = "example"

      node_group_desired_capacity = 1
      node_group_max_capacity     = 10
      node_group_min_capacity     = 1

      instance_type = "m5.large"

      node_group_k8s_labels = {
        Environment = "test"
        GithubRepo  = "terraform-aws-eks"
        GithubOrg   = "terraform-aws-modules"
      }

      node_group_additional_tags = {
        Environment = "test"
        GithubRepo  = "terraform-aws-eks"
        GithubOrg   = "terraform-aws-modules"
      }
    }
  ]

  map_roles    = var.map_roles
  map_users    = var.map_users
  map_accounts = var.map_accounts
}
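The example above exercises only the scaling, labeling, and tagging keys. The other defaults added in `local.tf` can be overridden per group in the same way; the sketch below is illustrative rather than part of this PR, and the `key_name` and security group values are hypothetical placeholders (it assumes the existing worker-group `key_name` default also applies to managed node groups):

worker_group_managed_node_groups = [
  {
    name          = "gpu-example"
    instance_type = "p3.2xlarge"

    ami_type            = "AL2_x86_64_GPU" # GPU variant of the EKS-optimized AMI
    ami_release_version = ""               # empty keeps the latest release for the cluster version

    # Hypothetical SSH settings: setting source_security_group_id avoids
    # opening the SSH port to the world when a key is specified.
    key_name                 = "my-keypair"
    source_security_group_id = ["sg-0123456789abcdef0"]

    node_group_desired_capacity = 1
    node_group_min_capacity     = 1
    node_group_max_capacity     = 2
  }
]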
25 changes: 25 additions & 0 deletions examples/managed_node_groups/outputs.tf
@@ -0,0 +1,25 @@
output "cluster_endpoint" {
description = "Endpoint for EKS control plane."
value = module.eks.cluster_endpoint
}

output "cluster_security_group_id" {
description = "Security group ids attached to the cluster control plane."
value = module.eks.cluster_security_group_id
}

output "kubectl_config" {
description = "kubectl config as generated by the module."
value = module.eks.kubeconfig
}

output "config_map_aws_auth" {
description = "A kubernetes configuration to authenticate to this EKS cluster."
value = module.eks.config_map_aws_auth
}

output "region" {
description = "AWS region."
value = var.region
}

52 changes: 52 additions & 0 deletions examples/managed_node_groups/variables.tf
@@ -0,0 +1,52 @@
variable "region" {
default = "us-west-2"
}

variable "map_accounts" {
description = "Additional AWS account numbers to add to the aws-auth configmap."
type = list(string)

default = [
"777777777777",
"888888888888",
]
}

variable "map_roles" {
description = "Additional IAM roles to add to the aws-auth configmap."
type = list(object({
rolearn = string
username = string
groups = list(string)
}))

default = [
{
rolearn = "arn:aws:iam::66666666666:role/role1"
username = "role1"
groups = ["system:masters"]
},
]
}

variable "map_users" {
description = "Additional IAM users to add to the aws-auth configmap."
type = list(object({
userarn = string
username = string
groups = list(string)
}))

default = [
{
userarn = "arn:aws:iam::66666666666:user/user1"
username = "user1"
groups = ["system:masters"]
},
{
userarn = "arn:aws:iam::66666666666:user/user2"
username = "user2"
groups = ["system:masters"]
},
]
}
14 changes: 12 additions & 2 deletions local.tf
@@ -16,8 +16,9 @@ locals {
  default_iam_role_id = concat(aws_iam_role.workers.*.id, [""])[0]
  kubeconfig_name     = var.kubeconfig_name == "" ? "eks_${var.cluster_name}" : var.kubeconfig_name

-  worker_group_count                 = length(var.worker_groups)
-  worker_group_launch_template_count = length(var.worker_groups_launch_template)
+  worker_group_count                    = length(var.worker_groups)
+  worker_group_launch_template_count    = length(var.worker_groups_launch_template)
+  worker_group_managed_node_group_count = length(var.worker_group_managed_node_groups)

  default_ami_id_linux   = data.aws_ami.eks_worker.id
  default_ami_id_windows = data.aws_ami.eks_worker_windows.id
@@ -79,6 +80,15 @@ locals {
    spot_allocation_strategy = "lowest-price" # Valid options are 'lowest-price' and 'capacity-optimized'. If 'lowest-price', the Auto Scaling group launches instances using the Spot pools with the lowest price, and evenly allocates your instances across the number of Spot pools. If 'capacity-optimized', the Auto Scaling group launches instances using Spot pools that are optimally chosen based on the available Spot capacity.
    spot_instance_pools      = 10 # "Number of Spot pools per availability zone to allocate capacity. EC2 Auto Scaling selects the cheapest Spot pools and evenly allocates Spot capacity across the number of Spot pools that you specify."
    spot_max_price           = "" # Maximum price per unit hour that the user is willing to pay for the Spot instances. Default is the on-demand price
+    ami_type                    = "AL2_x86_64" # AMI type to use for the Managed Node Groups. Can be either AL2_x86_64 or AL2_x86_64_GPU
+    ami_release_version         = ""           # AMI release version of the Managed Node Groups
+    source_security_group_id    = []           # Source security group IDs to allow SSH access to the nodes. NOTE: IF LEFT BLANK, AND A KEY IS SPECIFIED, THE SSH PORT WILL BE OPENED TO THE WORLD
+    node_group_k8s_labels       = {}           # Kubernetes labels to apply to the nodes within the Managed Node Group
+    node_group_desired_capacity = 1            # Desired capacity of the Node Group
+    node_group_min_capacity     = 1            # Min capacity of the Node Group (minimum value allowed is 1)
+    node_group_max_capacity     = 3            # Max capacity of the Node Group
+    node_group_iam_role_arn     = ""           # IAM role ARN to use for the Managed Node Groups instead of the default one created by the module
+    node_group_additional_tags  = {}           # Additional tags to be applied to the Node Groups
  }

  workers_group_defaults = merge(
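Each map in `worker_group_managed_node_groups` only needs to set the keys where a group deviates from these defaults. A simplified sketch of the fallback pattern (illustrative only; the module's worker-group code uses the same `lookup` idiom, but this is not its exact source):

# Per-group value if present, otherwise the merged default.
desired_capacity = lookup(
  var.worker_group_managed_node_groups[count.index],
  "node_group_desired_capacity",
  local.workers_group_defaults["node_group_desired_capacity"],
)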
6 changes: 6 additions & 0 deletions variables.tf
@@ -293,3 +293,9 @@ variable "attach_worker_cni_policy" {
  type    = bool
  default = true
}

+variable "worker_group_managed_node_groups" {
+  description = "A list of maps defining worker group configurations to be defined using AWS EKS Managed Node Groups. See workers_group_defaults for valid keys."
+  type        = list(any)
+  default     = []
+}
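Note the type: unlike `worker_groups`, which is typed `any`, this variable is `list(any)`. In Terraform 0.12, `list(any)` must unify every element to a single common type, so the maps in the list need type-compatible values for the keys they set. For example, these two hypothetical entries unify cleanly, whereas giving `node_group_max_capacity` a string in one map and a number in another would be rejected:

worker_group_managed_node_groups = [
  { name = "general", instance_type = "m5.large", node_group_max_capacity = 3 },
  { name = "compute", instance_type = "c5.large", node_group_max_capacity = 5 },
]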
4 changes: 2 additions & 2 deletions versions.tf
@@ -1,8 +1,8 @@
terraform {
-  required_version = ">= 0.12.2"
+  required_version = ">= 0.12.6"

  required_providers {
-    aws      = ">= 2.31.0"
+    aws      = ">= 2.38.0"
    local    = ">= 1.2"
    null     = ">= 2.1"
    template = ">= 2.1"
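For context on the two bumps: Terraform 0.12.6 is the release that introduced resource-level `for_each`, and the 2.38.0 provider floor is what the changelog ties to Managed Node Group support. A minimal standalone sketch of the underlying `aws_eks_node_group` resource, with placeholder values, independent of this module:

resource "aws_eks_node_group" "example" {
  cluster_name    = "my-cluster"                                    # placeholder
  node_group_name = "example"
  node_role_arn   = "arn:aws:iam::123456789012:role/eks-node-group" # placeholder
  subnet_ids      = ["subnet-0123456789abcdef0"]                    # placeholder

  scaling_config {
    desired_size = 1
    min_size     = 1
    max_size     = 3
  }

  instance_types = ["m5.large"]
  ami_type       = "AL2_x86_64"
}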