-
Notifications
You must be signed in to change notification settings - Fork 111
/
Copy pathaws-eks.tf
122 lines (106 loc) · 4.16 KB
/
aws-eks.tf
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
locals {
  # aws-auth role mappings: whatever the caller supplies via var.eks_map_roles,
  # plus this account's "administrator" role mapped to full cluster admin
  # (system:masters).
  eks_map_roles = concat(
    var.eks_map_roles,
    [
      {
        rolearn  = "arn:aws:iam::${data.aws_caller_identity.current.account_id}:role/administrator"
        username = "administrator"
        groups   = ["system:masters"]
      },
    ],
  )

  # ASG tags attached to every worker group so cluster-autoscaler can
  # auto-discover the groups that belong to this cluster. Not propagated to
  # instances at launch — they only need to exist on the ASG itself.
  worker_tags = [
    {
      "key"                 = "k8s.io/cluster-autoscaler/enabled"
      "value"               = "true"
      "propagate_at_launch" = "false"
    },
    {
      "key"                 = "k8s.io/cluster-autoscaler/${local.name}"
      "value"               = "true"
      "propagate_at_launch" = "false"
    },
  ]
}
#tfsec:ignore:aws-vpc-no-public-egress-sgr tfsec:ignore:aws-eks-enable-control-plane-logging tfsec:ignore:aws-eks-encrypt-secrets tfsec:ignore:aws-eks-no-public-cluster-access tfsec:ignore:aws-eks-no-public-cluster-access-to-cidr
module "eks" {
  source  = "terraform-aws-modules/eks/aws"
  version = "17.1.0"

  # Core cluster settings. The control plane lives in the VPC's intra
  # subnets; IRSA is enabled for pod-level IAM roles.
  cluster_name    = local.name
  cluster_version = var.eks_cluster_version
  vpc_id          = module.vpc.vpc_id
  subnets         = module.vpc.intra_subnets
  enable_irsa     = true

  # Control-plane log types and retention are caller-configurable.
  cluster_enabled_log_types     = var.eks_cluster_enabled_log_types
  cluster_log_retention_in_days = var.eks_cluster_log_retention_in_days

  # Optionally envelope-encrypt Kubernetes secrets with the dedicated KMS key.
  cluster_encryption_config = var.eks_cluster_encryption_config_enable ? [
    {
      provider_key_arn = aws_kms_key.eks[0].arn
      resources        = ["secrets"]
    }
  ] : []

  tags = {
    ClusterName = local.name
    Environment = local.env
  }

  # Worker groups. NOTE: list order (spot, ondemand, ci) is significant —
  # the module addresses groups by index, so do not reorder.
  worker_groups_launch_template = [
    # Spot capacity for general workloads, labelled lifecycle=spot.
    {
      name                    = "spot"
      override_instance_types = var.eks_worker_groups.spot.override_instance_types
      spot_instance_pools     = var.eks_worker_groups.spot.spot_instance_pools
      asg_min_size            = var.eks_worker_groups.spot.asg_min_size
      asg_max_size            = var.eks_worker_groups.spot.asg_max_size
      asg_desired_capacity    = var.eks_worker_groups.spot.asg_desired_capacity
      subnets                 = module.vpc.private_subnets
      public_ip               = false
      kubelet_extra_args      = "--node-labels=node.kubernetes.io/lifecycle=spot"
      additional_userdata     = file("${path.module}/templates/eks-x86-nodes-userdata.sh")
      tags                    = local.worker_tags
    },
    # On-demand capacity, labelled lifecycle=ondemand.
    {
      name                 = "ondemand"
      instance_type        = var.eks_worker_groups.ondemand.instance_type
      asg_max_size         = var.eks_worker_groups.ondemand.asg_max_size
      asg_desired_capacity = var.eks_worker_groups.ondemand.asg_desired_capacity
      subnets              = module.vpc.private_subnets
      public_ip            = false
      cpu_credits          = "unlimited"
      kubelet_extra_args   = "--node-labels=node.kubernetes.io/lifecycle=ondemand"
      additional_userdata  = file("${path.module}/templates/eks-x86-nodes-userdata.sh")
      tags                 = local.worker_tags
    },
    # Dedicated CI spot capacity: runs in public subnets with public IPs,
    # tainted purpose=ci:NoSchedule so only CI workloads land here. The extra
    # ASG tag lets cluster-autoscaler match the purpose=ci node label.
    {
      name                    = "ci"
      override_instance_types = var.eks_worker_groups.ci.override_instance_types
      spot_instance_pools     = var.eks_worker_groups.ci.spot_instance_pools
      asg_min_size            = var.eks_worker_groups.ci.asg_min_size
      asg_max_size            = var.eks_worker_groups.ci.asg_max_size
      asg_desired_capacity    = var.eks_worker_groups.ci.asg_desired_capacity
      subnets                 = module.vpc.public_subnets
      public_ip               = true
      cpu_credits             = "unlimited"
      kubelet_extra_args      = "--node-labels=node.kubernetes.io/lifecycle=spot --node-labels=purpose=ci --register-with-taints=purpose=ci:NoSchedule"
      additional_userdata     = file("${path.module}/templates/eks-x86-nodes-userdata.sh")
      tags = concat(local.worker_tags, [{
        "key"                 = "k8s.io/cluster-autoscaler/node-template/label/purpose"
        "value"               = "ci"
        "propagate_at_launch" = "true"
      }])
    },
  ]

  # Fargate profile for pods in the "fargate" namespace.
  # NOTE(review): local.tags is not defined in this file — presumably declared
  # in a sibling .tf file of this module; verify before refactoring.
  fargate_profiles = {
    default = {
      name = "fargate"
      selectors = [
        {
          namespace = "fargate"
        }
      ]
      subnets = module.vpc.private_subnets
      tags = merge(local.tags, {
        Namespace = "fargate"
      })
    }
  }

  map_roles        = local.eks_map_roles
  write_kubeconfig = var.eks_write_kubeconfig
}