Setting up AWS EKS (Elastic Kubernetes Service) with Terraform
Learn how to provision and manage AWS EKS clusters using Terraform, including node groups, add-ons, and best practices for Kubernetes deployments
Amazon EKS is a managed Kubernetes service that lets you run Kubernetes on AWS without operating your own control plane. This guide demonstrates how to set up and manage EKS clusters using Terraform.
Prerequisites
- AWS CLI configured with appropriate permissions
- Terraform installed (version 1.0.0 or later)
- kubectl installed
- Basic understanding of Kubernetes concepts
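To pin these tool and provider versions in code, a versions.tf at the project root can declare the requirements used throughout this guide. This is an illustrative sketch; the provider version constraints are assumptions and should be adjusted to your environment.
# versions.tf (illustrative version constraints)
terraform {
  required_version = ">= 1.0.0"

  required_providers {
    aws = {
      source  = "hashicorp/aws"
      version = ">= 5.0"
    }
    kubernetes = {
      source  = "hashicorp/kubernetes"
      version = ">= 2.20"
    }
    helm = {
      source  = "hashicorp/helm"
      version = ">= 2.10"
    }
    tls = {
      source  = "hashicorp/tls"
      version = ">= 4.0"
    }
  }
}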
Project Structure
terraform-eks/
├── main.tf
├── variables.tf
├── outputs.tf
├── modules/
│   └── eks/
│       ├── main.tf
│       ├── variables.tf
│       └── outputs.tf
└── kubernetes/
    └── manifests/
        ├── namespace.yaml
        └── deployment.yaml
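The root main.tf wires the EKS module into the rest of the configuration. A minimal sketch, assuming the VPC, subnets, and region come from root-level variables (the variable names below are placeholders; only a subset of the module's inputs is shown):
# main.tf (root): minimal module wiring, placeholder variable names
provider "aws" {
  region = var.aws_region
}

module "eks" {
  source = "./modules/eks"

  project_name       = var.project_name
  kubernetes_version = var.kubernetes_version
  vpc_id             = var.vpc_id
  subnet_ids         = var.subnet_ids
  private_subnet_ids = var.private_subnet_ids
  tags               = var.tags
}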
EKS Configuration
Create modules/eks/main.tf:
# EKS Cluster
resource "aws_eks_cluster" "main" {
  name     = "${var.project_name}-cluster"
  role_arn = aws_iam_role.cluster.arn
  version  = var.kubernetes_version

  vpc_config {
    subnet_ids              = var.subnet_ids
    endpoint_private_access = true
    endpoint_public_access  = true
    public_access_cidrs     = var.allowed_cidr_blocks
    security_group_ids      = [aws_security_group.cluster.id]
  }

  encryption_config {
    provider {
      key_arn = aws_kms_key.eks.arn
    }
    resources = ["secrets"]
  }

  enabled_cluster_log_types = [
    "api",
    "audit",
    "authenticator",
    "controllerManager",
    "scheduler"
  ]

  kubernetes_network_config {
    service_ipv4_cidr = var.service_ipv4_cidr
    ip_family         = "ipv4"
  }

  tags = merge(
    var.tags,
    {
      Name = "${var.project_name}-cluster"
    }
  )

  depends_on = [
    aws_iam_role_policy_attachment.cluster_policy,
    aws_cloudwatch_log_group.eks
  ]
}
# Managed Node Group
resource "aws_eks_node_group" "main" {
  cluster_name    = aws_eks_cluster.main.name
  node_group_name = "${var.project_name}-node-group"
  node_role_arn   = aws_iam_role.node_group.arn
  subnet_ids      = var.private_subnet_ids

  scaling_config {
    desired_size = var.desired_size
    max_size     = var.max_size
    min_size     = var.min_size
  }

  update_config {
    max_unavailable = 1
  }

  ami_type       = "AL2_x86_64"
  capacity_type  = "ON_DEMAND"
  disk_size      = 50
  instance_types = ["t3.medium"]

  remote_access {
    ec2_ssh_key               = var.ssh_key_name
    source_security_group_ids = [aws_security_group.remote_access.id]
  }

  labels = {
    role = "general"
  }

  taint {
    key    = "dedicated"
    value  = "special"
    effect = "NO_SCHEDULE"
  }

  tags = merge(
    var.tags,
    {
      Name = "${var.project_name}-node-group"
    }
  )

  depends_on = [
    aws_iam_role_policy_attachment.node_group_policy
  ]
}
# Fargate Profile
resource "aws_eks_fargate_profile" "main" {
  cluster_name           = aws_eks_cluster.main.name
  fargate_profile_name   = "${var.project_name}-fargate"
  pod_execution_role_arn = aws_iam_role.fargate.arn
  subnet_ids             = var.private_subnet_ids

  selector {
    namespace = "default"
    labels = {
      Environment = "production"
      Type        = "fargate"
    }
  }

  tags = merge(
    var.tags,
    {
      Name = "${var.project_name}-fargate"
    }
  )
}
# Add-ons
resource "aws_eks_addon" "vpc_cni" {
  cluster_name             = aws_eks_cluster.main.name
  addon_name               = "vpc-cni"
  addon_version            = var.vpc_cni_version
  resolve_conflicts        = "OVERWRITE"
  service_account_role_arn = aws_iam_role.vpc_cni.arn

  tags = merge(
    var.tags,
    {
      Name = "${var.project_name}-vpc-cni"
    }
  )
}

resource "aws_eks_addon" "coredns" {
  cluster_name      = aws_eks_cluster.main.name
  addon_name        = "coredns"
  addon_version     = var.coredns_version
  resolve_conflicts = "OVERWRITE"

  tags = merge(
    var.tags,
    {
      Name = "${var.project_name}-coredns"
    }
  )
}

resource "aws_eks_addon" "kube_proxy" {
  cluster_name      = aws_eks_cluster.main.name
  addon_name        = "kube-proxy"
  addon_version     = var.kube_proxy_version
  resolve_conflicts = "OVERWRITE"

  tags = merge(
    var.tags,
    {
      Name = "${var.project_name}-kube-proxy"
    }
  )
}
# Security Groups
resource "aws_security_group" "cluster" {
  name        = "${var.project_name}-cluster"
  description = "EKS cluster security group"
  vpc_id      = var.vpc_id

  ingress {
    description = "Allow worker nodes"
    from_port   = 443
    to_port     = 443
    protocol    = "tcp"
    cidr_blocks = var.private_subnet_cidrs
  }

  egress {
    from_port   = 0
    to_port     = 0
    protocol    = "-1"
    cidr_blocks = ["0.0.0.0/0"]
  }

  tags = merge(
    var.tags,
    {
      Name = "${var.project_name}-cluster"
    }
  )
}
# CloudWatch Logs
resource "aws_cloudwatch_log_group" "eks" {
  name              = "/aws/eks/${var.project_name}/cluster"
  retention_in_days = 30

  tags = merge(
    var.tags,
    {
      Name = "${var.project_name}-logs"
    }
  )
}
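The module code above references a number of input variables. A partial modules/eks/variables.tf sketch covering the networking and tagging inputs used here; the remaining inputs (node sizing, add-on versions, SSH key name, and so on) follow the same pattern, and the defaults shown are illustrative assumptions:
# modules/eks/variables.tf (partial sketch of the inputs referenced above)
variable "project_name" {
  type = string
}

variable "kubernetes_version" {
  type = string
}

variable "vpc_id" {
  type = string
}

variable "subnet_ids" {
  type = list(string)
}

variable "private_subnet_ids" {
  type = list(string)
}

variable "private_subnet_cidrs" {
  type = list(string)
}

variable "allowed_cidr_blocks" {
  type    = list(string)
  default = ["0.0.0.0/0"]
}

variable "tags" {
  type    = map(string)
  default = {}
}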
IRSA (IAM Roles for Service Accounts)
# OIDC Provider
data "tls_certificate" "eks" {
  url = aws_eks_cluster.main.identity[0].oidc[0].issuer
}

resource "aws_iam_openid_connect_provider" "eks" {
  client_id_list  = ["sts.amazonaws.com"]
  thumbprint_list = [data.tls_certificate.eks.certificates[0].sha1_fingerprint]
  url             = aws_eks_cluster.main.identity[0].oidc[0].issuer
}

# IAM Role for Service Account
resource "aws_iam_role" "service_account" {
  name = "${var.project_name}-service-account"

  assume_role_policy = jsonencode({
    Version = "2012-10-17"
    Statement = [
      {
        Action = "sts:AssumeRoleWithWebIdentity"
        Effect = "Allow"
        Principal = {
          Federated = aws_iam_openid_connect_provider.eks.arn
        }
        Condition = {
          StringEquals = {
            "${replace(aws_iam_openid_connect_provider.eks.url, "https://", "")}:sub" = "system:serviceaccount:default:my-service-account"
          }
        }
      }
    ]
  })
}
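To consume this role from a pod, the Kubernetes service account named in the trust policy is annotated with the role ARN. A minimal sketch; the name and namespace must match the system:serviceaccount:default:my-service-account subject above:
# Service account bound to the IRSA role via the eks.amazonaws.com/role-arn annotation
resource "kubernetes_service_account" "app" {
  metadata {
    name      = "my-service-account"
    namespace = "default"
    annotations = {
      "eks.amazonaws.com/role-arn" = aws_iam_role.service_account.arn
    }
  }
}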
Kubernetes Resources
- Application Deployment
provider "kubernetes" {
host = aws_eks_cluster.main.endpoint
cluster_ca_certificate = base64decode(aws_eks_cluster.main.certificate_authority[0].data)
token = data.aws_eks_cluster_auth.main.token
}
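The provider block above reads a short-lived authentication token from an aws_eks_cluster_auth data source, which needs to be declared alongside it:
# Token source for the Kubernetes provider
data "aws_eks_cluster_auth" "main" {
  name = aws_eks_cluster.main.name
}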
resource "kubernetes_deployment" "app" {
metadata {
name = "my-app"
namespace = "default"
labels = {
app = "my-app"
}
}
spec {
replicas = 3
selector {
match_labels = {
app = "my-app"
}
}
template {
metadata {
labels = {
app = "my-app"
}
}
spec {
container {
image = "nginx:latest"
name = "nginx"
resources {
limits = {
cpu = "0.5"
memory = "512Mi"
}
requests = {
cpu = "250m"
memory = "256Mi"
}
}
liveness_probe {
http_get {
path = "/"
port = 80
}
initial_delay_seconds = 3
period_seconds = 3
}
}
}
}
}
}
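To reach the pods from inside the cluster, a Service selecting the same labels can sit in front of the deployment. A minimal ClusterIP sketch (swap the type to LoadBalancer or add an Ingress if external access is needed):
# ClusterIP service selecting the my-app pods
resource "kubernetes_service" "app" {
  metadata {
    name      = "my-app"
    namespace = "default"
  }

  spec {
    selector = {
      app = "my-app"
    }
    port {
      port        = 80
      target_port = 80
    }
    type = "ClusterIP"
  }
}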
- Horizontal Pod Autoscaling
resource "kubernetes_horizontal_pod_autoscaler_v2" "app" {
metadata {
name = "my-app-hpa"
}
spec {
scale_target_ref {
api_version = "apps/v1"
kind = "Deployment"
name = kubernetes_deployment.app.metadata[0].name
}
min_replicas = 1
max_replicas = 10
metric {
type = "Resource"
resource {
name = "cpu"
target {
type = "Utilization"
average_utilization = 50
}
}
}
}
}
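The HPA reads CPU utilization from the Kubernetes metrics API, and EKS does not install a metrics source by default. A minimal sketch that deploys metrics-server from its upstream Helm chart (pinning a chart version is left as an exercise):
# metrics-server provides the resource metrics the HPA consumes
resource "helm_release" "metrics_server" {
  name       = "metrics-server"
  repository = "https://kubernetes-sigs.github.io/metrics-server/"
  chart      = "metrics-server"
  namespace  = "kube-system"
}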
Monitoring and Logging
- Prometheus and Grafana
resource "helm_release" "prometheus" {
name = "prometheus"
repository = "https://prometheus-community.github.io/helm-charts"
chart = "kube-prometheus-stack"
namespace = "monitoring"
version = var.prometheus_version
values = [
file("${path.module}/values/prometheus-values.yaml")
]
set {
name = "grafana.enabled"
value = "true"
}
}
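The helm_release resources in this guide assume a helm provider wired to the cluster in the same way as the kubernetes provider above. A minimal sketch using helm provider v2 syntax:
# Helm provider authenticated against the EKS cluster
provider "helm" {
  kubernetes {
    host                   = aws_eks_cluster.main.endpoint
    cluster_ca_certificate = base64decode(aws_eks_cluster.main.certificate_authority[0].data)
    token                  = data.aws_eks_cluster_auth.main.token
  }
}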
- CloudWatch Container Insights
resource "aws_eks_addon" "container_insights" {
cluster_name = aws_eks_cluster.main.name
addon_name = "amazon-cloudwatch-observability"
addon_version = var.container_insights_version
resolve_conflicts = "OVERWRITE"
tags = merge(
var.tags,
{
Name = "${var.project_name}-container-insights"
}
)
}
Best Practices
- Cluster Configuration
  - Use managed node groups
  - Enable control plane logging
  - Implement IRSA
  - Configure private endpoints
- Security
  - Enable envelope encryption
  - Use security groups
  - Implement network policies
  - Regular updates
- High Availability
  - Multi-AZ deployment
  - Node group scaling
  - Pod disruption budgets (see the sketch after this list)
  - Cluster autoscaling
- Cost Optimization
  - Use appropriate node types
  - Implement cluster auto-scaling
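As an illustration of the pod disruption budget item above, a minimal sketch that keeps at least two my-app replicas available during voluntary disruptions such as node drains:
# Pod disruption budget protecting the my-app deployment
resource "kubernetes_pod_disruption_budget_v1" "app" {
  metadata {
    name      = "my-app-pdb"
    namespace = "default"
  }

  spec {
    min_available = 2
    selector {
      match_labels = {
        app = "my-app"
      }
    }
  }
}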
Conclusion
You’ve learned how to set up and manage AWS EKS using Terraform. This setup provides:
- Managed Kubernetes clusters
- Node group management
- Add-on integration
- Monitoring capabilities
Remember to:
- Monitor cluster health
- Implement security best practices
- Optimize costs
- Keep components updated
Advanced Features
- Cluster Autoscaling
resource "helm_release" "cluster_autoscaler" {
name = "cluster-autoscaler"
repository = "https://kubernetes.github.io/autoscaler"
chart = "cluster-autoscaler"
namespace = "kube-system"
version = var.autoscaler_version
set {
name = "autoDiscovery.clusterName"
value = aws_eks_cluster.main.name
}
set {
name = "awsRegion"
value = var.aws_region
}
}
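Auto-discovery works by finding Auto Scaling groups tagged with the cluster-autoscaler discovery keys. EKS managed node groups usually apply these tags automatically; the sketch below shows how to set them explicitly on the node group's ASG if they are missing:
# Discovery tags the cluster-autoscaler looks for on the node group's ASG
resource "aws_autoscaling_group_tag" "cluster_autoscaler_enabled" {
  autoscaling_group_name = aws_eks_node_group.main.resources[0].autoscaling_groups[0].name

  tag {
    key                 = "k8s.io/cluster-autoscaler/enabled"
    value               = "true"
    propagate_at_launch = false
  }
}

resource "aws_autoscaling_group_tag" "cluster_autoscaler_owned" {
  autoscaling_group_name = aws_eks_node_group.main.resources[0].autoscaling_groups[0].name

  tag {
    key                 = "k8s.io/cluster-autoscaler/${aws_eks_cluster.main.name}"
    value               = "owned"
    propagate_at_launch = false
  }
}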
- AWS Load Balancer Controller
resource "helm_release" "aws_load_balancer_controller" {
name = "aws-load-balancer-controller"
repository = "https://aws.github.io/eks-charts"
chart = "aws-load-balancer-controller"
namespace = "kube-system"
version = var.lb_controller_version
set {
name = "clusterName"
value = aws_eks_cluster.main.name
}
set {
name = "serviceAccount.create"
value = "true"
}
set {
name = "serviceAccount.annotations.eks\\.amazonaws\\.com/role-arn"
value = aws_iam_role.lb_controller.arn
}
}
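Finally, the outputs.tf listed in the project structure typically surfaces the values other tooling needs to connect to the cluster. A minimal sketch:
# outputs.tf: values commonly needed by kubectl and other tooling
output "cluster_name" {
  value = aws_eks_cluster.main.name
}

output "cluster_endpoint" {
  value = aws_eks_cluster.main.endpoint
}

output "oidc_provider_arn" {
  value = aws_iam_openid_connect_provider.eks.arn
}
After terraform apply, running aws eks update-kubeconfig --name <cluster-name> --region <region> configures kubectl to talk to the new cluster.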