Managing Azure Load Balancer with Terraform
Learn how to set up and manage Azure Load Balancer using Terraform, including internal and external load balancing configurations
Managing Azure Load Balancer with Terraform
Azure Load Balancer is a Layer 4 (TCP/UDP) load balancer that provides high availability by distributing incoming traffic among healthy instances. This guide demonstrates how to set up and manage Load Balancer using Terraform.
Video Tutorial
Learn more about managing Azure Load Balancer with Terraform in this comprehensive video tutorial:
Prerequisites
- Azure CLI configured with appropriate permissions
- Terraform installed (version 1.0.0 or later)
- Resource group created
- Understanding of networking concepts
Project Structure
terraform-azure-loadbalancer/
├── main.tf
├── variables.tf
├── outputs.tf
├── modules/
│ └── loadbalancer/
│ ├── main.tf
│ ├── variables.tf
│ └── outputs.tf
└── configs/
└── health.json
Load Balancer Configuration
Create `modules/loadbalancer/main.tf`:
# Public Load Balancer
# Internet-facing Layer 4 load balancer. Standard SKU is required for
# availability zones, outbound rules, and the zone-redundant frontend below.
resource "azurerm_lb" "public" {
name = "${var.project_name}-public-lb"
location = var.location
resource_group_name = var.resource_group_name
sku = "Standard"
# Regional tier (the alternative, "Global", is for cross-region LB).
sku_tier = "Regional"
# Zone-redundant public frontend backed by the static public IP declared
# in this module.
frontend_ip_configuration {
name = "public-frontend"
public_ip_address_id = azurerm_public_ip.lb.id
zones = ["1", "2", "3"]
}
tags = var.tags
}
# Backend Pool
# Target pool for the public LB. Membership is established from the compute
# side (e.g. a scale set's ip_configuration referencing this pool's id).
resource "azurerm_lb_backend_address_pool" "public" {
name = "backend-pool"
loadbalancer_id = azurerm_lb.public.id
}
# Health Probe
# TCP connect probe on port 80. An instance is taken out of rotation after
# 2 consecutive failures at 5-second intervals (~10s to detect an outage).
resource "azurerm_lb_probe" "public" {
name = "health-probe"
loadbalancer_id = azurerm_lb.public.id
protocol = "Tcp"
port = 80
interval_in_seconds = 5
number_of_probes = 2
}
# Load Balancing Rule
# Distributes inbound TCP/80 traffic from the public frontend across the
# backend pool, gated by the TCP health probe.
resource "azurerm_lb_rule" "public" {
  name                           = "http-rule"
  loadbalancer_id                = azurerm_lb.public.id
  frontend_ip_configuration_name = "public-frontend"
  protocol                       = "Tcp"
  frontend_port                  = 80
  backend_port                   = 80
  backend_address_pool_ids       = [azurerm_lb_backend_address_pool.public.id]
  probe_id                       = azurerm_lb_probe.public.id
  idle_timeout_in_minutes        = 4
  enable_floating_ip             = false
  # Send TCP RST on idle timeout instead of silently dropping the flow.
  enable_tcp_reset               = true
  # FIX: an explicit outbound rule (azurerm_lb_outbound_rule.public) is
  # defined for this pool/frontend. Azure rejects the deployment unless
  # default (implicit) outbound SNAT is disabled on load-balancing rules
  # that share the pool, so SNAT is provided solely by the outbound rule.
  disable_outbound_snat          = true
}
# NAT Rule
# Forwards SSH (TCP/22) from the public frontend to a single backend instance.
# NOTE(review): this rule only takes effect once it is associated with a NIC
# ip_configuration (azurerm_network_interface_nat_rule_association), which is
# not shown in this guide — confirm the association exists in your deployment.
resource "azurerm_lb_nat_rule" "public" {
name = "ssh-nat"
loadbalancer_id = azurerm_lb.public.id
protocol = "Tcp"
frontend_port = 22
backend_port = 22
frontend_ip_configuration_name = "public-frontend"
idle_timeout_in_minutes = 4
enable_floating_ip = false
enable_tcp_reset = true
}
# Outbound Rule
# Provides explicit outbound SNAT for backend-pool members through the public
# frontend IP. When an explicit outbound rule exists, the default outbound
# SNAT must be disabled on load-balancing rules sharing this pool
# (disable_outbound_snat = true on azurerm_lb_rule), or Azure rejects the
# configuration.
resource "azurerm_lb_outbound_rule" "public" {
name = "outbound-rule"
loadbalancer_id = azurerm_lb.public.id
protocol = "Tcp"
backend_address_pool_id = azurerm_lb_backend_address_pool.public.id
frontend_ip_configuration {
name = "public-frontend"
}
}
# Internal Load Balancer
# Private Layer 4 load balancer with a zone-redundant frontend inside the
# virtual network (no public IP).
resource "azurerm_lb" "internal" {
  name                = "${var.project_name}-internal-lb"
  location            = var.location
  resource_group_name = var.resource_group_name
  sku                 = "Standard"
  sku_tier            = "Regional"

  frontend_ip_configuration {
    name = "internal-frontend"
    # FIX: the original referenced azurerm_subnet.internal, which is never
    # declared anywhere in this guide (only "frontend" and "backend" subnets
    # exist). Place the internal frontend in the backend subnet — adjust if
    # your deployment defines a dedicated subnet for it.
    subnet_id                     = azurerm_subnet.backend.id
    private_ip_address_allocation = "Dynamic"
    zones                         = ["1", "2", "3"]
  }

  tags = var.tags
}
# Backend Pool for Internal LB
# Target pool for the internal LB; populated from the compute side, same as
# the public pool.
resource "azurerm_lb_backend_address_pool" "internal" {
name = "internal-backend-pool"
loadbalancer_id = azurerm_lb.internal.id
}
# Health Probe for Internal LB
# TCP connect probe on port 80; same failure threshold as the public probe
# (2 misses at 5-second intervals).
resource "azurerm_lb_probe" "internal" {
name = "internal-probe"
loadbalancer_id = azurerm_lb.internal.id
protocol = "Tcp"
port = 80
interval_in_seconds = 5
number_of_probes = 2
}
# Load Balancing Rule for Internal LB
# Distributes TCP/80 traffic arriving at the private frontend across the
# internal backend pool. Internal load balancers perform no outbound SNAT,
# so no outbound rule is needed here.
resource "azurerm_lb_rule" "internal" {
name = "internal-rule"
loadbalancer_id = azurerm_lb.internal.id
frontend_ip_configuration_name = "internal-frontend"
protocol = "Tcp"
frontend_port = 80
backend_port = 80
backend_address_pool_ids = [azurerm_lb_backend_address_pool.internal.id]
probe_id = azurerm_lb_probe.internal.id
idle_timeout_in_minutes = 4
enable_floating_ip = false
enable_tcp_reset = true
}
# Public IP for Load Balancer
# Standard-SKU public IPs must use Static allocation; zone-redundant across
# all three zones to match the LB frontend.
resource "azurerm_public_ip" "lb" {
name = "${var.project_name}-lb-pip"
location = var.location
resource_group_name = var.resource_group_name
allocation_method = "Static"
sku = "Standard"
zones = ["1", "2", "3"]
tags = var.tags
}
Network Configuration
1. **Virtual Network Setup**
```hcl
# Virtual network containing the frontend and backend subnets.
resource "azurerm_virtual_network" "main" {
  name                = "${var.project_name}-vnet"
  address_space       = ["10.0.0.0/16"]
  location            = var.location
  resource_group_name = var.resource_group_name

  # FIX: the inline `subnet { ... }` blocks were removed. The same subnets
  # are declared as standalone azurerm_subnet resources below; defining them
  # both inline and standalone makes Terraform fight over ownership and
  # produces perpetual diffs / subnet destruction on every apply. Manage
  # subnets with exactly one of the two mechanisms.

  tags = var.tags
}
# Frontend subnet (10.0.1.0/24) for internet-facing components.
resource "azurerm_subnet" "frontend" {
name = "frontend"
resource_group_name = var.resource_group_name
virtual_network_name = azurerm_virtual_network.main.name
address_prefixes = ["10.0.1.0/24"]
}
# Backend subnet (10.0.2.0/24) hosting the load-balanced instances.
resource "azurerm_subnet" "backend" {
name = "backend"
resource_group_name = var.resource_group_name
virtual_network_name = azurerm_virtual_network.main.name
address_prefixes = ["10.0.2.0/24"]
}
# NSG allowing inbound HTTP (TCP/80) from any source.
# NOTE(review): this NSG is not associated with a subnet or NIC anywhere in
# this guide (azurerm_subnet_network_security_group_association) — it has no
# effect until associated; confirm in your deployment. Consider narrowing
# source_address_prefix for production.
resource "azurerm_network_security_group" "frontend" {
name = "${var.project_name}-frontend-nsg"
location = var.location
resource_group_name = var.resource_group_name
security_rule {
name = "allow-http"
priority = 100
direction = "Inbound"
access = "Allow"
protocol = "Tcp"
source_port_range = "*"
destination_port_range = "80"
source_address_prefix = "*"
destination_address_prefix = "*"
}
tags = var.tags
}
High Availability Configuration
- Availability Set
# Availability set spreading VMs across 2 fault domains and 5 update domains.
# `managed = true` aligns the set with managed disks.
# NOTE(review): availability sets and availability zones are alternative HA
# mechanisms — the zonal scale set below does not (and cannot) use this set.
# Keep it only if you also deploy standalone VMs; otherwise it is unused.
resource "azurerm_availability_set" "main" {
name = "${var.project_name}-avset"
location = var.location
resource_group_name = var.resource_group_name
platform_fault_domain_count = 2
platform_update_domain_count = 5
managed = true
tags = var.tags
}
- VM Scale Set
# Zone-spanning VM scale set attached to the public LB backend pool.
# NOTE: azurerm_virtual_machine_scale_set is the legacy resource; prefer
# azurerm_linux_virtual_machine_scale_set for new code. The legacy type is
# kept here so existing state/references continue to work.
resource "azurerm_virtual_machine_scale_set" "main" {
  name                = "${var.project_name}-vmss"
  location            = var.location
  resource_group_name = var.resource_group_name
  upgrade_policy_mode = "Rolling"
  zones               = ["1", "2", "3"]

  sku {
    name     = "Standard_DS1_v2"
    tier     = "Standard"
    capacity = 2
  }

  rolling_upgrade_policy {
    max_batch_instance_percent              = 20
    max_unhealthy_instance_percent          = 20
    max_unhealthy_upgraded_instance_percent = 20
    pause_time_between_batches              = "PT0S"
  }

  # Rolling upgrades require a health probe; reuse the public LB probe.
  health_probe_id = azurerm_lb_probe.public.id

  # FIX: the os_profile / storage_profile blocks below are required by the
  # provider; the original example omitted them and would fail `terraform plan`.
  os_profile {
    computer_name_prefix = "vmss"
    admin_username       = "azureuser"
  }

  os_profile_linux_config {
    disable_password_authentication = true

    ssh_keys {
      path     = "/home/azureuser/.ssh/authorized_keys"
      key_data = var.admin_ssh_public_key # declare in variables.tf
    }
  }

  storage_profile_image_reference {
    publisher = "Canonical"
    offer     = "0001-com-ubuntu-server-jammy"
    sku       = "22_04-lts"
    version   = "latest"
  }

  storage_profile_os_disk {
    name              = ""
    caching           = "ReadWrite"
    create_option     = "FromImage"
    managed_disk_type = "Standard_LRS"
  }

  network_interface {
    name    = "nic"
    primary = true

    ip_configuration {
      name                                   = "ipconfig"
      primary                                = true
      subnet_id                              = azurerm_subnet.backend.id
      load_balancer_backend_address_pool_ids = [azurerm_lb_backend_address_pool.public.id]
    }
  }

  tags = var.tags
}
Monitoring Configuration
- Diagnostic Settings
# Diagnostic setting streaming all public-LB platform metrics to Log Analytics.
resource "azurerm_monitor_diagnostic_setting" "lb" {
  name                       = "${var.project_name}-diag"
  target_resource_id         = azurerm_lb.public.id
  log_analytics_workspace_id = var.log_analytics_workspace_id

  metric {
    category = "AllMetrics"
    enabled  = true
    # FIX: the deprecated retention_policy block was removed — diagnostic
    # setting retention is retired by Azure and rejected/ignored by current
    # azurerm provider versions. Configure retention on the destination
    # instead (Log Analytics workspace retention or storage lifecycle rules).
  }
}
# Metric alert on backend instance availability. DipAvailability measures
# per-endpoint health-probe success; averaging below 100 means at least one
# backend instance is failing its probe, which triggers the action group.
resource "azurerm_monitor_metric_alert" "lb" {
name = "${var.project_name}-health-alert"
resource_group_name = var.resource_group_name
scopes = [azurerm_lb.public.id]
description = "Alert when health probe status is unhealthy"
criteria {
metric_namespace = "Microsoft.Network/loadBalancers"
metric_name = "DipAvailability"
aggregation = "Average"
operator = "LessThan"
threshold = 100
}
action {
action_group_id = var.action_group_id
}
}
Best Practices
-
Performance
- Use Standard SKU
- Enable zones
- Configure health probes
- Optimize timeout settings
-
High Availability
- Use multiple zones
- Configure VM Scale Sets
- Implement health probes
- Enable automatic failover
-
Security
- Use network security groups
- Implement network isolation
- Control inbound access
- Monitor traffic patterns
-
Cost Optimization
- Choose appropriate SKU
- Monitor usage
- Optimize rules
- Use auto-scaling
Conclusion
You’ve learned how to set up and manage Azure Load Balancer using Terraform. This setup provides:
- Layer 4 load balancing
- High availability
- Network isolation
- Traffic distribution
Remember to:
- Monitor health status
- Review security rules
- Update probe settings
- Maintain backend pools