# Provisioning AWS DynamoDB Tables with Terraform
Learn how to provision and manage DynamoDB tables using Terraform, including advanced features like Global Tables, Auto Scaling, and Backups.
DynamoDB is AWS’s fully managed NoSQL database service. This guide demonstrates how to provision and manage DynamoDB tables using Terraform, including advanced features and best practices.
## Prerequisites
- AWS CLI configured with appropriate permissions
- Terraform installed (version 1.0.0 or later)
- Basic understanding of DynamoDB concepts
- Understanding of NoSQL data modeling
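To pin the versions these examples expect, declare them in the root module. The AWS provider constraint and region below are assumptions; adjust them to your environment. The Global Tables example later in this guide replicates to us-east-1 and eu-west-1, so the home region must be a third region such as us-west-2:
```hcl
terraform {
  required_version = ">= 1.0.0"

  required_providers {
    aws = {
      source  = "hashicorp/aws"
      version = "~> 5.0" # assumed; pin to the version you have validated
    }
  }
}

provider "aws" {
  region = "us-west-2" # assumed home region; the replica regions below must differ from it
}
```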
## Project Structure
```
terraform-dynamodb/
├── main.tf
├── variables.tf
├── outputs.tf
├── modules/
│   └── dynamodb/
│       ├── main.tf
│       ├── variables.tf
│       └── outputs.tf
└── policies/
    └── backup.json
```
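The module code below references `var.project_name` and `var.tags` throughout, so `modules/dynamodb/variables.tf` needs at least these declarations (a minimal sketch):
```hcl
variable "project_name" {
  description = "Prefix used to name the DynamoDB tables and related resources"
  type        = string
}

variable "tags" {
  description = "Common tags merged into every resource's tags"
  type        = map(string)
  default     = {}
}
```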
## Basic Table Configuration
Create `modules/dynamodb/main.tf`:
```hcl
# Basic DynamoDB Table
resource "aws_dynamodb_table" "basic" {
  name         = "${var.project_name}-table"
  billing_mode = "PAY_PER_REQUEST"
  hash_key     = "PK"
  range_key    = "SK"

  attribute {
    name = "PK"
    type = "S"
  }

  attribute {
    name = "SK"
    type = "S"
  }

  attribute {
    name = "GSI1PK"
    type = "S"
  }

  attribute {
    name = "GSI1SK"
    type = "S"
  }

  global_secondary_index {
    name            = "GSI1"
    hash_key        = "GSI1PK"
    range_key       = "GSI1SK"
    projection_type = "ALL"
  }

  point_in_time_recovery {
    enabled = true
  }

  server_side_encryption {
    enabled = true
  }

  tags = merge(
    var.tags,
    {
      Name = "${var.project_name}-table"
    }
  )
}
# Auto Scaling Configuration
# NOTE: Application Auto Scaling only applies to tables using PROVISIONED
# billing mode; it has no effect on on-demand tables. These targets manage
# the provisioned table defined under Capacity Planning below.
resource "aws_appautoscaling_target" "dynamodb_table_read_target" {
  max_capacity       = 100
  min_capacity       = 5
  resource_id        = "table/${aws_dynamodb_table.provisioned.name}"
  scalable_dimension = "dynamodb:table:ReadCapacityUnits"
  service_namespace  = "dynamodb"
}

resource "aws_appautoscaling_target" "dynamodb_table_write_target" {
  max_capacity       = 100
  min_capacity       = 5
  resource_id        = "table/${aws_dynamodb_table.provisioned.name}"
  scalable_dimension = "dynamodb:table:WriteCapacityUnits"
  service_namespace  = "dynamodb"
}

resource "aws_appautoscaling_policy" "dynamodb_table_read_policy" {
  name               = "DynamoDBReadCapacityUtilization:${aws_appautoscaling_target.dynamodb_table_read_target.resource_id}"
  policy_type        = "TargetTrackingScaling"
  resource_id        = aws_appautoscaling_target.dynamodb_table_read_target.resource_id
  scalable_dimension = aws_appautoscaling_target.dynamodb_table_read_target.scalable_dimension
  service_namespace  = aws_appautoscaling_target.dynamodb_table_read_target.service_namespace

  target_tracking_scaling_policy_configuration {
    predefined_metric_specification {
      predefined_metric_type = "DynamoDBReadCapacityUtilization"
    }
    target_value = 70
  }
}

resource "aws_appautoscaling_policy" "dynamodb_table_write_policy" {
  name               = "DynamoDBWriteCapacityUtilization:${aws_appautoscaling_target.dynamodb_table_write_target.resource_id}"
  policy_type        = "TargetTrackingScaling"
  resource_id        = aws_appautoscaling_target.dynamodb_table_write_target.resource_id
  scalable_dimension = aws_appautoscaling_target.dynamodb_table_write_target.scalable_dimension
  service_namespace  = aws_appautoscaling_target.dynamodb_table_write_target.service_namespace

  target_tracking_scaling_policy_configuration {
    predefined_metric_specification {
      predefined_metric_type = "DynamoDBWriteCapacityUtilization"
    }
    target_value = 70
  }
}
# Global Tables
resource "aws_dynamodb_table" "global" {
  name             = "${var.project_name}-global-table"
  billing_mode     = "PAY_PER_REQUEST"
  stream_enabled   = true
  stream_view_type = "NEW_AND_OLD_IMAGES" # streams are required for replicas
  hash_key         = "PK"
  range_key        = "SK"

  attribute {
    name = "PK"
    type = "S"
  }

  attribute {
    name = "SK"
    type = "S"
  }

  # Replica regions must differ from the table's home region
  # (the region your AWS provider is configured for).
  replica {
    region_name = "us-east-1"
  }

  replica {
    region_name = "eu-west-1"
  }

  point_in_time_recovery {
    enabled = true
  }

  server_side_encryption {
    enabled = true
  }

  tags = merge(
    var.tags,
    {
      Name = "${var.project_name}-global-table"
    }
  )
}
# Backup Configuration
# Point-in-time recovery provides continuous backups with restore to any
# second within the retention window; the AWS Backup plan under Advanced
# Features adds scheduled snapshots on top.
resource "aws_dynamodb_table" "backup_enabled" {
  name         = "${var.project_name}-backup-table"
  billing_mode = "PAY_PER_REQUEST"
  hash_key     = "PK"
  range_key    = "SK"

  attribute {
    name = "PK"
    type = "S"
  }

  attribute {
    name = "SK"
    type = "S"
  }

  point_in_time_recovery {
    enabled = true
  }

  server_side_encryption {
    enabled = true
  }

  tags = merge(
    var.tags,
    {
      Name = "${var.project_name}-backup-table"
    }
  )
}
resource "aws_dynamodb_table_item" "example" {
table_name = aws_dynamodb_table.basic.name
hash_key = "PK"
range_key = "SK"
item = jsonencode({
PK = { S = "USER#1" }
SK = { S = "PROFILE#1" }
username = { S = "johndoe" }
email = { S = "john@example.com" }
created_at = { S = "2025-01-19T00:00:00Z" }
})
}
# DAX Cluster
# Assumes an IAM role "dax" (not shown in this guide) that allows DAX to
# access DynamoDB on the cluster's behalf.
resource "aws_dax_cluster" "main" {
  cluster_name       = "${var.project_name}-dax-cluster"
  iam_role_arn       = aws_iam_role.dax.arn
  node_type          = "dax.t3.small"
  replication_factor = 3 # one primary plus two read replicas

  server_side_encryption {
    enabled = true
  }

  tags = merge(
    var.tags,
    {
      Name = "${var.project_name}-dax-cluster"
    }
  )
}
# CloudWatch Alarms
# aws_sns_topic.alerts is defined in the Monitoring and Alerts section below.
resource "aws_cloudwatch_metric_alarm" "throttled_requests" {
  alarm_name          = "${var.project_name}-dynamodb-throttled-requests"
  comparison_operator = "GreaterThanThreshold"
  evaluation_periods  = 2
  metric_name         = "ThrottledRequests"
  namespace           = "AWS/DynamoDB"
  period              = 300
  statistic           = "Sum"
  threshold           = 10
  alarm_description   = "DynamoDB throttled requests"
  alarm_actions       = [aws_sns_topic.alerts.arn]

  dimensions = {
    TableName = aws_dynamodb_table.basic.name
  }
}
```
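With the module defined, the root `main.tf` instantiates it (a minimal sketch; the project name and tag values are placeholders):
```hcl
module "dynamodb" {
  source = "./modules/dynamodb"

  project_name = "myapp" # placeholder
  tags = {
    Environment = "production"
    ManagedBy   = "terraform"
  }
}
```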
## Advanced Features
1. **Time to Live (TTL)**: the TTL attribute must hold the expiry time as epoch seconds in a Number attribute (see the sample item after this list):
```hcl
resource "aws_dynamodb_table" "ttl_enabled" {
  name         = "${var.project_name}-ttl-table"
  billing_mode = "PAY_PER_REQUEST"
  hash_key     = "PK"
  range_key    = "SK"

  attribute {
    name = "PK"
    type = "S"
  }

  attribute {
    name = "SK"
    type = "S"
  }

  ttl {
    attribute_name = "ExpiresAt"
    enabled        = true
  }
}
```
2. **Kinesis Stream Integration**
```hcl
resource "aws_dynamodb_kinesis_streaming_destination" "example" {
  table_name = aws_dynamodb_table.basic.name
  stream_arn = aws_kinesis_stream.example.arn
}

resource "aws_kinesis_stream" "example" {
  name             = "${var.project_name}-stream"
  retention_period = 24 # hours

  # shard_count applies only to PROVISIONED stream mode; ON_DEMAND
  # scales shards automatically.
  stream_mode_details {
    stream_mode = "ON_DEMAND"
  }
}
```
3. **Backup Plans**
```hcl
# Assumes a backup vault "main" and an IAM role "backup" with the AWS
# Backup service policies attached, both defined elsewhere.
resource "aws_backup_plan" "dynamodb" {
  name = "${var.project_name}-backup-plan"

  rule {
    rule_name         = "daily_backup"
    target_vault_name = aws_backup_vault.main.name
    schedule          = "cron(0 12 * * ? *)" # daily at 12:00 UTC

    lifecycle {
      delete_after = 30 # days
    }
  }
}

resource "aws_backup_selection" "dynamodb" {
  name         = "${var.project_name}-backup-selection"
  plan_id      = aws_backup_plan.dynamodb.id
  iam_role_arn = aws_iam_role.backup.arn

  resources = [
    aws_dynamodb_table.backup_enabled.arn
  ]
}
```
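As noted in the TTL item above, the expiry attribute must be a Number containing an epoch-seconds timestamp; a sketch of a seeded item that DynamoDB will expire once the timestamp passes (the key values and timestamp are placeholders):
```hcl
resource "aws_dynamodb_table_item" "expiring" {
  table_name = aws_dynamodb_table.ttl_enabled.name
  hash_key   = "PK"
  range_key  = "SK"

  item = jsonencode({
    PK        = { S = "SESSION#abc123" }
    SK        = { S = "TOKEN#1" }
    ExpiresAt = { N = "1767225600" } # epoch seconds; expired items are deleted shortly after this time
  })
}
```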
## Data Modeling Best Practices
1. **Single Table Design**
```hcl
resource "aws_dynamodb_table" "single_table" {
  name         = "${var.project_name}-single-table"
  billing_mode = "PAY_PER_REQUEST"
  hash_key     = "PK"
  range_key    = "SK"

  attribute {
    name = "PK"
    type = "S"
  }

  attribute {
    name = "SK"
    type = "S"
  }

  attribute {
    name = "GSI1PK"
    type = "S"
  }

  attribute {
    name = "GSI1SK"
    type = "S"
  }

  attribute {
    name = "GSI2PK"
    type = "S"
  }

  attribute {
    name = "GSI2SK"
    type = "S"
  }

  global_secondary_index {
    name            = "GSI1"
    hash_key        = "GSI1PK"
    range_key       = "GSI1SK"
    projection_type = "ALL"
  }

  global_secondary_index {
    name            = "GSI2"
    hash_key        = "GSI2PK"
    range_key       = "GSI2SK"
    projection_type = "ALL"
  }
}
```
2. **Access Patterns**: each GSI key encodes one lookup (user by email via GSI1, users by status via GSI2):
```hcl
resource "aws_dynamodb_table_item" "user" {
  table_name = aws_dynamodb_table.single_table.name
  hash_key   = "PK"
  range_key  = "SK"

  item = jsonencode({
    PK       = { S = "USER#1" }
    SK       = { S = "PROFILE#1" }
    GSI1PK   = { S = "EMAIL#john@example.com" }
    GSI1SK   = { S = "USER#1" }
    GSI2PK   = { S = "STATUS#active" }
    GSI2SK   = { S = "USER#1" }
    username = { S = "johndoe" }
    email    = { S = "john@example.com" }
    status   = { S = "active" }
  })
}
```
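The project layout includes an `outputs.tf`; exposing the table name and ARN lets application configuration and IAM policies reference them (a minimal sketch):
```hcl
output "table_name" {
  description = "Name of the single-table-design table"
  value       = aws_dynamodb_table.single_table.name
}

output "table_arn" {
  description = "ARN of the single-table-design table"
  value       = aws_dynamodb_table.single_table.arn
}
```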
## Performance Optimization
1. **DAX Configuration**
```hcl
resource "aws_dax_parameter_group" "main" {
  name = "${var.project_name}-dax-params"

  parameters {
    name  = "query-ttl-millis"
    value = "60000" # cache query results for 60 seconds
  }

  parameters {
    name  = "record-ttl-millis"
    value = "60000" # cache individual items for 60 seconds
  }
}
```
2. **Capacity Planning**
```hcl
resource "aws_dynamodb_table" "provisioned" {
  name           = "${var.project_name}-provisioned-table"
  billing_mode   = "PROVISIONED"
  read_capacity  = 5
  write_capacity = 5
  hash_key       = "PK"
  range_key      = "SK"

  attribute {
    name = "PK"
    type = "S"
  }

  attribute {
    name = "SK"
    type = "S"
  }

  # The Application Auto Scaling targets defined earlier manage these
  # capacities at runtime; ignoring them here keeps Terraform from
  # reverting the autoscaler's changes on every apply.
  lifecycle {
    ignore_changes = [read_capacity, write_capacity]
  }
}
```
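The DAX cluster shown earlier also needs network placement, and the parameter group above must be attached explicitly; a sketch assuming your private subnet IDs are supplied through a hypothetical `var.private_subnet_ids`:
```hcl
resource "aws_dax_subnet_group" "main" {
  name       = "${var.project_name}-dax-subnets"
  subnet_ids = var.private_subnet_ids # hypothetical variable holding private subnet IDs
}
```
Wire both into the cluster by setting `subnet_group_name = aws_dax_subnet_group.main.name` and `parameter_group_name = aws_dax_parameter_group.main.name` on `aws_dax_cluster.main`.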
## Monitoring and Alerts
1. **CloudWatch Metrics**
```hcl
resource "aws_cloudwatch_metric_alarm" "read_throttling" {
  alarm_name          = "${var.project_name}-read-throttling"
  comparison_operator = "GreaterThanThreshold"
  evaluation_periods  = 2
  metric_name         = "ReadThrottleEvents"
  namespace           = "AWS/DynamoDB"
  period              = 300
  statistic           = "Sum"
  threshold           = 1
  alarm_description   = "DynamoDB read throttling"
  alarm_actions       = [aws_sns_topic.alerts.arn]

  dimensions = {
    TableName = aws_dynamodb_table.basic.name
  }
}
```
2. **Contributor Insights**: surfaces the table's most-accessed and most-throttled keys:
```hcl
resource "aws_dynamodb_contributor_insights" "example" {
  table_name = aws_dynamodb_table.basic.name
}
```
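Both alarms publish to `aws_sns_topic.alerts`, which the snippets above assume but never define; a minimal sketch (the email endpoint is a placeholder, and the subscription stays pending until confirmed from the email AWS sends):
```hcl
resource "aws_sns_topic" "alerts" {
  name = "${var.project_name}-dynamodb-alerts"
}

resource "aws_sns_topic_subscription" "alerts_email" {
  topic_arn = aws_sns_topic.alerts.arn
  protocol  = "email"
  endpoint  = "ops@example.com" # placeholder address
}
```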
## Best Practices
1. **Security**
   - Enable encryption at rest
   - Use IAM roles with least-privilege access (see the policy sketch after this list)
   - Enable CloudTrail logging
   - Implement a backup strategy
2. **Performance**
   - Choose appropriate partition keys
   - Use sparse indexes
   - Implement caching with DAX
   - Monitor throttling events
3. **Cost Optimization**
   - Use on-demand capacity for unpredictable workloads
   - Implement auto scaling for provisioned tables
   - Monitor usage patterns
   - Clean up unused items (TTL helps here)
4. **Data Modeling**
   - Use single-table design
   - Plan access patterns up front
   - Implement proper indexes
   - Consider item collections
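To make the least-privilege point concrete, scope table access to a specific table and its indexes; a sketch of a typical read/write application policy (the action list is illustrative, not a prescription):
```hcl
resource "aws_iam_policy" "table_access" {
  name = "${var.project_name}-dynamodb-access"

  policy = jsonencode({
    Version = "2012-10-17"
    Statement = [
      {
        Effect = "Allow"
        Action = [
          "dynamodb:GetItem",
          "dynamodb:Query",
          "dynamodb:PutItem",
          "dynamodb:UpdateItem",
          "dynamodb:DeleteItem"
        ]
        Resource = [
          aws_dynamodb_table.basic.arn,
          "${aws_dynamodb_table.basic.arn}/index/*"
        ]
      }
    ]
  })
}
```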
## Conclusion
You’ve learned how to set up and manage AWS DynamoDB using Terraform. This setup provides:
- Flexible table configuration
- Advanced feature implementation
- Performance optimization
- Monitoring and alerting
Remember to:
- Plan your data model
- Monitor performance
- Optimize costs
- Implement security controls