In this Terraform project, we will learn how to provision a Virtual Private Cloud (VPC) on AWS, deploy an Amazon Elastic Kubernetes Service (EKS) cluster within that VPC, and create the associated networking and IAM resources. To do this, I'll build a well-organized Terraform configuration for an AWS VPC and EKS cluster, split into logical modules.
The complete project GitHub URL: https://github.com/Consultantsrihari/Create-a-Terraform-project-which-Implements-a-VPC-on-AWS-and-deploys-to-EKS-cluster..git
AWS VPC and EKS Terraform Configuration
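The configuration is split into a root module and two child modules, laid out like this:
.
├── main.tf
├── variables.tf
├── outputs.tf
└── modules/
    ├── vpc/
    │   ├── main.tf
    │   ├── variables.tf
    │   └── outputs.tf
    └── eks/
        ├── main.tf
        ├── variables.tf
        └── outputs.tf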
I. A modular root configuration that ties the VPC and EKS modules together:
- main.tf
terraform {
  required_providers {
    aws = {
      source  = "hashicorp/aws"
      version = "~> 5.0"
    }
  }
}

provider "aws" {
  region = var.aws_region
}

module "vpc" {
  source             = "./modules/vpc"
  vpc_cidr           = var.vpc_cidr
  availability_zones = var.availability_zones
  environment        = var.environment
}

module "eks" {
  source              = "./modules/eks"
  environment         = var.environment
  subnet_ids          = module.vpc.private_subnet_ids
  kubernetes_version  = "1.27"
  node_desired_size   = 2
  node_max_size       = 4
  node_min_size       = 1
  node_instance_types = ["t3.medium"]
}
- variables.tf
variable "aws_region" {
description = "AWS region"
type = string
default = "us-east-1"
}
variable "environment" {
description = "Environment name"
type = string
default = "dev"
}
variable "vpc_cidr" {
description = "CIDR block for VPC"
type = string
default = "10.0.0.0/16"
}
variable "availability_zones" {
description = "Availability zones"
type = list(string)
default = ["us-east-1a", "us-east-1b", "us-east-1c"]
}
- outputs.tf
output "vpc_id" {
value = module.vpc.vpc_id
}
output "cluster_endpoint" {
value = module.eks.cluster_endpoint
}
output "cluster_name" {
value = module.eks.cluster_name
}
II. A VPC module (modules/vpc) that creates:
VPC with DNS support
Public and private subnets across multiple AZs
Internet Gateway
NAT Gateway (one, shared by all private subnets)
Route tables
- main.tf
# VPC
resource "aws_vpc" "main" {
  cidr_block           = var.vpc_cidr
  enable_dns_hostnames = true
  enable_dns_support   = true

  tags = {
    Name        = "${var.environment}-vpc"
    Environment = var.environment
  }
}

# Public Subnets (one per AZ, carved out of the VPC CIDR with cidrsubnet)
resource "aws_subnet" "public" {
  count                   = length(var.availability_zones)
  vpc_id                  = aws_vpc.main.id
  cidr_block              = cidrsubnet(var.vpc_cidr, 8, count.index)
  availability_zone       = var.availability_zones[count.index]
  map_public_ip_on_launch = true

  tags = {
    Name        = "${var.environment}-public-${var.availability_zones[count.index]}"
    Environment = var.environment
    "kubernetes.io/cluster/${var.environment}-eks" = "shared"
    "kubernetes.io/role/elb"                       = "1"
  }
}

# Private Subnets (offset by the number of AZs so the CIDR ranges don't overlap)
resource "aws_subnet" "private" {
  count             = length(var.availability_zones)
  vpc_id            = aws_vpc.main.id
  cidr_block        = cidrsubnet(var.vpc_cidr, 8, count.index + length(var.availability_zones))
  availability_zone = var.availability_zones[count.index]

  tags = {
    Name        = "${var.environment}-private-${var.availability_zones[count.index]}"
    Environment = var.environment
    "kubernetes.io/cluster/${var.environment}-eks" = "shared"
    "kubernetes.io/role/internal-elb"              = "1"
  }
}

# Internet Gateway
resource "aws_internet_gateway" "main" {
  vpc_id = aws_vpc.main.id

  tags = {
    Name        = "${var.environment}-igw"
    Environment = var.environment
  }
}

# Elastic IP for the NAT Gateway
resource "aws_eip" "nat" {
  domain = "vpc"

  tags = {
    Name        = "${var.environment}-nat-eip"
    Environment = var.environment
  }
}

# NAT Gateway (placed in the first public subnet; must wait for the IGW to be attached)
resource "aws_nat_gateway" "main" {
  allocation_id = aws_eip.nat.id
  subnet_id     = aws_subnet.public[0].id

  tags = {
    Name        = "${var.environment}-nat"
    Environment = var.environment
  }

  depends_on = [aws_internet_gateway.main]
}

# Public Route Table (default route out through the Internet Gateway)
resource "aws_route_table" "public" {
  vpc_id = aws_vpc.main.id

  route {
    cidr_block = "0.0.0.0/0"
    gateway_id = aws_internet_gateway.main.id
  }

  tags = {
    Name        = "${var.environment}-public-rt"
    Environment = var.environment
  }
}

# Private Route Table (default route out through the NAT Gateway)
resource "aws_route_table" "private" {
  vpc_id = aws_vpc.main.id

  route {
    cidr_block     = "0.0.0.0/0"
    nat_gateway_id = aws_nat_gateway.main.id
  }

  tags = {
    Name        = "${var.environment}-private-rt"
    Environment = var.environment
  }
}

# Route Table Associations
resource "aws_route_table_association" "public" {
  count          = length(var.availability_zones)
  subnet_id      = aws_subnet.public[count.index].id
  route_table_id = aws_route_table.public.id
}

resource "aws_route_table_association" "private" {
  count          = length(var.availability_zones)
  subnet_id      = aws_subnet.private[count.index].id
  route_table_id = aws_route_table.private.id
}
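A quick note on the cidrsubnet calls above: cidrsubnet(var.vpc_cidr, 8, n) adds 8 bits to the /16 VPC CIDR, producing /24 subnets numbered by n. With the default 10.0.0.0/16 and three AZs, the module creates:
public subnets:  10.0.0.0/24, 10.0.1.0/24, 10.0.2.0/24
private subnets: 10.0.3.0/24, 10.0.4.0/24, 10.0.5.0/24
Offsetting the private subnets by length(var.availability_zones) guarantees the two sets of ranges never collide.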
- variables.tf
variable "vpc_cidr" {
description = "CIDR block for VPC"
type = string
}
variable "availability_zones" {
description = "Availability zones"
type = list(string)
}
variable "environment" {
description = "Environment name"
type = string
}
- outputs.tf
output "vpc_id" {
value = aws_vpc.main.id
}
output "public_subnet_ids" {
value = aws_subnet.public[*].id
}
output "private_subnet_ids" {
value = aws_subnet.private[*].id
}
III. An EKS module (modules/eks) that creates:
EKS cluster
Managed node group with configurable scaling (min/max/desired)
Required IAM roles and policies
- main.tf
resource "aws_eks_cluster" "main" {
name = "${var.environment}-eks"
role_arn = aws_iam_role.eks_cluster.arn
version = var.kubernetes_version
vpc_config {
subnet_ids = var.subnet_ids
endpoint_private_access = true
endpoint_public_access = true
}
depends_on = [
aws_iam_role_policy_attachment.eks_cluster_policy,
aws_iam_role_policy_attachment.eks_service_policy,
]
}
# EKS Cluster IAM Role
resource "aws_iam_role" "eks_cluster" {
name = "${var.environment}-eks-cluster"
assume_role_policy = jsonencode({
Version = "2012-10-17"
Statement = [
{
Action = "sts:AssumeRole"
Effect = "Allow"
Principal = {
Service = "eks.amazonaws.com"
}
}
]
})
}
resource "aws_iam_role_policy_attachment" "eks_cluster_policy" {
policy_arn = "arn:aws:iam::aws:policy/AmazonEKSClusterPolicy"
role = aws_iam_role.eks_cluster.name
}
resource "aws_iam_role_policy_attachment" "eks_service_policy" {
policy_arn = "arn:aws:iam::aws:policy/AmazonEKSServicePolicy"
role = aws_iam_role.eks_cluster.name
}
# Node Group
resource "aws_eks_node_group" "main" {
cluster_name = aws_eks_cluster.main.name
node_group_name = "${var.environment}-node-group"
node_role_arn = aws_iam_role.eks_node_group.arn
subnet_ids = var.subnet_ids
scaling_config {
desired_size = var.node_desired_size
max_size = var.node_max_size
min_size = var.node_min_size
}
instance_types = var.node_instance_types
depends_on = [
aws_iam_role_policy_attachment.eks_node_policy,
aws_iam_role_policy_attachment.eks_cni_policy,
aws_iam_role_policy_attachment.eks_container_registry,
]
}
# Node Group IAM Role
resource "aws_iam_role" "eks_node_group" {
name = "${var.environment}-eks-node-group"
assume_role_policy = jsonencode({
Version = "2012-10-17"
Statement = [
{
Action = "sts:AssumeRole"
Effect = "Allow"
Principal = {
Service = "ec2.amazonaws.com"
}
}
]
})
}
resource "aws_iam_role_policy_attachment" "eks_node_policy" {
policy_arn = "arn:aws:iam::aws:policy/AmazonEKSWorkerNodePolicy"
role = aws_iam_role.eks_node_group.name
}
resource "aws_iam_role_policy_attachment" "eks_cni_policy" {
policy_arn = "arn:aws:iam::aws:policy/AmazonEKS_CNI_Policy"
role = aws_iam_role.eks_node_group.name
}
resource "aws_iam_role_policy_attachment" "eks_container_registry" {
policy_arn = "arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly"
role = aws_iam_role.eks_node_group.name
}
- variables.tf
variable "environment" {
description = "Environment name"
type = string
}
variable "subnet_ids" {
description = "Subnet IDs for EKS cluster"
type = list(string)
}
variable "kubernetes_version" {
description = "Kubernetes version"
type = string
default = "1.27"
}
variable "node_desired_size" {
description = "Desired number of worker nodes"
type = number
default = 2
}
variable "node_max_size" {
description = "Maximum number of worker nodes"
type = number
default = 4
}
variable "node_min_size" {
description = "Minimum number of worker nodes"
type = number
default = 1
}
variable "node_instance_types" {
description = "Instance types for worker nodes"
type = list(string)
default = ["t3.medium"]
}
- outputs.tf
output "cluster_endpoint" {
value = aws_eks_cluster.main.endpoint
}
output "cluster_name" {
value = aws_eks_cluster.main.name
}
output "cluster_certificate_authority_data" {
value = aws_eks_cluster.main.certificate_authority[0].data
}
To use this configuration:
1. Initialize Terraform:
terraform init
2. Review the plan:
terraform plan
3. Apply the configuration:
terraform apply
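Once the apply finishes, you can point kubectl at the new cluster. Assuming the default region (us-east-1) and environment name (dev) from variables.tf, the cluster created above will be named dev-eks:
aws eks update-kubeconfig --region us-east-1 --name dev-eks
kubectl get nodes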
The configuration uses variables with sensible defaults, and every root-level default can be overridden through a terraform.tfvars file.
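For example, a terraform.tfvars that targets a different region might look like this (the values shown are illustrative, not part of the repository):
aws_region         = "us-west-2"
environment        = "staging"
vpc_cidr           = "10.10.0.0/16"
availability_zones = ["us-west-2a", "us-west-2b", "us-west-2c"]
Note that the node group sizing is set directly in the root main.tf module "eks" block, so changing it means editing that block rather than terraform.tfvars.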
Key features:
Subnets spread across three AZs
Private subnets for the EKS worker nodes
Public subnets for internet-facing load balancers
Dedicated IAM roles and managed policies for the cluster and node group
Managed node group with min/max scaling bounds
Subnet tags required by EKS and the AWS Load Balancer Controller
One caveat on availability: the module provisions a single NAT Gateway, so all private-subnet egress depends on one AZ; for production, consider one NAT Gateway per AZ.
Project repository: https://github.com/Consultantsrihari/Create-a-Terraform-project-which-Implements-a-VPC-on-AWS-and-deploys-to-EKS-cluster..git
Thanks for reading! Have a great day ahead!
Please connect with me if you have any doubts.
Mail: sriharimalapati6@gmail.com
LinkedIn: www.linkedin.com/in/
GitHub: https://github.com/Consultantsrihari
Medium: Sriharimalapati — Medium