Jeffrey Hicks

Coding a Barebones EKS Cluster using Terraform Registry Module

Resources:

  • Video Notes

Prereq - VPC Module

provider "aws" {
  region = local.region
}

locals {
  name   = "barebones-cluster"
  region = "us-east-1"

  vpc_cidr = "10.123.0.0/16"
  azs      = ["us-east-1a", "us-east-1b"]

  public_subnets  = ["10.123.1.0/24", "10.123.2.0/24"]
  private_subnets = ["10.123.3.0/24", "10.123.4.0/24"]
  intra_subnets   = ["10.123.5.0/24", "10.123.6.0/24"]

  tags = {
    Example = local.name
  }
}

module "vpc" {
  source  = "terraform-aws-modules/vpc/aws"
  version = "~> 4.0"

  name = local.name
  cidr = local.vpc_cidr

  azs             = local.azs
  private_subnets = local.private_subnets
  public_subnets  = local.public_subnets
  intra_subnets   = local.intra_subnets

  enable_nat_gateway = true

  public_subnet_tags = {
    "kubernetes.io/role/elb" = 1
  }

  private_subnet_tags = {
    "kubernetes.io/role/internal-elb" = 1
  }
}
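The intra subnets are intended for the EKS control plane ENIs, with worker nodes on the private subnets. As a rough sketch of how the EKS module (built below) consumes these VPC outputs, following the wiring used in the module's complete example:

  # Inside module "eks" (sketch; mirrors the complete example's wiring)
  vpc_id                   = module.vpc.vpc_id
  subnet_ids               = module.vpc.private_subnets # worker nodes
  control_plane_subnet_ids = module.vpc.intra_subnets   # control plane ENIs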

Using a module

  • Visit the Terraform EKS Module source code

  • Open the examples folder

    • Recommended:
      • complete (used in this guide)
      • self_managed_node_group
    • Warning:
      • eks_managed_node_group - IPv6 & IPv4 variants
  • Open the complete example > main.tf

  • Copy the contents under the EKS Module header

################################################################################
# EKS Module
################################################################################
module "eks" {
  source  = "terraform-aws-modules/eks/aws"
  version = "19.20.0"

and use it to replace the local source reference:

module "eks" {
  source = "../.."

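After swapping the source, the top of the module pins the registry version and sets the cluster basics. A minimal sketch; cluster_version and the public endpoint setting are assumptions for a demo cluster, not from the example:

module "eks" {
  source  = "terraform-aws-modules/eks/aws"
  version = "19.20.0"

  cluster_name    = local.name
  cluster_version = "1.27" # assumed; use a currently supported version

  # Assumption: expose the API endpoint publicly so kubectl works from your machine
  cluster_endpoint_public_access = true

  # ... VPC wiring and node groups follow
}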
  • Remove "extra" configuration, such as the add-on preserve and timeouts settings:
      preserve    = true
      timeouts = {
        create = "25m"
        delete = "10m"
      }
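Those preserve and timeouts keys live inside the cluster_addons entries. After stripping them, a pared-down add-ons block, mirroring the complete example's core add-ons, might look like this:

  cluster_addons = {
    coredns    = { most_recent = true }
    kube-proxy = { most_recent = true }
    vpc-cni    = { most_recent = true }
  }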
  • The complete example creates its KMS key externally. To let the EKS module manage the key instead, delete this section:
  # External encryption key
  create_kms_key = false
  cluster_encryption_config = {
    resources        = ["secrets"]
    provider_key_arn = module.kms.key_arn
  }
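With that section deleted, the module falls back to its defaults, which (in v19) create a module-managed KMS key and use it to encrypt Kubernetes secrets. The explicit equivalent would be roughly this (assumed to mirror the module defaults):

  create_kms_key = true
  cluster_encryption_config = {
    resources = ["secrets"]
  }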
  • If you need to attach additional IAM policies to the cluster role, configure iam_role_additional_policies; for now, you can delete it:
  iam_role_additional_policies = {
    additional = aws_iam_policy.additional.arn
  }
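For reference, the map takes any policy ARN per entry. Attaching the AWS-managed SSM policy, for example, would look like this (illustrative only, not part of the barebones setup):

  iam_role_additional_policies = {
    ssm = "arn:aws:iam::aws:policy/AmazonSSMManagedInstanceCore"
  }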
  • Add any cluster and node communication rules you need, or for now delete:
  # Extend cluster security group rules
  cluster_security_group_additional_rules = {
    ingress_nodes_ephemeral_ports_tcp = {
      description                = "Nodes on ephemeral ports"
      protocol                   = "tcp"
      from_port                  = 1025
      to_port                    = 65535
      type                       = "ingress"
      source_node_security_group = true
    }
    # Test: https://github.com/terraform-aws-modules/terraform-aws-eks/pull/2319
    ingress_source_security_group_id = {
      description              = "Ingress from another computed security group"
      protocol                 = "tcp"
      from_port                = 22
      to_port                  = 22
      type                     = "ingress"
      source_security_group_id = aws_security_group.additional.id
    }
  }
  # Extend node-to-node security group rules
  node_security_group_additional_rules = {
    ingress_self_all = {
      description = "Node to node all ports/protocols"
      protocol    = "-1"
      from_port   = 0
      to_port     = 0
      type        = "ingress"
      self        = true
    }
    # Test: https://github.com/terraform-aws-modules/terraform-aws-eks/pull/2319
    ingress_source_security_group_id = {
      description              = "Ingress from another computed security group"
      protocol                 = "tcp"
      from_port                = 22
      to_port                  = 22
      type                     = "ingress"
      source_security_group_id = aws_security_group.additional.id
    }
  }
  • To keep things simple, use a managed node group instead of a self-managed node group; delete the following:
  # Self Managed Node Group(s)
  self_managed_node_group_defaults = {
    vpc_security_group_ids = [aws_security_group.additional.id]
    iam_role_additional_policies = {
      additional = aws_iam_policy.additional.arn
    }

    instance_refresh = {
      strategy = "Rolling"
      preferences = {
        min_healthy_percentage = 66
      }
    }
  }

  self_managed_node_groups = {
    spot = {
      instance_type = "m5.large"
      instance_market_options = {
        market_type = "spot"
      }

      pre_bootstrap_user_data = <<-EOT
        echo "foo"
        export FOO=bar
      EOT

      bootstrap_extra_args = "--kubelet-extra-args '--node-labels=node.kubernetes.io/lifecycle=spot'"

      post_bootstrap_user_data = <<-EOT
        cd /tmp
        sudo yum install -y https://s3.amazonaws.com/ec2-downloads-windows/SSMAgent/latest/linux_amd64/amazon-ssm-agent.rpm
        sudo systemctl enable amazon-ssm-agent
        sudo systemctl start amazon-ssm-agent
      EOT
    }
  }
  • In the EKS Managed Node Group(s) defaults, update the instance types:
    instance_types = ["m6i.large", "m5.large", "m5n.large", "m5zn.large"]
  • Configure vpc_security_group_ids and iam_role_additional_policies if you need them; otherwise remove:
    vpc_security_group_ids                = [aws_security_group.additional.id]

    iam_role_additional_policies = {
      additional = aws_iam_policy.additional.arn
    }
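After these removals, the managed node group defaults shrink to something like this sketch (ami_type is carried over from the complete example):

  eks_managed_node_group_defaults = {
    ami_type       = "AL2_x86_64"
    instance_types = ["m6i.large", "m5.large", "m5n.large", "m5zn.large"]
  }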
  • Remove the blue and green groups, create a worker group named after the cluster (barebones-cluster-wg), and reduce the maximum size to 2:
  eks_managed_node_groups = {
    barebones-cluster-wg = {
      min_size     = 1
      max_size     = 2
      desired_size = 1

      instance_types = ["t3.small"]
  • Remove the labels and taints for the barebones setup:
      labels = {
        Environment = "test"
        GithubRepo  = "terraform-aws-eks"
        GithubOrg   = "terraform-aws-modules"
      }

      taints = {
        dedicated = {
          key    = "dedicated"
          value  = "gpuGroup"
          effect = "NO_SCHEDULE"
        }
      }
  • Remove update_config for the barebones setup:
      update_config = {
        max_unavailable_percentage = 33 # or set `max_unavailable`
      }
  • Update the ExtraTag value to helloworld:
      tags = {
        ExtraTag = "helloworld"
      }
  • Remove all of the Fargate configuration (not well supported):
  # Fargate Profile(s)
  fargate_profiles = {
    default = {
      name = "default"

   ...
  • A barebones install doesn't require extra auth configuration, but this is where you would manage aws-auth entries. For now, delete:
  # aws-auth configmap
  manage_aws_auth_configmap = true

  aws_auth_node_iam_role_arns_non_windows = [
    module.eks_managed_node_group.iam_role_arn,
    module.self_managed_node_group.iam_role_arn,
  ]
  aws_auth_fargate_profile_pod_execution_role_arns = [
    module.fargate_profile.fargate_profile_pod_execution_role_arn
  ]

  aws_auth_roles = [
    {
      rolearn  = module.eks_managed_node_group.iam_role_arn
      username = "system:node:{{EC2PrivateDNSName}}"
      groups = [
        "system:bootstrappers",
        "system:nodes",
      ]
    },
    {
      rolearn  = module.self_managed_node_group.iam_role_arn
      username = "system:node:{{EC2PrivateDNSName}}"
      groups = [
        "system:bootstrappers",
        "system:nodes",
      ]
    },
    {
      rolearn  = module.fargate_profile.fargate_profile_pod_execution_role_arn
      username = "system:node:{{SessionName}}"
      groups = [
        "system:bootstrappers",
        "system:nodes",
        "system:node-proxier",
      ]
    }
  ]

  aws_auth_users = [
    {
      userarn  = "arn:aws:iam::66666666666:user/user1"
      username = "user1"
      groups   = ["system:masters"]
    },
    {
      userarn  = "arn:aws:iam::66666666666:user/user2"
      username = "user2"
      groups   = ["system:masters"]
    },
  ]

  aws_auth_accounts = [
    "777777777777",
    "888888888888",
  ]
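Putting the surviving pieces together, the barebones module ends up roughly like this. This is a sketch under the assumptions above: cluster_version and the public endpoint flag are mine; everything else follows the steps in this guide:

module "eks" {
  source  = "terraform-aws-modules/eks/aws"
  version = "19.20.0"

  cluster_name    = local.name
  cluster_version = "1.27" # assumed

  # Assumption: public endpoint so kubectl works without a bastion
  cluster_endpoint_public_access = true

  cluster_addons = {
    coredns    = { most_recent = true }
    kube-proxy = { most_recent = true }
    vpc-cni    = { most_recent = true }
  }

  vpc_id                   = module.vpc.vpc_id
  subnet_ids               = module.vpc.private_subnets
  control_plane_subnet_ids = module.vpc.intra_subnets

  eks_managed_node_groups = {
    barebones-cluster-wg = {
      min_size     = 1
      max_size     = 2
      desired_size = 1

      instance_types = ["t3.small"]

      tags = {
        ExtraTag = "helloworld"
      }
    }
  }

  tags = local.tags
}

Then initialize, apply, and point kubectl at the new cluster:

terraform init
terraform plan
terraform apply

aws eks update-kubeconfig --region us-east-1 --name barebones-cluster
kubectl get nodes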
