Pulumi stack references and the Kubernetes provider: a centralized stack for an EKS Auto Mode cluster

EKS Auto Mode

Before this feature, you had to manage compute, networking, storage, observability, authentication, and authorization yourself alongside the EKS cluster in order to deploy and reach your workloads.
With EKS Auto Mode, the cluster handles compute, networking, storage, observability, authentication, and authorization for you, so you only need to deploy your workloads
and manage them according to your requirements.
To enable Auto Mode on a cluster you:

  1. Enable access, with the authentication mode set to either API or API_AND_CONFIG_MAP.
  2. Enable compute, which manages and operates the node pools.
  3. Enable storage, which lets the cluster use EBS (or a file system) to store data.
  4. Configure the VPC: subnets, public endpoint access, and private endpoint access.
  5. Enable the Kubernetes network configuration, which lets you deploy Elastic Load Balancing
    without installing the AWS Load Balancer Controller. Optionally set the Kubernetes service CIDR (IPv4 or IPv6); otherwise one is created for you from 10.100.0.0/16 or 172.31.0.0/16.

    Note: You can enable EKS Auto Mode on an existing cluster by enabling all of the components above.
    You can deploy EKS Auto Mode with any of the following (a minimal Pulumi sketch of these settings follows this list):

  1. AWS CLI

  2. eksctl

  3. Infrastructure as code, for example Terraform, Pulumi, or CloudFormation

  4. AWS Management Console
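
Before the full program later in this post, here is a minimal sketch of the Pulumi cluster arguments that switch these components on. It is only a sketch: cluster_role, node_role, and private_subnet_ids are placeholder names assumed to exist, and the complete example below creates their equivalents.

import pulumi_aws as aws

# Sketch only: cluster_role, node_role, and private_subnet_ids are assumed
# to be defined elsewhere (the full program below builds equivalents).
sketch = aws.eks.Cluster(
    "automode-sketch",
    role_arn=cluster_role.arn,
    bootstrap_self_managed_addons=False,
    access_config={"authentication_mode": "API"},            # 1. access
    compute_config={                                          # 2. compute
        "enabled": True,
        "node_pools": ["general-purpose"],
        "node_role_arn": node_role.arn,
    },
    storage_config={"block_storage": {"enabled": True}},      # 3. storage
    vpc_config={                                              # 4. VPC configuration
        "subnet_ids": private_subnet_ids,
        "endpoint_public_access": True,
        "endpoint_private_access": True,
    },
    kubernetes_network_config={                               # 5. Kubernetes configuration
        "elastic_load_balancing": {"enabled": True},
    },
)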

My study references:
1. AWS re:Invent 2024 - Automate your entire Kubernetes cluster with Amazon EKS Auto Mode (KUB204-NEW)
   https://www.youtube.com/watch?v=a_aDPo9oTMo&t=919s
2. Dive into Amazon EKS Auto Mode
   https://www.youtube.com/watch?v=qxPP6zb_3mM&t=2092s
3. AWS re:Invent 2024 - Simplify Kubernetes workloads with Karpenter & Amazon EKS Auto Mode (KUB312)
   https://www.youtube.com/watch?v=JwzP8I8tdaY
4. Simplify Amazon EKS Cluster Access Management using EKS Access Entry API
   https://www.youtube.com/watch?v=NvfOulAqy8w&t=48s
5. EKS Auto - Theory and Live Demo and Q/A (From Principal SA at AWS)
   https://www.youtube.com/watch?v=qxVUMyW3a_g
6. Simplified Amazon EKS Access - NEW Cluster Access Management Controls
   https://www.youtube.com/watch?v=ae25cbV5Lxo
7. Automate cluster infrastructure with EKS Auto Mode
   https://docs.aws.amazon.com/eks/latest/userguide/automode.html
8. A deep dive into simplified Amazon EKS access management controls
   https://aws.amazon.com/blogs/containers/a-deep-dive-into-simplified-amazon-eks-access-management-controls/

Example: deploying EKS Auto Mode with Pulumi in Python

a"""An AWS Python Pulumi program"""
import pulumi , pulumi_aws as aws ,json

cfg1=pulumi.Config()

eksvpc1=aws.ec2.Vpc(
"eksvpc1",
aws.ec2.VpcArgs(
cidr_block=cfg1.require_secret(key="block1"),
tags={
"Name": "eksvpc1",
},
enable_dns_hostnames=True,
enable_dns_support=True
)
)
intgw1=aws.ec2.InternetGateway(
"intgw1" ,
aws.ec2.InternetGatewayArgs(
vpc_id=eksvpc1.id,
tags={
"Name": "intgw1",
},
)
)

pbsubs=["public1","public2"]
zones=["us-east-1a","us-east-1b"]
pbcidr1=cfg1.require_secret("cidr1")
pbcidr2=cfg1.require_secret("cidr2")
pbcidrs=[pbcidr1,pbcidr2]
for allpbsub in range(len(pbsubs)):
    pbsubs[allpbsub] = aws.ec2.Subnet(
        pbsubs[allpbsub],
        aws.ec2.SubnetArgs(
            vpc_id=eksvpc1.id,
            cidr_block=pbcidrs[allpbsub],
            availability_zone=zones[allpbsub],
            map_public_ip_on_launch=True,
            tags={
                "Name": pbsubs[allpbsub],
                "kubernetes.io/role/elb": "1",
            },
        ),
    )

ndsubs=["node1","node2"]
ndcidr1=cfg1.require_secret("cidr3")
ndcidr2=cfg1.require_secret("cidr4")
ndcidrs=[ndcidr1,ndcidr2]
for allndsub in range(len(ndsubs)):
    ndsubs[allndsub] = aws.ec2.Subnet(
        ndsubs[allndsub],
        aws.ec2.SubnetArgs(
            vpc_id=eksvpc1.id,
            cidr_block=ndcidrs[allndsub],
            availability_zone=zones[allndsub],
            tags={
                "Name": ndsubs[allndsub],
                "kubernetes.io/role/internal-elb": "1",
            },
        ),
    )

publictable=aws.ec2.RouteTable(
"publictable",
aws.ec2.RouteTableArgs(
vpc_id=eksvpc1.id,
routes=[
aws.ec2.RouteTableRouteArgs(
cidr_block=cfg1.require_secret(key="any-traffic-ipv4"),
gateway_id=intgw1.id,
),
],
tags={
"Name": "publictable",
},
)
)

tblink1=aws.ec2.RouteTableAssociation(
"tblink1",
aws.ec2.RouteTableAssociationArgs(
subnet_id=pbsubs[0].id,
route_table_id=publictable.id,
)
)

tblink2=aws.ec2.RouteTableAssociation(
"tblink2",
aws.ec2.RouteTableAssociationArgs(
subnet_id=pbsubs[1].id,
route_table_id=publictable.id,
)
)

eips=["eip1", "eip2"]
for alleip in range(len(eips)):
    eips[alleip] = aws.ec2.Eip(
        eips[alleip],
        aws.ec2.EipArgs(
            domain="vpc",
            tags={
                "Name": eips[alleip],
            },
        ),
    )

natgws=["natgw1", "natgw2"]
allocates=[eips[0].id , eips[1].id]
# One NAT gateway per public subnet, each with its own Elastic IP.
for allnat in range(len(natgws)):
    natgws[allnat] = aws.ec2.NatGateway(
        natgws[allnat],
        aws.ec2.NatGatewayArgs(
            subnet_id=pbsubs[allnat].id,
            allocation_id=allocates[allnat],
            tags={
                "Name": natgws[allnat],
            },
        ),
    )

privatetables=["privatetable1" , "privatetable2"]
for allprivtable in range(len(privatetables)):
    privatetables[allprivtable] = aws.ec2.RouteTable(
        privatetables[allprivtable],
        aws.ec2.RouteTableArgs(
            vpc_id=eksvpc1.id,
            routes=[
                # Each private route table gets a single default route through the
                # NAT gateway in the matching availability zone.
                aws.ec2.RouteTableRouteArgs(
                    cidr_block=cfg1.require_secret(key="any-traffic-ipv4"),
                    nat_gateway_id=natgws[allprivtable].id,
                ),
            ],
            tags={
                "Name": privatetables[allprivtable],
            },
        ),
    )

privatetablelink1=aws.ec2.RouteTableAssociation(
"privatetablelink1",
aws.ec2.RouteTableAssociationArgs(
subnet_id=ndsubs[0].id,
route_table_id=privatetables[0].id,
)
)

privatetablelink2=aws.ec2.RouteTableAssociation(
"privatetablelink2",
aws.ec2.RouteTableAssociationArgs(
subnet_id=ndsubs[1].id,
route_table_id=privatetables[1].id,
)
)

inbound_traffic=[
aws.ec2.NetworkAclIngressArgs(
from_port=0,
to_port=0,
rule_no=100,
action="allow",
protocol="-1",
cidr_block=cfg1.require_secret(key="any-traffic-ipv4"),
icmp_code=0,
icmp_type=0
),
]
outbound_traffic=[
aws.ec2.NetworkAclEgressArgs(
from_port=0,
to_port=0,
rule_no=100,
action="allow",
protocol="-1",
cidr_block=cfg1.require_secret(key="any-traffic-ipv4"),
icmp_code=0,
icmp_type=0
),
]

nacllists=["mynacls1" , "mynacls2"]
for allnacls in range(len(nacllists)):
    nacllists[allnacls] = aws.ec2.NetworkAcl(
        nacllists[allnacls],
        aws.ec2.NetworkAclArgs(
            vpc_id=eksvpc1.id,
            ingress=inbound_traffic,
            egress=outbound_traffic,
            tags={
                "Name": nacllists[allnacls],
            },
        ),
    )
nacls30=aws.ec2.NetworkAclAssociation(
"nacls30",
aws.ec2.NetworkAclAssociationArgs(
network_acl_id=nacllists[0].id,
subnet_id=pbsubs[0].id
)
)

nacls31=aws.ec2.NetworkAclAssociation(
"nacls31",
aws.ec2.NetworkAclAssociationArgs(
network_acl_id=nacllists[0].id,
subnet_id=pbsubs[1].id
)
)

nacls10=aws.ec2.NetworkAclAssociation(
"nacls10",
aws.ec2.NetworkAclAssociationArgs(
network_acl_id=nacllists[1].id,
subnet_id=ndsubs[0].id
)
)

nacls11=aws.ec2.NetworkAclAssociation(
"nacls11",
aws.ec2.NetworkAclAssociationArgs(
network_acl_id=nacllists[1].id,
subnet_id=ndsubs[1].id
)
)

eksrole=aws.iam.Role(
"eksrole",
aws.iam.RoleArgs(
assume_role_policy=json.dumps({
"Version":"2012-10-17",
"Statement":[
{
"Effect":"Allow",
"Principal":{
"Service":"eks.amazonaws.com"
},
"Action": [
"sts:AssumeRole",
"sts:TagSession",
]
}
] })

))

nodesrole=aws.iam.Role(
"nodesrole",
aws.iam.RoleArgs(
assume_role_policy=json.dumps({
"Version": "2012-10-17",
"Statement":[
{
"Effect": "Allow",
"Principal":{
"Service":"ec2.amazonaws.com"
},
"Action": "sts:AssumeRole"
}

]
})
))

clusterattach1=aws.iam.RolePolicyAttachment(
"clusterattach1",
aws.iam.RolePolicyAttachmentArgs(
role=eksrole.name,
policy_arn="arn:aws:iam::aws:policy/AmazonEKSClusterPolicy",
)
)

clusterattach2=aws.iam.RolePolicyAttachment(
"clusterattach2",
aws.iam.RolePolicyAttachmentArgs(
role=eksrole.name,
policy_arn="arn:aws:iam::aws:policy/AmazonEKSComputePolicy",
)
)

clusterattach3=aws.iam.RolePolicyAttachment(
"clusterattach3",
aws.iam.RolePolicyAttachmentArgs(
role=eksrole.name,
policy_arn="arn:aws:iam::aws:policy/AmazonEKSBlockStoragePolicy",
)
)

clusterattach4=aws.iam.RolePolicyAttachment(
"clusterattach4",
aws.iam.RolePolicyAttachmentArgs(
role=eksrole.name,
policy_arn="arn:aws:iam::aws:policy/AmazonEKSLoadBalancingPolicy",
)
)

clusterattach5=aws.iam.RolePolicyAttachment(
"clusterattach5",
aws.iam.RolePolicyAttachmentArgs(
role=eksrole.name,
policy_arn="arn:aws:iam::aws:policy/AmazonEKSNetworkingPolicy",
)
)

nodesattach1=aws.iam.RolePolicyAttachment(
"nodesattach1",
aws.iam.RolePolicyAttachmentArgs(
role=nodesrole.name,
policy_arn="arn:aws:iam::aws:policy/AmazonEKSWorkerNodeMinimalPolicy",
)
)

nodesattach2=aws.iam.RolePolicyAttachment(
"nodesattach2",
aws.iam.RolePolicyAttachmentArgs(
role=nodesrole.name,
policy_arn="arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryPullOnly",
)
)

automode=aws.eks.Cluster(
"automode",
aws.eks.ClusterArgs(
name="automode",
bootstrap_self_managed_addons=False,
role_arn=eksrole.arn,
version="1.31",
compute_config={
"enabled": True,
"node_pools": ["general-purpose"],
"node_role_arn": nodesrole.arn,
},
access_config={
"authentication_mode": "API",
},
storage_config={
"block_storage": {
"enabled": True,
},

},
kubernetes_network_config={
"elastic_load_balancing": {
"enabled": True,
},
},

    tags={
      "Name" : "automode"    
    },
    vpc_config={
    "endpoint_private_access": True,
    "endpoint_public_access": True,
    "public_access_cidrs": [
        cfg1.require_secret(key="myips"),
    ],
    "subnet_ids": [
        ndsubs[0].id,
        ndsubs[1].id,
    ],
    }
),
opts=pulumi.ResourceOptions(
        depends_on=[
          clusterattach1,
          clusterattach2,
          clusterattach3,
          clusterattach4,
          clusterattach5,
        ]
    )

)

myentry1=aws.eks.AccessEntry(
"myentry1",
aws.eks.AccessEntryArgs(
cluster_name=automode.name,
principal_arn=cfg1.require_secret(key="principal"),
type="STANDARD"
),
opts=pulumi.ResourceOptions(
depends_on=[
automode
]
)
)

entrypolicy1=aws.eks.AccessPolicyAssociation(
"entrypolicy1",
aws.eks.AccessPolicyAssociationArgs(
cluster_name=automode.name,
principal_arn=myentry1.principal_arn,
policy_arn="arn:aws:eks::aws:cluster-access-policy/AmazonEKSClusterAdminPolicy",
access_scope={
"type" : "cluster",
}
),
opts=pulumi.ResourceOptions(
depends_on=[
automode
]
)
)
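
One addition worth making at the end of this program: the kubedev stack shown later reads an output named cluster from this stack, so that value has to be exported here. A minimal sketch:

# Export the cluster name so other stacks (kubedev below) can read it
# through a StackReference as stackref1.get_output("cluster").
pulumi.export("cluster", automode.name)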

Note:
If you cannot access the EKS cluster from your local PC or laptop,
follow this video: Fixing 'The Server Has Asked for Credentials' in Kubernetes Cluster API - EKS AWS Production

https://www.youtube.com/watch?v=4aLwQASVHAE
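
Before digging into access entries, a common first step is to refresh the local kubeconfig for the new cluster (assuming the cluster name and region used in the program above):

aws eks update-kubeconfig --region us-east-1 --name automode
kubectl get nodes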
Create deploy.yaml with a Deployment and a NodePort Service:
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx-deploy
  namespace: default
  labels:
    app: nginx
spec:
  selector:
    matchLabels:
      app: nginx
  replicas: 4
  strategy:
    rollingUpdate:
      maxSurge: 25%
      maxUnavailable: 25%
    type: RollingUpdate
  template:
    metadata:
      labels:
        app: nginx
    spec:
      # initContainers:
      # Init containers are exactly like regular containers, except:
      # - Init containers always run to completion.
      # - Each init container must complete successfully before the next one starts.
      containers:
      - name: nginx
        image: nginx:1.27.3
        imagePullPolicy: IfNotPresent
        resources:
          requests:
            cpu: 100m
            memory: 100Mi
          limits:
            cpu: 100m
            memory: 100Mi
        livenessProbe:
          tcpSocket:
            port: 80
          initialDelaySeconds: 5
          timeoutSeconds: 5
          successThreshold: 1
          failureThreshold: 3
          periodSeconds: 10
        readinessProbe:
          httpGet:
            path: /
            port: 80
          initialDelaySeconds: 5
          timeoutSeconds: 2
          successThreshold: 1
          failureThreshold: 3
          periodSeconds: 10
        ports:
        - containerPort: 80
          name: http

apiVersion: v1
kind: Service
metadata:
  name: nginx-svc
  namespace: default
spec:
  selector:
    app: nginx
  type: NodePort
  ports:
  - name: nginx-svc
    protocol: TCP
    port: 80
    targetPort: 80
    # If you set the spec.type field to NodePort and you want a specific port number,
    # you can specify a value in the spec.ports[*].nodePort field.

Create the IngressClass and Ingress:


apiVersion: networking.k8s.io/v1
kind: IngressClass
metadata:
  name: myingress
  namespace: default
  labels:
    app.kubernetes.io/name: myingress
spec:
  controller: eks.amazonaws.com/alb


apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: myalblab
  namespace: default
  annotations:
    alb.ingress.kubernetes.io/scheme: internet-facing
    alb.ingress.kubernetes.io/target-type: ip

spec:
  ingressClassName: myingress
  rules:
  - http:
      paths:
      - path: /
        pathType: Prefix
        backend:
          service:
            name: nginx-svc
            port:
              number: 80

Create an NLB with a Service of type LoadBalancer:

apiVersion: v1
kind: Service
metadata:
  name: lbsvc
  namespace: default
  annotations:
    service.beta.kubernetes.io/aws-load-balancer-type: nlb
    service.beta.kubernetes.io/aws-load-balancer-scheme: internet-facing
    service.beta.kubernetes.io/aws-load-balancer-nlb-target-type: ip

spec:
  # Selects the nginx pods created by the Deployment above.
  selector:
    app: nginx
  type: LoadBalancer
  ports:
  - name: lbsvc
    protocol: TCP
    port: 80
    targetPort: 80
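
After applying this manifest, the DNS name of the provisioned NLB appears on the Service (a quick check, assuming kubectl points at the cluster):

kubectl get service lbsvc -n default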

Other option: using stack references and the Pulumi Kubernetes provider
Main goal:
Have a centralized stack that other stacks can use to deploy their resources. In other words, the outputs of stack A become reference values (variables) in stack B, which makes provisioning easier to manage and control.
A stack reference requires the fully qualified stack name:
organization/project-name/stack-name

Note: for an individual account, the organization is your Pulumi account name.

Example:
Stack eksdev deploys the EKS Auto Mode cluster.
Stack kubedev deploys the Deployment, Service, and Ingress with the Pulumi Kubernetes provider.
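
Once eksdev exports its output (see the pulumi.export sketch added after the cluster program above), you can check the value from the CLI before wiring up the reference:

pulumi stack output cluster -s MohammedBanabila/lab-eks/eksdev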

Stack kubedev:

 """A k8s Python Pulumi program"""

import pulumi
import pulumi_kubernetes as k8s

cluster_stack="MohammedBanabila/lab-eks/eksdev"

stackref1=pulumi.StackReference(name="stackref1", stack_name=cluster_stack)

cfg1=pulumi.Config()

# Read the cluster output exported by the eksdev stack, then re-export it here.
mycluster = stackref1.get_output("cluster")
pulumi.export("mycluster", mycluster)

# The provider's `cluster` argument selects the named cluster from the local kubeconfig.
provider = k8s.Provider("provider", cluster=mycluster)

deploy=k8s.apps.v1.Deployment(
"deploy",
metadata=k8s.meta.v1.ObjectMetaArgs(
name="nginx-deploy",
namespace="default",
labels={
"app":"nginx"
}
),
spec=k8s.apps.v1.DeploymentSpecArgs(
replicas=4,
selector=k8s.meta.v1.LabelSelectorArgs(
match_labels={
"app":"nginx"
}
),
template=k8s.core.v1.PodTemplateSpecArgs(
metadata=k8s.meta.v1.ObjectMetaArgs(
labels={
"app":"nginx"
}
),
spec=k8s.core.v1.PodSpecArgs(
containers=[
k8s.core.v1.ContainerArgs(
name="nginx",
image="nginx",
ports=[
k8s.core.v1.ContainerPortArgs(
container_port=80
)
]
)
]
)
)
) ,

opts=pulumi.ResourceOptions(provider=provider)

)

svc1=k8s.core.v1.Service(
"svc1",
metadata=k8s.meta.v1.ObjectMetaArgs(
name="nginx-svc",
namespace="default"
),
spec=k8s.core.v1.ServiceSpecArgs(
selector={
"app":"nginx"
},
ports=[
k8s.core.v1.ServicePortArgs(
port=80,
target_port=80
)
],
type="NodePort"
) ,

opts=pulumi.ResourceOptions(provider=provider)  

)

ingressclass=k8s.networking.v1.IngressClass(
"ingressclass",
metadata=k8s.meta.v1.ObjectMetaArgs(
name="nginx-ingress",
namespace="default",
labels={
"app.kubernetes.io/name":"nginx-ingress"
}
),
spec=k8s.networking.v1.IngressClassSpecArgs(
controller="eks.amazonaws.com/alb"
),

opts=pulumi.ResourceOptions(provider=provider)

)

myingress=k8s.networking.v1.Ingress(
"myingress",
metadata=k8s.meta.v1.ObjectMetaArgs(
name="nginx-ingress",
namespace="default",
labels={
"app" : "nginx"
},
annotations={
"alb.ingress.kubernetes.io/scheme":"internet-facing",
"alb.ingress.kubernetes.io/target-type":"ip"
}
),
spec=k8s.networking.v1.IngressSpecArgs(
ingress_class_name="nginx-ingress",
rules=[
k8s.networking.v1.IngressRuleArgs(
http=k8s.networking.v1.HTTPIngressRuleValueArgs(
paths=[
k8s.networking.v1.HTTPIngressPathArgs(
path="/",
path_type="Prefix",
backend=k8s.networking.v1.IngressBackendArgs(
service=k8s.networking.v1.IngressServiceBackendArgs(
name="nginx-svc",
port=k8s.networking.v1.ServiceBackendPortArgs(
number=80
)
)
)
)
]
)
)
]
) ,

opts=pulumi.ResourceOptions(provider=provider)

)
