<?xml version="1.0" encoding="UTF-8"?>
<rss version="2.0" xmlns:atom="http://www.w3.org/2005/Atom" xmlns:dc="http://purl.org/dc/elements/1.1/">
  <channel>
    <title>DEV Community: AMIT CHATURVEDI</title>
    <description>The latest articles on DEV Community by AMIT CHATURVEDI (@gittest20202).</description>
    <link>https://dev.to/gittest20202</link>
    <image>
      <url>https://media2.dev.to/dynamic/image/width=90,height=90,fit=cover,gravity=auto,format=auto/https:%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Fuser%2Fprofile_image%2F1232599%2Fb42b53ab-1291-470e-a078-9b034ea7ca36.png</url>
      <title>DEV Community: AMIT CHATURVEDI</title>
      <link>https://dev.to/gittest20202</link>
    </image>
    <atom:link rel="self" type="application/rss+xml" href="https://dev.to/feed/gittest20202"/>
    <language>en</language>
    <item>
      <title>Automation Using GitLab Issue Templates with approval process</title>
      <dc:creator>AMIT CHATURVEDI</dc:creator>
      <pubDate>Sat, 07 Sep 2024 16:14:02 +0000</pubDate>
      <link>https://dev.to/gittest20202/automation-using-gitlab-issue-templates-with-approval-process-5181</link>
      <guid>https://dev.to/gittest20202/automation-using-gitlab-issue-templates-with-approval-process-5181</guid>
      <description>&lt;p&gt;&lt;strong&gt;Introduction&lt;/strong&gt;&lt;/p&gt;

&lt;p&gt;&lt;em&gt;GitLab Issue Templates allow teams to standardize and automate issue creation, ensuring that all necessary information is collected upfront and that processes are consistently followed. Automating issue creation using these templates can lead to better collaboration, streamlined workflows, and faster resolution of issues.&lt;/em&gt;&lt;/p&gt;

&lt;p&gt;&lt;strong&gt;1) Kubernetes Pod Restart&lt;br&gt;
2) Kubernetes Deployment Image Patch&lt;/strong&gt; &lt;/p&gt;

&lt;p&gt;&lt;strong&gt;What Are GitLab Issue Templates?&lt;/strong&gt;&lt;br&gt;
&lt;em&gt;GitLab issue templates are pre-defined formats for issues that provide a structured way to report bugs, suggest features, or handle tasks. They help ensure that team members include all relevant information when creating issues, reducing miscommunication and the need for follow-up.&lt;/em&gt;&lt;/p&gt;

&lt;p&gt;&lt;strong&gt;Why Use GitLab Issue Templates?&lt;/strong&gt;&lt;/p&gt;

&lt;p&gt;&lt;strong&gt;Consistency:&lt;/strong&gt; &lt;em&gt;Ensure that all issues follow a uniform structure.&lt;/em&gt;&lt;/p&gt;

&lt;p&gt;&lt;strong&gt;Speed:&lt;/strong&gt; &lt;em&gt;Quickly generate issues by filling in pre-set fields.&lt;/em&gt;&lt;/p&gt;

&lt;p&gt;&lt;strong&gt;Clarity:&lt;/strong&gt; &lt;em&gt;Include necessary details, such as steps to reproduce bugs, acceptance criteria, and links to relevant documentation.&lt;/em&gt;&lt;/p&gt;

&lt;p&gt;&lt;strong&gt;Automation&lt;/strong&gt;: &lt;em&gt;Automate the issue creation process by embedding workflows into the template.&lt;/em&gt;&lt;/p&gt;

&lt;p&gt;&lt;strong&gt;&lt;em&gt;Automating Template to Restart Kubernetes Pod and Patch Deployment&lt;/em&gt;&lt;/strong&gt;&lt;/p&gt;

&lt;p&gt;&lt;strong&gt;Setting Up a Template in Your Repository&lt;/strong&gt;&lt;br&gt;
&lt;em&gt;Issue templates are stored in your repository and can be reused across multiple issues. To create a template, follow these steps:&lt;/em&gt;&lt;/p&gt;

&lt;p&gt;&lt;em&gt;Navigate to your GitLab repository&lt;/em&gt;.&lt;br&gt;
&lt;em&gt;Go to the &lt;strong&gt;.gitlab/issue_templates&lt;/strong&gt; folder in the root directory. If the folder doesn't exist, create one and add a new &lt;strong&gt;Markdown file (.md)&lt;/strong&gt; for each issue template. For example, you can name it:&lt;/em&gt;&lt;/p&gt;

&lt;p&gt;&lt;strong&gt;k8s_deployment_patch.md&lt;/strong&gt;&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight plaintext"&gt;&lt;code&gt;### Environment
environment=

### Namespace
namespace=

### Deployment Name
deployment=

### Container Name
container_name=

### Image Name
image=

### Justification

/label ~"type::issue" ~"action::deployment-patch"
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;p&gt;&lt;strong&gt;k8s_deployments_restart.md&lt;/strong&gt;&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight plaintext"&gt;&lt;code&gt;### Environment
environment=

### Namespace
namespace=

### Deployment Name
deployment=

### Justification

/label ~"type::issue" ~"action::pod-restart"
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;p&gt;&lt;strong&gt;Merge the Code And Verify&lt;/strong&gt;&lt;/p&gt;

&lt;p&gt;&lt;strong&gt;Project --&amp;gt; Issues --&amp;gt; Create New Issue&lt;/strong&gt;&lt;/p&gt;

&lt;p&gt;&lt;a href="https://media.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fcloudopskube.com%2Fblog%2Fcontent%2Fimages%2F20240907205147-GL2.png" class="article-body-image-wrapper"&gt;&lt;img src="https://media.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fcloudopskube.com%2Fblog%2Fcontent%2Fimages%2F20240907205147-GL2.png" alt="enter image description here"&gt;&lt;/a&gt;&lt;/p&gt;

&lt;p&gt;&lt;strong&gt;Let's Create the Automation Code (Using Python)&lt;/strong&gt;&lt;/p&gt;

&lt;p&gt;&lt;strong&gt;1) Code To List Created Issue&lt;/strong&gt;&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight plaintext"&gt;&lt;code&gt;import gitlab
import re
import os
import subprocess
from common import check_namespace_exists, check_deployment_exists, close_issue, approval_verify,extract_info,approver_list



def init_gitlab_connection(private_token):
    return gitlab.Gitlab('https://gitlab.com', private_token=private_token)

def process_issues(issues, project):
    for issue in issues:
        labels = set(issue.labels)
        comment = (f" 🤖 Its a automated genetated note. Waiting for approval from @{', '.join(approver_list)}")
        notes = issue.notes.list(get_all=True)
        if not notes:  
          issue.notes.create({'body': comment})
        if 'action::pod-restart' in labels:
            namespace, deployment, env, img, cn = extract_info(issue.description)   
            if approval_verify(issue):
                result = subprocess.run(['python', 'deployment_restart.py', namespace, deployment, env], capture_output=True, text=True)
                res = result.returncode
                if res == 0:
                   comment = (f"Deployment '{deployment}' in Namespace '{namespace}' has been restarted 🚀")
                else:
                   comment = (f"Deployment '{deployment}' or Namespace '{namespace}' not found 😔")    
                close_issue(issue,comment,res)    

        elif 'action::deployment-patch' in labels:
            namespace, deployment, env, img, cn = extract_info(issue.description)
            if approval_verify(issue):
                result = subprocess.run(['python', 'deployment_patch.py', namespace, deployment, env, img, cn], capture_output=True, text=True)
                res = result.returncode
                if res == 0:
                   comment = (f"Deployment '{deployment}' in Namespace '{namespace}' Patched sucessfully with {img} 🚀")
                else:
                   comment = (f"Deployment '{deployment}' or Namespace '{namespace}' not found 😔")           
                close_issue(issue,comment,res)    


def main():
    private_token = os.getenv('gitlab_token')
    gitlab_project = os.getenv('gitlab_project')
    gl = init_gitlab_connection(private_token)
    project = gl.projects.get(gitlab_project)
    issues = project.issues.list(state='opened')
    if not issues:
        print("No issues found.")
    else:
        process_issues(issues, project)

if __name__ == '__main__':
    main()


&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;p&gt;&lt;strong&gt;2) Code for Common work&lt;/strong&gt;&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight plaintext"&gt;&lt;code&gt;from kubernetes import client, config
import argparse
import datetime
import sys
import re
approver_list = ['champ25']

config.load_kube_config()

def close_issue(issue,comment,res):
    notes = issue.notes.list(get_all=True)
    issue.notes.create({'body': comment})
    if res == 0:
       issue.state_event = 'close'
       issue.save()

def approval_verify(issue):
    notes = issue.notes.list(get_all=True)
    for note in notes:
        if 'approved' in note.body.lower() and note.author['username'] in approver_list:
            new_comment =  f"🤖 Approval has been noted by @{note.author.get('username')}.🤖"
            issue.notes.create({'body': new_comment})
            return True
    return False

def extract_info(description):
    namespace_pattern = re.compile(r'namespace=\s*(\S+)')
    deployment_pattern = re.compile(r'deployment=\s*(\S+)')
    env_pattern = re.compile(r'environment=\s*(\S+)')
    cn_pattern = re.compile(r'container_name=\s*(\S+)')
    img_pattern = re.compile(r'image=\s*(\S+)')

    namespace_match = namespace_pattern.search(description)
    deployment_match = deployment_pattern.search(description)
    env_match = env_pattern.search(description)
    cn_match = cn_pattern.search(description)
    img_match = img_pattern.search(description)

    namespace = namespace_match.group(1) if namespace_match else None
    deployment = deployment_match.group(1) if deployment_match else None
    env = env_match.group(1) if env_match else None
    cn = cn_match.group(1) if cn_match else None
    img = img_match.group(1) if img_match else None
    return namespace, deployment, env, img, cn

def check_namespace_exists(namespace_name):
    print(namespace_name)
    v1 = client.CoreV1Api()
    try:
        v1.read_namespace(name=namespace_name)
        return True
    except client.exceptions.ApiException as e:
        if e.status == 404:
            print(f"Namespace '{namespace_name}' does not exist.")
            sys.exit(1)
        else:
            print(f"Error occurred: {e}")
            sys.exit(1)
        return False

def check_deployment_exists(deployment_name, namespace_name):
    apps_v1 = client.AppsV1Api()
    try:
        apps_v1.read_namespaced_deployment(name=deployment_name, namespace=namespace_name)
        return True
    except client.exceptions.ApiException as e:
        if e.status == 404:
            print(f"Deployment '{deployment_name}' does not exist in namespace '{namespace_name}'.")
            sys.exit(1) 
        else:
            print(f"Error occurred: {e}")
            sys.exit(1) 
        return False
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;p&gt;&lt;strong&gt;3) Code for Restarting Pods&lt;/strong&gt;&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight plaintext"&gt;&lt;code&gt;from kubernetes import client, config
import argparse
import datetime
import sys
from common import check_namespace_exists, check_deployment_exists, close_issue, approval_verify,extract_info
def restart_deployment(namespace, deployment_name, env):
    if check_namespace_exists(namespace):
        if check_deployment_exists(deployment_name, namespace):
           api_instance = client.AppsV1Api()
           try:
             patch = {
              "spec": {
                 "template": {
                     "metadata": {
                         "annotations": {
                             "kubectl.kubernetes.io/restartedAt": datetime.datetime.utcnow().isoformat() 
                                  }
                                }
                            }
                        }
                    }
        # Apply the updated deployment
             api_instance.patch_namespaced_deployment(deployment_name, namespace, body=patch)
           except client.exceptions.ApiException as e:
               print(f"Exception when calling AppsV1Api-&amp;gt;patch_namespaced_deployment: {e}")
def main():
    # Define the namespace and deployment name
    config.load_kube_config()
    parser = argparse.ArgumentParser()
    parser.add_argument('namespace', type=str, help='Namespace of the deployment')
    parser.add_argument('deployment', type=str, help='Name of the deployment')
    parser.add_argument('env', type=str, help='Cluster Environment')
    args = parser.parse_args()

    # Restart the deployment
    comment = restart_deployment(args.namespace, args.deployment, args.env)

if __name__ == '__main__':
    main()

&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;p&gt;&lt;strong&gt;4) Create Code for Patching the Deployment&lt;/strong&gt;&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight plaintext"&gt;&lt;code&gt;from kubernetes import client, config
import argparse
import datetime
import sys
from common import check_namespace_exists, check_deployment_exists, close_issue, approval_verify,extract_info
def patch_deployment(namespace, deployment_name, env, img, cn):
    if check_namespace_exists(namespace):
        if check_deployment_exists(deployment_name, namespace):
           print("Validation Sucessfull")
           api_instance = client.AppsV1Api()
           try:
             patch = {
                 "spec": {
                    "template": {
                       "spec": {
                          "containers": [
                              {
                                "name": cn,
                                "image": img
                             }
                          ]
                        }
                    }
                }
            }
        # Apply the updated deployment
             api_instance.patch_namespaced_deployment(deployment_name, namespace, body=patch)
           except client.exceptions.ApiException as e:
               print(f"Exception when calling AppsV1Api-&amp;gt;patch_namespaced_deployment: {e}")
def main():
    # Define the namespace and deployment name
    config.load_kube_config()
    parser = argparse.ArgumentParser()
    parser.add_argument('namespace',help='Namespace of the deployment')
    parser.add_argument('deployment', help='Name of the deployment')
    parser.add_argument('env', help='Cluster Environment')
    parser.add_argument('img', help='Image to be patched')
    parser.add_argument('cn', help='Container Name to be patched')
    args = parser.parse_args()

    # Patch the deployment
    patch_deployment(args.namespace, args.deployment, args.env, args.img, args.cn)

if __name__ == '__main__':
    main()
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;p&gt;&lt;strong&gt;5) Create Image to Use the above Code&lt;/strong&gt;&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight plaintext"&gt;&lt;code&gt;# Use the official Python base image
FROM python:3.9-slim

# Set environment variables
ENV PYTHONDONTWRITEBYTECODE=1 \
    PYTHONUNBUFFERED=1 \
    KUBECONFIG=/src/.kube/config

# Create necessary directories
RUN mkdir -p /src/.kube

# Set the working directory
WORKDIR /src

# Copy Kubernetes config and requirements
COPY config /src/.kube/
COPY requirments.txt /src/

# Debug step: List files to ensure `requirements.txt` is copied
RUN ls -l /src/

# Install Python dependencies
RUN pip install --upgrade pip &amp;amp;&amp;amp; \
    pip install --no-cache-dir -r /src/requirments.txt || \
    (echo "Error occurred during pip install" &amp;amp;&amp;amp; exit 1)

# Install Kubernetes client (if not already in requirements.txt)
RUN pip install --no-cache-dir kubernetes

&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;p&gt;&lt;strong&gt;requirments.txt file&lt;/strong&gt;&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight plaintext"&gt;&lt;code&gt;kubernetes
python-gitlab
requests

&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;p&gt;&lt;strong&gt;Create Image and push it to your Repo.&lt;/strong&gt;&lt;/p&gt;

&lt;p&gt;&lt;strong&gt;Create Gitlab-ci.yaml file&lt;/strong&gt;&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight plaintext"&gt;&lt;code&gt;######################## Default image #######################

default:
    image: omvedi25/devops-tool:v1.1
############################# Stages ######################################

stages:
 - devops-automation

 ########################### Templates ######################################
devops-automation:
    stage: devops-automation
    script:
      - cd src 
      - python issuelist.py
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;p&gt;&lt;a href="https://media.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fcloudopskube.com%2Fblog%2Fcontent%2Fimages%2F20240907210202-GL3.png" class="article-body-image-wrapper"&gt;&lt;img src="https://media.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fcloudopskube.com%2Fblog%2Fcontent%2Fimages%2F20240907210202-GL3.png" alt="enter image description here"&gt;&lt;/a&gt;&lt;/p&gt;

&lt;p&gt;&lt;strong&gt;Now Let's See It Working&lt;/strong&gt;&lt;/p&gt;

&lt;p&gt;&lt;em&gt;Create a new issue to restart the pod&lt;/em&gt;&lt;/p&gt;

&lt;p&gt;&lt;a href="https://media.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fcloudopskube.com%2Fblog%2Fcontent%2Fimages%2F20240907210602-GL5.png" class="article-body-image-wrapper"&gt;&lt;img src="https://media.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fcloudopskube.com%2Fblog%2Fcontent%2Fimages%2F20240907210602-GL5.png" alt="enter image description here"&gt;&lt;/a&gt;&lt;/p&gt;

&lt;p&gt;&lt;em&gt;Move to Pipeline and run it&lt;/em&gt;&lt;/p&gt;

&lt;p&gt;&lt;a href="https://media.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fcloudopskube.com%2Fblog%2Fcontent%2Fimages%2F20240907210747-GL6.png" class="article-body-image-wrapper"&gt;&lt;img src="https://media.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fcloudopskube.com%2Fblog%2Fcontent%2Fimages%2F20240907210747-GL6.png" alt="enter image description here"&gt;&lt;/a&gt;&lt;/p&gt;

&lt;p&gt;&lt;a href="https://media.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fcloudopskube.com%2Fblog%2Fcontent%2Fimages%2F20240907210834-GL7.png" class="article-body-image-wrapper"&gt;&lt;img src="https://media.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fcloudopskube.com%2Fblog%2Fcontent%2Fimages%2F20240907210834-GL7.png" alt="enter image description here"&gt;&lt;/a&gt;&lt;/p&gt;

&lt;p&gt;&lt;em&gt;Go to Issue and Verify&lt;/em&gt;&lt;/p&gt;

&lt;p&gt;&lt;a href="https://media.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fcloudopskube.com%2Fblog%2Fcontent%2Fimages%2F20240907210947-GL8.png" class="article-body-image-wrapper"&gt;&lt;img src="https://media.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fcloudopskube.com%2Fblog%2Fcontent%2Fimages%2F20240907210947-GL8.png" alt="enter image description here"&gt;&lt;/a&gt;&lt;/p&gt;

&lt;p&gt;&lt;em&gt;Waiting for approval from a valid approver&lt;/em&gt;&lt;br&gt;
&lt;em&gt;Add &lt;strong&gt;Approved&lt;/strong&gt; in a comment while logged in as a valid user&lt;/em&gt;&lt;br&gt;
&lt;a href="https://media.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fcloudopskube.com%2Fblog%2Fcontent%2Fimages%2F20240907211145-GL9.png" class="article-body-image-wrapper"&gt;&lt;img src="https://media.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fcloudopskube.com%2Fblog%2Fcontent%2Fimages%2F20240907211145-GL9.png" alt="enter image description here"&gt;&lt;/a&gt;&lt;/p&gt;

&lt;p&gt;&lt;em&gt;Lets See the Status of Pods&lt;/em&gt;&lt;br&gt;
&lt;a href="https://media.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fcloudopskube.com%2Fblog%2Fcontent%2Fimages%2F20240907211315-GL10.png" class="article-body-image-wrapper"&gt;&lt;img src="https://media.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fcloudopskube.com%2Fblog%2Fcontent%2Fimages%2F20240907211315-GL10.png" alt="enter image description here"&gt;&lt;/a&gt;&lt;/p&gt;

&lt;p&gt;&lt;em&gt;Run the Pipeline Again&lt;/em&gt;&lt;br&gt;
&lt;a href="https://media.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fcloudopskube.com%2Fblog%2Fcontent%2Fimages%2F20240907211539-GL11.png" class="article-body-image-wrapper"&gt;&lt;img src="https://media.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fcloudopskube.com%2Fblog%2Fcontent%2Fimages%2F20240907211539-GL11.png" alt="enter image description here"&gt;&lt;/a&gt;&lt;/p&gt;

&lt;p&gt;&lt;a href="https://media.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fcloudopskube.com%2Fblog%2Fcontent%2Fimages%2F20240907211552-GL12.png" class="article-body-image-wrapper"&gt;&lt;img src="https://media.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fcloudopskube.com%2Fblog%2Fcontent%2Fimages%2F20240907211552-GL12.png" alt="enter image description here"&gt;&lt;/a&gt;&lt;/p&gt;

&lt;p&gt;&lt;em&gt;Verify the Pods Status&lt;/em&gt;&lt;/p&gt;

&lt;p&gt;&lt;a href="https://media.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fcloudopskube.com%2Fblog%2Fcontent%2Fimages%2F20240907211626-GL13.png" class="article-body-image-wrapper"&gt;&lt;img src="https://media.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fcloudopskube.com%2Fblog%2Fcontent%2Fimages%2F20240907211626-GL13.png" alt="enter image description here"&gt;&lt;/a&gt;&lt;/p&gt;

&lt;p&gt;&lt;strong&gt;Lets Patch the Deployment&lt;/strong&gt;&lt;/p&gt;

&lt;p&gt;&lt;em&gt;Create a new Issue&lt;/em&gt;&lt;br&gt;
&lt;a href="https://media.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fcloudopskube.com%2Fblog%2Fcontent%2Fimages%2F20240907212024-GL14.png" class="article-body-image-wrapper"&gt;&lt;img src="https://media.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fcloudopskube.com%2Fblog%2Fcontent%2Fimages%2F20240907212024-GL14.png" alt="enter image description here"&gt;&lt;/a&gt;&lt;/p&gt;

&lt;p&gt;&lt;em&gt;Verify the Existing Image assigned to Deployment&lt;/em&gt;&lt;br&gt;
&lt;a href="https://media.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fcloudopskube.com%2Fblog%2Fcontent%2Fimages%2F20240907212137-GL15.png" class="article-body-image-wrapper"&gt;&lt;img src="https://media.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fcloudopskube.com%2Fblog%2Fcontent%2Fimages%2F20240907212137-GL15.png" alt="enter image description here"&gt;&lt;/a&gt;&lt;/p&gt;

&lt;p&gt;&lt;em&gt;Trigger the Pipeline&lt;/em&gt;&lt;/p&gt;

&lt;p&gt;&lt;a href="https://media.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fcloudopskube.com%2Fblog%2Fcontent%2Fimages%2F20240907212250-GL16.png" class="article-body-image-wrapper"&gt;&lt;img src="https://media.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fcloudopskube.com%2Fblog%2Fcontent%2Fimages%2F20240907212250-GL16.png" alt="enter image description here"&gt;&lt;/a&gt;&lt;/p&gt;

&lt;p&gt;&lt;em&gt;Verify the Pipeline&lt;/em&gt;&lt;br&gt;
&lt;a href="https://media.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fcloudopskube.com%2Fblog%2Fcontent%2Fimages%2F20240907212407-GL17.png" class="article-body-image-wrapper"&gt;&lt;img src="https://media.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fcloudopskube.com%2Fblog%2Fcontent%2Fimages%2F20240907212407-GL17.png" alt="enter image description here"&gt;&lt;/a&gt;&lt;/p&gt;

&lt;p&gt;&lt;em&gt;Approve the Pipeline&lt;/em&gt;&lt;br&gt;
&lt;a href="https://media.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fcloudopskube.com%2Fblog%2Fcontent%2Fimages%2F20240907212513-GL18.png" class="article-body-image-wrapper"&gt;&lt;img src="https://media.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fcloudopskube.com%2Fblog%2Fcontent%2Fimages%2F20240907212513-GL18.png" alt="enter image description here"&gt;&lt;/a&gt;&lt;/p&gt;

&lt;p&gt;&lt;em&gt;Run the Pipeline again and verify issue&lt;/em&gt;&lt;br&gt;
&lt;a href="https://media.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fcloudopskube.com%2Fblog%2Fcontent%2Fimages%2F20240907212711-GL19.png" class="article-body-image-wrapper"&gt;&lt;img src="https://media.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fcloudopskube.com%2Fblog%2Fcontent%2Fimages%2F20240907212711-GL19.png" alt="enter image description here"&gt;&lt;/a&gt;&lt;/p&gt;

&lt;p&gt;&lt;em&gt;Verify the Deployment&lt;/em&gt;&lt;br&gt;
&lt;a href="https://media.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fcloudopskube.com%2Fblog%2Fcontent%2Fimages%2F20240907212816-GL20.png" class="article-body-image-wrapper"&gt;&lt;img src="https://media.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fcloudopskube.com%2Fblog%2Fcontent%2Fimages%2F20240907212816-GL20.png" alt="enter image description here"&gt;&lt;/a&gt;&lt;/p&gt;

&lt;p&gt;&lt;strong&gt;Note&lt;/strong&gt;&lt;br&gt;
&lt;em&gt;We can create a scheduler to run the pipeline every 5 min&lt;/em&gt;&lt;br&gt;
&lt;a href="https://media.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fcloudopskube.com%2Fblog%2Fcontent%2Fimages%2F20240907212951-GL21.png" class="article-body-image-wrapper"&gt;&lt;img src="https://media.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fcloudopskube.com%2Fblog%2Fcontent%2Fimages%2F20240907212951-GL21.png" alt="enter image description here"&gt;&lt;/a&gt;&lt;/p&gt;

&lt;p&gt;&lt;strong&gt;Conclusion&lt;/strong&gt;&lt;br&gt;
&lt;em&gt;GitLab Issue Templates are a powerful tool for standardizing and automating issue creation across your team. By incorporating these templates into your workflow and integrating them with automation tools like GitLab CI/CD or monitoring systems, you can improve consistency, save time, and ensure critical issues are addressed promptly.&lt;/em&gt;&lt;/p&gt;

</description>
    </item>
    <item>
      <title>Streamlining Infrastructure: Installing Backstage Developer Portal and Managing Azure Resources with Terraform (Part-2)</title>
      <dc:creator>AMIT CHATURVEDI</dc:creator>
      <pubDate>Fri, 09 Feb 2024 03:19:44 +0000</pubDate>
      <link>https://dev.to/gittest20202/streamlining-infrastructure-installing-backstage-developer-portal-and-managing-azure-resources-with-terraform-part-2-37bg</link>
      <guid>https://dev.to/gittest20202/streamlining-infrastructure-installing-backstage-developer-portal-and-managing-azure-resources-with-terraform-part-2-37bg</guid>
      <description>&lt;p&gt;&lt;a href="https://dev.to/gittest20202/streamlining-infrastructure-installing-backstage-developer-portal-and-managing-azure-resources-with-terraform-part-1-4gl5"&gt;Part-1&lt;/a&gt;&lt;br&gt;
&lt;a href="https://media.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2Fjcg13wyg4nvmuxecc6ac.png" class="article-body-image-wrapper"&gt;&lt;img src="https://media.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2Fjcg13wyg4nvmuxecc6ac.png" alt="Image description"&gt;&lt;/a&gt;&lt;/p&gt;

&lt;p&gt;&lt;strong&gt;Continuing to Create Template in Portal and deploy Resource Group In Azure.&lt;/strong&gt;&lt;/p&gt;

&lt;p&gt;&lt;em&gt;In the last blog we created a project named&lt;/em&gt; &lt;strong&gt;Chicco.&lt;/strong&gt; &lt;em&gt;Traverse to the project directory.&lt;/em&gt;&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight plaintext"&gt;&lt;code&gt;

root@devmaster:~# cd chicco/


&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;

&lt;p&gt;&lt;em&gt;Create a directory rg-creation with structure as below&lt;/em&gt;&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight plaintext"&gt;&lt;code&gt;

root@devmaster:~/chicco# mkdir -p rg-creation/template/content
root@devmaster:~/chicco# tree rg-creation/
rg-creation/
└── template
    ├── content
    │   └── component-info.yaml
    └── template.yaml

3 directories, 2 files


&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;

&lt;p&gt;&lt;strong&gt;Template&lt;/strong&gt;&lt;br&gt;
&lt;em&gt;In Backstage, templates are used to define the structure and layout of various entities, such as services, documentation, components, and more. Templates allow you to standardize how different types of entities are created and managed within your organization.&lt;/em&gt;&lt;/p&gt;

&lt;p&gt;&lt;em&gt;Here's how templates work in Backstage:&lt;/em&gt;&lt;/p&gt;

&lt;p&gt;&lt;strong&gt;Definition:&lt;/strong&gt; &lt;em&gt;Templates are defined using YAML files, typically located in the templates directory of your Backstage repository. These YAML files specify the properties, fields, and configurations for each type of entity that can be created using the template.&lt;/em&gt;&lt;/p&gt;

&lt;p&gt;&lt;strong&gt;Customization:&lt;/strong&gt; &lt;em&gt;Templates can be customized to match your organization's specific needs and preferences. You can modify the YAML files to add or remove fields, change default values, specify validation rules, and more.&lt;/em&gt;&lt;/p&gt;

&lt;p&gt;&lt;strong&gt;Usage:&lt;/strong&gt; &lt;em&gt;Once templates are defined, they can be used to create new instances of entities within Backstage. Users can select a template from the available options and fill in the required fields to create a new entity based on that template.&lt;/em&gt;&lt;/p&gt;

&lt;p&gt;&lt;strong&gt;Validation:&lt;/strong&gt; &lt;em&gt;When creating a new entity using a template, Backstage validates the input against the template's specifications to ensure that all required fields are provided and that the data entered is valid.&lt;/em&gt;&lt;/p&gt;

&lt;p&gt;&lt;strong&gt;Consistency:&lt;/strong&gt; &lt;em&gt;Templates help enforce consistency and standardization across your organization by providing a predefined structure for creating entities. This ensures that entities are created and managed in a consistent manner, making it easier to understand and navigate your Backstage instance.&lt;/em&gt;&lt;/p&gt;

&lt;p&gt;&lt;strong&gt;Lets Create A template which will create Resource Group and VNET in Azure.&lt;/strong&gt;&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight plaintext"&gt;&lt;code&gt;

root@devmaster:~/chicco# vim rg-creation/template/template.yaml

apiVersion: scaffolder.backstage.io/v1beta3
kind: Template
metadata:
  name: create-rg
  title: Create an Resource Group And VNET
  description: Create an Resource Group and VNET in Azure Subscription
  tags: ['recommended']
spec:
  owner: guests
  type: service
  parameters:
    - title: Resource Group And VNET Creation
      required:
         - ResourceGroupName
         - Location
         - VirtualNetworkName
      properties:
        ResourceGroupName:
          title: Resource Group Name
          type: string
          description: Name of the Resource Group.
          ui:autofocus: true
          ui:options:
            rows: 5

        Location:
          title:  Location
          type: string
          description: Location name where you want to create Resource Group and Virtual Network  eg:- eastus.

        VirtualNetworkName:
          title: Virtual Network Name
          type: string
          description: Name of the Virtual Network
        action:
          title: action
          type: string
          description: What action do you want to perform? Create or delete?
          enum:
            - apply
            - destroy

    - title: Choose a Repository Location
      required:
        - repoUrl
      properties:
        repoUrl:
          title: Location of the repository
          type: string
          ui:field: RepoUrlPicker
          ui:options:
            allowedHosts:
              - github.com

  steps:
# Getting all the files/details from the template
    - id: fetch-base
      name: Fetching Details from content folder
      action: fetch:template
      input:
        url: ./content  # Location of the content directory where catlog-info.yaml file is present for the template
        values:
          name: ${{ parameters.ResourceGroupName}}

# Publish the content of the current working directory to our github directory
    - id: publish
      name: Publishing Details
      action: publish:github
      input:
        allowedHosts: ['github.com']
        description: This repo is to create Resource Group Name ${{ parameters.ResourceGroupName }} using backstage.
        repoUrl: ${{ parameters.repoUrl }}
        repoVisibility: public # or 'internal' or 'private'
        # defaultBranch: master

# Triggering CI-CD to create resource in our case github action.
    - id: github-action
      name: Starting GitHub action
      action: github:actions:dispatch
      input:
        workflowId: terraform-build.yaml  # Name of the github pipeline created into .github/workflows 
        repoUrl: 'github.com?repo=terraform-cloud&amp;amp;owner=gittest20202' # Repo location where the github action build pipeline is created
        branchOrTagName: 'main'
        workflowInputs:
          ResourceGroupName: ${{ parameters.ResourceGroupName }}
          Location: ${{ parameters.Location }}
          VirtualNetworkName: ${{ parameters.VirtualNetworkName }}
          action: ${{ parameters.action }}

# Registering new component in the catalog of backstage.
    - id: register
      name: Registering the new Component
      action: catalog:register
      input:
        repoContentsUrl: ${{ steps['publish'].output.repoContentsUrl }}
        catalogInfoPath: '/component-info.yaml' # where the info of the catalog is stored.

# Output links  are displayed to the user after the template execution.
  output:
    links:
      - title: Repository
        url: ${{ steps['publish'].output.remoteUrl }}
      - title: Open in catalog
        icon: catalog
        entityRef: ${{ steps['register'].output.entityRef }}



&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;

&lt;p&gt;&lt;a href="https://media.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2Fahkisxwqhm6ltbylybqh.png" class="article-body-image-wrapper"&gt;&lt;img src="https://media.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2Fahkisxwqhm6ltbylybqh.png" alt="Image description"&gt;&lt;/a&gt;&lt;/p&gt;

&lt;p&gt;&lt;strong&gt;Let's Create Template&lt;/strong&gt;&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight plaintext"&gt;&lt;code&gt;

root@devmaster:~/chicco# vim app-config.yaml
    - type: file
      target: ../../rg-creation/template/template.yaml
      rules:
        - allow: [Template]

root@devmaster:~/chicco# yarn dev


&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;

&lt;p&gt;&lt;a href="https://media.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2F92ujr4aurmok92b3orl9.png" class="article-body-image-wrapper"&gt;&lt;img src="https://media.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2F92ujr4aurmok92b3orl9.png" alt="Image description"&gt;&lt;/a&gt;&lt;/p&gt;

&lt;p&gt;&lt;em&gt;In Backstage, component-info metadata is typically stored in YAML files or in a database, and it's used to provide context and information about the components/services managed within the platform. This metadata helps users understand the purpose, usage, and dependencies of each component/service, and it's used to facilitate various features such as search, discovery, documentation, and collaboration.&lt;/em&gt;&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight plaintext"&gt;&lt;code&gt;

root@devmaster:~/chicco# cat rg-creation/template/content/component-info.yaml
apiVersion: backstage.io/v1alpha1
kind: Component
metadata:
  name: ${{ values.name | dump }}
  annotations:
    github.com/project-slug: gittest20202/terraform-cloud/terraform/dev # Change the value to your github repo which contains terraform code.
    backstage.io/techdocs-ref: dir:.
  title: 'Infrastructure Automation' # you can give any title
  description: An example of a Resource Group. # Change Description accordingly
spec:
  type: service
  owner: user:guest
  lifecycle: experimental



&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;

&lt;p&gt;&lt;em&gt;Login to Dev Portal and verify the Template&lt;/em&gt;&lt;/p&gt;

&lt;p&gt;&lt;a href="https://media.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2F04dy9fb1nzeaqpruscc8.png" class="article-body-image-wrapper"&gt;&lt;img src="https://media.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2F04dy9fb1nzeaqpruscc8.png" alt="Image description"&gt;&lt;/a&gt;&lt;/p&gt;

&lt;p&gt;&lt;em&gt;Create a Github Repo with the terraform Code&lt;/em&gt;&lt;/p&gt;

&lt;p&gt;&lt;a href="https://media.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2Fosmqfylkcggivtqy2yp9.png" class="article-body-image-wrapper"&gt;&lt;img src="https://media.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2Fosmqfylkcggivtqy2yp9.png" alt="Image description"&gt;&lt;/a&gt;&lt;/p&gt;

&lt;p&gt;&lt;em&gt;Create a GitHub Actions pipeline named terraform-build.yaml&lt;/em&gt;&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight plaintext"&gt;&lt;code&gt;

name: rg-connectivity-001
on:
  push:
    paths:
      - '*.tf'
  workflow_dispatch:
    inputs:
      ResourceGroupName:
        description: "Resource Group Name"
        type: "string"
      VirtualNetworkName:
        description: "Virtual Network Name"
        type: "string"
      Location:
        description: "Location"
        type: "string"  
      action:
        description: "Action to perform like apply or destroy"
        type: "string"   
jobs:
  resourcegroups:
    runs-on: ubuntu-latest
    name: 'rg-connectivity-001'
    env:
      ARM_CLIENT_ID: ${{ secrets.AZURE_CLIENT_ID }}
      ARM_CLIENT_SECRET: ${{ secrets.AZURE_CLIENT_SECRET }}
      ARM_SUBSCRIPTION_ID: ${{ secrets.SUBSCRIPTION_ID }}
      ARM_TENANT_ID: ${{ secrets.AZURE_TENANT_ID }}
    steps:
      - uses: actions/checkout@main
      - name: 'Terraform init'
        id: init
        run: |
          terraform init          

      - name: 'Terraform plan'
        id: plan
        run: |
          terraform plan -var rg_name=${{ github.event.inputs.ResourceGroupName }} -var rg_location=${{ github.event.inputs.Location }} -var vnetname=${{ github.event.inputs.VirtualNetworkName }}

      - name: 'Terraform apply'
        id: apply
        run: |
          terraform ${{ github.event.inputs.action }} -var rg_name=${{ github.event.inputs.ResourceGroupName }} -var rg_location=${{ github.event.inputs.Location }} -var vnetname=${{ github.event.inputs.VirtualNetworkName }}


&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;

&lt;p&gt;&lt;em&gt;Create Secrets in terraform-cloud for Azure SPN&lt;/em&gt;&lt;/p&gt;

&lt;p&gt;&lt;a href="https://media.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2F8fqmtzzheopga3goajrq.png" class="article-body-image-wrapper"&gt;&lt;img src="https://media.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2F8fqmtzzheopga3goajrq.png" alt="Image description"&gt;&lt;/a&gt;&lt;/p&gt;

&lt;p&gt;&lt;em&gt;Let's use the template to deploy the Resource Group and VNET&lt;/em&gt;&lt;/p&gt;

&lt;p&gt;&lt;a href="https://media.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2Flz4flrof4no9w6yop0td.png" class="article-body-image-wrapper"&gt;&lt;img src="https://media.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2Flz4flrof4no9w6yop0td.png" alt="Image description"&gt;&lt;/a&gt;&lt;/p&gt;

&lt;p&gt;&lt;strong&gt;Click on Choose template for Create an Resource Group And VNET&lt;/strong&gt;&lt;/p&gt;

&lt;p&gt;&lt;a href="https://media.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2F07h3e3yt3btb8515cv0c.png" class="article-body-image-wrapper"&gt;&lt;img src="https://media.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2F07h3e3yt3btb8515cv0c.png" alt="Image description"&gt;&lt;/a&gt;&lt;/p&gt;

&lt;p&gt;&lt;a href="https://media.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2Fpurq3c9p40dduq9j251i.png" class="article-body-image-wrapper"&gt;&lt;img src="https://media.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2Fpurq3c9p40dduq9j251i.png" alt="Image description"&gt;&lt;/a&gt;&lt;/p&gt;

&lt;p&gt;&lt;a href="https://media.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2F0m4pwxk0xpjyi5f691nu.png" class="article-body-image-wrapper"&gt;&lt;img src="https://media.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2F0m4pwxk0xpjyi5f691nu.png" alt="Image description"&gt;&lt;/a&gt;&lt;/p&gt;

&lt;p&gt;&lt;a href="https://media.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2Fjb0iwn9ni6d8ckiezn91.png" class="article-body-image-wrapper"&gt;&lt;img src="https://media.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2Fjb0iwn9ni6d8ckiezn91.png" alt="Image description"&gt;&lt;/a&gt;&lt;/p&gt;

&lt;p&gt;&lt;strong&gt;After Verifying Click on Create&lt;/strong&gt;&lt;/p&gt;

&lt;p&gt;&lt;a href="https://media.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2F377tlckaz1eq0ct6b158.png" class="article-body-image-wrapper"&gt;&lt;img src="https://media.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2F377tlckaz1eq0ct6b158.png" alt="Image description"&gt;&lt;/a&gt;&lt;/p&gt;

&lt;p&gt;&lt;em&gt;Verify the Pipeline&lt;/em&gt;&lt;/p&gt;

&lt;p&gt;&lt;a href="https://media.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2F6dodgvy4vgidg36tx1o1.png" class="article-body-image-wrapper"&gt;&lt;img src="https://media.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2F6dodgvy4vgidg36tx1o1.png" alt="Image description"&gt;&lt;/a&gt;&lt;/p&gt;

&lt;p&gt;&lt;a href="https://media.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2Fzs1xppymjshvleq9jeob.png" class="article-body-image-wrapper"&gt;&lt;img src="https://media.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2Fzs1xppymjshvleq9jeob.png" alt="Image description"&gt;&lt;/a&gt;&lt;/p&gt;

&lt;p&gt;Login to Azure Portal and Verify&lt;/p&gt;

&lt;p&gt;&lt;a href="https://media.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2Fsdmvs480d38z9f3rdbv3.png" class="article-body-image-wrapper"&gt;&lt;img src="https://media.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2Fsdmvs480d38z9f3rdbv3.png" alt="Image description"&gt;&lt;/a&gt;&lt;/p&gt;

&lt;p&gt;&lt;em&gt;Verify that the template got deployed.&lt;/em&gt;&lt;/p&gt;

&lt;p&gt;&lt;a href="https://media.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2Fxupwwsvdstbdtu4n29bx.png" class="article-body-image-wrapper"&gt;&lt;img src="https://media.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2Fxupwwsvdstbdtu4n29bx.png" alt="Image description"&gt;&lt;/a&gt;&lt;/p&gt;

&lt;p&gt;&lt;a href="https://media.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2Fc3munnj5hnwkwcnbz7qm.png" class="article-body-image-wrapper"&gt;&lt;img src="https://media.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2Fc3munnj5hnwkwcnbz7qm.png" alt="Image description"&gt;&lt;/a&gt;&lt;/p&gt;

&lt;p&gt;&lt;em&gt;Hope you have enjoyed reading the blog and will start using Backstage for deployment.&lt;/em&gt;&lt;/p&gt;

</description>
    </item>
    <item>
      <title>Streamlining Infrastructure: Installing Backstage Developer Portal and Managing Azure Resources with Terraform (Part-1)</title>
      <dc:creator>AMIT CHATURVEDI</dc:creator>
      <pubDate>Wed, 07 Feb 2024 14:57:24 +0000</pubDate>
      <link>https://dev.to/gittest20202/streamlining-infrastructure-installing-backstage-developer-portal-and-managing-azure-resources-with-terraform-part-1-4gl5</link>
      <guid>https://dev.to/gittest20202/streamlining-infrastructure-installing-backstage-developer-portal-and-managing-azure-resources-with-terraform-part-1-4gl5</guid>
      <description>&lt;p&gt;&lt;a href="https://media.dev.to/cdn-cgi/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2Fw5q63km1uzg1bv70cz2r.png" class="article-body-image-wrapper"&gt;&lt;img src="https://media.dev.to/cdn-cgi/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2Fw5q63km1uzg1bv70cz2r.png" alt="Image description" width="479" height="105"&gt;&lt;/a&gt;&lt;/p&gt;

&lt;p&gt;&lt;strong&gt;Introduction&lt;/strong&gt;&lt;br&gt;
&lt;em&gt;In today's fast-paced software development landscape, managing complex projects and coordinating diverse teams can be a daunting task. Developers often find themselves juggling multiple tools and platforms, leading to inefficiencies, delays, and frustration. Enter Backstage – an open-source platform built by Spotify to streamline the developer experience and supercharge productivity. In this blog post, we'll explore the transformative capabilities of Backstage and how it can revolutionize your development workflow.&lt;/em&gt;&lt;/p&gt;

&lt;p&gt;&lt;strong&gt;Understanding Backstage&lt;/strong&gt;&lt;br&gt;
&lt;em&gt;Backstage is an open-source platform developed by Spotify to address the challenges of managing the software development lifecycle at scale. It serves as a unified developer portal, providing a single point of entry for accessing various tools, services, and information needed by engineering teams.&lt;/em&gt;&lt;/p&gt;

&lt;p&gt;&lt;em&gt;Some key features and components of the Backstage Developer Portal:&lt;/em&gt;&lt;/p&gt;

&lt;p&gt;&lt;strong&gt;Catalog:&lt;/strong&gt; &lt;em&gt;Backstage features a centralized catalog that acts as a directory of all the services, components, and resources available within an organization. This catalog provides a holistic view of the entire software ecosystem, making it easier for developers to discover and consume resources.&lt;/em&gt;&lt;/p&gt;

&lt;p&gt;&lt;strong&gt;Plugins:&lt;/strong&gt; &lt;em&gt;Backstage is highly extensible, allowing organizations to customize and extend its functionality through plugins. Plugins can be developed to integrate with existing tools, automate workflows, and provide additional features tailored to specific use cases.&lt;/em&gt;&lt;/p&gt;

&lt;p&gt;&lt;strong&gt;Integration:&lt;/strong&gt; &lt;em&gt;Backstage integrates seamlessly with a wide range of third-party tools and services commonly used in the software development process. This includes version control systems (e.g., GitHub, GitLab), CI/CD pipelines (e.g., Jenkins, CircleCI), cloud providers (e.g., AWS, Azure, Google Cloud), and more.&lt;/em&gt;&lt;/p&gt;

&lt;p&gt;&lt;strong&gt;Developer Experience:&lt;/strong&gt; &lt;em&gt;Backstage prioritizes the developer experience, providing a user-friendly interface that promotes collaboration, transparency, and productivity. Developers can easily navigate the portal, access relevant information, and perform common tasks without context switching between multiple tools.&lt;/em&gt;&lt;/p&gt;

&lt;p&gt;&lt;strong&gt;Self-Service:&lt;/strong&gt; &lt;em&gt;Backstage empowers developers with self-service capabilities, allowing them to provision resources, manage configurations, and deploy applications independently. This reduces reliance on centralized IT teams and accelerates the pace of development.&lt;/em&gt;&lt;/p&gt;

&lt;p&gt;&lt;strong&gt;Customization:&lt;/strong&gt; &lt;em&gt;Organizations can customize the look and feel of the Backstage portal to align with their branding and preferences. Custom themes, layouts, and styling options enable organizations to create a tailored experience that reflects their unique culture and identity.&lt;/em&gt;&lt;/p&gt;

&lt;p&gt;&lt;strong&gt;Getting Started with Backstage&lt;/strong&gt;&lt;/p&gt;

&lt;p&gt;&lt;em&gt;1) Install nvm ( Version should be above 18)&lt;/em&gt;&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight plaintext"&gt;&lt;code&gt;root@devmaster:~# curl -o- https://raw.githubusercontent.com/nvm-sh/nvm/v0.39.1/install.sh | bash

root@devmaster:~# nvm install 20

&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;p&gt;&lt;em&gt;2) Install yarn&lt;/em&gt;&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight plaintext"&gt;&lt;code&gt;root@devmaster:~# npm install -g yarn@1
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;p&gt;&lt;em&gt;3) Create Backstage App&lt;/em&gt;&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight plaintext"&gt;&lt;code&gt;root@devmaster:~# npx @backstage/create-app@latest
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;p&gt;&lt;a href="https://media.dev.to/cdn-cgi/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2Fto30fdltjapfmlx6vw5n.png" class="article-body-image-wrapper"&gt;&lt;img src="https://media.dev.to/cdn-cgi/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2Fto30fdltjapfmlx6vw5n.png" alt="Image description" width="607" height="759"&gt;&lt;/a&gt;&lt;/p&gt;

&lt;p&gt;&lt;em&gt;4) Move to the App Dir&lt;/em&gt;&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight plaintext"&gt;&lt;code&gt;root@devmaster:~# cd chicco
root@devmaster:~# vi app-config.yaml (update your system Ip)
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;p&gt;&lt;a href="https://media.dev.to/cdn-cgi/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2Ftqr05x2efjkchom74llt.png" class="article-body-image-wrapper"&gt;&lt;img src="https://media.dev.to/cdn-cgi/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2Ftqr05x2efjkchom74llt.png" alt="Image description" width="800" height="363"&gt;&lt;/a&gt;&lt;/p&gt;

&lt;p&gt;&lt;em&gt;5) Start the Developer Portal&lt;/em&gt;&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight plaintext"&gt;&lt;code&gt;root@devmaster:~/chicco# yarn dev
yarn run v1.22.21
$ concurrently "yarn start" "yarn start-backend"
$ yarn workspace backend start
$ yarn workspace app start
$ backstage-cli package start
$ backstage-cli package start
[0] Loaded config from app-config.yaml
[0] &amp;lt;i&amp;gt; [webpack-dev-server] Project is running at:
[0] &amp;lt;i&amp;gt; [webpack-dev-server] On Your Network (IPv4): http://192.168.0.117:3000/
[0] &amp;lt;i&amp;gt; [webpack-dev-server] Content not from webpack is served from '/root/chicco/packages/app/public' directory
[0] &amp;lt;i&amp;gt; [webpack-dev-server] 404s will fallback to '/index.html'
[0] &amp;lt;i&amp;gt; [webpack-dev-middleware] wait until bundle finished: /
[1] Loaded config from app-config.yaml
[1] 2024-02-07T12:40:51.076Z backstage info Found 2 new secrets in config that will be redacted
[1] 2024-02-07T12:40:51.088Z backstage info Created UrlReader predicateMux{readers=azure{host=dev.azure.com,authed=false},bitbucketCloud{host=bitbucket.org,authed=false},github{host=github.com,authed=false},gitlab{host=gitlab.com,authed=false},awsS3{host=amazonaws.com,authed=false},fetch{}
[1] 2024-02-07T12:40:51.114Z catalog info Performing database migration type=plugin
[1] 2024-02-07T12:40:51.679Z auth info Configuring "database" as KeyStore provider type=plugin
[1] 2024-02-07T12:40:51.698Z techdocs info Creating Local publisher for TechDocs type=plugin
[1] 2024-02-07T12:40:51.703Z search info Added DefaultCatalogCollatorFactory collator factory for type software-catalog type=plugin
[1] 2024-02-07T12:40:51.704Z search info Added DefaultTechDocsCollatorFactory collator factory for type techdocs type=plugin
[1] 2024-02-07T12:40:51.705Z search info Starting all scheduled search tasks. type=plugin
[1] 2024-02-07T12:40:51.833Z backstage info Listening on :7007
[1] 2024-02-07T12:40:51.862Z backstage info Task worker starting: search_index_software_catalog, {"version":2,"cadence":"PT10M","initialDelayDuration":"PT3S","timeoutAfterDuration":"PT15M"} type=taskManager task=search_index_software_catalog
[1] 2024-02-07T12:40:51.864Z backstage info Task worker starting: search_index_techdocs, {"version":2,"cadence":"PT10M","initialDelayDuration":"PT3S","timeoutAfterDuration":"PT15M"} type=taskManager task=search_index_techdocs
[1] 2024-02-07T12:40:54.867Z search info Collating documents for software-catalog via DefaultCatalogCollatorFactory type=plugin documentType=software-catalog
[1] 2024-02-07T12:40:54.900Z search info Collating documents for techdocs via DefaultTechDocsCollatorFactory type=plugin documentType=techdocs
[1] 2024-02-07T12:40:54.938Z backstage info ::1 - - [07/Feb/2024:12:40:54 +0000] "GET /api/catalog/entities?limit=500&amp;amp;filter=metadata.annotations.backstage.io%2Ftechdocs-ref&amp;amp;offset=0 HTTP/1.1" 200 2 "-" "node-fetch/1.0 (+https://github.com/bitinn/node-fetch)" type=incomingRequest
[1] 2024-02-07T12:40:54.943Z search warn Index for techdocs was not created: indexer received 0 documents type=plugin documentType=techdocs
[1] 2024-02-07T12:40:54.944Z search info Collating documents for techdocs succeeded type=plugin documentType=techdocs
[1] 2024-02-07T12:40:54.947Z backstage info ::1 - - [07/Feb/2024:12:40:54 +0000] "GET /api/catalog/entities?limit=500&amp;amp;offset=0 HTTP/1.1" 200 - "-" "node-fetch/1.0 (+https://github.com/bitinn/node-fetch)" type=incomingRequest
[1] 2024-02-07T12:40:54.957Z search info Collating documents for software-catalog succeeded type=plugin documentType=software-catalog
[0] webpack compiled successfully
[1] 2024-02-07T12:41:07.203Z backstage info ::ffff:192.168.0.102 - - [07/Feb/2024:12:41:07 +0000] "GET /api/catalog/entity-facets?facet=relations.ownedBy HTTP/1.1" 200 118 "http://192.168.0.117:3000/" "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/121.0.0.0 Safari/537.36" type=incomingRequest
[1] 2024-02-07T12:41:07.303Z backstage info ::ffff:192.168.0.102 - - [07/Feb/2024:12:41:07 +0000] "GET /api/catalog/entity-facets?facet=kind HTTP/1.1" 200 225 "http://192.168.0.117:3000/" "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/121.0.0.0 Safari/537.36" type=incomingRequest
[1] 2024-02-07T12:41:07.306Z backstage info ::ffff:192.168.0.102 - - [07/Feb/2024:12:41:07 +0000] "GET /api/catalog/entity-facets?facet=metadata.tags HTTP/1.1" 200 31 "http://192.168.0.117:3000/" "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/121.0.0.0 Safari/537.36" type=incomingRequest
[1] 2024-02-07T12:41:07.310Z backstage info ::ffff:192.168.0.102 - - [07/Feb/2024:12:41:07 +0000] "GET /api/catalog/entity-facets?facet=metadata.namespace HTTP/1.1" 200 65 "http://192.168.0.117:3000/" "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/121.0.0.0 Safari/537.36" type=incomingRequest

&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;p&gt;&lt;a href="https://media.dev.to/cdn-cgi/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2Fbc16o8kr6itky1t1a25r.png" class="article-body-image-wrapper"&gt;&lt;img src="https://media.dev.to/cdn-cgi/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2Fbc16o8kr6itky1t1a25r.png" alt="Image description" width="800" height="211"&gt;&lt;/a&gt;&lt;/p&gt;

&lt;p&gt;&lt;em&gt;6) Open The Browser and access the Portal&lt;/em&gt;&lt;/p&gt;

&lt;p&gt;&lt;a href="https://media.dev.to/cdn-cgi/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2Fz7w0hv0a8woz4eyakhg4.png" class="article-body-image-wrapper"&gt;&lt;img src="https://media.dev.to/cdn-cgi/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2Fz7w0hv0a8woz4eyakhg4.png" alt="Image description" width="800" height="344"&gt;&lt;/a&gt;&lt;/p&gt;

&lt;p&gt;&lt;strong&gt;Getting Started, configuring Backstage&lt;/strong&gt;&lt;/p&gt;

&lt;p&gt;&lt;em&gt;Configuring Backstage to use a PostgreSQL database&lt;/em&gt;&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight plaintext"&gt;&lt;code&gt;root@devmaster:~/chicco# apt-get install postgresql
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;p&gt;&lt;em&gt;Test if your database is working&lt;/em&gt;&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight plaintext"&gt;&lt;code&gt;root@devmaster:~/chicco# sudo -u postgres psql
could not change directory to "/root/chicco": Permission denied
psql (15.5 (Ubuntu 15.5-0ubuntu0.23.10.1))
Type "help" for help.

postgres=#

&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;p&gt;&lt;em&gt;For testing we are going to use the existing postgres user. The next step is to set the password for this user&lt;/em&gt;&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight plaintext"&gt;&lt;code&gt;postgres=# ALTER USER postgres PASSWORD 'secret';
ALTER ROLE

&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;p&gt;&lt;em&gt;Stop Backstage, and go to the root directory of your freshly installed Backstage App. Use the following commands to start the PostgreSQL client installation&lt;/em&gt;&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight plaintext"&gt;&lt;code&gt;root@devmaster:~/chicco# yarn --cwd packages/backend add pg
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;p&gt;&lt;a href="https://media.dev.to/cdn-cgi/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2Ftujw3von52r2wjirqpuo.png" class="article-body-image-wrapper"&gt;&lt;img src="https://media.dev.to/cdn-cgi/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2Ftujw3von52r2wjirqpuo.png" alt="Image description" width="800" height="262"&gt;&lt;/a&gt;&lt;/p&gt;

&lt;p&gt;&lt;em&gt;Add Postgres configuration in app-config.yaml&lt;/em&gt;&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight plaintext"&gt;&lt;code&gt;root@devmaster:~/chicco# vim app-config.yaml
backend:
  database:
    client: pg
    connection:
      host: ${POSTGRES_HOST}
      port: ${POSTGRES_PORT}
      user: "postgres"
      password: "secret"
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;p&gt;&lt;a href="https://media.dev.to/cdn-cgi/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2Fhayc5yozrnlm5mbyylzr.png" class="article-body-image-wrapper"&gt;&lt;img src="https://media.dev.to/cdn-cgi/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2Fhayc5yozrnlm5mbyylzr.png" alt="Image description" width="667" height="595"&gt;&lt;/a&gt;&lt;/p&gt;

&lt;p&gt;&lt;em&gt;Restart the Dev portal&lt;/em&gt;&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight plaintext"&gt;&lt;code&gt;root@devmaster:~/chicco# yarn dev
yarn run v1.22.21
$ concurrently "yarn start" "yarn start-backend"
$ yarn workspace app start
$ yarn workspace backend start
$ backstage-cli package start
$ backstage-cli package start
[0] Loaded config from app-config.yaml
[0] &amp;lt;i&amp;gt; [webpack-dev-server] Project is running at:
[0] &amp;lt;i&amp;gt; [webpack-dev-server] On Your Network (IPv4): http://192.168.0.117:3000/
[0] &amp;lt;i&amp;gt; [webpack-dev-server] Content not from webpack is served from '/root/chicco/packages/app/public' directory
[0] &amp;lt;i&amp;gt; [webpack-dev-server] 404s will fallback to '/index.html'
[0] &amp;lt;i&amp;gt; [webpack-dev-middleware] wait until bundle finished: /
[1] Loaded config from app-config.yaml
[1] 2024-02-07T13:31:32.561Z backstage info Found 2 new secrets in config that will be redacted
[1] 2024-02-07T13:31:32.574Z backstage info Created UrlReader predicateMux{readers=azure{host=dev.azure.com,authed=false},bitbucketCloud{host=bitbucket.org,authed=false},github{host=github.com,authed=false},gitlab{host=gitlab.com,authed=false},awsS3{host=amazonaws.com,authed=false},fetch{}
[1] 2024-02-07T13:31:32.781Z catalog info Performing database migration type=plugin
[1] 2024-02-07T13:31:33.834Z auth info Configuring "database" as KeyStore provider type=plugin
[1] 2024-02-07T13:31:33.998Z techdocs info Creating Local publisher for TechDocs type=plugin

&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;p&gt;&lt;em&gt;Setting up authentication with GitHub&lt;/em&gt;&lt;br&gt;
&lt;strong&gt;Edit app-config.yaml and update the GitHub PAT token.&lt;/strong&gt;&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight plaintext"&gt;&lt;code&gt;root@devmaster:~/chicco# vim app-config.yaml
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;

&lt;p&gt;&lt;a href="https://media.dev.to/cdn-cgi/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2Fpa7gn554evzoh8jgb3nr.png" class="article-body-image-wrapper"&gt;&lt;img src="https://media.dev.to/cdn-cgi/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2Fpa7gn554evzoh8jgb3nr.png" alt="Image description" width="800" height="98"&gt;&lt;/a&gt;&lt;/p&gt;

&lt;p&gt;&lt;em&gt;Restart the Dev portal&lt;/em&gt;&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight plaintext"&gt;&lt;code&gt;root@devmaster:~/chicco# yarn dev
yarn run v1.22.21
$ concurrently "yarn start" "yarn start-backend"
$ yarn workspace app start
$ yarn workspace backend start
$ backstage-cli package start
$ backstage-cli package start
[0] Loaded config from app-config.yaml
[0] &amp;lt;i&amp;gt; [webpack-dev-server] Project is running at:
[0] &amp;lt;i&amp;gt; [webpack-dev-server] On Your Network (IPv4): http://192.168.0.117:9999/
[0] &amp;lt;i&amp;gt; [webpack-dev-server] Content not from webpack is served from '/root/chicco/packages/app/public' directory
[0] &amp;lt;i&amp;gt; [webpack-dev-server] 404s will fallback to '/index.html'
[0] &amp;lt;i&amp;gt; [webpack-dev-middleware] wait until bundle finished: /

&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;p&gt;&lt;strong&gt;In Part-2 We will use Backstage Dev portal to Deploy Resource in Azure using Terraform.&lt;/strong&gt;&lt;/p&gt;

&lt;p&gt;&lt;strong&gt;Happy Reading...............&lt;/strong&gt;&lt;/p&gt;

</description>
    </item>
    <item>
      <title>Migration from Calico CNI to Cilium CNI in BareMetal Kubernetes Cluster and Monitoring traffic using Hubble UI</title>
      <dc:creator>AMIT CHATURVEDI</dc:creator>
      <pubDate>Tue, 06 Feb 2024 14:51:39 +0000</pubDate>
      <link>https://dev.to/gittest20202/migration-from-calico-to-cilium-in-baremetal-kubernetes-cluster-and-monitoring-traffic-using-hubble-ui-40ga</link>
      <guid>https://dev.to/gittest20202/migration-from-calico-to-cilium-in-baremetal-kubernetes-cluster-and-monitoring-traffic-using-hubble-ui-40ga</guid>
      <description>&lt;p&gt;&lt;a href="https://media.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2Fp9j5e9qf1vovisdpm5xv.png" class="article-body-image-wrapper"&gt;&lt;img src="https://media.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2Fp9j5e9qf1vovisdpm5xv.png" alt="Image description"&gt;&lt;/a&gt;&lt;/p&gt;

&lt;p&gt;&lt;em&gt;Verify the current running CNI&lt;/em&gt;&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight plaintext"&gt;&lt;code&gt;

root@devmaster:~# kubectl get pods -n kube-system | grep calico
calico-kube-controllers-5dd4b7dfd9-j7mfj              1/1     Running   6 (46d ago)      51d
calico-node-lctql                                     1/1     Running   3 (46d ago)      71d
calico-node-nx7lx                                     1/1     Running   0                2m34s
calico-node-xdmm7                                     1/1     Running   2 (46d ago)      71d



&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;

&lt;p&gt;&lt;strong&gt;Introduction&lt;/strong&gt;&lt;br&gt;
&lt;em&gt;In the realm of Kubernetes networking, the choice of Container Network Interface (CNI) plays a crucial role in determining performance, security, and scalability. While Calico has long been a popular choice for networking solutions, Cilium has emerged as a compelling alternative, offering advanced features and robust performance enhancements. In this guide, we'll explore the process of migrating from Calico to Cilium CNI, highlighting the benefits and steps involved in making the transition.&lt;/em&gt;&lt;/p&gt;

&lt;p&gt;&lt;strong&gt;Why Migrate from Calico to Cilium?&lt;/strong&gt;&lt;br&gt;
&lt;em&gt;Before delving into the migration process, it's essential to understand why organizations might consider shifting from Calico to Cilium CNI. While Calico provides solid networking capabilities, Cilium offers several key advantages&lt;/em&gt;&lt;/p&gt;

&lt;p&gt;&lt;strong&gt;Enhanced Performance:&lt;/strong&gt; &lt;em&gt;Cilium leverages eBPF (extended Berkeley Packet Filter) technology to provide efficient packet processing, resulting in lower latency and improved throughput compared to traditional networking solutions.&lt;/em&gt;&lt;/p&gt;

&lt;p&gt;&lt;strong&gt;Advanced Security:&lt;/strong&gt; &lt;em&gt;Cilium offers powerful security features, including Layer 7 application-aware security policies, transparent encryption, and network visibility, enabling organizations to strengthen their defense against cyber threats.&lt;/em&gt;&lt;/p&gt;

&lt;p&gt;&lt;strong&gt;Native Integration with Service Mesh:&lt;/strong&gt; &lt;em&gt;Cilium seamlessly integrates with popular service mesh solutions like Istio, enabling enhanced observability, traffic management, and security within Kubernetes environments.&lt;/em&gt;&lt;/p&gt;

&lt;p&gt;&lt;strong&gt;Rich Feature Set:&lt;/strong&gt; &lt;em&gt;Cilium provides a wide range of features, including network policy enforcement, load balancing, DNS-based service discovery, and more, empowering organizations to build highly resilient and scalable infrastructure.&lt;/em&gt;&lt;/p&gt;

&lt;p&gt;&lt;strong&gt;Migration Process:&lt;/strong&gt;&lt;br&gt;
&lt;em&gt;Now, let's dive into the steps involved in migrating from Calico to Cilium CNI&lt;/em&gt;&lt;/p&gt;

&lt;p&gt;&lt;em&gt;Install the Cilium binary on the Linux machine&lt;/em&gt;&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight plaintext"&gt;&lt;code&gt;

CILIUM_CLI_VERSION=$(curl -s https://raw.githubusercontent.com/cilium/cilium-cli/main/stable.txt)
CLI_ARCH=amd64
if [ "$(uname -m)" = "aarch64" ]; then CLI_ARCH=arm64; fi
curl -L --fail --remote-name-all https://github.com/cilium/cilium-cli/releases/download/${CILIUM_CLI_VERSION}/cilium-linux-${CLI_ARCH}.tar.gz{,.sha256sum}
sha256sum --check cilium-linux-${CLI_ARCH}.tar.gz.sha256sum
sudo tar xzvfC cilium-linux-${CLI_ARCH}.tar.gz /usr/local/bin
rm cilium-linux-${CLI_ARCH}.tar.gz{,.sha256sum}


&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;

&lt;p&gt;&lt;a href="https://media.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2Fmv7a3r1wtqkio7dl2a26.png" class="article-body-image-wrapper"&gt;&lt;img src="https://media.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2Fmv7a3r1wtqkio7dl2a26.png" alt="Image description"&gt;&lt;/a&gt;&lt;/p&gt;

&lt;p&gt;&lt;strong&gt;Install Cilium&lt;/strong&gt;&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight plaintext"&gt;&lt;code&gt;

root@devmaster:~# cilium install --version 1.15.0


&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;

&lt;p&gt;&lt;a href="https://media.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2Frk6vi6ofmj4trodz9x07.png" class="article-body-image-wrapper"&gt;&lt;img src="https://media.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2Frk6vi6ofmj4trodz9x07.png" alt="Image description"&gt;&lt;/a&gt;&lt;/p&gt;

&lt;p&gt;&lt;a href="https://media.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2Fksx36xiz8m84by55yzha.png" class="article-body-image-wrapper"&gt;&lt;img src="https://media.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2Fksx36xiz8m84by55yzha.png" alt="Image description"&gt;&lt;/a&gt;&lt;/p&gt;

&lt;p&gt;&lt;strong&gt;Verify the Cilium installation&lt;/strong&gt;&lt;/p&gt;

&lt;p&gt;&lt;a href="https://media.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2Fbaj7pyfbf7akiwv35sqe.png" class="article-body-image-wrapper"&gt;&lt;img src="https://media.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2Fbaj7pyfbf7akiwv35sqe.png" alt="Image description"&gt;&lt;/a&gt;&lt;/p&gt;

&lt;p&gt;&lt;strong&gt;Verify the Cilium Connectivity&lt;/strong&gt;&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight plaintext"&gt;&lt;code&gt;

root@devmaster:~# cilium connectivity test
ℹ️  Monitor aggregation detected, will skip some flow validation steps
✨ [kubernetes] Creating namespace cilium-test for connectivity check...
✨ [kubernetes] Deploying echo-same-node service...
✨ [kubernetes] Deploying DNS test server configmap...
✨ [kubernetes] Deploying same-node deployment...
✨ [kubernetes] Deploying client deployment...
✨ [kubernetes] Deploying client2 deployment...
✨ [kubernetes] Deploying client3 deployment...
✨ [kubernetes] Deploying echo-other-node service...
✨ [kubernetes] Deploying other-node deployment...
✨ [host-netns] Deploying kubernetes daemonset...
✨ [host-netns-non-cilium] Deploying kubernetes daemonset...
ℹ️  Skipping tests that require a node Without Cilium
⌛ [kubernetes] Waiting for deployment cilium-test/client to become ready...
⌛ [kubernetes] Waiting for deployment cilium-test/client2 to become ready...
⌛ [kubernetes] Waiting for deployment cilium-test/echo-same-node to become ready...
⌛ [kubernetes] Waiting for deployment cilium-test/client3 to become ready...
⌛ [kubernetes] Waiting for deployment cilium-test/echo-other-node to become ready...
⌛ [kubernetes] Waiting for pod cilium-test/client-65847bf96-ctw2m to reach DNS server on cilium-test/echo-same-node-56dfd8bd85-hd72q pod...
⌛ [kubernetes] Waiting for pod cilium-test/client2-85585bdd-cjvnw to reach DNS server on cilium-test/echo-same-node-56dfd8bd85-hd72q pod...
⌛ [kubernetes] Waiting for pod cilium-test/client3-54d97dc775-rzlm6 to reach DNS server on cilium-test/echo-same-node-56dfd8bd85-hd72q pod...
⌛ [kubernetes] Waiting for pod cilium-test/client3-54d97dc775-rzlm6 to reach DNS server on cilium-test/echo-other-node-7b76c5bbf9-rzt5f pod...
⌛ [kubernetes] Waiting for pod cilium-test/client-65847bf96-ctw2m to reach DNS server on cilium-test/echo-other-node-7b76c5bbf9-rzt5f pod...
⌛ [kubernetes] Waiting for pod cilium-test/client2-85585bdd-cjvnw to reach DNS server on cilium-test/echo-other-node-7b76c5bbf9-rzt5f pod...
⌛ [kubernetes] Waiting for pod cilium-test/client-65847bf96-ctw2m to reach default/kubernetes service...
⌛ [kubernetes] Waiting for pod cilium-test/client2-85585bdd-cjvnw to reach default/kubernetes service...
⌛ [kubernetes] Waiting for pod cilium-test/client3-54d97dc775-rzlm6 to reach default/kubernetes service...
⌛ [kubernetes] Waiting for Service cilium-test/echo-other-node to become ready...
⌛ [kubernetes] Waiting for Service cilium-test/echo-other-node to be synchronized by Cilium pod kube-system/cilium-cwrxr
⌛ [kubernetes] Waiting for Service cilium-test/echo-other-node to be synchronized by Cilium pod kube-system/cilium-jwpck
⌛ [kubernetes] Waiting for Service cilium-test/echo-same-node to become ready...
⌛ [kubernetes] Waiting for Service cilium-test/echo-same-node to be synchronized by Cilium pod kube-system/cilium-cwrxr
⌛ [kubernetes] Waiting for Service cilium-test/echo-same-node to be synchronized by Cilium pod kube-system/cilium-jwpck
⌛ [kubernetes] Waiting for NodePort 172.17.17.101:31784 (cilium-test/echo-other-node) to become ready...
⌛ [kubernetes] Waiting for NodePort 172.17.17.101:31656 (cilium-test/echo-same-node) to become ready...
⌛ [kubernetes] Waiting for NodePort 192.168.0.117:31784 (cilium-test/echo-other-node) to become ready...
⌛ [kubernetes] Waiting for NodePort 192.168.0.117:31656 (cilium-test/echo-same-node) to become ready...
⌛ [kubernetes] Waiting for NodePort 172.17.17.102:31784 (cilium-test/echo-other-node) to become ready...
⌛ [kubernetes] Waiting for NodePort 172.17.17.102:31656 (cilium-test/echo-same-node) to become ready...
⌛ [kubernetes] Waiting for DaemonSet cilium-test/host-netns-non-cilium to become ready...
⌛ [kubernetes] Waiting for DaemonSet cilium-test/host-netns to become ready...
ℹ️  Skipping IPCache check
🔭 Enabling Hubble telescope...
⚠️  Unable to contact Hubble Relay, disabling Hubble telescope and flow validation: rpc error: code = Unavailable desc = connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:4245: connect: connection refused"
ℹ️  Expose Relay locally with:
   cilium hubble enable
   cilium hubble port-forward&amp;amp;
ℹ️  Cilium version: 1.15.0
🏃 Running 64 tests ...



&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;

&lt;p&gt;&lt;em&gt;Create a per-node config that will instruct Cilium to “take over” CNI networking on the node. Initially, this will apply to no nodes; you will roll it out gradually via the migration process.&lt;/em&gt;&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight plaintext"&gt;&lt;code&gt;

root@devmaster:~# cat &amp;lt;&amp;lt;EOF | kubectl apply --server-side -f -
apiVersion: cilium.io/v2alpha1
kind: CiliumNodeConfig
metadata:
  namespace: kube-system
  name: cilium-default
spec:
  nodeSelector:
    matchLabels:
      io.cilium.migration/cilium-default: "true"
  defaults:
    write-cni-conf-when-ready: /host/etc/cni/net.d/05-cilium.conflist
    custom-cni-conf: "false"
    cni-chaining-mode: "none"
    cni-exclusive: "true"
EOF
ciliumnodeconfig.cilium.io/cilium-default serverside-applied


&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;

&lt;p&gt;&lt;em&gt;Select a node to be migrated. It is not recommended to start with a control-plane node.&lt;/em&gt;&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight plaintext"&gt;&lt;code&gt;

root@devmaster:~# NODE="devworker2.homecluster.store"
root@devmaster:~# kubectl cordon $NODE
node/devworker2.homecluster.store cordoned
root@devmaster:~# kubectl drain --ignore-daemonsets $NODE
node/devworker2.homecluster.store already cordoned
root@devmaster:~# kubectl get nodes
NAME                           STATUS                     ROLES           AGE   VERSION
devmaster.homecluster.store    Ready                      control-plane   72d   v1.26.0
devworker1.homecluster.store   Ready                      &amp;lt;none&amp;gt;          72d   v1.26.0
devworker2.homecluster.store   Ready,SchedulingDisabled   &amp;lt;none&amp;gt;          72d   v1.26.0



&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;

&lt;p&gt;&lt;em&gt;Label the node. This causes the CiliumNodeConfig to apply to this node.&lt;/em&gt;&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight plaintext"&gt;&lt;code&gt;

root@devmaster:~# kubectl label node $NODE --overwrite "io.cilium.migration/cilium-default=true"
node/devworker2.homecluster.store labeled



&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;

&lt;p&gt;&lt;em&gt;Restart Cilium. This will cause it to write its CNI configuration file.&lt;/em&gt;&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight plaintext"&gt;&lt;code&gt;

root@devmaster:~# kubectl -n kube-system delete pod --field-selector spec.nodeName=$NODE -l k8s-app=cilium
pod "cilium-jvc7v" deleted
root@devmaster:~# kubectl -n kube-system rollout status ds/cilium -w
daemon set "cilium" successfully rolled out


&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;

&lt;p&gt;&lt;em&gt;Restart the Node&lt;/em&gt;&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight plaintext"&gt;&lt;code&gt;

root@devmaster:~# ssh root@172.17.17.102
root@devworker2:~# init 6


&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;

&lt;p&gt;&lt;em&gt;Validate that the node has been successfully migrated.&lt;/em&gt;&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight plaintext"&gt;&lt;code&gt;

root@devmaster:~# cilium status --wait
kubectl get -o wide node $NODE
kubectl -n kube-system run --attach --rm --restart=Never verify-network \
  --overrides='{"spec": {"nodeName": "'$NODE'", "tolerations": [{"operator": "Exists"}]}}' \
  --image ghcr.io/nicolaka/netshoot:v0.8 -- /bin/bash -c 'ip -br addr &amp;amp;&amp;amp; curl -s -k https://$KUBERNETES_SERVICE_HOST/healthz &amp;amp;&amp;amp; echo'



&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;

&lt;p&gt;&lt;a href="https://media.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2F8du7ik90qfgy9jzed0fc.png" class="article-body-image-wrapper"&gt;&lt;img src="https://media.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2F8du7ik90qfgy9jzed0fc.png" alt="Image description"&gt;&lt;/a&gt;&lt;br&gt;
&lt;em&gt;Uncordon the node.&lt;/em&gt;&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight plaintext"&gt;&lt;code&gt;

root@devmaster:~# kubectl uncordon $NODE
root@devmaster:~# kubectl get -o wide node $NODE


&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;

&lt;p&gt;&lt;a href="https://media.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2Fdlmyue4lj99slr5pucfk.png" class="article-body-image-wrapper"&gt;&lt;img src="https://media.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2Fdlmyue4lj99slr5pucfk.png" alt="Image description"&gt;&lt;/a&gt;&lt;/p&gt;

&lt;p&gt;&lt;em&gt;Once you are satisfied everything has been migrated successfully, select another unmigrated node in the cluster and repeat these steps.&lt;/em&gt;&lt;/p&gt;

&lt;p&gt;&lt;em&gt;Delete the Calico CNI&lt;/em&gt;&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight plaintext"&gt;&lt;code&gt;

root@devmaster:~# kubectl delete crd $(kubectl get crd | grep calico | awk '{print $1}')

root@devmaster:~# kubectl delete -n kube-system deployment calico-kube-controllers

root@devmaster:~# kubectl delete -n kube-system daemonset calico-node



&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;

&lt;p&gt;&lt;a href="https://media.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2Fzqxd9vhszj671xczg9nw.png" class="article-body-image-wrapper"&gt;&lt;img src="https://media.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2Fzqxd9vhszj671xczg9nw.png" alt="Image description"&gt;&lt;/a&gt;&lt;/p&gt;

&lt;p&gt;&lt;em&gt;Verify that the Calico CNI has been deleted&lt;/em&gt;&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight plaintext"&gt;&lt;code&gt;

root@devmaster:~# kubectl get pods -n kube-system
NAME                                                  READY   STATUS    RESTARTS         AGE
cilium-cwrxr                                          1/1     Running   0                15m
cilium-dr7r5                                          1/1     Running   0                5m39s
cilium-jwpck                                          1/1     Running   0                13m
cilium-operator-5b7fb7b87d-f2v97                      1/1     Running   0                6m4s
coredns-787d4945fb-65hzf                              1/1     Running   0                6m4s
coredns-787d4945fb-lfkw4                              1/1     Running   0                38m
etcd-devmaster.homecluster.store                      1/1     Running   4 (46d ago)      72d
kube-apiserver-devmaster.homecluster.store            1/1     Running   4                72d
kube-controller-manager-devmaster.homecluster.store   1/1     Running   13 (2d19h ago)   72d
kube-proxy-4gtbg                                      1/1     Running   2 (46d ago)      72d
kube-proxy-dh9z4                                      1/1     Running   2 (46d ago)      72d
kube-proxy-mtjjl                                      1/1     Running   3 (46d ago)      72d
kube-scheduler-devmaster.homecluster.store            1/1     Running   13 (2d19h ago)   72d



&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;

&lt;p&gt;&lt;em&gt;Reboot all the nodes and check the node status&lt;/em&gt;&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight plaintext"&gt;&lt;code&gt;

root@devmaster:~# kubectl get nodes
NAME                           STATUS   ROLES           AGE   VERSION
devmaster.homecluster.store    Ready    control-plane   72d   v1.26.0
devworker1.homecluster.store   Ready    &amp;lt;none&amp;gt;          72d   v1.26.0
devworker2.homecluster.store   Ready    &amp;lt;none&amp;gt;          72d   v1.26.0


root@devmaster:~# kubectl get pods -n metallb-system
NAME                          READY   STATUS    RESTARTS         AGE
controller-586bfc6b59-pcq87   1/1     Running   8 (5m45s ago)    65d
speaker-2zwg4                 1/1     Running   3 (3m7s ago)     72d
speaker-8n84l                 1/1     Running   12 (6m36s ago)   72d
speaker-zdxv6                 1/1     Running   3 (5m45s ago)    72d



&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;

&lt;p&gt;&lt;a href="https://media.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2F9kn5wkisymkwx24m8qzh.png" class="article-body-image-wrapper"&gt;&lt;img src="https://media.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2F9kn5wkisymkwx24m8qzh.png" alt="Image description"&gt;&lt;/a&gt;&lt;/p&gt;

&lt;p&gt;&lt;strong&gt;Setting up Hubble Observability&lt;/strong&gt;&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight plaintext"&gt;&lt;code&gt;

root@devmaster# cilium hubble enable
root@devmaster# cilium status



&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;

&lt;p&gt;&lt;a href="https://media.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2Fmkzkmn7qgluj3oqkeu40.png" class="article-body-image-wrapper"&gt;&lt;img src="https://media.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2Fmkzkmn7qgluj3oqkeu40.png" alt="Image description"&gt;&lt;/a&gt;&lt;/p&gt;

&lt;p&gt;&lt;em&gt;Install the Hubble Client&lt;/em&gt;&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight plaintext"&gt;&lt;code&gt;

root@devmaster:/etc/cni/net.d# HUBBLE_VERSION=$(curl -s https://raw.githubusercontent.com/cilium/hubble/master/stable.txt)
HUBBLE_ARCH=amd64
if [ "$(uname -m)" = "aarch64" ]; then HUBBLE_ARCH=arm64; fi
curl -L --fail --remote-name-all https://github.com/cilium/hubble/releases/download/$HUBBLE_VERSION/hubble-linux-${HUBBLE_ARCH}.tar.gz{,.sha256sum}
sha256sum --check hubble-linux-${HUBBLE_ARCH}.tar.gz.sha256sum
sudo tar xzvfC hubble-linux-${HUBBLE_ARCH}.tar.gz /usr/local/bin
rm hubble-linux-${HUBBLE_ARCH}.tar.gz{,.sha256sum}
  % Total    % Received % Xferd  Average Speed   Time    Time     Time  Current
                                 Dload  Upload   Total   Spent    Left  Speed
  0     0    0     0    0     0      0      0 --:--:-- --:--:-- --:--:--     0
100 17.0M  100 17.0M    0     0  3692k      0  0:00:04  0:00:04 --:--:-- 5802k
  % Total    % Received % Xferd  Average Speed   Time    Time     Time  Current
                                 Dload  Upload   Total   Spent    Left  Speed
  0     0    0     0    0     0      0      0 --:--:-- --:--:-- --:--:--     0
100    92  100    92    0     0     53      0  0:00:01  0:00:01 --:--:--   219
hubble-linux-amd64.tar.gz: OK
hubble



&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;

&lt;p&gt;&lt;em&gt;Validate Hubble API Access&lt;/em&gt;&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight plaintext"&gt;&lt;code&gt;

root@devmaster# cilium hubble port-forward&amp;amp;



&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;

&lt;p&gt;&lt;em&gt;Now you can validate that you can access the Hubble API via the installed CLI&lt;/em&gt;&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight plaintext"&gt;&lt;code&gt;

root@devmaster:~# hubble status
Healthcheck (via localhost:4245): Ok
Current/Max Flows: 12,285/12,285 (100.00%)
Flows/s: 150.13
Connected Nodes: 3/3


&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;

&lt;p&gt;&lt;em&gt;Query the flow API and look for flows&lt;/em&gt;&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight plaintext"&gt;&lt;code&gt;

root@devmaster:~# hubble observe


&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;

&lt;p&gt;&lt;a href="https://media.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2Fnc3krtvo9foq6mpz33a1.png" class="article-body-image-wrapper"&gt;&lt;img src="https://media.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2Fnc3krtvo9foq6mpz33a1.png" alt="Image description"&gt;&lt;/a&gt;&lt;/p&gt;

&lt;p&gt;&lt;em&gt;Enable the Hubble UI&lt;/em&gt;&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight plaintext"&gt;&lt;code&gt;

root@devmaster:~# cilium hubble enable --ui


&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;

&lt;p&gt;&lt;a href="https://media.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2F5f6c51gxkbxstt2zm5ul.png" class="article-body-image-wrapper"&gt;&lt;img src="https://media.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2F5f6c51gxkbxstt2zm5ul.png" alt="Image description"&gt;&lt;/a&gt;&lt;/p&gt;

&lt;p&gt;&lt;a href="https://media.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2Fylnzncbqniz3e88h91ac.png" class="article-body-image-wrapper"&gt;&lt;img src="https://media.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2Fylnzncbqniz3e88h91ac.png" alt="Image description"&gt;&lt;/a&gt;&lt;/p&gt;

&lt;p&gt;&lt;a href="https://media.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2F0doeb6subgkbswr8oo75.png" class="article-body-image-wrapper"&gt;&lt;img src="https://media.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2F0doeb6subgkbswr8oo75.png" alt="Image description"&gt;&lt;/a&gt;&lt;/p&gt;

</description>
    </item>
    <item>
      <title>Exploring Kubernetes Security with Starboard Operator</title>
      <dc:creator>AMIT CHATURVEDI</dc:creator>
      <pubDate>Sat, 03 Feb 2024 15:01:14 +0000</pubDate>
      <link>https://dev.to/gittest20202/exploring-kubernetes-security-with-starboard-operator-1doo</link>
      <guid>https://dev.to/gittest20202/exploring-kubernetes-security-with-starboard-operator-1doo</guid>
      <description>&lt;p&gt;&lt;a href="https://media.dev.to/cdn-cgi/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2Fk78gp4ozk8fuwtgpjitt.png" class="article-body-image-wrapper"&gt;&lt;img src="https://media.dev.to/cdn-cgi/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2Fk78gp4ozk8fuwtgpjitt.png" alt="Image description" width="204" height="214"&gt;&lt;/a&gt;&lt;/p&gt;

&lt;p&gt;&lt;strong&gt;Introduction:&lt;/strong&gt;&lt;br&gt;
&lt;em&gt;Kubernetes, the container orchestration platform, has revolutionized the way we deploy and manage containerized applications. However, with great power comes great responsibility, and securing Kubernetes environments is paramount. In this blog post, we'll delve into the Starboard Operator, a powerful tool designed to enhance security within Kubernetes clusters.&lt;/em&gt;&lt;/p&gt;

&lt;p&gt;&lt;a href="https://media.dev.to/cdn-cgi/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2Ffns9m9y5kjtjfyj3wq3l.png" class="article-body-image-wrapper"&gt;&lt;img src="https://media.dev.to/cdn-cgi/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2Ffns9m9y5kjtjfyj3wq3l.png" alt="Image description" width="617" height="509"&gt;&lt;/a&gt;&lt;/p&gt;

&lt;p&gt;&lt;strong&gt;Understanding Kubernetes Security Challenges&lt;/strong&gt;&lt;br&gt;
&lt;em&gt;Kubernetes security involves multiple layers, including container images, runtime security, network policies, and access controls. Ensuring the security of each of these layers is crucial to safeguarding your applications and data.&lt;/em&gt;&lt;/p&gt;

&lt;p&gt;&lt;strong&gt;Introduction to Starboard&lt;/strong&gt;&lt;br&gt;
&lt;em&gt;Starboard is an open-source Kubernetes-native security toolkit developed by Aqua Security. It provides a set of tools and custom resources to perform security and compliance checks within Kubernetes clusters. Starboard extends Kubernetes functionality, allowing users to scan container images and Kubernetes resources for vulnerabilities, misconfigurations, and other security issues.&lt;/em&gt;&lt;br&gt;
&lt;/p&gt;

&lt;p&gt;&lt;strong&gt;Key Features of Starboard Operator&lt;/strong&gt;&lt;br&gt;
&lt;em&gt;1. Container Image Scanning&lt;/em&gt;&lt;br&gt;
&lt;em&gt;Starboard enables users to scan container images for vulnerabilities using popular security databases like CVE. By integrating with image scanners, Starboard ensures that only secure and trusted container images are deployed within the cluster.&lt;/em&gt;&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight plaintext"&gt;&lt;code&gt;root@master:~/vault# kubectl starboard get vulnerabilities

&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;p&gt;&lt;em&gt;2. Kubernetes Resource Scanning&lt;/em&gt;&lt;br&gt;
&lt;em&gt;In addition to image scanning, Starboard performs security checks on Kubernetes resources. This includes ConfigAudit checks to ensure that configurations align with security best practices.&lt;/em&gt;&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight plaintext"&gt;&lt;code&gt;root@master:~/vault# kubectl starboard get configauditreports

&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;p&gt;&lt;em&gt;3. Custom Resource Definitions&lt;/em&gt;&lt;br&gt;
&lt;em&gt;Starboard introduces custom resources such as VulnerabilityReport and ConfigAuditReport. These resources provide detailed information about vulnerabilities and configuration issues, making it easier for users to understand and address security concerns.&lt;/em&gt;&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight plaintext"&gt;&lt;code&gt;root@master:~/vault# kubectl get vulnerabilityreports
root@master:~/vault# kubectl get configauditreports

&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;p&gt;&lt;strong&gt;Installing Starboard Operator&lt;/strong&gt;&lt;br&gt;
&lt;em&gt;Installing Starboard Operator is a straightforward process. Users can deploy it using standard Kubernetes manifests or Helm charts. The official Aqua Security documentation provides clear instructions for the installation process.&lt;/em&gt;&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight plaintext"&gt;&lt;code&gt;root@master:~/vault# helm install starboard-operator aqua/starboard-operator   --namespace starboard-system   --create-namespace   --set="trivy.ignoreUnfixed=true"   --version 0.10.12
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;





&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight plaintext"&gt;&lt;code&gt;root@master:~/vault# kubectl get pods -n starboard-system
NAME                                 READY   STATUS    RESTARTS   AGE
starboard-operator-5dbd48d67-6zh8v   1/1     Running   0          46m
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;p&gt;&lt;strong&gt;Inspect the created VulnerabilityReports by running:&lt;/strong&gt;&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight plaintext"&gt;&lt;code&gt;root@master:~/vault# kubectl get vulnerabilityreports --all-namespaces -o wide
NAMESPACE        NAME                                                          REPOSITORY                TAG        SCANNER   AGE   CRITICAL   HIGH   MEDIUM   LOW   UNKNOWN
argocd           replicaset-argocd-repo-server-6f5d866457-argocd-repo-server   argoproj/argocd           v2.9.2     Trivy     45m   0          0      23       12    0
calico-system    replicaset-846bc59bdd                                         calico/kube-controllers   v3.26.3    Trivy     47m   0          0      0        0     0
default          replicaset-deathstar-8464cdd4d9-deathstar                     cilium/starwars           latest     Trivy     43m   0          0      0        0     0
metallb-system   replicaset-controller-586bfc6b59-controller                   metallb/controller        v0.13.12   Trivy     39m   0          2      8        0     0

&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;p&gt;&lt;strong&gt;Inspect the created ConfigAuditReports by running:&lt;/strong&gt;&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight plaintext"&gt;&lt;code&gt;root@master:~/vault# kubectl get configauditreports -n default -o wide
NAME                              SCANNER     AGE   CRITICAL   HIGH   MEDIUM   LOW
replicaset-deathstar-8464cdd4d9   Starboard   72m   0          0      6        8
replicaset-nginx-54f8f9f495       Starboard   43m   0          0      6        7

&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;p&gt;&lt;strong&gt;Inspect the created CISKubeBenchReports by running:&lt;/strong&gt;&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight plaintext"&gt;&lt;code&gt;root@master:~/vault# kubectl get ciskubebenchreports -o wide
NAME                        SCANNER      AGE   FAIL   WARN   INFO   PASS
master.homecluster.store    kube-bench   72m   10     47     0      68
worker1.homecluster.store   kube-bench   71m   1      33     0      19
worker2.homecluster.store   kube-bench   55m   1      33     0      19
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;p&gt;Deploying nginx in the default namespace:&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight plaintext"&gt;&lt;code&gt;root@master:~/vault# kubectl create deployment nginx --image nginx:1.16 -n default
root@master:~/vault# kubectl get pods -n default | grep nginx
nginx-54f8f9f495-n8s6m       1/1     Running   0          47m

&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;p&gt;&lt;strong&gt;Verify the vulnerability in nginx deployment&lt;/strong&gt;&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight plaintext"&gt;&lt;code&gt;root@master:~/vault# kubectl get configauditreports --all-namespaces -o wide | grep default | grep nginx
default                      replicaset-nginx-54f8f9f495                                     Starboard   48m   0          0      6        7

root@master:~/vault# kubectl get configauditreports replicaset-nginx-54f8f9f495 -n default -o json
 "report": {
        "checks": [
            {
                "category": "Kubernetes Security Check",
                "checkID": "KSV020",
                "description": "Force the container to run with user ID \u003e 10000 to avoid conflicts with the host’s user table.",
                "messages": [
                    "Container 'nginx' of ReplicaSet 'nginx-54f8f9f495' should set 'securityContext.runAsUser' \u003e 10000"
                ],
                "severity": "MEDIUM",
                "success": false,
                "title": "Runs with low user ID"
            },
            {
                "category": "Kubernetes Security Check",
                "checkID": "KSV037",
                "description": "ensure that User pods are not placed in kube-system namespace",
                "severity": "MEDIUM",
                "success": true,
                "title": "User Pods should not be placed in kube-system namespace"
            },
            {
                "category": "Kubernetes Security Check",
                "checkID": "KSV024",
                "description": "HostPorts should be disallowed, or at minimum restricted to a known list.",
                "severity": "HIGH",
                "success": true,
                "title": "Access to host ports"
            },
            {
                "category": "Kubernetes Security Check",
                "checkID": "KSV003",
                "description": "The container should drop all default capabilities and add only those that are needed for its execution.",
                "messages": [
                    "Container 'nginx' of ReplicaSet 'nginx-54f8f9f495' should add 'ALL' to 'securityContext.capabilities.drop'"
                ],
                "severity": "LOW",
                "success": false,
                "title": "Default capabilities not dropped"
            },
            {
                "category": "Kubernetes Security Check",
                "checkID": "KSV013",
                "description": "It is best to avoid using the ':latest' image tag when deploying containers in production. Doing so makes it hard to track which version of the image is running, and hard to roll back the version.",
                "severity": "LOW",
                "success": true,
                "title": "Image tag ':latest' used"
            },
            {
                "category": "Kubernetes Security Check",
                "checkID": "KSV021",
                "description": "Force the container to run with group ID \u003e 10000 to avoid conflicts with the host’s user table.",
                "messages": [
                    "Container 'nginx' of ReplicaSet 'nginx-54f8f9f495' should set 'securityContext.runAsGroup' \u003e 10000"
                ],
                "severity": "MEDIUM",
                "success": false,
                "title": "Runs with low group ID"
            },
            {
                "category": "Kubernetes Security Check",
                "checkID": "KSV001",
                "description": "A program inside the container can elevate its own privileges and run as root, which might give the program control over the container and node.",
                "messages": [
                    "Container 'nginx' of ReplicaSet 'nginx-54f8f9f495' should set 'securityContext.allowPrivilegeEscalation' to false"
                ],
                "severity": "MEDIUM",
                "success": false,
                "title": "Process can elevate its own privileges"
            },
            {
                "category": "Kubernetes Security Check",
                "checkID": "KSV023",
                "description": "HostPath volumes must be forbidden.",
                "severity": "MEDIUM",
                "success": true,
                "title": "hostPath volumes mounted"
            },
            {
                "category": "Kubernetes Security Check",
                "checkID": "KSV025",
                "description": "Setting a custom SELinux user or role option should be forbidden.",
                "severity": "MEDIUM",
                "success": true,
                "title": "SELinux custom options set"
            },
            {
                "category": "Kubernetes Security Check",
                "checkID": "KSV029",
                "description": "Containers should be forbidden from running with a root primary or supplementary GID.",
                "severity": "LOW",
                "success": true,
                "title": "A root primary or supplementary GID set"
            },
            {
                "category": "Kubernetes Security Check",
                "checkID": "KSV002",
                "description": "A program inside the container can bypass AppArmor protection policies.",
                "severity": "MEDIUM",
                "success": true,
                "title": "Default AppArmor profile not set"
            },
            {
                "category": "Kubernetes Security Check",
                "checkID": "KSV007",
                "description": "Managing /etc/hosts aliases can prevent the container engine from modifying the file after a pod’s containers have already been started.",
                "severity": "LOW",
                "success": true,
                "title": "hostAliases is set"
            },
            {
                "category": "Kubernetes Security Check",
                "checkID": "KSV009",
                "description": "Sharing the host’s network namespace permits processes in the pod to communicate with processes bound to the host’s loopback adapter.",
                "severity": "HIGH",
                "success": true,
                "title": "Access to host network"
            },
            {
                "category": "Kubernetes Security Check",
                "checkID": "KSV012",
                "description": "'runAsNonRoot' forces the running image to run as a non-root user to ensure least privileges.",
                "messages": [
                    "Container 'nginx' of ReplicaSet 'nginx-54f8f9f495' should set 'securityContext.runAsNonRoot' to true"
                ],
                "severity": "MEDIUM",
                "success": false,
                "title": "Runs as root user"
            },
            {
                "category": "Kubernetes Security Check",
                "checkID": "KSV022",
                "description": "Adding NET_RAW or capabilities beyond the default set must be disallowed.",
                "severity": "MEDIUM",
                "success": true,
                "title": "Non-default capabilities added"
            },
            {
                "category": "Kubernetes Security Check",
                "checkID": "KSV027",
                "description": "The default /proc masks are set up to reduce attack surface, and should be required.",
                "severity": "MEDIUM",
                "success": true,
                "title": "Non-default /proc masks set"
            },

&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;p&gt;&lt;strong&gt;Enhancing Security Workflows&lt;/strong&gt;&lt;br&gt;
&lt;em&gt;The integration of Starboard into your Kubernetes environment enhances security workflows by providing actionable insights. Security teams can use the reports generated by Starboard to identify and mitigate risks, ensuring a robust security posture.&lt;/em&gt;&lt;/p&gt;

&lt;p&gt;&lt;strong&gt;Conclusion&lt;/strong&gt;&lt;br&gt;
&lt;em&gt;As organizations continue to embrace Kubernetes for container orchestration, securing these environments becomes paramount. The Starboard Operator, with its container image scanning and Kubernetes resource checks, emerges as a valuable tool in the Kubernetes security toolkit. By seamlessly integrating into existing workflows, Starboard empowers users to proactively address security concerns, fortifying their Kubernetes deployments against potential threats. In conclusion, the Starboard Operator is a powerful ally in the ongoing battle to secure Kubernetes clusters, offering a comprehensive set of tools to identify and remediate security issues. Consider integrating Starboard into your Kubernetes security strategy to elevate your containerized applications' defense against modern threats.&lt;/em&gt;&lt;/p&gt;

</description>
    </item>
    <item>
      <title>Understanding Tekton: A Comprehensive Guide to Cloud-Native CI/CD</title>
      <dc:creator>AMIT CHATURVEDI</dc:creator>
      <pubDate>Wed, 03 Jan 2024 11:25:31 +0000</pubDate>
      <link>https://dev.to/gittest20202/understanding-tekton-a-comprehensive-guide-to-cloud-native-cicd-1m89</link>
      <guid>https://dev.to/gittest20202/understanding-tekton-a-comprehensive-guide-to-cloud-native-cicd-1m89</guid>
      <description>&lt;p&gt;&lt;a href="https://res.cloudinary.com/practicaldev/image/fetch/s--2rhKVBMe--/c_limit%2Cf_auto%2Cfl_progressive%2Cq_auto%2Cw_800/https://dev-to-uploads.s3.amazonaws.com/uploads/articles/hl561ywwquu1226ma0dh.png" class="article-body-image-wrapper"&gt;&lt;img src="https://res.cloudinary.com/practicaldev/image/fetch/s--2rhKVBMe--/c_limit%2Cf_auto%2Cfl_progressive%2Cq_auto%2Cw_800/https://dev-to-uploads.s3.amazonaws.com/uploads/articles/hl561ywwquu1226ma0dh.png" alt="Image description" width="268" height="148"&gt;&lt;/a&gt;&lt;br&gt;
&lt;strong&gt;Introduction:&lt;/strong&gt;&lt;br&gt;
&lt;em&gt;Tekton is an open-source project that facilitates the creation and operation of cloud-native continuous integration and continuous delivery (CI/CD) pipelines. In this blog post, we'll explore the fundamentals of Tekton, its architecture, and its role in the modern DevOps landscape.&lt;/em&gt;&lt;/p&gt;

&lt;p&gt;&lt;strong&gt;What is Tekton?&lt;/strong&gt;&lt;br&gt;
&lt;em&gt;Tekton is a Kubernetes-native CI/CD framework that provides a set of custom resources and controllers to define and run CI/CD pipelines as containers in Kubernetes clusters. It is designed to be flexible, extensible, and scalable, making it an excellent choice for building cloud-native applications.&lt;/em&gt;&lt;/p&gt;

&lt;p&gt;&lt;strong&gt;Key Components of Tekton:&lt;/strong&gt;&lt;br&gt;
&lt;strong&gt;Task:&lt;/strong&gt;&lt;br&gt;
&lt;em&gt;A Task is the smallest unit of work in Tekton. It represents a single, well-defined step in the CI/CD pipeline. Tasks can be reused across different pipelines.&lt;/em&gt;&lt;/p&gt;

&lt;p&gt;&lt;strong&gt;Pipeline:&lt;/strong&gt;&lt;br&gt;
&lt;em&gt;A Pipeline defines a series of interconnected tasks to be executed as part of the CI/CD process. Pipelines enable the orchestration of tasks and define the workflow.&lt;/em&gt;&lt;/p&gt;

&lt;p&gt;&lt;strong&gt;PipelineRun:&lt;/strong&gt;&lt;br&gt;
&lt;em&gt;A PipelineRun is an instantiation of a Pipeline. It represents a single execution of a CI/CD process, tying together the defined tasks and pipelines.&lt;/em&gt;&lt;/p&gt;

&lt;p&gt;&lt;strong&gt;Trigger:&lt;/strong&gt;&lt;br&gt;
&lt;em&gt;Tekton Triggers allow the automation of pipeline execution based on events. Events can be external (e.g., a new code push) or internal (e.g., a time-based trigger).&lt;/em&gt;&lt;/p&gt;

&lt;p&gt;&lt;strong&gt;Tekton Architecture:&lt;/strong&gt;&lt;br&gt;
&lt;em&gt;Tekton is built on a set of custom Kubernetes resources, allowing it to seamlessly integrate with Kubernetes-native features. The architecture includes:&lt;/em&gt;&lt;/p&gt;

&lt;p&gt;&lt;strong&gt;Controller:&lt;/strong&gt;&lt;br&gt;
&lt;em&gt;The Tekton Controller manages the custom resources and orchestrates the execution of tasks and pipelines. It monitors changes in resources and ensures the desired state is maintained.&lt;/em&gt;&lt;/p&gt;

&lt;p&gt;&lt;strong&gt;CRDs (Custom Resource Definitions):&lt;/strong&gt;&lt;br&gt;
&lt;em&gt;Tekton introduces custom resources such as Task, Pipeline, and Trigger to represent CI/CD concepts. These CRDs define the structure of CI/CD pipelines.&lt;/em&gt;&lt;/p&gt;

&lt;p&gt;&lt;strong&gt;Tekton CLI:&lt;/strong&gt;&lt;br&gt;
&lt;em&gt;The Tekton CLI (tkn) provides a command-line interface to interact with and manage Tekton resources. It simplifies pipeline management and execution.&lt;/em&gt;&lt;/p&gt;

&lt;p&gt;&lt;strong&gt;Advantages of Tekton:&lt;/strong&gt;&lt;br&gt;
&lt;strong&gt;Portability:&lt;/strong&gt;&lt;br&gt;
&lt;em&gt;Tekton allows you to define CI/CD pipelines as code, making them portable across different Kubernetes clusters. This ensures consistent pipeline execution in various environments.&lt;/em&gt;&lt;/p&gt;

&lt;p&gt;&lt;strong&gt;Extensibility:&lt;/strong&gt;&lt;br&gt;
&lt;em&gt;Tekton is highly extensible, allowing users to create custom tasks and share them across projects. The extensibility ensures that Tekton can adapt to diverse CI/CD requirements.&lt;/em&gt;&lt;/p&gt;

&lt;p&gt;&lt;strong&gt;Kubernetes Integration:&lt;/strong&gt;&lt;br&gt;
&lt;em&gt;Tekton leverages native Kubernetes constructs, enabling seamless integration with other Kubernetes tools and services. This integration simplifies the management of CI/CD pipelines within Kubernetes environments.&lt;/em&gt;&lt;/p&gt;

&lt;p&gt;&lt;strong&gt;Reusability:&lt;/strong&gt;&lt;br&gt;
&lt;em&gt;The modular nature of Tekton allows for the reuse of tasks and pipelines. This promotes code sharing, reduces duplication, and enhances maintainability.&lt;/em&gt;&lt;/p&gt;

&lt;p&gt;&lt;strong&gt;Getting Started with Tekton:&lt;/strong&gt;&lt;br&gt;
&lt;strong&gt;Installation:&lt;/strong&gt;&lt;br&gt;
&lt;em&gt;Install Tekton in your Kubernetes cluster using the provided installation manifests or Helm charts. Tekton can run on any Kubernetes distribution.&lt;/em&gt;&lt;/p&gt;

&lt;p&gt;&lt;strong&gt;Defining Tasks and Pipelines:&lt;/strong&gt;&lt;br&gt;
&lt;em&gt;Use the Tekton CLI or YAML manifests to define tasks and pipelines. Tasks can be simple, like running tests, or complex, involving multiple steps.&lt;/em&gt;&lt;/p&gt;

&lt;p&gt;&lt;strong&gt;Executing Pipelines:&lt;/strong&gt;&lt;br&gt;
&lt;em&gt;Trigger the execution of pipelines manually or automate them using Tekton Triggers. Monitor the execution using the Tekton CLI or the Kubernetes dashboard.&lt;/em&gt;&lt;/p&gt;

&lt;p&gt;&lt;strong&gt;Extending Tekton:&lt;/strong&gt;&lt;br&gt;
&lt;em&gt;Explore and contribute to the Tekton ecosystem by creating custom tasks or integrating existing tools into your pipelines.&lt;/em&gt;&lt;/p&gt;

&lt;p&gt;&lt;strong&gt;Install Tekton Pipelines&lt;/strong&gt;&lt;/p&gt;

&lt;p&gt;&lt;strong&gt;Prerequisites&lt;/strong&gt;&lt;br&gt;
&lt;em&gt;A Kubernetes cluster running version 1.25 or later.&lt;br&gt;
Kubectl.&lt;/em&gt;&lt;br&gt;
&lt;em&gt;Grant cluster-admin privileges to the current user.&lt;/em&gt; &lt;br&gt;
&lt;em&gt;(Optional) Install a Metrics Server if you need support for high availability use cases.&lt;/em&gt;&lt;/p&gt;

&lt;p&gt;&lt;strong&gt;Latest official release:&lt;/strong&gt;&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight plaintext"&gt;&lt;code&gt;root@master:~# kubectl apply --filename https://storage.googleapis.com/tekton-releases/pipeline/latest/release.yaml

root@master:~# kubectl get pods --namespace tekton-pipelines
NAME                                           READY   STATUS    RESTARTS   AGE
tekton-events-controller-8b5bb559c-fnnvs       1/1     Running   0          6m1s
tekton-pipelines-controller-65fb8b9d46-qbmnm   1/1     Running   0          6m2s
tekton-pipelines-webhook-66ff4b6644-pvhbc      1/1     Running   0          5m54s

&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;p&gt;&lt;strong&gt;Install and set up Tekton Triggers&lt;/strong&gt;&lt;br&gt;
&lt;strong&gt;Prerequisites&lt;/strong&gt;&lt;br&gt;
&lt;em&gt;Kubernetes cluster version 1.18 or later.&lt;/em&gt;&lt;br&gt;
&lt;em&gt;Kubectl.&lt;/em&gt;&lt;br&gt;
&lt;em&gt;Tekton Pipelines.&lt;/em&gt;&lt;br&gt;
&lt;em&gt;Grant cluster-admin privileges to the user that installed Tekton Pipelines.&lt;/em&gt; &lt;/p&gt;

&lt;p&gt;&lt;a href="https://res.cloudinary.com/practicaldev/image/fetch/s--y58DbpVW--/c_limit%2Cf_auto%2Cfl_progressive%2Cq_auto%2Cw_800/https://dev-to-uploads.s3.amazonaws.com/uploads/articles/rk0emb8smmb9lzz8e2wh.png" class="article-body-image-wrapper"&gt;&lt;img src="https://res.cloudinary.com/practicaldev/image/fetch/s--y58DbpVW--/c_limit%2Cf_auto%2Cfl_progressive%2Cq_auto%2Cw_800/https://dev-to-uploads.s3.amazonaws.com/uploads/articles/rk0emb8smmb9lzz8e2wh.png" alt="Image description" width="800" height="567"&gt;&lt;/a&gt;&lt;br&gt;
&lt;strong&gt;Configuring a Task&lt;/strong&gt;&lt;br&gt;
Define a Task for Building the Node.js Application:&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight plaintext"&gt;&lt;code&gt;#cat build-task.yaml
apiVersion: tekton.dev/v1beta1
kind: Task
metadata:
  name: build-task
spec:
  steps:
    - name: npm-install
      image: node:14
      workingDir: /workspace/source
      script: |
        npm install

    - name: build-app
      image: node:14
      workingDir: /workspace/source
      script: |
        npm run build
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;p&gt;&lt;strong&gt;This Task consists of two steps:&lt;/strong&gt;&lt;/p&gt;

&lt;p&gt;&lt;strong&gt;npm-install&lt;/strong&gt;: &lt;em&gt;Installs the Node.js dependencies.&lt;/em&gt;&lt;br&gt;
&lt;strong&gt;build-app&lt;/strong&gt;: &lt;em&gt;Builds the Node.js application&lt;/em&gt;&lt;/p&gt;

&lt;p&gt;&lt;strong&gt;Define a Task for Deploying the Containerized Application:&lt;/strong&gt;&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight plaintext"&gt;&lt;code&gt;#cat deploy-task.yaml
apiVersion: tekton.dev/v1beta1
kind: Task
metadata:
  name: deploy-task
spec:
  steps:
    - name: deploy-app
      image: gcr.io/cloud-builders/kubectl
      script: |
        kubectl apply -f /workspace/source/deployment.yaml

# kubectl get task
NAME                AGE
build-task          16m
deploy-task         16m
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;p&gt;&lt;em&gt;This Task includes a single step (deploy-app) that uses the kubectl tool to apply the Kubernetes deployment manifest for your application.&lt;/em&gt;&lt;/p&gt;

&lt;p&gt;&lt;strong&gt;Define a Pipeline that Uses the Tasks:&lt;/strong&gt;&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight plaintext"&gt;&lt;code&gt;#cat example-pipeline.yaml
apiVersion: tekton.dev/v1beta1
kind: Pipeline
metadata:
  name: example-pipeline
spec:
  tasks:
    - name: build
      taskRef:
        name: build-task
    - name: deploy
      taskRef:
        name: deploy-task

# kubectl get pipeline | grep -i example
example-pipeline   15m

&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;p&gt;&lt;em&gt;This Pipeline consists of two tasks: build and deploy. Each task references the previously defined tasks (build-task and deploy-task).&lt;/em&gt;&lt;/p&gt;

&lt;p&gt;&lt;strong&gt;Apply the Tekton Resources:&lt;/strong&gt;&lt;br&gt;
&lt;em&gt;Apply the Tekton resources to your Kubernetes cluster:&lt;/em&gt;&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight plaintext"&gt;&lt;code&gt;kubectl apply -f build-task.yaml
kubectl apply -f deploy-task.yaml
kubectl apply -f example-pipeline.yaml
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;p&gt;&lt;strong&gt;Run the Pipeline:&lt;/strong&gt;&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight plaintext"&gt;&lt;code&gt;# cat pipeline-run.yaml
apiVersion: tekton.dev/v1beta1
kind: PipelineRun
metadata:
  name: example-pipeline-run
spec:
  pipelineRef:
    name: example-pipeline
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;p&gt;&lt;em&gt;Apply the PipelineRun to start the execution of the pipeline&lt;/em&gt;&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight plaintext"&gt;&lt;code&gt;kubectl apply -f pipeline-run.yaml
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;p&gt;&lt;strong&gt;Monitor PipelineRun Status&lt;/strong&gt;&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight plaintext"&gt;&lt;code&gt;# kubectl get PipelineRun --watch
NAME                   SUCCEEDED   REASON    STARTTIME   COMPLETIONTIME
example-pipeline-run   Unknown     Running   40s

&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



</description>
    </item>
    <item>
      <title>Migrating from Docker To Containerd</title>
      <dc:creator>AMIT CHATURVEDI</dc:creator>
      <pubDate>Tue, 26 Dec 2023 14:35:04 +0000</pubDate>
      <link>https://dev.to/gittest20202/migrating-from-docker-to-containerd-1354</link>
      <guid>https://dev.to/gittest20202/migrating-from-docker-to-containerd-1354</guid>
      <description>&lt;p&gt;&lt;em&gt;In the dynamic world of container orchestration, choosing the right container runtime is crucial for optimizing performance and resource utilization. If you've been using Docker and are contemplating a switch, this blog post will guide you through the process of migrating to Containerd. Follow this step-by-step guide to ensure a smooth migration and unlock the benefits that Containerd has to offer.&lt;/em&gt;&lt;/p&gt;

&lt;p&gt;&lt;a href="https://res.cloudinary.com/practicaldev/image/fetch/s--5_mgFaW1--/c_limit%2Cf_auto%2Cfl_progressive%2Cq_auto%2Cw_800/https://dev-to-uploads.s3.amazonaws.com/uploads/articles/w3csf0ud6z9ue2cpuxoj.png" class="article-body-image-wrapper"&gt;&lt;img src="https://res.cloudinary.com/practicaldev/image/fetch/s--5_mgFaW1--/c_limit%2Cf_auto%2Cfl_progressive%2Cq_auto%2Cw_800/https://dev-to-uploads.s3.amazonaws.com/uploads/articles/w3csf0ud6z9ue2cpuxoj.png" alt="Image description" width="474" height="157"&gt;&lt;/a&gt;&lt;/p&gt;

&lt;p&gt;&lt;a href="https://res.cloudinary.com/practicaldev/image/fetch/s--m6NbCOTx--/c_limit%2Cf_auto%2Cfl_progressive%2Cq_auto%2Cw_800/https://dev-to-uploads.s3.amazonaws.com/uploads/articles/6tox504v7r97wf7o05uw.png" class="article-body-image-wrapper"&gt;&lt;img src="https://res.cloudinary.com/practicaldev/image/fetch/s--m6NbCOTx--/c_limit%2Cf_auto%2Cfl_progressive%2Cq_auto%2Cw_800/https://dev-to-uploads.s3.amazonaws.com/uploads/articles/6tox504v7r97wf7o05uw.png" alt="Image description" width="326" height="180"&gt;&lt;/a&gt;&lt;/p&gt;

&lt;p&gt;&lt;strong&gt;Understanding the Basics&lt;/strong&gt;&lt;br&gt;
&lt;em&gt;Containerd Overview:&lt;/em&gt;&lt;br&gt;
Containerd serves as an essential building block for higher-level container platforms, including Docker and Kubernetes. Initially developed by Docker, Inc., it was later contributed to the community and is now a standalone project within the Cloud Native Computing Foundation (CNCF). Containerd's design emphasizes simplicity, portability, and extensibility, making it a versatile solution for various containerization needs.&lt;/p&gt;

&lt;p&gt;&lt;strong&gt;Key Features&lt;/strong&gt;&lt;/p&gt;

&lt;p&gt;&lt;strong&gt;Container Lifecycle Management:&lt;/strong&gt;&lt;br&gt;
&lt;em&gt;Containerd excels in managing the entire lifecycle of containers, handling tasks such as image transfer, container execution, and storage management. It abstracts away the complexities of these operations, providing a standardized interface for container runtimes.&lt;/em&gt;&lt;/p&gt;

&lt;p&gt;&lt;strong&gt;Image Distribution:&lt;/strong&gt;&lt;br&gt;
&lt;em&gt;Containerd supports the distribution and management of container images. It interfaces with container registries to efficiently pull, push, and store container images, ensuring seamless deployment across diverse environments.&lt;/em&gt;&lt;/p&gt;

&lt;p&gt;&lt;strong&gt;Pluggable Architecture:&lt;/strong&gt;&lt;br&gt;
&lt;em&gt;With a modular and pluggable architecture, Containerd allows users to customize and extend its functionality. This flexibility enables integrations with various container orchestration systems and supports a diverse range of use cases.&lt;/em&gt;&lt;/p&gt;

&lt;p&gt;&lt;strong&gt;Runtime Interface:&lt;/strong&gt;&lt;br&gt;
&lt;em&gt;Containerd adheres to the container runtime interface (CRI), making it compatible with container orchestration platforms like Kubernetes. Its compliance with industry standards enhances interoperability and ease of integration into existing container ecosystems.&lt;/em&gt;&lt;/p&gt;

&lt;p&gt;&lt;strong&gt;Architecture:&lt;/strong&gt;&lt;br&gt;
&lt;em&gt;Containerd's architecture is designed with a focus on simplicity and modularity. The key components include:&lt;/em&gt;&lt;/p&gt;

&lt;p&gt;&lt;strong&gt;Shim Layer:&lt;/strong&gt;&lt;br&gt;
&lt;em&gt;The shim layer serves as the intermediary between the container runtime and the container process. It helps in handling container execution, managing process lifecycle, and interfacing with the underlying operating system.&lt;/em&gt;&lt;/p&gt;

&lt;p&gt;&lt;strong&gt;Containerd Daemon:&lt;/strong&gt;&lt;br&gt;
&lt;em&gt;The core daemon manages essential container operations such as image handling, storage management, and container lifecycle. It exposes a gRPC API that enables communication with client applications and container runtimes.&lt;/em&gt;&lt;/p&gt;

&lt;p&gt;&lt;strong&gt;gRPC API:&lt;/strong&gt;&lt;br&gt;
&lt;em&gt;Containerd's gRPC API provides a standardized and efficient communication channel for interacting with the daemon. It allows seamless integration with client tools, container orchestrators, and other components in the container ecosystem.&lt;/em&gt;&lt;/p&gt;

&lt;p&gt;&lt;strong&gt;Migrate Kubernetes from Docker to Containerd&lt;/strong&gt;&lt;/p&gt;

&lt;p&gt;&lt;a href="https://res.cloudinary.com/practicaldev/image/fetch/s--t2LmJadS--/c_limit%2Cf_auto%2Cfl_progressive%2Cq_auto%2Cw_800/https://dev-to-uploads.s3.amazonaws.com/uploads/articles/53lrr7gjd9ndy1skexxg.png" class="article-body-image-wrapper"&gt;&lt;img src="https://res.cloudinary.com/practicaldev/image/fetch/s--t2LmJadS--/c_limit%2Cf_auto%2Cfl_progressive%2Cq_auto%2Cw_800/https://dev-to-uploads.s3.amazonaws.com/uploads/articles/53lrr7gjd9ndy1skexxg.png" alt="Image description" width="439" height="180"&gt;&lt;/a&gt;&lt;/p&gt;

&lt;p&gt;&lt;em&gt;Kubernetes decided to deprecate Docker as a container runtime after v1.20&lt;/em&gt;&lt;/p&gt;

&lt;p&gt;&lt;em&gt;An explanation of why Kubernetes decided to deprecate Docker can be found here: &lt;a href="https://kubernetes.io/blog/2020/12/02/dont-panic-kubernetes-and-docker/"&gt;Why Kubernetes Deprecated Docker&lt;/a&gt;&lt;/em&gt;&lt;/p&gt;

&lt;p&gt;&lt;em&gt;In this post, we are going to migrate a Kubernetes cluster from Docker to Containerd. These changes will apply to all nodes in the cluster. I recommend starting the migration with the worker nodes.&lt;/em&gt;&lt;/p&gt;

&lt;p&gt;&lt;strong&gt;Prepare node&lt;/strong&gt;&lt;br&gt;
&lt;em&gt;First of all, scheduling must be disabled and all unnecessary workloads, except daemon sets, must be evicted.&lt;/em&gt;&lt;/p&gt;

&lt;p&gt;&lt;em&gt;- Start by cordoning the node&lt;/em&gt;&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight plaintext"&gt;&lt;code&gt;$ kubectl cordon k8s-worker-3
node/k8s-worker-3 cordoned
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;p&gt;&lt;em&gt;- Drain node&lt;/em&gt;&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight plaintext"&gt;&lt;code&gt;$ kubectl drain k8s-worker-3 --ignore-daemonsets --delete-emptydir-data
node/k8s-worker-3 already cordoned
node/k8s-worker-3 evicted
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;p&gt;&lt;em&gt;- Stop Kubelet and docker&lt;/em&gt;&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight plaintext"&gt;&lt;code&gt;$ systemctl stop kubelet
$ systemctl stop docker
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;p&gt;&lt;strong&gt;- Switch to containerd&lt;/strong&gt;&lt;/p&gt;

&lt;p&gt;&lt;em&gt;Node is ready to be migrated to containerd. Start by removing docker as it will not be needed anymore&lt;/em&gt;&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight plaintext"&gt;&lt;code&gt;$ apt purge docker-ce docker-ce-cli
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;p&gt;&lt;em&gt;Ensure containerd is installed&lt;/em&gt;&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight plaintext"&gt;&lt;code&gt;$ ctr -n moby container list
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;p&gt;&lt;em&gt;Ensure that config file for containerd in /etc/containerd/config.toml is present. You can generate it with&lt;/em&gt;&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight plaintext"&gt;&lt;code&gt;$ mkdir -p /etc/containerd
$ containerd config default | sudo tee /etc/containerd/config.toml
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;p&gt;&lt;em&gt;If you are using the systemd as a cgroup driver, you must configure it in containerd config. In /etc/containerd/config.toml Add&lt;/em&gt;&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight plaintext"&gt;&lt;code&gt;&amp;lt;...&amp;gt;
[plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc]
  &amp;lt;...&amp;gt;
  [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options]
    SystemdCgroup = true # &amp;lt;--- This line
&amp;lt;...&amp;gt;
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;p&gt;&lt;em&gt;Restart containerd&lt;/em&gt;&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight plaintext"&gt;&lt;code&gt;$ systemctl restart containerd
$ systemctl status containerd
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;p&gt;&lt;a href="https://res.cloudinary.com/practicaldev/image/fetch/s--_hbSqrPe--/c_limit%2Cf_auto%2Cfl_progressive%2Cq_auto%2Cw_800/https://dev-to-uploads.s3.amazonaws.com/uploads/articles/pmrl2iy7n1d24zmzyz92.png" class="article-body-image-wrapper"&gt;&lt;img src="https://res.cloudinary.com/practicaldev/image/fetch/s--_hbSqrPe--/c_limit%2Cf_auto%2Cfl_progressive%2Cq_auto%2Cw_800/https://dev-to-uploads.s3.amazonaws.com/uploads/articles/pmrl2iy7n1d24zmzyz92.png" alt="Image description" width="800" height="181"&gt;&lt;/a&gt;&lt;/p&gt;

&lt;p&gt;&lt;em&gt;Edit /var/lib/kubelet/kubeadm-flags.env file by adding container runtime flags&lt;/em&gt;&lt;br&gt;
    --container-runtime=remote&lt;br&gt;
    --container-runtime-endpoint=unix:///run/containerd/containerd.sock&lt;/p&gt;

&lt;p&gt;&lt;em&gt;kubeadm-flags.env file should now look something like this&lt;/em&gt;&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight plaintext"&gt;&lt;code&gt;$ cat /var/lib/kubelet/kubeadm-flags.env
KUBELET_KUBEADM_ARGS="--network-plugin=cni --pod-infra-container-image=k8s.gcr.io/pause:3.2 --container-runtime=remote --container-runtime-endpoint=unix:///run/containerd/containerd.sock"
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;p&gt;&lt;em&gt;Start Kubelet&lt;/em&gt;&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight plaintext"&gt;&lt;code&gt;$ systemctl start kubelet
$ systemctl status kubelet
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;p&gt;&lt;a href="https://res.cloudinary.com/practicaldev/image/fetch/s--cRvICj-a--/c_limit%2Cf_auto%2Cfl_progressive%2Cq_auto%2Cw_800/https://dev-to-uploads.s3.amazonaws.com/uploads/articles/0u5i7l4x07rr8evm8spl.png" class="article-body-image-wrapper"&gt;&lt;img src="https://res.cloudinary.com/practicaldev/image/fetch/s--cRvICj-a--/c_limit%2Cf_auto%2Cfl_progressive%2Cq_auto%2Cw_800/https://dev-to-uploads.s3.amazonaws.com/uploads/articles/0u5i7l4x07rr8evm8spl.png" alt="Image description" width="800" height="93"&gt;&lt;/a&gt;&lt;a href="https://res.cloudinary.com/practicaldev/image/fetch/s--p05cKzvQ--/c_limit%2Cf_auto%2Cfl_progressive%2Cq_auto%2Cw_800/https://dev-to-uploads.s3.amazonaws.com/uploads/articles/idtu6bx9rk2u1tm7o11l.png" class="article-body-image-wrapper"&gt;&lt;img src="https://res.cloudinary.com/practicaldev/image/fetch/s--p05cKzvQ--/c_limit%2Cf_auto%2Cfl_progressive%2Cq_auto%2Cw_800/https://dev-to-uploads.s3.amazonaws.com/uploads/articles/idtu6bx9rk2u1tm7o11l.png" alt="Image description" width="800" height="38"&gt;&lt;/a&gt;&lt;/p&gt;

&lt;p&gt;&lt;em&gt;Check cluster status&lt;/em&gt;&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight plaintext"&gt;&lt;code&gt;$ kubectl get nodes
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;p&gt;&lt;em&gt;Uncordon the node if everything looks good&lt;/em&gt;&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight plaintext"&gt;&lt;code&gt;$ kubectl uncordon k8s-worker-3
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;blockquote&gt;
&lt;p&gt;Repeat the procedure for all nodes (one by one), finishing with the master node.&lt;/p&gt;
&lt;/blockquote&gt;

&lt;p&gt;&lt;strong&gt;Post-migration&lt;/strong&gt;&lt;br&gt;
 &lt;em&gt;Let's free up some space by removing docker-related folders. They will not be needed anymore&lt;/em&gt;&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight plaintext"&gt;&lt;code&gt;$ rm -r /etc/docker
$ rm -r /var/lib/docker
$ rm -r /var/lib/dockershim
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;p&gt;&lt;em&gt;If you are using kubeadm to manage your cluster initialization, joins, and updates, you might want to re-annotate the nodes, so kubeadm will not get confused on your next update about whether you are using docker or containerd as a runtime&lt;/em&gt;&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight plaintext"&gt;&lt;code&gt;$ kubectl annotate node k8s-worker-3 --overwrite kubeadm.alpha.kubernetes.io/cri-socket=unix:///run/containerd/containerd.sock
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;blockquote&gt;
&lt;p&gt;Note that the removal and annotation steps must be executed on all nodes after the whole cluster has been migrated to containerd and you have ensured that the migration was successful.&lt;/p&gt;
&lt;/blockquote&gt;

</description>
    </item>
    <item>
      <title>Accelerating Kubernetes Operations with Kubernetes Go Client.</title>
      <dc:creator>AMIT CHATURVEDI</dc:creator>
      <pubDate>Mon, 25 Dec 2023 10:44:15 +0000</pubDate>
      <link>https://dev.to/gittest20202/accelerating-kubernetes-operations-with-kubernetes-go-client-4cf6</link>
      <guid>https://dev.to/gittest20202/accelerating-kubernetes-operations-with-kubernetes-go-client-4cf6</guid>
      <description>&lt;p&gt;&lt;strong&gt;Introduction:&lt;/strong&gt;&lt;br&gt;
&lt;em&gt;Explore the powerful combination of Kubernetes and Go programming in this blog post. Learn how the Kubernetes Go client empowers developers to seamlessly interact with Kubernetes clusters, providing a flexible and efficient way to automate operations.&lt;/em&gt;&lt;br&gt;
&lt;a href="https://res.cloudinary.com/practicaldev/image/fetch/s--5Azu18Sn--/c_limit%2Cf_auto%2Cfl_progressive%2Cq_auto%2Cw_800/https://dev-to-uploads.s3.amazonaws.com/uploads/articles/tz6ozyzozfklzs96fmye.png" class="article-body-image-wrapper"&gt;&lt;img src="https://res.cloudinary.com/practicaldev/image/fetch/s--5Azu18Sn--/c_limit%2Cf_auto%2Cfl_progressive%2Cq_auto%2Cw_800/https://dev-to-uploads.s3.amazonaws.com/uploads/articles/tz6ozyzozfklzs96fmye.png" alt="Image description" width="299" height="169"&gt;&lt;/a&gt;&lt;/p&gt;

&lt;p&gt;&lt;strong&gt;Prerequisites:&lt;/strong&gt;&lt;/p&gt;

&lt;p&gt;&lt;em&gt;- Kubernetes Cluster running using minikube, AKS, EKS, or GKE&lt;/em&gt;&lt;/p&gt;

&lt;p&gt;&lt;em&gt;- Go installed in machine.&lt;/em&gt; &lt;/p&gt;

&lt;p&gt;&lt;em&gt;- Go programming basic understanding&lt;/em&gt;&lt;/p&gt;

&lt;p&gt;&lt;em&gt;- &lt;a href="https://github.com/kubernetes/client-go"&gt;Go Client Repo&lt;/a&gt;&lt;/em&gt;&lt;/p&gt;

&lt;p&gt;&lt;strong&gt;API Resources, Kinds, and Objects&lt;/strong&gt;&lt;br&gt;
&lt;em&gt;&lt;strong&gt;Resource Type:&lt;/strong&gt; In Kubernetes, a "Resource Type" refers to a specific kind of object or entity that can be managed within the Kubernetes cluster. Kubernetes uses a declarative model where users define the desired state of their applications or infrastructure in the form of YAML or JSON manifests. These manifests describe the configuration of various resource types, and Kubernetes is responsible for ensuring that the actual state of the cluster matches the desired state.&lt;/em&gt;&lt;/p&gt;

&lt;p&gt;&lt;em&gt;&lt;strong&gt;API Group:&lt;/strong&gt; In Kubernetes, an API group is a way to organize and categorize related sets of APIs. The Kubernetes API is designed to be extensible, and API groups help manage the complexity of the API surface by grouping related resources together. The structure of a Kubernetes API endpoint typically follows the pattern:&lt;/em&gt;&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight plaintext"&gt;&lt;code&gt;/&amp;lt;API_GROUP&amp;gt;/&amp;lt;API_VERSION&amp;gt;/...
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;p&gt;&lt;em&gt;Here:&lt;br&gt;
API_GROUP: Identifies the group to which a particular resource belongs.&lt;br&gt;
API_VERSION: Specifies the version of the API.&lt;/em&gt;&lt;/p&gt;

&lt;p&gt;&lt;em&gt;&lt;strong&gt;Object:&lt;/strong&gt; In Kubernetes, an "object" is a basic building block or unit of the system. Objects represent the state of the cluster and can be created, modified, or deleted to manage applications and other aspects of the system. Objects are defined in Kubernetes manifests, typically written in YAML or JSON, and are submitted to the Kubernetes API server for processing.&lt;/em&gt;&lt;/p&gt;

&lt;p&gt;&lt;em&gt;&lt;strong&gt;Kind:&lt;/strong&gt; In Kubernetes, "Kind" is a field within the metadata of a Kubernetes object that specifies the type or kind of the object. It is a required field and defines the type of resource being created, modified, or interacted with in the cluster. The kind field indicates the object's role and how it should be handled by Kubernetes.&lt;/em&gt;&lt;br&gt;
&lt;a href="https://res.cloudinary.com/practicaldev/image/fetch/s--11Y4w_mQ--/c_limit%2Cf_auto%2Cfl_progressive%2Cq_auto%2Cw_800/https://dev-to-uploads.s3.amazonaws.com/uploads/articles/up6iqsjv7mnjnutmxfe1.png" class="article-body-image-wrapper"&gt;&lt;img src="https://res.cloudinary.com/practicaldev/image/fetch/s--11Y4w_mQ--/c_limit%2Cf_auto%2Cfl_progressive%2Cq_auto%2Cw_800/https://dev-to-uploads.s3.amazonaws.com/uploads/articles/up6iqsjv7mnjnutmxfe1.png" alt="Image description" width="800" height="446"&gt;&lt;/a&gt;&lt;br&gt;
&lt;strong&gt;Module k8s.io/client-go&lt;/strong&gt;&lt;br&gt;
&lt;em&gt;The "k8s.io/client-go" module is the official Go client library for Kubernetes. It provides a set of packages and utilities to interact with the Kubernetes API and perform operations such as creating, updating, and deleting resources, as well as watching for changes in the cluster. To use the Kubernetes Go client libraries in your Go application, you typically import specific packages from "k8s.io/client-go."&lt;/em&gt;&lt;/p&gt;

&lt;p&gt;&lt;strong&gt;Module k8s.io/apimachinery&lt;/strong&gt;&lt;br&gt;
The k8s.io/apimachinery module in Go is part of the Kubernetes Go client libraries and provides a set of packages for working with Kubernetes objects and their metadata. It includes functionality for handling object serialization, conversion, and various utility functions related to Kubernetes API objects.&lt;br&gt;
Key packages within k8s.io/apimachinery include:&lt;/p&gt;

&lt;p&gt;&lt;em&gt;&lt;strong&gt;metav1:&lt;/strong&gt;&lt;br&gt;
This package provides types and functions for working with metadata in Kubernetes objects, such as labels, annotations, and timestamps.&lt;/em&gt;&lt;/p&gt;

&lt;p&gt;&lt;em&gt;&lt;strong&gt;runtime:&lt;/strong&gt;&lt;br&gt;
The runtime package defines interfaces and functions for working with generic Kubernetes runtime objects. It includes serialization, encoding, and decoding functionality.&lt;/em&gt;&lt;/p&gt;

&lt;p&gt;&lt;em&gt;&lt;strong&gt;util:&lt;/strong&gt;&lt;br&gt;
The util package contains utility functions for working with Kubernetes objects, such as conversion functions and label/selector matching.&lt;/em&gt;&lt;/p&gt;

&lt;p&gt;&lt;em&gt;&lt;strong&gt;schema:&lt;/strong&gt;&lt;br&gt;
The schema package defines types and functions related to the schema of Kubernetes objects. It includes functionalities like OpenAPI validation and schema generation.&lt;/em&gt;&lt;/p&gt;

&lt;p&gt;&lt;em&gt;Here is an example of how k8s.io/apimachinery might be used in conjunction with k8s.io/client-go&lt;/em&gt;:&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight plaintext"&gt;&lt;code&gt;package main

import (
    "fmt"

    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/client-go/kubernetes"
    "k8s.io/client-go/tools/clientcmd"
)

func main() {
    // ... (clientset setup code, similar to the previous example)

    // Example: Working with metav1.ObjectMeta
    labels := map[string]string{"app": "example-app", "env": "production"}
    annotations := map[string]string{"description": "An example pod"}
    objectMeta := metav1.ObjectMeta{
        Name:        "example-pod",
        Namespace:   "default",
        Labels:      labels,
        Annotations: annotations,
    }

    // Example: Using metav1.Time for timestamps
    creationTimestamp := metav1.Time{Time: /* your timestamp here */}
    fmt.Printf("Creation Timestamp: %v\n", creationTimestamp)

    // ... (more examples using k8s.io/apimachinery)
}

&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;p&gt;&lt;strong&gt;Ways of using go client to connect to Kubernetes&lt;/strong&gt;&lt;br&gt;
&lt;em&gt;- Authenticating inside the cluster&lt;/em&gt;&lt;br&gt;
&lt;em&gt;- Authenticating outside the cluster&lt;/em&gt;&lt;/p&gt;

&lt;blockquote&gt;
&lt;p&gt;In this blog I am using "Authenticating outside the cluster" way.&lt;/p&gt;
&lt;/blockquote&gt;

&lt;p&gt;&lt;strong&gt;Demo&lt;/strong&gt; &lt;/p&gt;

&lt;ul&gt;
&lt;li&gt;Install go client
&lt;/li&gt;
&lt;/ul&gt;
&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight plaintext"&gt;&lt;code&gt;# wget https://go.dev/dl/go1.21.5.linux-amd64.tar.gz
# tar -C /usr/local -xzf go1.21.5.linux-amd64.tar.gz
# export PATH=$PATH:/usr/local/go/bin
# go version
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;


&lt;p&gt;&lt;em&gt;- Create a client directory and create main.go to connect to k8s cluster&lt;/em&gt;&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight plaintext"&gt;&lt;code&gt; # mkdir client-go-example
 # cd client-go-example/
 # go mod init client-go-example
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;p&gt;&lt;em&gt;- Let's use the go client to connect to the k8s cluster and list the nodes&lt;/em&gt;&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight plaintext"&gt;&lt;code&gt;# vim main.go
package main
import (
        "context"
        "flag"
        "fmt"
        "path/filepath"
        "k8s.io/client-go/kubernetes"
        "k8s.io/client-go/tools/clientcmd"
        "k8s.io/client-go/util/homedir"
        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
func main() {
        var kubeconfig *string
        if home := homedir.HomeDir(); home != "" {
                kubeconfig = flag.String("kubeconfig", filepath.Join(home, ".kube", "config"), "(optional) absolute path to the kubeconfig file")
        } else {
                kubeconfig = flag.String("kubeconfig", "", "absolute path to the kubeconfig file")
        }
        flag.Parse()
        // use the current context in kubeconfig
        config, err := clientcmd.BuildConfigFromFlags("", *kubeconfig)
        if err != nil {
                panic(err.Error())
        }

        // create the clientset
        clientset, err := kubernetes.NewForConfig(config)
        if err != nil {
                panic(err.Error())
        }
        nodeList, err := clientset.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{})
        if err != nil {
                panic(err.Error())
        }
        fmt.Println("Nodes in the cluster:")
        for _, node := range nodeList.Items {
                fmt.Printf("  %s\n", node.GetName())
        }
}

# go build
# go mod tidy
# go run main.go
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;p&gt;&lt;a href="https://res.cloudinary.com/practicaldev/image/fetch/s--4brgNET3--/c_limit%2Cf_auto%2Cfl_progressive%2Cq_auto%2Cw_800/https://dev-to-uploads.s3.amazonaws.com/uploads/articles/yvwbktyycw533v9mpbhv.png" class="article-body-image-wrapper"&gt;&lt;img src="https://res.cloudinary.com/practicaldev/image/fetch/s--4brgNET3--/c_limit%2Cf_auto%2Cfl_progressive%2Cq_auto%2Cw_800/https://dev-to-uploads.s3.amazonaws.com/uploads/articles/yvwbktyycw533v9mpbhv.png" alt="Image description" width="517" height="102"&gt;&lt;/a&gt;&lt;/p&gt;

&lt;p&gt;&lt;strong&gt;- Let's connect to k8s and list the deployed Namespaces in the Cluster&lt;/strong&gt;&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight plaintext"&gt;&lt;code&gt;# vim main.go
package main

import (
        "context"
        "flag"
        "fmt"
        "path/filepath"

        "k8s.io/client-go/kubernetes"
        "k8s.io/client-go/tools/clientcmd"
        "k8s.io/client-go/util/homedir"
        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func ListNameSpaces(coreClient kubernetes.Interface) {
        nsList, err := coreClient.CoreV1().
                Namespaces().
                List(context.Background(), metav1.ListOptions{})
        if err != nil {
                panic(err.Error())
        }

        for _, n := range nsList.Items {
                fmt.Printf("  %s\n", n.Name)
        }
}
func main() {
        var kubeconfig *string
        if home := homedir.HomeDir(); home != "" {
                kubeconfig = flag.String("kubeconfig", filepath.Join(home, ".kube", "config"), "(optional) absolute path to the kubeconfig file")
        } else {
                kubeconfig = flag.String("kubeconfig", "", "absolute path to the kubeconfig file")
        }
        flag.Parse()

        // use the current context in kubeconfig
        config, err := clientcmd.BuildConfigFromFlags("", *kubeconfig)
        if err != nil {
                panic(err.Error())
        }

        // create the clientset
        clientset, err := kubernetes.NewForConfig(config)
        if err != nil {
                panic(err.Error())
        }
        ListNameSpaces(clientset)
}

# go run main.go
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;p&gt;&lt;a href="https://res.cloudinary.com/practicaldev/image/fetch/s--D3SBCQH4--/c_limit%2Cf_auto%2Cfl_progressive%2Cq_auto%2Cw_800/https://dev-to-uploads.s3.amazonaws.com/uploads/articles/8rat8tfkicmpneh6fb5s.png" class="article-body-image-wrapper"&gt;&lt;img src="https://res.cloudinary.com/practicaldev/image/fetch/s--D3SBCQH4--/c_limit%2Cf_auto%2Cfl_progressive%2Cq_auto%2Cw_800/https://dev-to-uploads.s3.amazonaws.com/uploads/articles/8rat8tfkicmpneh6fb5s.png" alt="Image description" width="505" height="307"&gt;&lt;/a&gt;&lt;/p&gt;

&lt;p&gt;&lt;strong&gt;- Let's connect to k8s and list the deployed pods in each Namespace in the Cluster&lt;/strong&gt;&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight plaintext"&gt;&lt;code&gt;package main

import (
        "context"
        "flag"
        "fmt"
        "path/filepath"

        "k8s.io/client-go/kubernetes"
        "k8s.io/client-go/tools/clientcmd"
        "k8s.io/client-go/util/homedir"
        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func ListNameSpaces(coreClient kubernetes.Interface) {
        nsList, err := coreClient.CoreV1().
                Namespaces().
                List(context.Background(), metav1.ListOptions{})
        if err != nil {
                panic(err.Error())
        }

        for _, n := range nsList.Items {
               ListPods(coreClient, n.Name)
        }
}

func ListPods(coreClient kubernetes.Interface, namespace string){
        fmt.Printf("Pods in namespace: %s\n", namespace)
        pods, err := coreClient.CoreV1().Pods(namespace).List(context.TODO(), metav1.ListOptions{})
        if err != nil {
           fmt.Printf("Error getting pods in namespace %s: %v\n", namespace, err)
           }
        for _, pod := range pods.Items {
             fmt.Printf("  %s\n", pod.Name)
        }
        fmt.Println()
        }
func main() {
        var kubeconfig *string
        if home := homedir.HomeDir(); home != "" {
                kubeconfig = flag.String("kubeconfig", filepath.Join(home, ".kube", "config"), "(optional) absolute path to the kubeconfig file")
        } else {
                kubeconfig = flag.String("kubeconfig", "", "absolute path to the kubeconfig file")
        }
        flag.Parse()

        // use the current context in kubeconfig
        config, err := clientcmd.BuildConfigFromFlags("", *kubeconfig)
        if err != nil {
                panic(err.Error())
        }

        // create the clientset
        clientset, err := kubernetes.NewForConfig(config)
        if err != nil {
                panic(err.Error())
        }
        ListNameSpaces(clientset)
}

# go run main.go
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;p&gt;&lt;a href="https://res.cloudinary.com/practicaldev/image/fetch/s--0svkgRdz--/c_limit%2Cf_auto%2Cfl_progressive%2Cq_auto%2Cw_800/https://dev-to-uploads.s3.amazonaws.com/uploads/articles/vfm8hhph2xydmgw7cfkn.png" class="article-body-image-wrapper"&gt;&lt;img src="https://res.cloudinary.com/practicaldev/image/fetch/s--0svkgRdz--/c_limit%2Cf_auto%2Cfl_progressive%2Cq_auto%2Cw_800/https://dev-to-uploads.s3.amazonaws.com/uploads/articles/vfm8hhph2xydmgw7cfkn.png" alt="Image description" width="523" height="646"&gt;&lt;/a&gt;&lt;/p&gt;

&lt;blockquote&gt;
&lt;p&gt;These are some examples of how you can communicate with the Kubernetes cluster using the Go client. In the next blog we will see how we can mutate the Kubernetes APIs.&lt;/p&gt;
&lt;/blockquote&gt;

</description>
    </item>
    <item>
      <title>Introduction To Kata Containers</title>
      <dc:creator>AMIT CHATURVEDI</dc:creator>
      <pubDate>Thu, 21 Dec 2023 15:25:55 +0000</pubDate>
      <link>https://dev.to/gittest20202/introduction-to-kata-containers-4loo</link>
      <guid>https://dev.to/gittest20202/introduction-to-kata-containers-4loo</guid>
      <description>&lt;p&gt;&lt;strong&gt;Kata Containers is an open-source project that provides lightweight virtualization for container workloads. It combines the security advantages of virtual machines (VMs) with the speed and manageability of containers. Kata Containers uses a lightweight hypervisor to isolate each container in its own micro-VM, providing an additional layer of security and ensuring that containers are truly isolated from each other.&lt;/strong&gt;&lt;br&gt;
&lt;a href="https://res.cloudinary.com/practicaldev/image/fetch/s--9CpI-btR--/c_limit%2Cf_auto%2Cfl_progressive%2Cq_auto%2Cw_800/https://dev-to-uploads.s3.amazonaws.com/uploads/articles/5zdfmasyqh4so0tqlh9m.png" class="article-body-image-wrapper"&gt;&lt;img src="https://res.cloudinary.com/practicaldev/image/fetch/s--9CpI-btR--/c_limit%2Cf_auto%2Cfl_progressive%2Cq_auto%2Cw_800/https://dev-to-uploads.s3.amazonaws.com/uploads/articles/5zdfmasyqh4so0tqlh9m.png" alt="Image description" width="800" height="233"&gt;&lt;/a&gt;&lt;/p&gt;

&lt;p&gt;&lt;em&gt;Kata Containers aims to build a secure and OCI compatible container runtime that enhances the security and isolation of container workloads by putting each one of them in a lightweight virtual machine, using hardware virtualization. Every virtual machine runs its own kernel.&lt;/em&gt; &lt;br&gt;
&lt;a href="https://res.cloudinary.com/practicaldev/image/fetch/s--Fya7Dcj5--/c_limit%2Cf_auto%2Cfl_progressive%2Cq_auto%2Cw_800/https://dev-to-uploads.s3.amazonaws.com/uploads/articles/w5lqo230a8g5k5f7pyyr.png" class="article-body-image-wrapper"&gt;&lt;img src="https://res.cloudinary.com/practicaldev/image/fetch/s--Fya7Dcj5--/c_limit%2Cf_auto%2Cfl_progressive%2Cq_auto%2Cw_800/https://dev-to-uploads.s3.amazonaws.com/uploads/articles/w5lqo230a8g5k5f7pyyr.png" alt="Image description" width="800" height="290"&gt;&lt;/a&gt;&lt;/p&gt;

&lt;p&gt;&lt;em&gt;The need for isolation in containerized environments arises from the fundamental design of containers, where multiple workloads share the same operating system (OS) kernel on a host machine. While this shared kernel architecture offers efficiency and speed, it introduces potential security challenges:&lt;/em&gt;&lt;/p&gt;

&lt;p&gt;&lt;strong&gt;Kernel Exploitation&lt;/strong&gt;&lt;br&gt;
&lt;em&gt;Single Point of Failure: Containers on the same host share a common kernel. If a security vulnerability is exploited in the kernel, it can potentially impact all containers on that host. This single point of failure increases the risk of a security breach.&lt;/em&gt;&lt;/p&gt;

&lt;p&gt;&lt;strong&gt;Resource Contentions&lt;/strong&gt;&lt;br&gt;
&lt;em&gt;Resource Competition: Containers on the same host compete for resources such as CPU, memory, and I/O. Without proper isolation, one container's resource-intensive operations can negatively impact the performance and stability of other containers, leading to a potential denial-of-service (DoS) scenario.&lt;/em&gt;&lt;/p&gt;

&lt;p&gt;&lt;strong&gt;Namespace and Cgroup Limitations&lt;/strong&gt;&lt;br&gt;
&lt;em&gt;Namespace Conflicts: Containers use Linux namespaces to create isolated environments for processes, but namespace conflicts can occur, allowing unintended interactions between containers. For example, two containers might share the same network namespace, potentially leading to unauthorized access.&lt;/em&gt;&lt;/p&gt;

&lt;p&gt;&lt;strong&gt;Data Security&lt;/strong&gt;&lt;br&gt;
&lt;em&gt;Shared Volumes: Containers often share data volumes, and inadequate isolation may lead to data breaches. Without proper access controls and encryption, sensitive information stored in shared volumes can be accessed or manipulated by unauthorized containers.&lt;/em&gt;&lt;/p&gt;

&lt;p&gt;&lt;strong&gt;Container Escape&lt;/strong&gt;&lt;br&gt;
&lt;em&gt;Privilege Escalation: In a multi-tenant environment, if a malicious actor gains access to a container, they might attempt to escalate privileges and break out of the container to compromise the host system. Proper isolation measures help prevent such container escape scenarios.&lt;/em&gt;&lt;/p&gt;

&lt;p&gt;&lt;strong&gt;Inter-Container Communication&lt;/strong&gt;&lt;br&gt;
&lt;em&gt;Unintended Communication: Containers communicate with each other through shared resources like network interfaces. Without proper isolation, one container might unintentionally communicate with or affect the behavior of another, leading to security vulnerabilities.&lt;/em&gt;&lt;/p&gt;

&lt;p&gt;&lt;strong&gt;Security Compliance&lt;/strong&gt;&lt;br&gt;
&lt;em&gt;Regulatory Compliance: Industries with strict security and compliance requirements, such as finance or healthcare, may face challenges meeting regulatory standards when relying solely on containerization. Isolation becomes crucial to address compliance concerns.&lt;/em&gt;&lt;/p&gt;

&lt;p&gt;&lt;strong&gt;Dynamic Environments:&lt;/strong&gt;&lt;br&gt;
&lt;em&gt;Dynamic Workloads: Containers are designed to be dynamic and scalable, with instances frequently starting and stopping. In such dynamic environments, maintaining proper isolation becomes challenging without robust security measures in place.&lt;/em&gt;&lt;/p&gt;

&lt;p&gt;&lt;strong&gt;Enter Kata Containers: Bridging Security and Containerization&lt;/strong&gt;&lt;br&gt;
&lt;em&gt;Kata Containers addresses the security challenges inherent in containerization by taking a unique approach. Unlike traditional containers that share the host OS kernel, Kata Containers leverages lightweight virtual machines to encapsulate each container. This innovative strategy fuses the best attributes of both containers and VMs, creating a symbiotic relationship between speed and security.&lt;/em&gt;&lt;/p&gt;

&lt;p&gt;&lt;strong&gt;Key Features of Kata Containers&lt;/strong&gt;&lt;/p&gt;

&lt;p&gt;&lt;strong&gt;Micro-VM Architecture:&lt;/strong&gt;&lt;br&gt;
&lt;em&gt;Isolation Reinvented: Kata Containers introduces a micro-VM architecture, assigning each container its own minimalistic VM. This ensures that even in a shared environment, each workload operates within its private and secure space, eliminating the risk of cross-container vulnerabilities.&lt;/em&gt;&lt;/p&gt;

&lt;p&gt;&lt;strong&gt;Lightweight Footprint:&lt;/strong&gt;&lt;br&gt;
&lt;em&gt;Efficiency at its Core: While embracing the security advantages of VMs, Kata Containers retains the lightweight nature of traditional containers. The overhead associated with starting and running these micro-VMs is minimal, enabling rapid deployment and resource efficiency.&lt;/em&gt;&lt;/p&gt;

&lt;p&gt;&lt;strong&gt;Compatibility with Container Runtimes:&lt;/strong&gt;&lt;br&gt;
&lt;em&gt;Seamless Integration: Kata Containers seamlessly integrates with popular container runtimes like Docker and container orchestration platforms like Kubernetes. This compatibility ensures that users can enjoy enhanced security without sacrificing the convenience of their preferred containerization tools.&lt;/em&gt;&lt;/p&gt;

&lt;p&gt;&lt;strong&gt;Getting Started with Kata Containers:&lt;/strong&gt;&lt;/p&gt;

&lt;p&gt;&lt;strong&gt;Prerequisites&lt;/strong&gt;&lt;br&gt;
&lt;em&gt;A Kubernetes Cluster bootstrapped and installed with kubeadm, kubectl and kubelet&lt;br&gt;
Container Runtime Interface (CRI) - Containerd or cri-o&lt;/em&gt;&lt;/p&gt;

&lt;p&gt;&lt;strong&gt;Installation of Kata-Containers&lt;/strong&gt;&lt;/p&gt;

&lt;ul&gt;
&lt;li&gt;
&lt;em&gt;Create and provision different RBAC roles to kata-deploy pod&lt;/em&gt;
&lt;/li&gt;
&lt;/ul&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight plaintext"&gt;&lt;code&gt;# kubectl apply -f https://raw.githubusercontent.com/kata-containers/kata-containers/main/tools/packaging/kata-deploy/kata-rbac/base/kata-rbac.yaml
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;p&gt;&lt;a href="https://res.cloudinary.com/practicaldev/image/fetch/s--fYAoe7K7--/c_limit%2Cf_auto%2Cfl_progressive%2Cq_auto%2Cw_800/https://dev-to-uploads.s3.amazonaws.com/uploads/articles/wfgzi8l8wsl6xgv1k54o.png" class="article-body-image-wrapper"&gt;&lt;img src="https://res.cloudinary.com/practicaldev/image/fetch/s--fYAoe7K7--/c_limit%2Cf_auto%2Cfl_progressive%2Cq_auto%2Cw_800/https://dev-to-uploads.s3.amazonaws.com/uploads/articles/wfgzi8l8wsl6xgv1k54o.png" alt="Image description" width="800" height="63"&gt;&lt;/a&gt;&lt;/p&gt;

&lt;ul&gt;
&lt;li&gt;
&lt;em&gt;Then create a kata-deploy pod by deploying its stable version.&lt;/em&gt;
&lt;/li&gt;
&lt;/ul&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight plaintext"&gt;&lt;code&gt;# kubectl apply -f https://raw.githubusercontent.com/kata-containers/kata-containers/main/tools/packaging/kata-deploy/kata-deploy/base/kata-deploy-stable.yaml
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;ul&gt;
&lt;li&gt;
&lt;em&gt;Check the kata-deploy pod status inside the kube-system namespace.&lt;/em&gt;
&lt;/li&gt;
&lt;/ul&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight plaintext"&gt;&lt;code&gt;# kubectl get pods -n kube-system | grep kata

# kubectl -n kube-system wait --timeout=10m --for=condition=Ready -l name=kata-deploy pod
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;p&gt;&lt;a href="https://res.cloudinary.com/practicaldev/image/fetch/s--kthWuJV_--/c_limit%2Cf_auto%2Cfl_progressive%2Cq_auto%2Cw_800/https://dev-to-uploads.s3.amazonaws.com/uploads/articles/us3iiddqhuzo50iwgfag.png" class="article-body-image-wrapper"&gt;&lt;img src="https://res.cloudinary.com/practicaldev/image/fetch/s--kthWuJV_--/c_limit%2Cf_auto%2Cfl_progressive%2Cq_auto%2Cw_800/https://dev-to-uploads.s3.amazonaws.com/uploads/articles/us3iiddqhuzo50iwgfag.png" alt="Image description" width="800" height="76"&gt;&lt;/a&gt;&lt;/p&gt;

&lt;ul&gt;
&lt;li&gt;
&lt;em&gt;Check the Kata-Containers labels on the node&lt;/em&gt;
&lt;/li&gt;
&lt;/ul&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight plaintext"&gt;&lt;code&gt;# kubectl get nodes --show-labels | grep kata
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;p&gt;&lt;a href="https://res.cloudinary.com/practicaldev/image/fetch/s--wqIVWs02--/c_limit%2Cf_auto%2Cfl_progressive%2Cq_auto%2Cw_800/https://dev-to-uploads.s3.amazonaws.com/uploads/articles/u210wy25cpfznmoz4vo2.png" class="article-body-image-wrapper"&gt;&lt;img src="https://res.cloudinary.com/practicaldev/image/fetch/s--wqIVWs02--/c_limit%2Cf_auto%2Cfl_progressive%2Cq_auto%2Cw_800/https://dev-to-uploads.s3.amazonaws.com/uploads/articles/u210wy25cpfznmoz4vo2.png" alt="Image description" width="800" height="41"&gt;&lt;/a&gt;&lt;/p&gt;

&lt;ul&gt;
&lt;li&gt;
&lt;em&gt;After this, configure a runtime class for Kata Containers by creating a Kubernetes resource of &lt;strong&gt;kind: RuntimeClass&lt;/strong&gt;.&lt;/em&gt;
&lt;/li&gt;
&lt;/ul&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight plaintext"&gt;&lt;code&gt;# cat runtimeclass.yaml
kind: RuntimeClass
apiVersion: node.k8s.io/v1
metadata:
    name: kata-qemu
handler: kata-qemu
overhead:
    podFixed:
        memory: "160Mi"
        cpu: "250m"
scheduling:
  nodeSelector:
    katacontainers.io/kata-runtime: "true"
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;





&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight plaintext"&gt;&lt;code&gt;# kubectl apply -f runtimeclass.yaml
# kubectl get runtimeclass
# kubectl describe runtimeclass kata-qemu
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;p&gt;&lt;a href="https://res.cloudinary.com/practicaldev/image/fetch/s--LK3LJWxa--/c_limit%2Cf_auto%2Cfl_progressive%2Cq_auto%2Cw_800/https://dev-to-uploads.s3.amazonaws.com/uploads/articles/me5xfhspa7gta3xejfuw.png" class="article-body-image-wrapper"&gt;&lt;img src="https://res.cloudinary.com/practicaldev/image/fetch/s--LK3LJWxa--/c_limit%2Cf_auto%2Cfl_progressive%2Cq_auto%2Cw_800/https://dev-to-uploads.s3.amazonaws.com/uploads/articles/me5xfhspa7gta3xejfuw.png" alt="Image description" width="670" height="887"&gt;&lt;/a&gt;&lt;/p&gt;

&lt;ul&gt;
&lt;li&gt;
&lt;em&gt;Test the runtime class by creating an Nginx pod through it&lt;/em&gt;
&lt;/li&gt;
&lt;/ul&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight plaintext"&gt;&lt;code&gt;# cat nginx-kata.yaml
apiVersion: v1
kind: Pod
metadata:
  name: nginx-kata1
spec:
  runtimeClassName: kata-qemu
  containers:
  - name: nginx
    image: registry.cloudyuga.guru/library/nginx:latest
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;





&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight plaintext"&gt;&lt;code&gt;# kubectl apply -f nginx-kata.yaml
# kubectl get pods
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;p&gt;&lt;a href="https://res.cloudinary.com/practicaldev/image/fetch/s--zmNVEsL2--/c_limit%2Cf_auto%2Cfl_progressive%2Cq_auto%2Cw_800/https://dev-to-uploads.s3.amazonaws.com/uploads/articles/1ar5tcd83nzi5se824jx.png" class="article-body-image-wrapper"&gt;&lt;img src="https://res.cloudinary.com/practicaldev/image/fetch/s--zmNVEsL2--/c_limit%2Cf_auto%2Cfl_progressive%2Cq_auto%2Cw_800/https://dev-to-uploads.s3.amazonaws.com/uploads/articles/1ar5tcd83nzi5se824jx.png" alt="Image description" width="616" height="47"&gt;&lt;/a&gt;&lt;/p&gt;

&lt;p&gt;&lt;strong&gt;Use Cases for Kata Containers&lt;/strong&gt;&lt;/p&gt;

&lt;p&gt;&lt;em&gt;Multi-Tenancy Environments:&lt;br&gt;
Isolation in Shared Spaces: Kata Containers shines in multi-tenant environments, where ensuring strong isolation between workloads is paramount. The micro-VM architecture provides a secure boundary for each tenant, mitigating the risks of unintended interactions.&lt;/em&gt;&lt;/p&gt;

&lt;p&gt;&lt;em&gt;Security-Critical Workloads:&lt;br&gt;
Safeguarding Sensitive Applications: Industries handling sensitive data or compliance-driven workloads, such as finance and healthcare, benefit from Kata Containers' enhanced security measures. The micro-VM approach adds an extra layer of protection to critical applications.&lt;/em&gt;&lt;/p&gt;

&lt;p&gt;&lt;strong&gt;Conclusion:&lt;/strong&gt;&lt;br&gt;
&lt;em&gt;As containerization continues to redefine modern software development, Kata Containers emerges as a beacon, seamlessly blending the agility of containers with the security of virtual machines. Its innovative approach, marked by the adoption of micro-VMs, positions Kata Containers as a powerful tool for organizations seeking to fortify their containerized workloads without compromising on speed and efficiency. The future of container security looks promising with Kata Containers leading the way.&lt;/em&gt;&lt;/p&gt;

</description>
    </item>
    <item>
      <title>Code your Concepts: A Guide to Diagrams As Code in DevOps World</title>
      <dc:creator>AMIT CHATURVEDI</dc:creator>
      <pubDate>Tue, 19 Dec 2023 13:21:49 +0000</pubDate>
      <link>https://dev.to/gittest20202/code-your-concepts-a-guide-to-diagrams-as-code-in-devops-world-ag</link>
      <guid>https://dev.to/gittest20202/code-your-concepts-a-guide-to-diagrams-as-code-in-devops-world-ag</guid>
      <description>&lt;p&gt;&lt;strong&gt;Creating diagrams as code has become a popular practice, especially in the context of infrastructure as code (IaC) and documentation. In this blog, we'll explore some of the popular tools for creating diagrams as code and discuss how they can be used in different scenarios.&lt;/strong&gt;&lt;/p&gt;

&lt;p&gt;&lt;a href="https://diagrams.mingrammer.com/docs/getting-started/installation"&gt;&lt;strong&gt;&lt;em&gt;- Diagram&lt;/em&gt;&lt;/strong&gt;&lt;/a&gt;&lt;br&gt;
&lt;em&gt;Using Diagrams we can create diagrams for multiple environments like AWS, Azure, GCP, and Kubernetes.&lt;/em&gt;&lt;/p&gt;

&lt;p&gt;&lt;strong&gt;Requirements&lt;/strong&gt;&lt;/p&gt;

&lt;p&gt;&lt;em&gt;It requires Python 3.6 or higher, so check your Python version first. It uses Graphviz to render the diagram, so you need to install Graphviz to use diagrams. After installing Graphviz (or if you already have it), install diagrams.&lt;/em&gt;&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight plaintext"&gt;&lt;code&gt;# pip install diagrams

(On Ubuntu)
# apt install graphviz

(On RHEL)
# yum install graphviz
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;p&gt;&lt;em&gt;Create Diagram For Kubernetes&lt;/em&gt;&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight plaintext"&gt;&lt;code&gt;from diagrams import Cluster, Diagram, Node, Edge
from diagrams.k8s.compute import Pod
from diagrams.k8s.compute import Deploy
from diagrams.k8s.network import Ing
from diagrams.k8s.group import NS
from diagrams.k8s.podconfig import Secret
from diagrams.k8s.storage import PVC
from diagrams.k8s.rbac import CRole
from diagrams.k8s.rbac import CRB

with Diagram("Kubernetes Cluster", show=False):
  with Cluster("Kubernetes"):
    with Cluster("Rbac"):
      rbac = CRB("")
      with Cluster("Role"):
        role = CRole("")
    with Cluster("App"):
      ns = NS("")
      with Cluster("Ingress"):
        ingress = Ing("")
        with Cluster("Secret"):
          secrets = Secret("")
        with Cluster("App"):
          deploy = Deploy("")
          with Cluster("Pods"):
            pod = Pod("")
        with Cluster("PVC"):
           pvc = PVC("")
  rbac &amp;gt;&amp;gt; role &amp;gt;&amp;gt; ns
  ns &amp;gt;&amp;gt; deploy &amp;gt;&amp;gt; pod &amp;gt;&amp;gt; pvc
  pod &amp;gt;&amp;gt; secrets
  deploy &amp;gt;&amp;gt; ingress
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;p&gt;&lt;a href="https://media.dev.to/cdn-cgi/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2Fgie346uwohac4fu9yvqb.png" class="article-body-image-wrapper"&gt;&lt;img src="https://media.dev.to/cdn-cgi/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2Fgie346uwohac4fu9yvqb.png" alt="Image description" width="800" height="436"&gt;&lt;/a&gt;&lt;/p&gt;

&lt;p&gt;&lt;a href="https://mermaid.js.org/"&gt;&lt;strong&gt;- Mermaid&lt;/strong&gt;&lt;/a&gt; &lt;br&gt;
&lt;em&gt;Mermaid is a JavaScript-based diagramming and charting tool that allows users to create diagrams and flowcharts using a simple and human-readable text-based syntax. It is particularly popular for its integration with Markdown, making it easy to embed diagrams directly into documentation, README files, or other text-based formats.&lt;/em&gt;&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight plaintext"&gt;&lt;code&gt;graph LR;
 IpBlockS([IpBlock])-. Traffic Out From &amp;lt;br&amp;gt; The Cluster .-&amp;gt;|![Ingress Image](images/ingress.png)| ingress;
 PodNetworkS([PodNetwork])-. Traffic From &amp;lt;br&amp;gt; PodNetwork  .-&amp;gt;|![Ingress Image](images/ingress.png)| ingress;
 NameSpaceNetworkS([NameSpaceNetwork])-. Traffic From &amp;lt;br&amp;gt; NameSpaceNetwork  .-&amp;gt;|![Ingress Image](images/ingress.png)| ingress;
 ingress .-&amp;gt;|routing &amp;lt;br&amp;gt; rule|namespace[namespace];
 subgraph cluster
 ingress;
 namespace .-&amp;gt;|routing &amp;lt;br&amp;gt; rule|egress[Egress];
 end
 egress[Egress]-. Traffic Out To &amp;lt;br&amp;gt; The Cluster  .-&amp;gt;IpBlockD([IpBlock]);
 egress[Egress]-. Traffic To &amp;lt;br&amp;gt; PodNetwork  .-&amp;gt;PodNetworkD([PodNetwork]);
 egress[Egress]-. Traffic To &amp;lt;br&amp;gt; NameSpaceNetwork  .-&amp;gt;NameSpaceNetworkD([NameSpaceNetwork]);
 classDef plain fill:#ddd,stroke:#fff,stroke-width:4px,color:#000;
 classDef k8s fill:#326ce5,stroke:#fff,stroke-width:4px,color:#fff;
 classDef cluster fill:#fff,stroke:#bbb,stroke-width:2px,color:#326ce5;
 class ingress,namespace,egress k8s;
 class client plain;
 class cluster cluster;
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;p&gt;&lt;a href="https://media.dev.to/cdn-cgi/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2F87h3ndfuup82uy33i99o.png" class="article-body-image-wrapper"&gt;&lt;img src="https://media.dev.to/cdn-cgi/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2F87h3ndfuup82uy33i99o.png" alt="Image description" width="800" height="340"&gt;&lt;/a&gt;&lt;/p&gt;

&lt;p&gt;&lt;a href="https://plantuml.com/"&gt;&lt;strong&gt;&lt;em&gt;- PlantUML&lt;/em&gt;&lt;/strong&gt;&lt;/a&gt;&lt;br&gt;
&lt;em&gt;PlantUML is an open-source tool that allows users to create Unified Modeling Language (UML) diagrams using a simple and human-readable text-based syntax. UML diagrams are widely used in software development to visually represent different aspects of a system's architecture, design, and behavior. PlantUML makes it easy to express complex UML diagrams in a concise and maintainable manner.&lt;/em&gt;&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight plaintext"&gt;&lt;code&gt;@startyaml
!theme lightgray
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx-deployment
  labels:
    app: nginx
spec:
  replicas: 3
  selector:
    matchLabels:
      app: nginx
  template:
    metadata:
      labels:
        app: nginx
    spec:
      containers:
      - name: nginx
        image: nginx:1.14.2
        ports:
        - containerPort: 80
@endyaml
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;p&gt;&lt;a href="https://media.dev.to/cdn-cgi/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2Fs3rx9rla8hpo9nqhvi4j.png" class="article-body-image-wrapper"&gt;&lt;img src="https://media.dev.to/cdn-cgi/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2Fs3rx9rla8hpo9nqhvi4j.png" alt="Image description" width="800" height="140"&gt;&lt;/a&gt;&lt;/p&gt;

&lt;blockquote&gt;
&lt;p&gt;&lt;em&gt;We have a variety of tools at our disposal, but I've identified these specific ones for drawing diagrams. I've seamlessly integrated them into my GitLab CI/CD pipeline. I encourage you to give them a try; they prove to be highly effective for creating and managing our diagrams.&lt;/em&gt;&lt;/p&gt;
&lt;/blockquote&gt;

&lt;p&gt;&lt;strong&gt;&lt;em&gt;Hope you will like this blog and start using it in your CI/CD pipelines.&lt;/em&gt;&lt;/strong&gt;&lt;/p&gt;

</description>
    </item>
    <item>
      <title>Kubernetes Package Toolkit</title>
      <dc:creator>AMIT CHATURVEDI</dc:creator>
      <pubDate>Fri, 15 Dec 2023 10:38:30 +0000</pubDate>
      <link>https://dev.to/gittest20202/kubernetes-package-toolkit-6g9</link>
      <guid>https://dev.to/gittest20202/kubernetes-package-toolkit-6g9</guid>
      <description>&lt;p&gt;&lt;a href="https://media.dev.to/cdn-cgi/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2Fdraitekaixbszegxfvm5.png" class="article-body-image-wrapper"&gt;&lt;img src="https://media.dev.to/cdn-cgi/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2Fdraitekaixbszegxfvm5.png" alt="Image description" width="200" height="200"&gt;&lt;/a&gt;&lt;br&gt;
&lt;em&gt;&lt;strong&gt;kpt stands for Kubernetes Package Toolkit. It is a set of tools for working with and managing Kubernetes manifests as packages. kpt helps you to organize, customize, share, and manage Kubernetes manifests in a more modular and reusable way.&lt;/strong&gt;&lt;/em&gt;&lt;/p&gt;

&lt;p&gt;&lt;strong&gt;Key features of kpt include:&lt;/strong&gt;&lt;/p&gt;

&lt;p&gt;&lt;strong&gt;Package Management:&lt;/strong&gt; &lt;em&gt;kpt allows you to organize your Kubernetes configuration files into packages. A package is a directory containing one or more Kubernetes manifests, and it can be versioned and shared.&lt;/em&gt;&lt;/p&gt;

&lt;p&gt;&lt;strong&gt;Declarative Configuration:&lt;/strong&gt; &lt;em&gt;It encourages a declarative approach to configuration, where you describe the desired state of your Kubernetes resources, making it easier to manage configurations across different environments.&lt;/em&gt;&lt;/p&gt;

&lt;p&gt;&lt;strong&gt;Resource Configuration:&lt;/strong&gt; &lt;em&gt;kpt provides commands to work with and transform Kubernetes resources. This includes adding, updating, or removing fields from manifests.&lt;/em&gt;&lt;/p&gt;

&lt;p&gt;&lt;strong&gt;GitOps Workflow:&lt;/strong&gt; &lt;em&gt;It aligns with the GitOps approach, where changes to your infrastructure are driven by Git commits. You can use kpt to fetch, update, and apply changes to your Kubernetes manifests stored in Git repositories.&lt;/em&gt;&lt;/p&gt;

&lt;p&gt;&lt;strong&gt;Template Functions:&lt;/strong&gt; &lt;em&gt;kpt supports template functions that allow you to parameterize and customize your manifests based on different environments or requirements.&lt;/em&gt;&lt;/p&gt;

&lt;p&gt;&lt;strong&gt;Resource Composition:&lt;/strong&gt; &lt;em&gt;You can compose and customize your Kubernetes manifests by using kpt functions and tools.&lt;/em&gt;&lt;/p&gt;

&lt;p&gt;&lt;a href="https://media.dev.to/cdn-cgi/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2F19qhkgaou2zvkl5vh8lh.png" class="article-body-image-wrapper"&gt;&lt;img src="https://media.dev.to/cdn-cgi/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2F19qhkgaou2zvkl5vh8lh.png" alt="Image description" width="300" height="74"&gt;&lt;/a&gt;&lt;/p&gt;

&lt;p&gt;&lt;strong&gt;System Requirements&lt;/strong&gt;&lt;/p&gt;

&lt;ul&gt;
&lt;li&gt;KPT must be installed
&lt;/li&gt;
&lt;/ul&gt;
&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight plaintext"&gt;&lt;code&gt; #wget https://github.com/GoogleContainerTools/kpt/releases/download/v1.0.0-beta.44/kpt_linux_amd64
 #chmod +x kpt_linux_amd64
 #cp kpt_linux_amd64 /usr/local/bin/kpt
 #kpt version
 1.0.0-beta.44
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;


&lt;ul&gt;
&lt;li&gt;Git must be installed
&lt;/li&gt;
&lt;/ul&gt;
&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight plaintext"&gt;&lt;code&gt;# git version
git version 2.40.1
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;


&lt;ul&gt;
&lt;li&gt;Kubernetes cluster
&lt;/li&gt;
&lt;/ul&gt;
&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight plaintext"&gt;&lt;code&gt;root@master:~/nginx# kubectl get nodes
NAME                        STATUS   ROLES           AGE   VERSION
master.homecluster.store    Ready    control-plane   38d   v1.26.0
worker1.homecluster.store   Ready    &amp;lt;none&amp;gt;          38d   v1.26.0
worker2.homecluster.store   Ready    &amp;lt;none&amp;gt;          38d   v1.26.0

&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;


&lt;blockquote&gt;
&lt;p&gt;kpt is fully integrated with Git and enables forking, rebasing and versioning a package of configuration using the underlying Git version control system.&lt;/p&gt;
&lt;/blockquote&gt;

&lt;p&gt;&lt;em&gt;First, let’s fetch the kpt package from Git to your local filesystem:&lt;/em&gt;&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight plaintext"&gt;&lt;code&gt;# kpt pkg get https://github.com/GoogleContainerTools/kpt/package-examples/nginx@v0.9
# cd nginx
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;blockquote&gt;
&lt;p&gt;kpt pkg commands provide the functionality for working with packages on Git and on your local filesystem.&lt;br&gt;
&lt;/p&gt;
&lt;/blockquote&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight plaintext"&gt;&lt;code&gt;#kpt pkg tree
Package "nginx"
├── [Kptfile]  Kptfile nginx
├── [deployment.yaml]  Deployment my-nginx
└── [svc.yaml]  Service my-nginx-svc
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;p&gt;&lt;strong&gt;Apply the Package&lt;/strong&gt;&lt;/p&gt;

&lt;blockquote&gt;
&lt;p&gt;kpt live commands provide the functionality for deploying packages to a Kubernetes cluster.&lt;/p&gt;
&lt;/blockquote&gt;

&lt;p&gt;&lt;em&gt;Initialize the kpt package:&lt;/em&gt;&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight plaintext"&gt;&lt;code&gt; # kpt live init

&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;p&gt;&lt;em&gt;Apply the resources to the cluster:&lt;/em&gt;&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight plaintext"&gt;&lt;code&gt;#root@master:~/nginx/nginx# kubectl get pods
no resource found
# kpt live apply --reconcile-timeout=15m
inventory update started
inventory update finished
apply phase started
service/my-nginx-svc apply successful
deployment.apps/my-nginx apply successful
apply phase finished
reconcile phase started
service/my-nginx-svc reconcile successful
deployment.apps/my-nginx reconcile pending
deployment.apps/my-nginx reconcile successful
reconcile phase finished
inventory update started
inventory update finished
apply result: 2 attempted, 2 successful, 0 skipped, 0 failed
reconcile result: 2 attempted, 2 successful, 0 skipped, 0 failed, 0 timed out

root@master:~/nginx/nginx# kubectl get pods
NAME                        READY   STATUS    RESTARTS   AGE
my-nginx-66f8758855-57q7v   1/1     Running   0          81s
my-nginx-66f8758855-5zj88   1/1     Running   0          81s
my-nginx-66f8758855-lbpmq   1/1     Running   0          81s
my-nginx-66f8758855-zp6nm   1/1     Running   0          81s

&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;p&gt;&lt;em&gt;Delete the package from the cluster:&lt;/em&gt;&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight plaintext"&gt;&lt;code&gt;# kpt live destroy
delete phase started
deployment.apps/my-nginx delete successful
service/my-nginx-svc delete successful
delete phase finished
reconcile phase started
deployment.apps/my-nginx reconcile successful
service/my-nginx-svc reconcile successful
reconcile phase finished
inventory update started
inventory update finished
delete result: 2 attempted, 2 successful, 0 skipped, 0 failed
reconcile result: 2 attempted, 2 successful, 0 skipped, 0 failed, 0 timed out

&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



</description>
    </item>
    <item>
      <title>Automating Container Image Security with GitLab: A Comprehensive Guide using Dockerlinter, Conftest, SNYK API, and Docker Hub</title>
      <dc:creator>AMIT CHATURVEDI</dc:creator>
      <pubDate>Thu, 14 Dec 2023 13:13:56 +0000</pubDate>
      <link>https://dev.to/gittest20202/automating-container-image-security-with-gitlab-a-comprehensive-guide-using-dockerlinter-conftest-snyk-api-and-docker-hub-2fn0</link>
      <guid>https://dev.to/gittest20202/automating-container-image-security-with-gitlab-a-comprehensive-guide-using-dockerlinter-conftest-snyk-api-and-docker-hub-2fn0</guid>
      <description>&lt;p&gt;&lt;strong&gt;Introduction:&lt;/strong&gt;&lt;br&gt;
&lt;em&gt;As containerized applications become integral to modern DevOps workflows, ensuring the security of container images is paramount. This guide explores a GitLab CI pipeline setup that seamlessly integrates security scanning using the SNYK API, validation with Conftest, artifact management on Docker Hub, and Dockerfile linting with Dockerlinter.&lt;/em&gt;&lt;/p&gt;

&lt;p&gt;&lt;strong&gt;Key Components and Tools:&lt;/strong&gt;&lt;/p&gt;

&lt;p&gt;&lt;strong&gt;Conftest&lt;/strong&gt;: &lt;em&gt;Policy as Code (PaC) tool for validating configuration files against Rego policies.&lt;/em&gt;&lt;/p&gt;

&lt;p&gt;&lt;strong&gt;SNYK Secure API:&lt;/strong&gt; &lt;em&gt;Leveraging Snyk for vulnerability scanning and image security.&lt;/em&gt;&lt;/p&gt;

&lt;p&gt;&lt;strong&gt;Docker Hub:&lt;/strong&gt; &lt;em&gt;Centralized container image registry for storing and managing Docker images.&lt;/em&gt;&lt;/p&gt;

&lt;p&gt;&lt;strong&gt;DockerLinter:&lt;/strong&gt; &lt;em&gt;Dockerlinter is a Dockerfile linter that focuses on best practices and recommendations from the official Docker documentation&lt;/em&gt;&lt;/p&gt;

&lt;p&gt;&lt;strong&gt;&lt;em&gt;DockerFile Sample&lt;/em&gt;&lt;/strong&gt;&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight plaintext"&gt;&lt;code&gt;# Use an official Python runtime as a parent image
FROM python:3.8-slim

# Set the working directory to /app
WORKDIR /app

# Copy the current directory contents into the container at /app
COPY . /app

# Install any needed packages specified in requirements.txt
RUN pip install --no-cache-dir -r requirements.txt

# Make port 80 available to the world outside this container
EXPOSE 80

# Define environment variable
ENV NAME World

# Run app.py when the container launches
CMD ["python", "app.py"]

&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;p&gt;&lt;strong&gt;Sample Rego Code to Validate DockerFile&lt;/strong&gt;&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight plaintext"&gt;&lt;code&gt;package main

suspicious_env_keys = [
    "passwd",
    "password"
]

# Looking for suspicious environment variable settings
deny[msg] {
    dockerenvs := [val | input[i].Cmd == "env"; val := input[i].Value]
    dockerenv := dockerenvs[_]
    envvar := dockerenv[_]
    lower(envvar) == suspicious_env_keys[_]
    msg = sprintf("Potential secret in ENV found: %s", [envvar])
}

&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;blockquote&gt;
&lt;p&gt;You can create your own Rego code to enforce granular restrictions or policies; to apply them in a more fine-grained manner, customize the Rego policy accordingly. &lt;/p&gt;
&lt;/blockquote&gt;

&lt;p&gt;&lt;strong&gt;Here is the pipeline&lt;/strong&gt;&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight plaintext"&gt;&lt;code&gt;---
variables:
  IMAGE_CONFTEST: "omvedi25/conftest:v0.2"
  IMAGE_SNYK: "omvedi25/snyk:v0.1"
  IMAGE_LINTER: "omvedi25/dockerlinter:v0.0"

stages:
  - lint
  - validation
  - scan
  - push

linting:Dockerfile:
  image: "$IMAGE_LINTER"
  stage: lint
  script:
    - dockerlinter -f Dockerfile -e

validation:Dockerfile:
  image: "$IMAGE_CONFTEST"
  stage: validation
  script:
    - conftest test -p /policy Dockerfile

scanning:Image:
  image: "$IMAGE_SNYK"
  stage: scan
  script:
    - podman build -t test .
    - image_id=`podman images --format "table {{.Repository}}\t{{.ID}}" | awk '$1 == "localhost/test" {print $2}'`
    - podman save $image_id -o test.tar
    - snyk auth $SNYK_TOKEN
    - snyk container test docker-archive:test.tar --json &amp;gt; results.json || true
    - snyk-to-html -i results.json -o results.html
  artifacts:
    paths:
      - results.html
    expire_in: 1 week

pushing:Image:
  image: "$IMAGE_SNYK"
  stage: push
  script:
    - podman login -u $USERNAME -p $PASSWORD docker.io 
    - podman build -t test .
    - podman tag localhost/test:latest omvedi25/$CI_PROJECT_NAME:$CI_COMMIT_SHA
    - podman images
    - podman push omvedi25/$CI_PROJECT_NAME:$CI_COMMIT_SHA
  when: manual  

&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;p&gt;&lt;strong&gt;Pipeline Stages&lt;/strong&gt;&lt;/p&gt;

&lt;p&gt;&lt;a href="https://media.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2Fdsgaep27msp5uycxom26.png" class="article-body-image-wrapper"&gt;&lt;img src="https://media.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2Fdsgaep27msp5uycxom26.png" alt="Image description"&gt;&lt;/a&gt;&lt;/p&gt;

&lt;p&gt;&lt;em&gt;Linting Output&lt;/em&gt;&lt;/p&gt;

&lt;p&gt;&lt;a href="https://media.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2Fjvqhhmsxh80ivmxmdbkd.png" class="article-body-image-wrapper"&gt;&lt;img src="https://media.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2Fjvqhhmsxh80ivmxmdbkd.png" alt="Image description"&gt;&lt;/a&gt;&lt;/p&gt;

&lt;p&gt;&lt;em&gt;Validation Output&lt;/em&gt;&lt;/p&gt;

&lt;p&gt;&lt;a href="https://media.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2Fosdrpvo6wp0tfladlzps.png" class="article-body-image-wrapper"&gt;&lt;img src="https://media.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2Fosdrpvo6wp0tfladlzps.png" alt="Image description"&gt;&lt;/a&gt;&lt;/p&gt;

&lt;p&gt;&lt;em&gt;SNYK HTML REPORT&lt;/em&gt;&lt;/p&gt;

&lt;p&gt;&lt;a href="https://media.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2F9sx3bnlqnahhimzhzgpw.png" class="article-body-image-wrapper"&gt;&lt;img src="https://media.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2F9sx3bnlqnahhimzhzgpw.png" alt="Image description"&gt;&lt;/a&gt;&lt;/p&gt;

&lt;p&gt;&lt;em&gt;PUSH IMAGE&lt;/em&gt;&lt;/p&gt;

&lt;p&gt;&lt;a href="https://media.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2Fc5xhii7aesh5395pzacu.png" class="article-body-image-wrapper"&gt;&lt;img src="https://media.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2Fc5xhii7aesh5395pzacu.png" alt="Image description"&gt;&lt;/a&gt;&lt;/p&gt;

</description>
    </item>
  </channel>
</rss>
