<?xml version="1.0" encoding="UTF-8"?>
<rss version="2.0" xmlns:atom="http://www.w3.org/2005/Atom" xmlns:dc="http://purl.org/dc/elements/1.1/">
  <channel>
    <title>DEV Community: Naresh Maharaj</title>
    <description>The latest articles on DEV Community by Naresh Maharaj (@naresh_maharaj_c4b8fbd4aa).</description>
    <link>https://dev.to/naresh_maharaj_c4b8fbd4aa</link>
    <image>
      <url>https://media2.dev.to/dynamic/image/width=90,height=90,fit=cover,gravity=auto,format=auto/https:%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Fuser%2Fprofile_image%2F646414%2F57b68ecf-11e0-4c4e-95e8-85c88115b5dd.png</url>
      <title>DEV Community: Naresh Maharaj</title>
      <link>https://dev.to/naresh_maharaj_c4b8fbd4aa</link>
    </image>
    <atom:link rel="self" type="application/rss+xml" href="https://dev.to/feed/naresh_maharaj_c4b8fbd4aa"/>
    <language>en</language>
    <item>
      <title>How to Copy GCP Storage Buckets Between Accounts Without Using the Internet</title>
      <dc:creator>Naresh Maharaj</dc:creator>
      <pubDate>Wed, 12 Feb 2025 17:00:11 +0000</pubDate>
      <link>https://dev.to/naresh_maharaj_c4b8fbd4aa/how-to-copy-gcp-storage-buckets-between-accounts-without-using-the-internet-2o6f</link>
      <guid>https://dev.to/naresh_maharaj_c4b8fbd4aa/how-to-copy-gcp-storage-buckets-between-accounts-without-using-the-internet-2o6f</guid>
      <description>&lt;p&gt;In this article, we’ll walk you through the process of copying a Google Cloud Platform (GCP) storage bucket and its contents from one GCP account to another, entirely within GCP’s internal network—without relying on the internet. This method is ideal for securely transferring large amounts of data between accounts while maintaining high performance and security.&lt;/p&gt;

&lt;h2&gt;
  
  
  Prerequisites
&lt;/h2&gt;

&lt;p&gt;Before we begin, ensure you have the following:&lt;/p&gt;

&lt;ol&gt;
&lt;li&gt;
&lt;strong&gt;Two GCP Accounts&lt;/strong&gt;: You’ll need access to both the source and destination GCP accounts.&lt;/li&gt;
&lt;li&gt;
&lt;strong&gt;Projects in Each Account&lt;/strong&gt;:

&lt;ul&gt;
&lt;li&gt;In the source account, create a project named &lt;code&gt;SourceBucketProject&lt;/code&gt;.&lt;/li&gt;
&lt;li&gt;In the destination account, create a project named &lt;code&gt;DestinationBucketProject&lt;/code&gt;.&lt;/li&gt;
&lt;/ul&gt;
&lt;/li&gt;
&lt;/ol&gt;

&lt;p&gt;In account #1 I have a project named &lt;code&gt;SourceBucketProject&lt;/code&gt;&lt;/p&gt;

&lt;p&gt;&lt;a href="https://media2.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2Fbywuibqsxkkudkh27ehn.png" class="article-body-image-wrapper"&gt;&lt;img src="https://media2.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2Fbywuibqsxkkudkh27ehn.png" alt="Image description" width="740" height="184"&gt;&lt;/a&gt;&lt;/p&gt;

&lt;p&gt;In the destination account, I created a project named &lt;code&gt;DestinationBucketProject&lt;/code&gt;&lt;/p&gt;

&lt;p&gt;&lt;a href="https://media2.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2F3mfg238fbyokv30f7b4i.png" class="article-body-image-wrapper"&gt;&lt;img src="https://media2.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2F3mfg238fbyokv30f7b4i.png" alt="Image description" width="738" height="184"&gt;&lt;/a&gt;&lt;/p&gt;

&lt;h2&gt;
  
  
  Step 1: Create a Source Bucket in Account #1
&lt;/h2&gt;

&lt;ol&gt;
&lt;li&gt;Navigate to &lt;strong&gt;Cloud Storage&lt;/strong&gt; &amp;gt; &lt;strong&gt;Buckets&lt;/strong&gt; in the source account (&lt;code&gt;SourceBucketProject&lt;/code&gt;).&lt;/li&gt;
&lt;li&gt;Click &lt;strong&gt;Create&lt;/strong&gt; to create a new bucket. Name it &lt;code&gt;my-src-bucket-001&lt;/code&gt;.&lt;/li&gt;
&lt;li&gt;Inside the bucket, create a folder named &lt;code&gt;folder1&lt;/code&gt; and upload a sample file to it.&lt;/li&gt;
&lt;/ol&gt;

&lt;p&gt;&lt;a href="https://media2.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2F9j9ywbhewgg8yuj7p9aw.png" class="article-body-image-wrapper"&gt;&lt;img src="https://media2.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2F9j9ywbhewgg8yuj7p9aw.png" alt="Image description" width="800" height="178"&gt;&lt;/a&gt;&lt;/p&gt;

&lt;h2&gt;
  
  
  Step 2: Set Up a Transfer Job in Account #2
&lt;/h2&gt;

&lt;ol&gt;
&lt;li&gt;In the destination account (&lt;code&gt;DestinationBucketProject&lt;/code&gt;), search for &lt;strong&gt;Transfer Job&lt;/strong&gt; in the GCP Console.&lt;/li&gt;
&lt;/ol&gt;

&lt;p&gt;&lt;a href="https://media2.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2F460erpkifbs17nshmo5i.png" class="article-body-image-wrapper"&gt;&lt;img src="https://media2.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2F460erpkifbs17nshmo5i.png" alt="Image description" width="323" height="126"&gt;&lt;/a&gt;&lt;/p&gt;

&lt;ol&gt;
&lt;li&gt;Click &lt;strong&gt;Create Transfer Job&lt;/strong&gt;.&lt;/li&gt;
&lt;li&gt;Set both the source and destination as &lt;strong&gt;Google Cloud Storage&lt;/strong&gt;.&lt;/li&gt;
&lt;/ol&gt;

&lt;p&gt;&lt;a href="https://media2.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2F99rjr1k867g9w2ih8s86.png" class="article-body-image-wrapper"&gt;&lt;img src="https://media2.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2F99rjr1k867g9w2ih8s86.png" alt="Image description" width="549" height="434"&gt;&lt;/a&gt;&lt;/p&gt;

&lt;ol&gt;
&lt;li&gt;&lt;p&gt;Specify the source bucket as &lt;code&gt;my-src-bucket-001&lt;/code&gt; and choose or create a destination bucket with a unique name (e.g., &lt;code&gt;my-dest-bucket-010&lt;/code&gt;).&lt;/p&gt;&lt;/li&gt;
&lt;li&gt;&lt;p&gt;Start the transfer process.&lt;/p&gt;&lt;/li&gt;
&lt;/ol&gt;

&lt;p&gt;At this point, you may encounter an error related to permissions. This is expected and will be resolved in the next steps.&lt;/p&gt;

&lt;p&gt;&lt;a href="https://media2.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2Fcfoyd17rym1er6hhaqur.png" class="article-body-image-wrapper"&gt;&lt;img src="https://media2.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2Fcfoyd17rym1er6hhaqur.png" alt="Image description" width="378" height="286"&gt;&lt;/a&gt;&lt;/p&gt;

&lt;h2&gt;
  
  
  Step 3: Grant Necessary Permissions
&lt;/h2&gt;

&lt;ol&gt;
&lt;li&gt;&lt;p&gt;Note the &lt;strong&gt;Principal Service Account&lt;/strong&gt; mentioned in the error message. It will look something like this:&lt;br&gt;&lt;br&gt;
&lt;code&gt;project-1069962656103@storage-transfer-service.iam.gserviceaccount.com&lt;/code&gt;.&lt;/p&gt;&lt;/li&gt;
&lt;li&gt;&lt;p&gt;Go back to the source account (&lt;code&gt;SourceBucketProject&lt;/code&gt;) and navigate to &lt;strong&gt;IAM &amp;amp; Admin&lt;/strong&gt; &amp;gt; &lt;strong&gt;Roles&lt;/strong&gt;.&lt;/p&gt;&lt;/li&gt;
&lt;li&gt;
&lt;p&gt;Create a new custom role named &lt;code&gt;GsBucketDataTransferRead&lt;/code&gt;. Add the following permissions (which are derived from built-in roles like &lt;strong&gt;Storage Object Admin&lt;/strong&gt; and &lt;strong&gt;Storage Legacy Bucket Reader&lt;/strong&gt;):&lt;/p&gt;

&lt;ul&gt;
&lt;li&gt;&lt;code&gt;resourcemanager.projects.get&lt;/code&gt;&lt;/li&gt;
&lt;li&gt;&lt;code&gt;storage.buckets.get&lt;/code&gt;&lt;/li&gt;
&lt;li&gt;&lt;code&gt;storage.folders.get&lt;/code&gt;&lt;/li&gt;
&lt;li&gt;&lt;code&gt;storage.folders.list&lt;/code&gt;&lt;/li&gt;
&lt;li&gt;&lt;code&gt;storage.managedFolders.get&lt;/code&gt;&lt;/li&gt;
&lt;li&gt;&lt;code&gt;storage.managedFolders.list&lt;/code&gt;&lt;/li&gt;
&lt;li&gt;&lt;code&gt;storage.multipartUploads.list&lt;/code&gt;&lt;/li&gt;
&lt;li&gt;&lt;code&gt;storage.objects.get&lt;/code&gt;&lt;/li&gt;
&lt;li&gt;&lt;code&gt;storage.objects.list&lt;/code&gt;&lt;/li&gt;
&lt;/ul&gt;
&lt;/li&gt;
&lt;li&gt;&lt;p&gt;Save the custom role.&lt;/p&gt;&lt;/li&gt;
&lt;li&gt;&lt;p&gt;Navigate to &lt;strong&gt;IAM &amp;amp; Admin&lt;/strong&gt; &amp;gt; &lt;strong&gt;IAM&lt;/strong&gt; and click &lt;strong&gt;Grant Access&lt;/strong&gt;.&lt;/p&gt;&lt;/li&gt;
&lt;li&gt;&lt;p&gt;Add the Principal Service Account (noted earlier) and assign it the custom role &lt;code&gt;GsBucketDataTransferRead&lt;/code&gt;.&lt;/p&gt;&lt;/li&gt;
&lt;/ol&gt;

&lt;h2&gt;
  
  
  Step 4: Complete the Transfer
&lt;/h2&gt;

&lt;ol&gt;
&lt;li&gt;Return to the destination account (&lt;code&gt;DestinationBucketProject&lt;/code&gt;) and retry the transfer job.&lt;/li&gt;
&lt;li&gt;Once the transfer is complete, you’ll see a confirmation message.&lt;/li&gt;
&lt;/ol&gt;

&lt;p&gt;&lt;a href="https://media2.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2Ff5thq9gr0i1dwh6822o6.png" class="article-body-image-wrapper"&gt;&lt;img src="https://media2.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2Ff5thq9gr0i1dwh6822o6.png" alt="Image description" width="800" height="495"&gt;&lt;/a&gt;&lt;/p&gt;

&lt;h2&gt;
  
  
  Step 5: Verify the Data
&lt;/h2&gt;

&lt;ol&gt;
&lt;li&gt;Navigate to the destination bucket (&lt;code&gt;my-dest-bucket-010&lt;/code&gt;) and confirm that the file from the source bucket has been successfully transferred.&lt;/li&gt;
&lt;/ol&gt;

&lt;p&gt;&lt;a href="https://media2.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2Fo25olc4l52sm2z26tgkk.png" class="article-body-image-wrapper"&gt;&lt;img src="https://media2.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2Fo25olc4l52sm2z26tgkk.png" alt="Image description" width="800" height="281"&gt;&lt;/a&gt;&lt;/p&gt;

&lt;h2&gt;
  
  
  Conclusion
&lt;/h2&gt;

&lt;p&gt;And that’s it! You’ve successfully copied a GCP storage bucket and its contents from one account to another without using the internet. This method leverages GCP’s internal network for secure and efficient data transfer, making it an excellent choice for enterprise-level data migrations.&lt;/p&gt;

&lt;p&gt;We hope this guide has been helpful. If you have any questions or run into issues, feel free to reach out in the comments below!&lt;/p&gt;

</description>
    </item>
    <item>
      <title>Implementing RBAC in Kubernetes: A Real-World Example</title>
      <dc:creator>Naresh Maharaj</dc:creator>
      <pubDate>Tue, 21 Jan 2025 22:08:34 +0000</pubDate>
      <link>https://dev.to/naresh_maharaj_c4b8fbd4aa/implementing-rbac-in-kubernetes-a-real-world-example-4die</link>
      <guid>https://dev.to/naresh_maharaj_c4b8fbd4aa/implementing-rbac-in-kubernetes-a-real-world-example-4die</guid>
      <description>&lt;h1&gt;
  
  
  Implementing RBAC in Kubernetes: A Real-World Example
&lt;/h1&gt;

&lt;p&gt;In this article, we'll dive into how to implement &lt;strong&gt;Role-Based Access Control (RBAC)&lt;/strong&gt; in Kubernetes through a practical, real-world example. While many tutorials cover RBAC concepts, they often remain abstract or overly simplified. By walking through an actual use case, you'll gain a better understanding of how to apply RBAC effectively in a Kubernetes cluster.&lt;/p&gt;

&lt;h2&gt;
  
  
  What is RBAC?
&lt;/h2&gt;

&lt;p&gt;RBAC stands for &lt;strong&gt;Role-Based Access Control&lt;/strong&gt;. It's a method for regulating access to resources in a system based on the roles of individual users within an organization. Essentially, RBAC is a policy-neutral access control mechanism that assigns permissions to users according to their roles. The key components of RBAC in Kubernetes include:&lt;/p&gt;

&lt;ul&gt;
&lt;li&gt;
&lt;strong&gt;Roles&lt;/strong&gt;: Define what actions are allowed on which resources.&lt;/li&gt;
&lt;li&gt;
&lt;strong&gt;RoleBindings&lt;/strong&gt;: Link roles to users, granting them the associated permissions.&lt;/li&gt;
&lt;li&gt;
&lt;strong&gt;ServiceAccounts&lt;/strong&gt;: Represent identities for processes running inside pods.&lt;/li&gt;
&lt;/ul&gt;

&lt;p&gt;RBAC simplifies user assignments and ensures that access control is clear, consistent, and manageable.&lt;/p&gt;

&lt;h2&gt;
  
  
  What is a Role?
&lt;/h2&gt;

&lt;p&gt;In Kubernetes, a &lt;strong&gt;Role&lt;/strong&gt; is a collection of permissions that define what actions are allowed on certain resources. Roles can be applied to users, groups, or even other roles (creating hierarchies).&lt;/p&gt;

&lt;p&gt;For example:&lt;/p&gt;

&lt;ul&gt;
&lt;li&gt;An "Admin" role might have permissions to &lt;strong&gt;create, read, update, and delete&lt;/strong&gt; resources.&lt;/li&gt;
&lt;li&gt;A "Developer" role might only have permissions to &lt;strong&gt;create, read, and update&lt;/strong&gt; resources.&lt;/li&gt;
&lt;li&gt;A "Viewer" role might only have permissions to &lt;strong&gt;read&lt;/strong&gt; resources.&lt;/li&gt;
&lt;/ul&gt;

&lt;p&gt;Roles are a key part of RBAC as they encapsulate the actions users or processes can perform within Kubernetes.&lt;/p&gt;

&lt;h2&gt;
  
  
  What is a RoleBinding?
&lt;/h2&gt;

&lt;p&gt;A &lt;strong&gt;RoleBinding&lt;/strong&gt; is the mechanism that binds a Role to a user, a group of users, or even other roles. A RoleBinding effectively grants the permissions defined in a Role to whoever is bound to it.&lt;/p&gt;

&lt;p&gt;For example:&lt;/p&gt;

&lt;ul&gt;
&lt;li&gt;You can bind the "Admin" role to a user, which grants them full access to Kubernetes resources.&lt;/li&gt;
&lt;li&gt;You can also create a RoleBinding that binds the "Viewer" role to a group of users, allowing them to only read the resources.&lt;/li&gt;
&lt;/ul&gt;

&lt;p&gt;RoleBindings are essential for controlling access to Kubernetes resources, ensuring that users have the correct permissions according to their roles.&lt;/p&gt;

&lt;h2&gt;
  
  
  Real-World Example: A Python Server and Client in Kubernetes
&lt;/h2&gt;

&lt;p&gt;In this example, we will implement RBAC in a Kubernetes environment to control access between two applications: a &lt;strong&gt;server&lt;/strong&gt; and a &lt;strong&gt;client&lt;/strong&gt;.&lt;/p&gt;

&lt;ul&gt;
&lt;li&gt;
&lt;strong&gt;Server&lt;/strong&gt;: This application generates a table of random numbers.&lt;/li&gt;
&lt;li&gt;
&lt;strong&gt;Client&lt;/strong&gt;: This application consumes the random number data from the server.&lt;/li&gt;
&lt;/ul&gt;

&lt;p&gt;We will containerize both applications using &lt;strong&gt;Docker&lt;/strong&gt; and then deploy them to a &lt;strong&gt;Kubernetes cluster&lt;/strong&gt;. Finally, we'll create a &lt;strong&gt;RoleBinding&lt;/strong&gt; to allow the client to access the server.&lt;/p&gt;

&lt;h3&gt;
  
  
  Prerequisites
&lt;/h3&gt;

&lt;p&gt;Before we begin, ensure you have the following installed on your machine:&lt;/p&gt;

&lt;ul&gt;
&lt;li&gt;
&lt;code&gt;Docker&lt;/code&gt; (e.g., Docker Desktop)&lt;/li&gt;
&lt;li&gt;
&lt;code&gt;Kubernetes&lt;/code&gt; (e.g., Minikube, kind or a local Kubernetes setup)&lt;/li&gt;
&lt;li&gt;
&lt;code&gt;Python&lt;/code&gt; (preferably version 3.x)&lt;/li&gt;
&lt;li&gt;
&lt;code&gt;pip&lt;/code&gt; (Python package manager)&lt;/li&gt;
&lt;li&gt;
&lt;code&gt;virtualenv&lt;/code&gt; (for managing Python environments)&lt;/li&gt;
&lt;li&gt;
&lt;code&gt;kubectl&lt;/code&gt; (Kubernetes command-line tool)&lt;/li&gt;
&lt;li&gt;
&lt;code&gt;netcat&lt;/code&gt; (for network testing)&lt;/li&gt;
&lt;/ul&gt;

&lt;h3&gt;
  
  
  Clone the Git Repository to Follow Along
&lt;/h3&gt;

&lt;p&gt;Access the source code on &lt;a href="https://github.com/nareshmaharaj-consultant/kubernetes_rbac_real_example" rel="noopener noreferrer"&gt;GitHub&lt;/a&gt;.&lt;/p&gt;

&lt;h3&gt;
  
  
  Server Code
&lt;/h3&gt;

&lt;p&gt;The &lt;strong&gt;server code&lt;/strong&gt; is responsible for generating random numbers and exposing them over a network. It is located under: &lt;code&gt;kubernetes_rbac_real_example/randomNumberServer/server.py&lt;/code&gt;&lt;/p&gt;

&lt;h4&gt;
  
  
  Create the Docker Image for the Server
&lt;/h4&gt;

&lt;p&gt;First, we need to create a Dockerfile for the server: &lt;code&gt;kubernetes_rbac_real_example&lt;br&gt;
/randomNumberServer/Dockerfile&lt;/code&gt;&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight docker"&gt;&lt;code&gt;&lt;span class="k"&gt;FROM&lt;/span&gt;&lt;span class="s"&gt; python:3.7&lt;/span&gt;
&lt;span class="k"&gt;COPY&lt;/span&gt;&lt;span class="s"&gt; . /app&lt;/span&gt;
&lt;span class="k"&gt;WORKDIR&lt;/span&gt;&lt;span class="s"&gt; /app&lt;/span&gt;
&lt;span class="k"&gt;CMD&lt;/span&gt;&lt;span class="s"&gt; ["python", "server.py"]&lt;/span&gt;
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;p&gt;To build the Docker image:&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight shell"&gt;&lt;code&gt;&lt;span class="nb"&gt;export &lt;/span&gt;&lt;span class="nv"&gt;dockerhub_username&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;&amp;lt;your_dockerhub_username&amp;gt;
docker build &lt;span class="nt"&gt;-t&lt;/span&gt; &lt;span class="k"&gt;${&lt;/span&gt;&lt;span class="nv"&gt;dockerhub_username&lt;/span&gt;&lt;span class="k"&gt;}&lt;/span&gt;/random-number-server &lt;span class="nb"&gt;.&lt;/span&gt;
docker push &lt;span class="k"&gt;${&lt;/span&gt;&lt;span class="nv"&gt;dockerhub_username&lt;/span&gt;&lt;span class="k"&gt;}&lt;/span&gt;/random-number-server
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;h3&gt;
  
  
  Client Code
&lt;/h3&gt;

&lt;p&gt;The &lt;strong&gt;client code&lt;/strong&gt; connects to the server and retrieves the random numbers. It is located under: &lt;code&gt;kubernetes_rbac_real_example/randomNumberClient/client.py&lt;/code&gt;&lt;/p&gt;

&lt;h4&gt;
  
  
  Create the Docker Image for the Client
&lt;/h4&gt;

&lt;p&gt;Create a Dockerfile for the client: &lt;code&gt;kubernetes_rbac_real_example&lt;br&gt;
/randomNumberClient/Dockerfile&lt;/code&gt;&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight docker"&gt;&lt;code&gt;&lt;span class="k"&gt;FROM&lt;/span&gt;&lt;span class="s"&gt; python:3.7&lt;/span&gt;
&lt;span class="k"&gt;COPY&lt;/span&gt;&lt;span class="s"&gt; . /app&lt;/span&gt;
&lt;span class="k"&gt;WORKDIR&lt;/span&gt;&lt;span class="s"&gt; /app&lt;/span&gt;
&lt;span class="k"&gt;CMD&lt;/span&gt;&lt;span class="s"&gt; ["python", "client.py"]&lt;/span&gt;
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;p&gt;To build the Docker image:&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight shell"&gt;&lt;code&gt;&lt;span class="nb"&gt;export &lt;/span&gt;&lt;span class="nv"&gt;dockerhub_username&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;&amp;lt;your_dockerhub_username&amp;gt;
docker build &lt;span class="nt"&gt;-t&lt;/span&gt; &lt;span class="k"&gt;${&lt;/span&gt;&lt;span class="nv"&gt;dockerhub_username&lt;/span&gt;&lt;span class="k"&gt;}&lt;/span&gt;/random-number-client &lt;span class="nb"&gt;.&lt;/span&gt;
docker push &lt;span class="k"&gt;${&lt;/span&gt;&lt;span class="nv"&gt;dockerhub_username&lt;/span&gt;&lt;span class="k"&gt;}&lt;/span&gt;/random-number-client
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;h3&gt;
  
  
  Running the Docker Containers
&lt;/h3&gt;

&lt;p&gt;Next, we'll set up a &lt;strong&gt;Docker network&lt;/strong&gt; to allow communication between the server and client containers:&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight shell"&gt;&lt;code&gt;docker network create &lt;span class="nt"&gt;-d&lt;/span&gt; bridge random-net
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;p&gt;Now, run the &lt;strong&gt;server container&lt;/strong&gt;:&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight shell"&gt;&lt;code&gt;docker run &lt;span class="nt"&gt;-it&lt;/span&gt; &lt;span class="nt"&gt;-d&lt;/span&gt; &lt;span class="nt"&gt;--name&lt;/span&gt; random-number-server &lt;span class="nt"&gt;-p&lt;/span&gt; 3215 &lt;span class="nt"&gt;--network&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;random-net &lt;span class="nt"&gt;--hostname&lt;/span&gt; random01 random-number-server
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;p&gt;Run the &lt;strong&gt;client container&lt;/strong&gt;:&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight shell"&gt;&lt;code&gt;docker run &lt;span class="nt"&gt;-it&lt;/span&gt; &lt;span class="nt"&gt;-d&lt;/span&gt; &lt;span class="nt"&gt;--name&lt;/span&gt; random-number-client &lt;span class="nt"&gt;-p&lt;/span&gt; 3216:3216 &lt;span class="nt"&gt;--network&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;random-net &lt;span class="nt"&gt;-e&lt;/span&gt; &lt;span class="nv"&gt;RANDOM_SERVER&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;random01 &lt;span class="nt"&gt;--hostname&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;random02 random-number-client
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;h3&gt;
  
  
  Testing the Setup with &lt;code&gt;netcat&lt;/code&gt;
&lt;/h3&gt;

&lt;p&gt;In a new shell, use &lt;strong&gt;netcat&lt;/strong&gt; to connect to the client container's port and view the random numbers it receives:&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight shell"&gt;&lt;code&gt;nc localhost 3216
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;p&gt;You'll be prompted to enter parameters for the number generation. Here's an example of output you should see:&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight plaintext"&gt;&lt;code&gt;Enter 3 numbers min,max,cols separated by commas: 1,900,12

565    636    362    538    483    103    898    188    81    432    245    519
120    644    866    487    407    534    156    870    630    418    581    231
174    43     675    9      380    60     555    127    505    471    764    191
...
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;h3&gt;
  
  
  Kubernetes Environment using Kind
&lt;/h3&gt;

&lt;p&gt;Visit the &lt;a href="https://kind.sigs.k8s.io/docs/user/quick-start/" rel="noopener noreferrer"&gt;Kind documentation&lt;/a&gt; for detailed instructions on setting up a Kubernetes cluster using Kind.&lt;/p&gt;

&lt;p&gt;Once installed and your cluster set up, check the nodes in your cluster:&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight shell"&gt;&lt;code&gt;kind get nodes

kind-worker2
kind-control-plane
kind-worker
kind-worker3
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;h3&gt;
  
  
  Deploying RBAC to Kubernetes
&lt;/h3&gt;

&lt;p&gt;Once we've confirmed that the Docker containers work correctly, we can deploy both the server and client to a &lt;strong&gt;Kubernetes cluster&lt;/strong&gt;. &lt;/p&gt;

&lt;p&gt;Create a pod file to deploy the server and client. If you wish, change the image to use your own Docker Hub account. E.g. if your account name is &lt;code&gt;johnmcollins&lt;/code&gt;, then the image is set to &lt;code&gt;johnmcollins/random-number-server&lt;/code&gt; and &lt;code&gt;johnmcollins/random-number-client&lt;/code&gt;.&lt;/p&gt;

&lt;p&gt;First, create the namespace:&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight shell"&gt;&lt;code&gt;kubectl create namespace random-numbers
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;h4&gt;
  
  
  Server
&lt;/h4&gt;



&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight shell"&gt;&lt;code&gt;&lt;span class="nb"&gt;cat&lt;/span&gt; &lt;span class="o"&gt;&amp;lt;&amp;lt;&lt;/span&gt;&lt;span class="no"&gt;EOF&lt;/span&gt;&lt;span class="sh"&gt;&amp;gt; random-number-server.yaml
apiVersion: v1
kind: Pod
metadata:
  labels:
    app: random-number-server
  name: random-number-server
  namespace: random-numbers
spec:
  containers:
  - name: random-number-server
    image: contactnkm/random-number-server:latest
    ports:
    - containerPort: 3215
&lt;/span&gt;&lt;span class="no"&gt;EOF
&lt;/span&gt;&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;p&gt;Create the service for this pod:&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight shell"&gt;&lt;code&gt;&lt;span class="nb"&gt;cat&lt;/span&gt; &lt;span class="o"&gt;&amp;lt;&amp;lt;&lt;/span&gt;&lt;span class="no"&gt;EOF&lt;/span&gt;&lt;span class="sh"&gt;&amp;gt; random-number-service.yaml
apiVersion: v1
kind: Service
metadata:
  name: random-number-service
  namespace: random-numbers
spec:
  selector:
    app: random-number-server
  ports:
    - protocol: TCP
      port: 3215
      targetPort: 3215
&lt;/span&gt;&lt;span class="no"&gt;EOF
&lt;/span&gt;&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;p&gt;Run the following commands to deploy the random number server and random number service:&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight shell"&gt;&lt;code&gt;kubectl apply &lt;span class="nt"&gt;-f&lt;/span&gt; random-number-server.yaml
kubectl apply &lt;span class="nt"&gt;-f&lt;/span&gt; random-number-service.yaml
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;p&gt;We need to ensure our service DNS is working correctly. We can do this by creating a pod that will run a &lt;code&gt;nslookup&lt;/code&gt; command to the service.&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight shell"&gt;&lt;code&gt;&lt;span class="nb"&gt;cat&lt;/span&gt; &lt;span class="o"&gt;&amp;lt;&amp;lt;&lt;/span&gt;&lt;span class="no"&gt;EOF&lt;/span&gt;&lt;span class="sh"&gt;&amp;gt; dnsutils.yaml
apiVersion: v1
kind: Pod
metadata:
  name: dnsutils
  namespace: random-numbers
spec:
  containers:
  - name: dnsutils
    image: gcr.io/kubernetes-e2e-test-images/dnsutils:1.3
    command:
      - sleep
      - "3600"
    imagePullPolicy: IfNotPresent
  restartPolicy: Always
&lt;/span&gt;&lt;span class="no"&gt;EOF
&lt;/span&gt;&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;





&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight shell"&gt;&lt;code&gt;kubectl apply &lt;span class="nt"&gt;-f&lt;/span&gt; dnsutils.yaml
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;p&gt;Run the &lt;code&gt;nslookup&lt;/code&gt; command to verify the service:&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight shell"&gt;&lt;code&gt;kubectl &lt;span class="nb"&gt;exec&lt;/span&gt; &lt;span class="nt"&gt;-ti&lt;/span&gt; dnsutils &lt;span class="nt"&gt;-n&lt;/span&gt; random-numbers &lt;span class="nt"&gt;--&lt;/span&gt; nslookup random-number-service
Server:         10.96.0.10
Address:        10.96.0.10#53

Name:   random-number-service.random-numbers.svc.cluster.local
Address: 10.96.35.141
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;p&gt;So we now know for sure our DNS is working correctly, and our FQDN is &lt;code&gt;random-number-service.random-numbers.svc.cluster.local&lt;/code&gt;.&lt;/p&gt;

&lt;p&gt;In the client pod, set the environment variable &lt;code&gt;RANDOM_NUMBER_SERVER&lt;/code&gt; to the FQDN of the server service.&lt;/p&gt;

&lt;h4&gt;
  
  
  Client
&lt;/h4&gt;



&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight shell"&gt;&lt;code&gt;&lt;span class="nb"&gt;cat&lt;/span&gt; &lt;span class="o"&gt;&amp;lt;&amp;lt;&lt;/span&gt;&lt;span class="no"&gt;EOF&lt;/span&gt;&lt;span class="sh"&gt;&amp;gt; random-number-client.yaml
apiVersion: v1
kind: Pod
metadata:
  labels:
    app: random-number-client
  name: random-number-client
  namespace: random-numbers
spec:
  containers:
  - name: random-number-client
    image: contactnkm/random-number-client:latest
    ports:
    - containerPort: 3216
    env:
    - name: RANDOM_SERVER
      value: "random-number-service.random-numbers.svc.cluster.local"
&lt;/span&gt;&lt;span class="no"&gt;EOF
&lt;/span&gt;&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;p&gt;Create a random number client service:&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight shell"&gt;&lt;code&gt;&lt;span class="nb"&gt;cat&lt;/span&gt; &lt;span class="o"&gt;&amp;lt;&amp;lt;&lt;/span&gt;&lt;span class="no"&gt;EOF&lt;/span&gt;&lt;span class="sh"&gt;&amp;gt; random-number-client-service.yaml
apiVersion: v1
kind: Service
metadata:
  name: random-number-client-service
  namespace: random-numbers
spec:
  selector:
    app: random-number-client
  ports:
    - protocol: TCP
      port: 3216
      targetPort: 3216
&lt;/span&gt;&lt;span class="no"&gt;EOF
&lt;/span&gt;&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;p&gt;Deploy the client and random number client service to the cluster:&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight shell"&gt;&lt;code&gt;kubectl apply &lt;span class="nt"&gt;-f&lt;/span&gt; random-number-client.yaml
kubectl apply &lt;span class="nt"&gt;-f&lt;/span&gt; random-number-client-service.yaml
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;p&gt;Check that all the service endpoints are created; it should not show &lt;code&gt;none&lt;/code&gt;:&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight shell"&gt;&lt;code&gt;kubectl get ep &lt;span class="nt"&gt;-n&lt;/span&gt; random-numbers

NAME                           ENDPOINTS          AGE
random-number-client-service   10.244.2.6:3216    12s
random-number-service          10.244.1.12:3215   87s
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;h3&gt;
  
  
  Testing the Client and Server
&lt;/h3&gt;

&lt;p&gt;Connect to the host using the dnsutils pod and test the client and server:&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight shell"&gt;&lt;code&gt;kubectl &lt;span class="nb"&gt;exec&lt;/span&gt; &lt;span class="nt"&gt;-ti&lt;/span&gt; dnsutils &lt;span class="nt"&gt;-n&lt;/span&gt; random-numbers &lt;span class="nt"&gt;--&lt;/span&gt; sh
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;p&gt;Test the application and use &lt;code&gt;Ctrl-C&lt;/code&gt; to exit:&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight shell"&gt;&lt;code&gt;nc random-number-client-service 3216

Enter 3 numbers min,max,cols separated by commas: 10,36,6
16      19      17      30      31      25
28      14      35      32      29      34
20      18      12      33      27      24
36      23      22      26      21      10
15      11      13
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;h3&gt;
  
  
  Using RBAC in Kubernetes
&lt;/h3&gt;

&lt;p&gt;The next step is to set up &lt;strong&gt;RBAC&lt;/strong&gt; to allow the client to interact with the server.&lt;/p&gt;

&lt;p&gt;Create a Service Account.&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight shell"&gt;&lt;code&gt;kubectl create sa random-numbers-sa &lt;span class="nt"&gt;-n&lt;/span&gt;  random-numbers
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;h4&gt;
  
  
  Step 1: Create a Role for Access
&lt;/h4&gt;

&lt;p&gt;First, we create a &lt;strong&gt;Role&lt;/strong&gt; that defines the permissions for accessing the server. This role grants read access to the server's resources.&lt;/p&gt;

&lt;p&gt;For example, the role could be defined in a YAML file:&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight yaml"&gt;&lt;code&gt;&lt;span class="s"&gt;cat &amp;lt;&amp;lt;EOF&amp;gt; roles.yaml&lt;/span&gt;
&lt;span class="na"&gt;apiVersion&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="s"&gt;rbac.authorization.k8s.io/v1&lt;/span&gt;
&lt;span class="na"&gt;kind&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="s"&gt;Role&lt;/span&gt;
&lt;span class="na"&gt;metadata&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt;
  &lt;span class="na"&gt;namespace&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="s"&gt;random-numbers&lt;/span&gt;
  &lt;span class="na"&gt;name&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="s"&gt;client-access-role&lt;/span&gt;
&lt;span class="na"&gt;rules&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt;
&lt;span class="pi"&gt;-&lt;/span&gt; &lt;span class="na"&gt;apiGroups&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="pi"&gt;[&lt;/span&gt;&lt;span class="s2"&gt;"&lt;/span&gt;&lt;span class="s"&gt;"&lt;/span&gt;&lt;span class="pi"&gt;]&lt;/span&gt;
  &lt;span class="na"&gt;resources&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt;
    &lt;span class="pi"&gt;-&lt;/span&gt; &lt;span class="s"&gt;configmaps&lt;/span&gt;
    &lt;span class="pi"&gt;-&lt;/span&gt; &lt;span class="s"&gt;services&lt;/span&gt; &lt;span class="c1"&gt;# Add other resources as needed&lt;/span&gt;
  &lt;span class="na"&gt;verbs&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; 
    &lt;span class="pi"&gt;-&lt;/span&gt; &lt;span class="s"&gt;get&lt;/span&gt;
    &lt;span class="pi"&gt;-&lt;/span&gt; &lt;span class="s"&gt;list&lt;/span&gt; &lt;span class="c1"&gt;# Add other verbs as needed&lt;/span&gt;
&lt;span class="s"&gt;EOF&lt;/span&gt;
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;h4&gt;
  
  
  Step 2: Create a RoleBinding
&lt;/h4&gt;

&lt;p&gt;Now, we create a &lt;strong&gt;RoleBinding&lt;/strong&gt; to bind the "client-access-role" to the client pod. This will grant the client the necessary permissions to interact with the server pod.&lt;/p&gt;

&lt;p&gt;For example, the RoleBinding might look like this:&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight yaml"&gt;&lt;code&gt;&lt;span class="s"&gt;cat &amp;lt;&amp;lt;EOF&amp;gt; role-binding.yaml&lt;/span&gt;
&lt;span class="na"&gt;apiVersion&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="s"&gt;rbac.authorization.k8s.io/v1&lt;/span&gt;
&lt;span class="na"&gt;kind&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="s"&gt;RoleBinding&lt;/span&gt;
&lt;span class="na"&gt;metadata&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt;
  &lt;span class="na"&gt;name&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="s"&gt;client-access-role-binding&lt;/span&gt;
  &lt;span class="na"&gt;namespace&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="s"&gt;random-numbers&lt;/span&gt;
&lt;span class="na"&gt;subjects&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt;
&lt;span class="pi"&gt;-&lt;/span&gt; &lt;span class="na"&gt;kind&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="s"&gt;ServiceAccount&lt;/span&gt;
  &lt;span class="na"&gt;name&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="s"&gt;random-numbers-sa&lt;/span&gt;
  &lt;span class="na"&gt;namespace&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="s"&gt;random-numbers&lt;/span&gt;
&lt;span class="na"&gt;roleRef&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt;
  &lt;span class="na"&gt;kind&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="s"&gt;Role&lt;/span&gt;
  &lt;span class="na"&gt;name&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="s"&gt;client-access-role&lt;/span&gt;
  &lt;span class="na"&gt;apiGroup&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="s"&gt;rbac.authorization.k8s.io&lt;/span&gt;
&lt;span class="s"&gt;EOF&lt;/span&gt;
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;h3&gt;
  
  
  Testing in Kubernetes
&lt;/h3&gt;

&lt;p&gt;After deploying the server and client containers to Kubernetes, you can test the connection between them and verify that the &lt;strong&gt;RoleBinding&lt;/strong&gt; is working. We will do this with a simple curl application.&lt;/p&gt;

&lt;p&gt;Create the curl application:&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight shell"&gt;&lt;code&gt;&lt;span class="nb"&gt;cat&lt;/span&gt; &lt;span class="o"&gt;&amp;lt;&amp;lt;&lt;/span&gt;&lt;span class="no"&gt;EOF&lt;/span&gt;&lt;span class="sh"&gt;&amp;gt; curlapp.yaml
apiVersion: v1
kind: Pod
metadata:   
  name: curlo
  namespace: random-numbers
  labels:
    app: curlo
spec:
  serviceAccountName: random-numbers-sa
  containers:
  - name: curlo
    image: curlimages/curl
    command: ["sleep","999999"]
&lt;/span&gt;&lt;span class="no"&gt;EOF

&lt;/span&gt;kubectl apply &lt;span class="nt"&gt;-f&lt;/span&gt; curlapp.yaml
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;p&gt;Connect to the host using the following command:&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight shell"&gt;&lt;code&gt;kubectl &lt;span class="nb"&gt;exec&lt;/span&gt; &lt;span class="nt"&gt;-it&lt;/span&gt; curlo &lt;span class="nt"&gt;-n&lt;/span&gt; random-numbers &lt;span class="nt"&gt;--&lt;/span&gt; /bin/sh
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;p&gt;In order to use curl with https we will need to use the certificate file &lt;code&gt;ca.crt&lt;/code&gt; file and &lt;code&gt;token&lt;/code&gt; in the &lt;code&gt;curl&lt;/code&gt; command.&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight shell"&gt;&lt;code&gt;&lt;span class="nb"&gt;cat&lt;/span&gt; /var/run/secrets/kubernetes.io/serviceaccount/token &lt;span class="o"&gt;&amp;gt;&lt;/span&gt; TOKEN
&lt;span class="nb"&gt;export &lt;/span&gt;&lt;span class="nv"&gt;TOKEN&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;&lt;span class="si"&gt;$(&lt;/span&gt;&lt;span class="nb"&gt;cat &lt;/span&gt;TOKEN&lt;span class="si"&gt;)&lt;/span&gt;
curl &lt;span class="nt"&gt;-k&lt;/span&gt; &lt;span class="nt"&gt;--header&lt;/span&gt; &lt;span class="s2"&gt;"Authorization: Bearer &lt;/span&gt;&lt;span class="nv"&gt;$TOKEN&lt;/span&gt;&lt;span class="s2"&gt;"&lt;/span&gt; &lt;span class="nt"&gt;--cacert&lt;/span&gt; /var/run/secrets/kubernetes.io/serviceaccount/ca.crt https://kubernetes.default.svc
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;p&gt;For various reasons which I will let you discover, you will notice how this can be shortened to:&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight shell"&gt;&lt;code&gt;&lt;span class="nb"&gt;export &lt;/span&gt;&lt;span class="nv"&gt;CURL_CA_BUNDLE&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;/var/run/secrets/kubernetes.io/serviceaccount/ca.crt
curl &lt;span class="nt"&gt;-k&lt;/span&gt; &lt;span class="nt"&gt;--header&lt;/span&gt; &lt;span class="s2"&gt;"Authorization: Bearer &lt;/span&gt;&lt;span class="nv"&gt;$TOKEN&lt;/span&gt;&lt;span class="s2"&gt;"&lt;/span&gt; https://kubernetes.default.svc
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;p&gt;With the following command you should be able to see the service we had created earlier:&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight shell"&gt;&lt;code&gt;curl &lt;span class="nt"&gt;-k&lt;/span&gt; &lt;span class="nt"&gt;--header&lt;/span&gt; &lt;span class="s2"&gt;"Authorization: Bearer &lt;/span&gt;&lt;span class="nv"&gt;$TOKEN&lt;/span&gt;&lt;span class="s2"&gt;"&lt;/span&gt; https://kubernetes.default.svc/api/v1/namespaces/random-numbers/services/random-number-service
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;p&gt;Result:&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight shell"&gt;&lt;code&gt;&lt;span class="o"&gt;{&lt;/span&gt;
  &lt;span class="s2"&gt;"kind"&lt;/span&gt;: &lt;span class="s2"&gt;"Service"&lt;/span&gt;,
  &lt;span class="s2"&gt;"apiVersion"&lt;/span&gt;: &lt;span class="s2"&gt;"v1"&lt;/span&gt;,
  &lt;span class="s2"&gt;"metadata"&lt;/span&gt;: &lt;span class="o"&gt;{&lt;/span&gt;
    &lt;span class="s2"&gt;"name"&lt;/span&gt;: &lt;span class="s2"&gt;"random-number-service"&lt;/span&gt;,
    &lt;span class="s2"&gt;"namespace"&lt;/span&gt;: &lt;span class="s2"&gt;"random-numbers"&lt;/span&gt;,
    &lt;span class="s2"&gt;"uid"&lt;/span&gt;: &lt;span class="s2"&gt;"504cd44b-7e10-4ea2-9ee8-a912909f6dee"&lt;/span&gt;,
    &lt;span class="s2"&gt;"resourceVersion"&lt;/span&gt;: &lt;span class="s2"&gt;"183093"&lt;/span&gt;,
    &lt;span class="s2"&gt;"creationTimestamp"&lt;/span&gt;: &lt;span class="s2"&gt;"2025-01-19T19:52:28Z"&lt;/span&gt;,
    &lt;span class="s2"&gt;"managedFields"&lt;/span&gt;: &lt;span class="o"&gt;[&lt;/span&gt;
  ...
  &lt;span class="s2"&gt;"status"&lt;/span&gt;: &lt;span class="o"&gt;{&lt;/span&gt;
    &lt;span class="s2"&gt;"loadBalancer"&lt;/span&gt;: &lt;span class="o"&gt;{}&lt;/span&gt;
  &lt;span class="o"&gt;}&lt;/span&gt;
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;p&gt;Try to list the other pods that are running in the cluster within the same namespace using the following &lt;code&gt;curl&lt;/code&gt; command. You should get a &lt;code&gt;403 Forbidden error&lt;/code&gt;.&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight shell"&gt;&lt;code&gt;curl &lt;span class="nt"&gt;-k&lt;/span&gt; &lt;span class="nt"&gt;--header&lt;/span&gt; &lt;span class="s2"&gt;"Authorization: Bearer &lt;/span&gt;&lt;span class="nv"&gt;$TOKEN&lt;/span&gt;&lt;span class="s2"&gt;"&lt;/span&gt; https://kubernetes.default.svc/api/v1/namespaces/random-numbers/pods
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;p&gt;Result:&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight json"&gt;&lt;code&gt;&lt;span class="p"&gt;{&lt;/span&gt;&lt;span class="w"&gt;
  &lt;/span&gt;&lt;span class="nl"&gt;"kind"&lt;/span&gt;&lt;span class="p"&gt;:&lt;/span&gt;&lt;span class="w"&gt; &lt;/span&gt;&lt;span class="s2"&gt;"Status"&lt;/span&gt;&lt;span class="p"&gt;,&lt;/span&gt;&lt;span class="w"&gt;
  &lt;/span&gt;&lt;span class="nl"&gt;"apiVersion"&lt;/span&gt;&lt;span class="p"&gt;:&lt;/span&gt;&lt;span class="w"&gt; &lt;/span&gt;&lt;span class="s2"&gt;"v1"&lt;/span&gt;&lt;span class="p"&gt;,&lt;/span&gt;&lt;span class="w"&gt;
  &lt;/span&gt;&lt;span class="nl"&gt;"metadata"&lt;/span&gt;&lt;span class="p"&gt;:&lt;/span&gt;&lt;span class="w"&gt; &lt;/span&gt;&lt;span class="p"&gt;{},&lt;/span&gt;&lt;span class="w"&gt;
  &lt;/span&gt;&lt;span class="nl"&gt;"status"&lt;/span&gt;&lt;span class="p"&gt;:&lt;/span&gt;&lt;span class="w"&gt; &lt;/span&gt;&lt;span class="s2"&gt;"Failure"&lt;/span&gt;&lt;span class="p"&gt;,&lt;/span&gt;&lt;span class="w"&gt;
  &lt;/span&gt;&lt;span class="nl"&gt;"message"&lt;/span&gt;&lt;span class="p"&gt;:&lt;/span&gt;&lt;span class="w"&gt; &lt;/span&gt;&lt;span class="s2"&gt;"pods is forbidden: User &lt;/span&gt;&lt;span class="se"&gt;\"&lt;/span&gt;&lt;span class="s2"&gt;system:serviceaccount:random-numbers:random-numbers-sa&lt;/span&gt;&lt;span class="se"&gt;\"&lt;/span&gt;&lt;span class="s2"&gt; cannot list resource &lt;/span&gt;&lt;span class="se"&gt;\"&lt;/span&gt;&lt;span class="s2"&gt;pods&lt;/span&gt;&lt;span class="se"&gt;\"&lt;/span&gt;&lt;span class="s2"&gt; in API group &lt;/span&gt;&lt;span class="se"&gt;\"\"&lt;/span&gt;&lt;span class="s2"&gt; in the namespace &lt;/span&gt;&lt;span class="se"&gt;\"&lt;/span&gt;&lt;span class="s2"&gt;random-numbers&lt;/span&gt;&lt;span class="se"&gt;\"&lt;/span&gt;&lt;span class="s2"&gt;"&lt;/span&gt;&lt;span class="p"&gt;,&lt;/span&gt;&lt;span class="w"&gt;
  &lt;/span&gt;&lt;span class="nl"&gt;"reason"&lt;/span&gt;&lt;span class="p"&gt;:&lt;/span&gt;&lt;span class="w"&gt; &lt;/span&gt;&lt;span class="s2"&gt;"Forbidden"&lt;/span&gt;&lt;span class="p"&gt;,&lt;/span&gt;&lt;span class="w"&gt;
  &lt;/span&gt;&lt;span class="nl"&gt;"details"&lt;/span&gt;&lt;span class="p"&gt;:&lt;/span&gt;&lt;span class="w"&gt; &lt;/span&gt;&lt;span class="p"&gt;{&lt;/span&gt;&lt;span class="w"&gt;
    &lt;/span&gt;&lt;span class="nl"&gt;"kind"&lt;/span&gt;&lt;span class="p"&gt;:&lt;/span&gt;&lt;span class="w"&gt; &lt;/span&gt;&lt;span class="s2"&gt;"pods"&lt;/span&gt;&lt;span class="w"&gt;
  &lt;/span&gt;&lt;span class="p"&gt;},&lt;/span&gt;&lt;span class="w"&gt;
  &lt;/span&gt;&lt;span class="nl"&gt;"code"&lt;/span&gt;&lt;span class="p"&gt;:&lt;/span&gt;&lt;span class="w"&gt; &lt;/span&gt;&lt;span class="mi"&gt;403&lt;/span&gt;&lt;span class="w"&gt;
&lt;/span&gt;&lt;span class="p"&gt;}&lt;/span&gt;&lt;span class="w"&gt;
&lt;/span&gt;&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;p&gt;Great! Now we have achieved the exact level of role-based access control needed to limit access exclusively to &lt;code&gt;services&lt;/code&gt; and &lt;code&gt;configmaps&lt;/code&gt; within the &lt;code&gt;random-numbers&lt;/code&gt; namespace.&lt;/p&gt;

&lt;p&gt;Next, create a &lt;code&gt;ConfigMap&lt;/code&gt; to store the &lt;code&gt;min&lt;/code&gt;, &lt;code&gt;max&lt;/code&gt;, and &lt;code&gt;table-size&lt;/code&gt; values for our random number generator. These values will be used by the client pod to generate the random number table.&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight shell"&gt;&lt;code&gt;kubectl create configmap random-number-config &lt;span class="nt"&gt;--from-literal&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;&lt;span class="nv"&gt;min&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;1 &lt;span class="nt"&gt;--from-literal&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;&lt;span class="nv"&gt;max&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;100 &lt;span class="nt"&gt;--from-literal&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;table-size&lt;span class="o"&gt;=&lt;/span&gt;10 &lt;span class="nt"&gt;-n&lt;/span&gt; random-numbers
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;p&gt;Here is the yaml file version of the same command:&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight yaml"&gt;&lt;code&gt;&lt;span class="s"&gt;cat &amp;lt;&amp;lt;EOF&amp;gt; random-number-configmap.yaml&lt;/span&gt;
&lt;span class="na"&gt;apiVersion&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="s"&gt;v1&lt;/span&gt;
&lt;span class="na"&gt;kind&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="s"&gt;ConfigMap&lt;/span&gt;
&lt;span class="na"&gt;metadata&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt;
  &lt;span class="na"&gt;name&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="s"&gt;random-number-config&lt;/span&gt;
  &lt;span class="na"&gt;namespace&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="s"&gt;random-numbers&lt;/span&gt;
&lt;span class="na"&gt;data&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt;
  &lt;span class="na"&gt;min&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="s2"&gt;"&lt;/span&gt;&lt;span class="s"&gt;1"&lt;/span&gt;
  &lt;span class="na"&gt;max&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="s2"&gt;"&lt;/span&gt;&lt;span class="s"&gt;100"&lt;/span&gt;
  &lt;span class="na"&gt;table-size&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="s2"&gt;"&lt;/span&gt;&lt;span class="s"&gt;10"&lt;/span&gt;
  &lt;span class="na"&gt;table-name&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="s2"&gt;"&lt;/span&gt;&lt;span class="s"&gt;random-numbers"&lt;/span&gt;
&lt;span class="s"&gt;EOF&lt;/span&gt;

&lt;span class="s"&gt;kubectl apply -f random-number-configmap.yaml&lt;/span&gt;
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;h3&gt;
  
  
  Python Kubernetes API Client
&lt;/h3&gt;

&lt;p&gt;We need to modify the client pod to use the Kubernetes API to get the &lt;code&gt;configmap&lt;/code&gt; values and use them to generate the random number table.&lt;/p&gt;

&lt;p&gt;We will use the Kubernetes API client for Python to get the configmap values.&lt;/p&gt;

&lt;p&gt;The source code can be found here: &lt;code&gt;kubernetes_rbac_real_example/randomNumberClientCM/client-cm.py&lt;/code&gt;&lt;/p&gt;

&lt;p&gt;We will need to add the kubernetes library to our Docker image. Use &lt;code&gt;pip&lt;/code&gt; to install the Kubernetes API client for Python in your Dockerfile ( note we have already done this for you ).&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight shell"&gt;&lt;code&gt;pip &lt;span class="nb"&gt;install &lt;/span&gt;kubernetes
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;p&gt;Here is the code required to get the configmap values:&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight python"&gt;&lt;code&gt;&lt;span class="kn"&gt;import&lt;/span&gt; &lt;span class="n"&gt;os&lt;/span&gt;
&lt;span class="kn"&gt;from&lt;/span&gt; &lt;span class="n"&gt;kubernetes&lt;/span&gt; &lt;span class="kn"&gt;import&lt;/span&gt; &lt;span class="n"&gt;client&lt;/span&gt;&lt;span class="p"&gt;,&lt;/span&gt; &lt;span class="n"&gt;config&lt;/span&gt;
&lt;span class="k"&gt;def&lt;/span&gt; &lt;span class="nf"&gt;get_configmap_values&lt;/span&gt;&lt;span class="p"&gt;():&lt;/span&gt;
    &lt;span class="n"&gt;config&lt;/span&gt;&lt;span class="p"&gt;.&lt;/span&gt;&lt;span class="nf"&gt;load_incluster_config&lt;/span&gt;&lt;span class="p"&gt;()&lt;/span&gt;
    &lt;span class="n"&gt;v1&lt;/span&gt; &lt;span class="o"&gt;=&lt;/span&gt; &lt;span class="n"&gt;client&lt;/span&gt;&lt;span class="p"&gt;.&lt;/span&gt;&lt;span class="nc"&gt;CoreV1Api&lt;/span&gt;&lt;span class="p"&gt;()&lt;/span&gt;
    &lt;span class="n"&gt;configmap&lt;/span&gt; &lt;span class="o"&gt;=&lt;/span&gt; &lt;span class="n"&gt;v1&lt;/span&gt;&lt;span class="p"&gt;.&lt;/span&gt;&lt;span class="nf"&gt;read_namespaced_config_map&lt;/span&gt;&lt;span class="p"&gt;(&lt;/span&gt;&lt;span class="n"&gt;name&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;&lt;span class="sh"&gt;"&lt;/span&gt;&lt;span class="s"&gt;random-number-config&lt;/span&gt;&lt;span class="sh"&gt;"&lt;/span&gt;&lt;span class="p"&gt;,&lt;/span&gt; &lt;span class="n"&gt;namespace&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;&lt;span class="sh"&gt;"&lt;/span&gt;&lt;span class="s"&gt;random-numbers&lt;/span&gt;&lt;span class="sh"&gt;"&lt;/span&gt;&lt;span class="p"&gt;)&lt;/span&gt;
    &lt;span class="n"&gt;min_value&lt;/span&gt; &lt;span class="o"&gt;=&lt;/span&gt; &lt;span class="nf"&gt;int&lt;/span&gt;&lt;span class="p"&gt;(&lt;/span&gt;&lt;span class="n"&gt;configmap&lt;/span&gt;&lt;span class="p"&gt;.&lt;/span&gt;&lt;span class="n"&gt;data&lt;/span&gt;&lt;span class="p"&gt;[&lt;/span&gt;&lt;span class="sh"&gt;"&lt;/span&gt;&lt;span class="s"&gt;min&lt;/span&gt;&lt;span class="sh"&gt;"&lt;/span&gt;&lt;span class="p"&gt;])&lt;/span&gt;
    &lt;span class="n"&gt;max_value&lt;/span&gt; &lt;span class="o"&gt;=&lt;/span&gt; &lt;span class="nf"&gt;int&lt;/span&gt;&lt;span class="p"&gt;(&lt;/span&gt;&lt;span class="n"&gt;configmap&lt;/span&gt;&lt;span class="p"&gt;.&lt;/span&gt;&lt;span class="n"&gt;data&lt;/span&gt;&lt;span class="p"&gt;[&lt;/span&gt;&lt;span class="sh"&gt;"&lt;/span&gt;&lt;span class="s"&gt;max&lt;/span&gt;&lt;span class="sh"&gt;"&lt;/span&gt;&lt;span class="p"&gt;])&lt;/span&gt;
    &lt;span class="n"&gt;table_size&lt;/span&gt; &lt;span class="o"&gt;=&lt;/span&gt; &lt;span class="nf"&gt;int&lt;/span&gt;&lt;span class="p"&gt;(&lt;/span&gt;&lt;span class="n"&gt;configmap&lt;/span&gt;&lt;span class="p"&gt;.&lt;/span&gt;&lt;span class="n"&gt;data&lt;/span&gt;&lt;span class="p"&gt;[&lt;/span&gt;&lt;span class="sh"&gt;"&lt;/span&gt;&lt;span class="s"&gt;table-size&lt;/span&gt;&lt;span class="sh"&gt;"&lt;/span&gt;&lt;span class="p"&gt;])&lt;/span&gt;
    &lt;span class="k"&gt;return&lt;/span&gt; &lt;span class="n"&gt;min_value&lt;/span&gt;&lt;span class="p"&gt;,&lt;/span&gt; &lt;span class="n"&gt;max_value&lt;/span&gt;&lt;span class="p"&gt;,&lt;/span&gt; &lt;span class="n"&gt;table_size&lt;/span&gt;
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;p&gt;Build a new docker image called &lt;code&gt;{your-username}/random-number-client-cm&lt;/code&gt; and push it to docker hub.&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight shell"&gt;&lt;code&gt;docker build &lt;span class="nt"&gt;-t&lt;/span&gt; &lt;span class="o"&gt;{&lt;/span&gt;your-username&lt;span class="o"&gt;}&lt;/span&gt;/random-number-client-cm &lt;span class="nb"&gt;.&lt;/span&gt;
docker push &lt;span class="o"&gt;{&lt;/span&gt;your-username&lt;span class="o"&gt;}&lt;/span&gt;/random-number-client-cm
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;p&gt;Let's double-check that we have our service account added to the client pod definition and the latest Docker image for the client pod. Remember, the service account is used to restrict access to the Kubernetes API using RBAC.&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight shell"&gt;&lt;code&gt;apiVersion: v1
kind: Pod
metadata:
  labels:
    app: random-number-client
  name: random-number-client
  namespace: random-numbers
spec:
  serviceAccountName: random-numbers-sa
  containers:
  - name: random-number-client
    image: &lt;span class="o"&gt;{&lt;/span&gt;your-docker-username&lt;span class="o"&gt;}&lt;/span&gt;/random-number-client-cm:latest
    ports:
    - containerPort: 3216
    &lt;span class="nb"&gt;env&lt;/span&gt;:
    - name: RANDOM_SERVER
      value: &lt;span class="s2"&gt;"random-number-service.random-numbers.svc.cluster.local"&lt;/span&gt;
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;





&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight shell"&gt;&lt;code&gt;kubectl delete &lt;span class="nt"&gt;-f&lt;/span&gt; random-number-client.yaml
kubectl apply &lt;span class="nt"&gt;-f&lt;/span&gt; random-number-client.yaml
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;p&gt;If everything is set up correctly, the client should be able to access the server and receive the random number table, just like it did in the Docker environment but can't do very much else. Let's test it out:&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight shell"&gt;&lt;code&gt;kubectl &lt;span class="nb"&gt;exec&lt;/span&gt; &lt;span class="nt"&gt;-ti&lt;/span&gt; dnsutils &lt;span class="nt"&gt;-n&lt;/span&gt; random-numbers &lt;span class="nt"&gt;--&lt;/span&gt; sh

/ &lt;span class="c"&gt;# nc random-number-client-service 3216&lt;/span&gt;

Press &lt;span class="o"&gt;[&lt;/span&gt;Enter] to get a new &lt;span class="nb"&gt;set &lt;/span&gt;of values using the kubernetes config map: 
Will send these values to server: 1,100,10
Waiting &lt;span class="k"&gt;for &lt;/span&gt;server response...
74      80      40      26      17      72      87      38      64      79
97      96      63      86      7       58      75      52      76      32
47      29      5       83      68      90      60      100     16      70
84      9       44      37      48      49      98      81      27      54
45      61      51      25      43      57      65      89      92      19
22      24      55      28      53      15      13      8       10      39
20      77      31      93      91      12      62      94      34      42
35      1       67      85      78      59      14      99      11      33
21      30      95      2       6       82      4       3       56      66
50      69      18      23      46      71      36      88      41      73

Press &lt;span class="o"&gt;[&lt;/span&gt;Enter] to get a new &lt;span class="nb"&gt;set &lt;/span&gt;of values using the kubernetes config map:

&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;p&gt;Now try changing the &lt;code&gt;min&lt;/code&gt;, &lt;code&gt;max&lt;/code&gt; and &lt;code&gt;table-size&lt;/code&gt; values in the config map and see if the client can pick up the new values.&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight shell"&gt;&lt;code&gt;apiVersion: v1
kind: ConfigMap
metadata:
  name: random-number-config
  namespace: random-numbers
data:
  min: &lt;span class="s2"&gt;"64"&lt;/span&gt;
  max: &lt;span class="s2"&gt;"256"&lt;/span&gt;
  table-size: &lt;span class="s2"&gt;"8"&lt;/span&gt;
  table-name: &lt;span class="s2"&gt;"random-numbers"&lt;/span&gt;
...
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;p&gt;Save the file and apply the changes.&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight shell"&gt;&lt;code&gt;kubectl apply &lt;span class="nt"&gt;-f&lt;/span&gt; random-number-configmap.yaml
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;p&gt;Visit the window where you are running the dnsutils pod and hit enter again in the terminal. This should now fetch a new table using the updated &lt;code&gt;min&lt;/code&gt;, &lt;code&gt;max&lt;/code&gt; and &lt;code&gt;table-size&lt;/code&gt; values.&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight shell"&gt;&lt;code&gt;Press &lt;span class="o"&gt;[&lt;/span&gt;Enter] to get a new &lt;span class="nb"&gt;set &lt;/span&gt;of values using the kubernetes config map: 
Will send these values to server: 64,256,8
Waiting &lt;span class="k"&gt;for &lt;/span&gt;server response...
219     195     91      88      104     93      134     144
246     131     241     239     128     210     190     114
163     183     191     161     115     145     160     70
136     218     171     67      98      237     248     256
227     173     80      202     206     71      129     126
249     154     245     216     193     204     92      66
75      169     106     189     155     185     238     179
103     230     99      221     149     137     152     138
117     174     100     247     215     170     105     68
120     74      180     141     231     186     139     250
79      122     209     121     127     123     83      157
94      235     199     211     201     125     188     95
229     205     220     198     147     236     217     176
97      167     213     234     203     225     84      172
208     142     254     150     175     148     87      228
158     253     135     64      242     81      233     212
133     72      118     73      243     143     226     159
146     140     178     77      124     151     112     109
164     184     251     153     96      197     65      240
78      132     166     252     165     194     102     244
86      187     116     232     222     196     110     111
119     90      168     255     101     76      82      85
162     89      108     69      223     200     214     182
207     192     113     177     130     156     107     181
224
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;






&lt;h3&gt;
  
  
  Conclusion
&lt;/h3&gt;

&lt;p&gt;In this article, we've walked through a real-world example of using &lt;strong&gt;RBAC&lt;/strong&gt; in a Kubernetes environment. We demonstrated how to set up roles and RoleBindings to control access between two applications—one acting as a server and the other as a client. By building the applications with &lt;strong&gt;Docker&lt;/strong&gt;, deploying them to Kubernetes, and using RBAC, you now have a solid understanding of how to implement access control in a Kubernetes cluster.&lt;/p&gt;

&lt;p&gt;Remember, RBAC is a powerful feature that helps you ensure that users and services in your cluster have the appropriate permissions based on their roles. With RBAC, you can implement fine-grained access control to safeguard your resources.&lt;/p&gt;




</description>
    </item>
    <item>
      <title>MongoDB Sharding - Single Node Macbook - Quick Setup</title>
      <dc:creator>Naresh Maharaj</dc:creator>
      <pubDate>Sat, 05 Oct 2024 18:09:48 +0000</pubDate>
      <link>https://dev.to/naresh_maharaj_c4b8fbd4aa/mongodb-sharding-single-node-macbook-quick-setup-4513</link>
      <guid>https://dev.to/naresh_maharaj_c4b8fbd4aa/mongodb-sharding-single-node-macbook-quick-setup-4513</guid>
      <description>&lt;h1&gt;
  
  
  MongoDB Sharding - Single Node Macbook - Quick Setup
&lt;/h1&gt;

&lt;p&gt;&lt;a href="https://media2.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2Fbddniua8fp71jzbcjd1q.png" class="article-body-image-wrapper"&gt;&lt;img src="https://media2.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2Fbddniua8fp71jzbcjd1q.png" alt="Image description" width="800" height="745"&gt;&lt;/a&gt;&lt;/p&gt;

&lt;p&gt;There are times when I need to quickly set up a shard, or even a MongoDB cluster with 2 or 3 shards. This guide makes it simple and configurable to do so with a single command. No Docker is involved—just plain old MongoDB, specifically version 8, which is the latest at the time of writing.&lt;/p&gt;

&lt;p&gt;Git repo here in case you want to skip reading this:&lt;br&gt;
&lt;a href="https://github.com/nareshmaharaj-consultant/mongodb-sharding-bash" rel="noopener noreferrer"&gt;https://github.com/nareshmaharaj-consultant/mongodb-sharding-bash&lt;/a&gt;&lt;/p&gt;
&lt;h2&gt;
  
  
  Overview
&lt;/h2&gt;

&lt;p&gt;Let's begin by installing all the necessary binaries.&lt;/p&gt;

&lt;ul&gt;
&lt;li&gt;
&lt;strong&gt;mongosh&lt;/strong&gt;: Download the &lt;a href="https://www.mongodb.com/try/download/shell" rel="noopener noreferrer"&gt;&lt;strong&gt;&lt;em&gt;MongoDB shell&lt;/em&gt;&lt;/strong&gt;&lt;/a&gt;, as we'll need this multiple times throughout the setup.&lt;/li&gt;
&lt;li&gt;
&lt;strong&gt;mongod&lt;/strong&gt;: Grab the latest &lt;a href="https://www.mongodb.com/try/download/enterprise" rel="noopener noreferrer"&gt;&lt;strong&gt;&lt;em&gt;MongoDB server&lt;/em&gt;&lt;/strong&gt;&lt;/a&gt;, required for each instance.&lt;/li&gt;
&lt;/ul&gt;

&lt;p&gt;After downloading, unzip the contents and move the &lt;code&gt;bin&lt;/code&gt; folder contents to your system’s environment paths.&lt;/p&gt;

&lt;p&gt;You should now be able to run the following commands to verify the versions of both.&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight plaintext"&gt;&lt;code&gt;mongosh --version
2.3.1

mongod --version
db version v8.0.0
Build Info: {
    "version": "8.0.0",
    "gitVersion": "d7cd03b239ac39a3c7d63f7145e91aca36f93db6",
    "modules": [
        "enterprise"
    ],
    "allocator": "system",
    "environment": {
        "distarch": "x86_64",
        "target_arch": "x86_64"
    }
}
mongos --version
mongos version v8.0.0
Build Info: {
    "version": "8.0.0",
    "gitVersion": "d7cd03b239ac39a3c7d63f7145e91aca36f93db6",
    "modules": [
        "enterprise"
    ],
    "allocator": "system",
    "environment": {
        "distarch": "x86_64",
        "target_arch": "x86_64"
    }
}
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;p&gt;Once everything is confirmed, we're good to go. We'll follow the sequence below:&lt;/p&gt;

&lt;ul&gt;
&lt;li&gt;
&lt;strong&gt;Replica Sets&lt;/strong&gt;: First, we'll create a simple replica set and then tear it down.&lt;/li&gt;
&lt;li&gt;
&lt;strong&gt;Shards&lt;/strong&gt;: Set up a sharded cluster with 2 shards.&lt;/li&gt;
&lt;li&gt;
&lt;strong&gt;Data&lt;/strong&gt;: Insert some sample data.&lt;/li&gt;
&lt;li&gt;
&lt;strong&gt;Collections&lt;/strong&gt;: Shard the collection.&lt;/li&gt;
&lt;li&gt;
&lt;strong&gt;Config Shard&lt;/strong&gt;: Convert the config server(s) into a data shard.&lt;/li&gt;
&lt;li&gt;
&lt;strong&gt;Wrap up&lt;/strong&gt;: What’s next?&lt;/li&gt;
&lt;/ul&gt;

&lt;h3&gt;
  
  
  Replica Set
&lt;/h3&gt;

&lt;p&gt;Start by creating a file named &lt;code&gt;mongo-replicaset-create&lt;/code&gt; to set up the replica set.&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight shell"&gt;&lt;code&gt;&lt;span class="nv"&gt;NAME&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;&lt;span class="nv"&gt;$1&lt;/span&gt;
&lt;span class="nv"&gt;PORT&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;&lt;span class="nv"&gt;$2&lt;/span&gt;
&lt;span class="nv"&gt;DIR&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;~/data/&lt;span class="nv"&gt;$NAME&lt;/span&gt;
&lt;span class="nv"&gt;NODES&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;&lt;span class="nv"&gt;$3&lt;/span&gt;
&lt;span class="nv"&gt;USAGE&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;&lt;span class="s2"&gt;"&lt;/span&gt;&lt;span class="se"&gt;\n&lt;/span&gt;&lt;span class="s2"&gt;Usage:  mongo-replicaset-create {name} {port} {numNodes} { Optional:[ --shardsvr, --configsvr ] }&lt;/span&gt;&lt;span class="se"&gt;\n&lt;/span&gt;&lt;span class="s2"&gt;Set env variable &lt;/span&gt;&lt;span class="se"&gt;\$&lt;/span&gt;&lt;span class="s2"&gt;HOST for all nodes to use this domain name."&lt;/span&gt;
&lt;span class="nv"&gt;SLEEPTIME&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;8
&lt;span class="nv"&gt;CONFIGSVR&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;&lt;span class="s2"&gt;""&lt;/span&gt;
&lt;span class="nv"&gt;SHARDSVR&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;&lt;span class="s2"&gt;""&lt;/span&gt;
&lt;span class="nv"&gt;GIVEN_HOSTNAME&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;localhost

&lt;span class="k"&gt;if&lt;/span&gt; &lt;span class="o"&gt;[&lt;/span&gt; &lt;span class="nt"&gt;-z&lt;/span&gt; &lt;span class="s2"&gt;"&lt;/span&gt;&lt;span class="nv"&gt;$HOST&lt;/span&gt;&lt;span class="s2"&gt;"&lt;/span&gt; &lt;span class="o"&gt;]&lt;/span&gt;
&lt;span class="k"&gt;then
      &lt;/span&gt;&lt;span class="nb"&gt;echo&lt;/span&gt; &lt;span class="s2"&gt;"&lt;/span&gt;&lt;span class="se"&gt;\$&lt;/span&gt;&lt;span class="s2"&gt;HOST not set using localhost"&lt;/span&gt;
&lt;span class="k"&gt;else
      &lt;/span&gt;&lt;span class="nv"&gt;GIVEN_HOSTNAME&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;&lt;span class="nv"&gt;$HOST&lt;/span&gt;
&lt;span class="k"&gt;fi

if&lt;/span&gt; &lt;span class="o"&gt;[&lt;/span&gt; &lt;span class="s2"&gt;"&lt;/span&gt;&lt;span class="nv"&gt;$1&lt;/span&gt;&lt;span class="s2"&gt;"&lt;/span&gt; &lt;span class="o"&gt;==&lt;/span&gt; &lt;span class="s2"&gt;"-h"&lt;/span&gt; &lt;span class="o"&gt;]&lt;/span&gt;&lt;span class="p"&gt;;&lt;/span&gt; &lt;span class="k"&gt;then
  &lt;/span&gt;&lt;span class="nb"&gt;echo&lt;/span&gt; &lt;span class="nt"&gt;-e&lt;/span&gt; &lt;span class="nv"&gt;$USAGE&lt;/span&gt;
  &lt;span class="nb"&gt;exit &lt;/span&gt;0
&lt;span class="k"&gt;fi

if&lt;/span&gt; &lt;span class="o"&gt;[&lt;/span&gt; &lt;span class="nt"&gt;-z&lt;/span&gt; &lt;span class="s2"&gt;"&lt;/span&gt;&lt;span class="nv"&gt;$1&lt;/span&gt;&lt;span class="s2"&gt;"&lt;/span&gt; &lt;span class="o"&gt;]&lt;/span&gt;
  &lt;span class="k"&gt;then
    &lt;/span&gt;&lt;span class="nb"&gt;echo&lt;/span&gt; &lt;span class="nt"&gt;-e&lt;/span&gt; &lt;span class="s2"&gt;"No NAME argument supplied &lt;/span&gt;&lt;span class="nv"&gt;$USAGE&lt;/span&gt;&lt;span class="s2"&gt;"&lt;/span&gt;
    &lt;span class="nb"&gt;exit &lt;/span&gt;0
&lt;span class="k"&gt;fi

if&lt;/span&gt; &lt;span class="o"&gt;[&lt;/span&gt; &lt;span class="nt"&gt;-z&lt;/span&gt; &lt;span class="s2"&gt;"&lt;/span&gt;&lt;span class="nv"&gt;$2&lt;/span&gt;&lt;span class="s2"&gt;"&lt;/span&gt; &lt;span class="o"&gt;]&lt;/span&gt;
  &lt;span class="k"&gt;then
    &lt;/span&gt;&lt;span class="nb"&gt;echo&lt;/span&gt; &lt;span class="nt"&gt;-e&lt;/span&gt; &lt;span class="s2"&gt;"No PORT argument supplied &lt;/span&gt;&lt;span class="nv"&gt;$USAGE&lt;/span&gt;&lt;span class="s2"&gt;"&lt;/span&gt;
    &lt;span class="nb"&gt;exit &lt;/span&gt;0
&lt;span class="k"&gt;fi

if&lt;/span&gt; &lt;span class="o"&gt;[&lt;/span&gt; &lt;span class="nt"&gt;-z&lt;/span&gt; &lt;span class="s2"&gt;"&lt;/span&gt;&lt;span class="nv"&gt;$3&lt;/span&gt;&lt;span class="s2"&gt;"&lt;/span&gt; &lt;span class="o"&gt;]&lt;/span&gt;
  &lt;span class="k"&gt;then
    &lt;/span&gt;&lt;span class="nb"&gt;echo&lt;/span&gt; &lt;span class="nt"&gt;-e&lt;/span&gt; &lt;span class="s2"&gt;"No NODES argument supplied &lt;/span&gt;&lt;span class="nv"&gt;$USAGE&lt;/span&gt;&lt;span class="s2"&gt;"&lt;/span&gt;
    &lt;span class="nb"&gt;exit &lt;/span&gt;0
&lt;span class="k"&gt;fi&lt;/span&gt;

&lt;span class="c"&gt;# idiomatic parameter and option handling in sh&lt;/span&gt;
&lt;span class="k"&gt;while &lt;/span&gt;&lt;span class="nb"&gt;test&lt;/span&gt; &lt;span class="nv"&gt;$# &lt;/span&gt;&lt;span class="nt"&gt;-gt&lt;/span&gt; 0
&lt;span class="k"&gt;do
    case&lt;/span&gt; &lt;span class="s2"&gt;"&lt;/span&gt;&lt;span class="nv"&gt;$4&lt;/span&gt;&lt;span class="s2"&gt;"&lt;/span&gt; &lt;span class="k"&gt;in&lt;/span&gt;
        &lt;span class="nt"&gt;--shardsvr&lt;/span&gt;&lt;span class="p"&gt;)&lt;/span&gt; &lt;span class="nb"&gt;echo&lt;/span&gt; &lt;span class="s2"&gt;"Using option --shardsvr"&lt;/span&gt;
        &lt;span class="nv"&gt;SHARDSVR&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;&lt;span class="s2"&gt;"--shardsvr"&lt;/span&gt;
            &lt;span class="p"&gt;;;&lt;/span&gt;
        &lt;span class="nt"&gt;--configsvr&lt;/span&gt;&lt;span class="p"&gt;)&lt;/span&gt; &lt;span class="nb"&gt;echo&lt;/span&gt; &lt;span class="s2"&gt;"Using option --configsvr"&lt;/span&gt;
        &lt;span class="nv"&gt;CONFIGSVR&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;&lt;span class="s2"&gt;"--configsvr"&lt;/span&gt;
            &lt;span class="p"&gt;;;&lt;/span&gt;
        &lt;span class="nt"&gt;--&lt;/span&gt;&lt;span class="k"&gt;*&lt;/span&gt;&lt;span class="p"&gt;)&lt;/span&gt; &lt;span class="nb"&gt;echo&lt;/span&gt; &lt;span class="s2"&gt;"bad option &lt;/span&gt;&lt;span class="nv"&gt;$1&lt;/span&gt;&lt;span class="s2"&gt;"&lt;/span&gt;
            &lt;span class="p"&gt;;;&lt;/span&gt;
    &lt;span class="k"&gt;esac&lt;/span&gt;
    &lt;span class="nb"&gt;shift
&lt;/span&gt;&lt;span class="k"&gt;done

&lt;/span&gt;&lt;span class="nb"&gt;rm&lt;/span&gt; &lt;span class="nt"&gt;-fr&lt;/span&gt; &lt;span class="nv"&gt;$DIR&lt;/span&gt;
&lt;span class="nb"&gt;mkdir&lt;/span&gt; &lt;span class="nt"&gt;-p&lt;/span&gt; &lt;span class="nv"&gt;$DIR&lt;/span&gt;

&lt;span class="nv"&gt;COUNTER&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;0
  &lt;span class="k"&gt;until&lt;/span&gt; &lt;span class="o"&gt;[&lt;/span&gt;  &lt;span class="nv"&gt;$COUNTER&lt;/span&gt; &lt;span class="nt"&gt;-ge&lt;/span&gt; &lt;span class="nv"&gt;$NODES&lt;/span&gt; &lt;span class="o"&gt;]&lt;/span&gt;&lt;span class="p"&gt;;&lt;/span&gt; &lt;span class="k"&gt;do
    &lt;/span&gt;&lt;span class="nv"&gt;LOC&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;&lt;span class="nv"&gt;$DIR&lt;/span&gt;/&lt;span class="nv"&gt;$COUNTER&lt;/span&gt;
    &lt;span class="nb"&gt;mkdir&lt;/span&gt; &lt;span class="nt"&gt;-p&lt;/span&gt; &lt;span class="nv"&gt;$LOC&lt;/span&gt;
    &lt;span class="nv"&gt;PC&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;&lt;span class="k"&gt;$((&lt;/span&gt;&lt;span class="nv"&gt;$PORT&lt;/span&gt;&lt;span class="o"&gt;+&lt;/span&gt;&lt;span class="nv"&gt;$COUNTER&lt;/span&gt;&lt;span class="k"&gt;))&lt;/span&gt;
    &lt;span class="nb"&gt;echo&lt;/span&gt; &lt;span class="s2"&gt;"[ ++ Executing:  mongod --port &lt;/span&gt;&lt;span class="nv"&gt;$PC&lt;/span&gt;&lt;span class="s2"&gt; --fork --dbpath &lt;/span&gt;&lt;span class="nv"&gt;$LOC&lt;/span&gt;&lt;span class="s2"&gt; --logpath &lt;/span&gt;&lt;span class="nv"&gt;$LOC&lt;/span&gt;&lt;span class="s2"&gt;/log.out -replSet &lt;/span&gt;&lt;span class="nv"&gt;$NAME&lt;/span&gt;&lt;span class="s2"&gt; &lt;/span&gt;&lt;span class="nv"&gt;$CONFIGSVR&lt;/span&gt;&lt;span class="s2"&gt; &lt;/span&gt;&lt;span class="nv"&gt;$SHARDSVR&lt;/span&gt;&lt;span class="s2"&gt; ]"&lt;/span&gt;
    mongod &lt;span class="nt"&gt;--port&lt;/span&gt; &lt;span class="nv"&gt;$PC&lt;/span&gt; &lt;span class="nt"&gt;--fork&lt;/span&gt; &lt;span class="nt"&gt;--bind_ip&lt;/span&gt; localhost,&lt;span class="nv"&gt;$GIVEN_HOSTNAME&lt;/span&gt; &lt;span class="nt"&gt;--dbpath&lt;/span&gt; &lt;span class="nv"&gt;$LOC&lt;/span&gt; &lt;span class="nt"&gt;--logpath&lt;/span&gt; &lt;span class="nv"&gt;$LOC&lt;/span&gt;/log.out &lt;span class="nt"&gt;--replSet&lt;/span&gt; &lt;span class="nv"&gt;$NAME&lt;/span&gt; &lt;span class="nv"&gt;$CONFIGSVR&lt;/span&gt; &lt;span class="nv"&gt;$SHARDSVR&lt;/span&gt;
    &lt;span class="nb"&gt;let &lt;/span&gt;COUNTER+&lt;span class="o"&gt;=&lt;/span&gt;1
  &lt;span class="k"&gt;done

&lt;/span&gt;&lt;span class="nb"&gt;echo&lt;/span&gt; &lt;span class="s2"&gt;"Sleeping for &lt;/span&gt;&lt;span class="nv"&gt;$SLEEPTIME&lt;/span&gt;&lt;span class="s2"&gt; seconds"&lt;/span&gt;
&lt;span class="nb"&gt;sleep&lt;/span&gt; &lt;span class="nv"&gt;$SLEEPTIME&lt;/span&gt;

&lt;span class="nb"&gt;echo&lt;/span&gt; &lt;span class="s2"&gt;"[ ++ Executing:  mongosh --port &lt;/span&gt;&lt;span class="nv"&gt;$PORT&lt;/span&gt;&lt;span class="s2"&gt; -eval &lt;/span&gt;&lt;span class="se"&gt;\"&lt;/span&gt;&lt;span class="s2"&gt;rs.initiate()&lt;/span&gt;&lt;span class="se"&gt;\"&lt;/span&gt;&lt;span class="s2"&gt; ]"&lt;/span&gt;
mongosh &lt;span class="nt"&gt;--port&lt;/span&gt; &lt;span class="nv"&gt;$PORT&lt;/span&gt; &lt;span class="nt"&gt;-eval&lt;/span&gt; &lt;span class="s2"&gt;"rs.initiate()"&lt;/span&gt;
&lt;span class="nb"&gt;sleep&lt;/span&gt; &lt;span class="nv"&gt;$SLEEPTIME&lt;/span&gt;

&lt;span class="nv"&gt;COUNTER&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;1
  &lt;span class="k"&gt;until&lt;/span&gt; &lt;span class="o"&gt;[&lt;/span&gt;  &lt;span class="nv"&gt;$COUNTER&lt;/span&gt; &lt;span class="nt"&gt;-ge&lt;/span&gt; &lt;span class="nv"&gt;$NODES&lt;/span&gt; &lt;span class="o"&gt;]&lt;/span&gt;&lt;span class="p"&gt;;&lt;/span&gt; &lt;span class="k"&gt;do
    &lt;/span&gt;&lt;span class="nv"&gt;PC&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;&lt;span class="k"&gt;$((&lt;/span&gt;&lt;span class="nv"&gt;$PORT&lt;/span&gt;&lt;span class="o"&gt;+&lt;/span&gt;&lt;span class="nv"&gt;$COUNTER&lt;/span&gt;&lt;span class="k"&gt;))&lt;/span&gt;
    &lt;span class="k"&gt;if&lt;/span&gt; &lt;span class="o"&gt;[&lt;/span&gt; &lt;span class="nv"&gt;$COUNTER&lt;/span&gt; &lt;span class="nt"&gt;-gt&lt;/span&gt; 6 &lt;span class="o"&gt;]&lt;/span&gt;&lt;span class="p"&gt;;&lt;/span&gt; &lt;span class="k"&gt;then
    &lt;/span&gt;&lt;span class="nb"&gt;echo&lt;/span&gt; &lt;span class="s2"&gt;"[ ++ Executing: mongosh --port &lt;/span&gt;&lt;span class="nv"&gt;$PORT&lt;/span&gt;&lt;span class="s2"&gt; -eval &lt;/span&gt;&lt;span class="se"&gt;\"&lt;/span&gt;&lt;span class="s2"&gt;rs.add({host: '&lt;/span&gt;&lt;span class="nv"&gt;$GIVEN_HOSTNAME&lt;/span&gt;&lt;span class="s2"&gt;:&lt;/span&gt;&lt;span class="nv"&gt;$PC&lt;/span&gt;&lt;span class="s2"&gt;', priority:0, votes:0})&lt;/span&gt;&lt;span class="se"&gt;\"&lt;/span&gt;&lt;span class="s2"&gt; ]"&lt;/span&gt;
    mongosh &lt;span class="nt"&gt;--port&lt;/span&gt; &lt;span class="nv"&gt;$PORT&lt;/span&gt; &lt;span class="nt"&gt;-eval&lt;/span&gt; &lt;span class="s2"&gt;"rs.add({host: '&lt;/span&gt;&lt;span class="nv"&gt;$GIVEN_HOSTNAME&lt;/span&gt;&lt;span class="s2"&gt;:&lt;/span&gt;&lt;span class="nv"&gt;$PC&lt;/span&gt;&lt;span class="s2"&gt;', priority:0, votes:0})"&lt;/span&gt;
    &lt;span class="k"&gt;else
        &lt;/span&gt;&lt;span class="nb"&gt;echo&lt;/span&gt; &lt;span class="s2"&gt;"[ ++ Executing:  mongosh --port &lt;/span&gt;&lt;span class="nv"&gt;$PORT&lt;/span&gt;&lt;span class="s2"&gt; -eval &lt;/span&gt;&lt;span class="se"&gt;\"&lt;/span&gt;&lt;span class="s2"&gt;rs.add('&lt;/span&gt;&lt;span class="nv"&gt;$GIVEN_HOSTNAME&lt;/span&gt;&lt;span class="s2"&gt;:&lt;/span&gt;&lt;span class="nv"&gt;$PC&lt;/span&gt;&lt;span class="s2"&gt;')&lt;/span&gt;&lt;span class="se"&gt;\"&lt;/span&gt;&lt;span class="s2"&gt; ]"&lt;/span&gt;
        mongosh &lt;span class="nt"&gt;--port&lt;/span&gt; &lt;span class="nv"&gt;$PORT&lt;/span&gt; &lt;span class="nt"&gt;-eval&lt;/span&gt; &lt;span class="s2"&gt;"rs.add('&lt;/span&gt;&lt;span class="nv"&gt;$GIVEN_HOSTNAME&lt;/span&gt;&lt;span class="s2"&gt;:&lt;/span&gt;&lt;span class="nv"&gt;$PC&lt;/span&gt;&lt;span class="s2"&gt;')"&lt;/span&gt;
    &lt;span class="k"&gt;fi
    &lt;/span&gt;&lt;span class="nb"&gt;let &lt;/span&gt;COUNTER+&lt;span class="o"&gt;=&lt;/span&gt;1
  &lt;span class="k"&gt;done

&lt;/span&gt;&lt;span class="nb"&gt;sleep&lt;/span&gt; &lt;span class="nv"&gt;$SLEEPTIME&lt;/span&gt;
mongosh &lt;span class="nt"&gt;--port&lt;/span&gt; &lt;span class="nv"&gt;$PORT&lt;/span&gt; &lt;span class="nt"&gt;-eval&lt;/span&gt; &lt;span class="s2"&gt;"rs.status()"&lt;/span&gt;
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;p&gt;Create the replica set and capture the stdout if you want to inspect the output later. It makes sense to set the &lt;code&gt;HOST&lt;/code&gt; variable so that it can be used as a Fully Qualified Domain Name (FQDN) during the creation process.&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight shell"&gt;&lt;code&gt;&lt;span class="c"&gt;# Set the host&lt;/span&gt;
&lt;span class="nb"&gt;export &lt;/span&gt;&lt;span class="nv"&gt;HOST&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;&lt;span class="sb"&gt;`&lt;/span&gt;&lt;span class="nb"&gt;hostname&lt;/span&gt;&lt;span class="sb"&gt;`&lt;/span&gt;

&lt;span class="c"&gt;# Check we set it correctly&lt;/span&gt;
&lt;span class="nb"&gt;env&lt;/span&gt; | &lt;span class="nb"&gt;grep &lt;/span&gt;HOST
&lt;span class="nv"&gt;HOST&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;nmaharaj-MBP-MMD6T.local
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;p&gt;Run the script&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight shell"&gt;&lt;code&gt;mongo-replicaset-create myrs 27017 3
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;p&gt;Here is the result and output&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight plaintext"&gt;&lt;code&gt;[ ++ Executing:  mongod --port 27017 --fork --dbpath /Users/nareshmaharaj/data/myrs/0 --logpath /Users/nareshmaharaj/data/myrs/0/log.out -replSet myrs   ]
about to fork child process, waiting until server is ready for connections.
forked process: 43633
child process started successfully, parent exiting
[ ++ Executing:  mongod --port 27018 --fork --dbpath /Users/nareshmaharaj/data/myrs/1 --logpath /Users/nareshmaharaj/data/myrs/1/log.out -replSet myrs   ]
about to fork child process, waiting until server is ready for connections.
forked process: 43664
child process started successfully, parent exiting
[ ++ Executing:  mongod --port 27019 --fork --dbpath /Users/nareshmaharaj/data/myrs/2 --logpath /Users/nareshmaharaj/data/myrs/2/log.out -replSet myrs   ]
about to fork child process, waiting until server is ready for connections.
forked process: 43689
child process started successfully, parent exiting
Sleeping for 8 seconds
[ ++ Executing:  mongosh --port 27017 -eval "rs.initiate()" ]
{
  info2: 'no configuration specified. Using a default configuration for the set',
  me: 'nmaharaj-MBP-MMD6T.local:27017',
  ok: 1,
  '$clusterTime': {
    clusterTime: Timestamp({ t: 1728146621, i: 1 }),
    signature: {
      hash: Binary.createFromBase64('AAAAAAAAAAAAAAAAAAAAAAAAAAA=', 0),
      keyId: Long('0')
    }
  },
  operationTime: Timestamp({ t: 1728146621, i: 1 })
}
[ ++ Executing:  mongosh --port 27017 -eval "rs.add('nmaharaj-MBP-MMD6T.local:27018')" ]
{
  ok: 1,
  '$clusterTime': {
    clusterTime: Timestamp({ t: 1728146631, i: 1 }),
    signature: {
      hash: Binary.createFromBase64('AAAAAAAAAAAAAAAAAAAAAAAAAAA=', 0),
      keyId: Long('0')
    }
  },
  operationTime: Timestamp({ t: 1728146631, i: 1 })
}
[ ++ Executing:  mongosh --port 27017 -eval "rs.add('nmaharaj-MBP-MMD6T.local:27019')" ]
{
  ok: 1,
  '$clusterTime': {
    clusterTime: Timestamp({ t: 1728146633, i: 1 }),
    signature: {
      hash: Binary.createFromBase64('AAAAAAAAAAAAAAAAAAAAAAAAAAA=', 0),
      keyId: Long('0')
    }
  },
  operationTime: Timestamp({ t: 1728146633, i: 1 })
}
{
  set: 'myrs',
  date: ISODate('2024-10-05T16:44:02.989Z'),
  myState: 1,
  term: Long('1'),
  syncSourceHost: '',
  syncSourceId: -1,
  heartbeatIntervalMillis: Long('2000'),
  majorityVoteCount: 2,
  writeMajorityCount: 2,
  votingMembersCount: 3,
  writableVotingMembersCount: 3,
  optimes: {
    lastCommittedOpTime: { ts: Timestamp({ t: 1728146641, i: 1 }), t: Long('1') },
    lastCommittedWallTime: ISODate('2024-10-05T16:44:01.930Z'),
    readConcernMajorityOpTime: { ts: Timestamp({ t: 1728146641, i: 1 }), t: Long('1') },
    appliedOpTime: { ts: Timestamp({ t: 1728146641, i: 1 }), t: Long('1') },
    durableOpTime: { ts: Timestamp({ t: 1728146641, i: 1 }), t: Long('1') },
    writtenOpTime: { ts: Timestamp({ t: 1728146641, i: 1 }), t: Long('1') },
    lastAppliedWallTime: ISODate('2024-10-05T16:44:01.930Z'),
    lastDurableWallTime: ISODate('2024-10-05T16:44:01.930Z'),
    lastWrittenWallTime: ISODate('2024-10-05T16:44:01.930Z')
  },
  lastStableRecoveryTimestamp: Timestamp({ t: 1728146621, i: 1 }),
  electionCandidateMetrics: {
    lastElectionReason: 'electionTimeout',
    lastElectionDate: ISODate('2024-10-05T16:43:42.053Z'),
    electionTerm: Long('1'),
    lastCommittedOpTimeAtElection: { ts: Timestamp({ t: 1728146621, i: 1 }), t: Long('-1') },
    lastSeenWrittenOpTimeAtElection: { ts: Timestamp({ t: 1728146621, i: 1 }), t: Long('-1') },
    lastSeenOpTimeAtElection: { ts: Timestamp({ t: 1728146621, i: 1 }), t: Long('-1') },
    numVotesNeeded: 1,
    priorityAtElection: 1,
    electionTimeoutMillis: Long('10000'),
    newTermStartDate: ISODate('2024-10-05T16:43:42.465Z'),
    wMajorityWriteAvailabilityDate: ISODate('2024-10-05T16:43:42.735Z')
  },
  members: [
    {
      _id: 0,
      name: 'nmaharaj-MBP-MMD6T.local:27017',
      health: 1,
      state: 1,
      stateStr: 'PRIMARY',
      uptime: 40,
      optime: { ts: Timestamp({ t: 1728146641, i: 1 }), t: Long('1') },
      optimeDate: ISODate('2024-10-05T16:44:01.000Z'),
      optimeWritten: { ts: Timestamp({ t: 1728146641, i: 1 }), t: Long('1') },
      optimeWrittenDate: ISODate('2024-10-05T16:44:01.000Z'),
      lastAppliedWallTime: ISODate('2024-10-05T16:44:01.930Z'),
      lastDurableWallTime: ISODate('2024-10-05T16:44:01.930Z'),
      lastWrittenWallTime: ISODate('2024-10-05T16:44:01.930Z'),
      syncSourceHost: '',
      syncSourceId: -1,
      infoMessage: 'Could not find member to sync from',
      electionTime: Timestamp({ t: 1728146622, i: 1 }),
      electionDate: ISODate('2024-10-05T16:43:42.000Z'),
      configVersion: 5,
      configTerm: 1,
      self: true,
      lastHeartbeatMessage: ''
    },
    {
      _id: 1,
      name: 'nmaharaj-MBP-MMD6T.local:27018',
      health: 1,
      state: 2,
      stateStr: 'SECONDARY',
      uptime: 11,
      optime: { ts: Timestamp({ t: 1728146641, i: 1 }), t: Long('1') },
      optimeDurable: { ts: Timestamp({ t: 1728146641, i: 1 }), t: Long('1') },
      optimeWritten: { ts: Timestamp({ t: 1728146641, i: 1 }), t: Long('1') },
      optimeDate: ISODate('2024-10-05T16:44:01.000Z'),
      optimeDurableDate: ISODate('2024-10-05T16:44:01.000Z'),
      optimeWrittenDate: ISODate('2024-10-05T16:44:01.000Z'),
      lastAppliedWallTime: ISODate('2024-10-05T16:44:01.930Z'),
      lastDurableWallTime: ISODate('2024-10-05T16:44:01.930Z'),
      lastWrittenWallTime: ISODate('2024-10-05T16:44:01.930Z'),
      lastHeartbeat: ISODate('2024-10-05T16:44:02.075Z'),
      lastHeartbeatRecv: ISODate('2024-10-05T16:44:02.075Z'),
      pingMs: Long('9'),
      lastHeartbeatMessage: '',
      syncSourceHost: 'nmaharaj-MBP-MMD6T.local:27017',
      syncSourceId: 0,
      infoMessage: '',
      configVersion: 5,
      configTerm: 1
    },
    {
      _id: 2,
      name: 'nmaharaj-MBP-MMD6T.local:27019',
      health: 1,
      state: 2,
      stateStr: 'SECONDARY',
      uptime: 9,
      optime: { ts: Timestamp({ t: 1728146637, i: 1 }), t: Long('1') },
      optimeDurable: { ts: Timestamp({ t: 1728146637, i: 1 }), t: Long('1') },
      optimeWritten: { ts: Timestamp({ t: 1728146637, i: 1 }), t: Long('1') },
      optimeDate: ISODate('2024-10-05T16:43:57.000Z'),
      optimeDurableDate: ISODate('2024-10-05T16:43:57.000Z'),
      optimeWrittenDate: ISODate('2024-10-05T16:43:57.000Z'),
      lastAppliedWallTime: ISODate('2024-10-05T16:44:01.930Z'),
      lastDurableWallTime: ISODate('2024-10-05T16:44:01.930Z'),
      lastWrittenWallTime: ISODate('2024-10-05T16:44:01.930Z'),
      lastHeartbeat: ISODate('2024-10-05T16:44:02.078Z'),
      lastHeartbeatRecv: ISODate('2024-10-05T16:44:02.579Z'),
      pingMs: Long('3'),
      lastHeartbeatMessage: '',
      syncSourceHost: '',
      syncSourceId: -1,
      infoMessage: '',
      configVersion: 5,
      configTerm: 1
    }
  ],
  ok: 1,
  '$clusterTime': {
    clusterTime: Timestamp({ t: 1728146641, i: 1 }),
    signature: {
      hash: Binary.createFromBase64('AAAAAAAAAAAAAAAAAAAAAAAAAAA=', 0),
      keyId: Long('0')
    }
  },
  operationTime: Timestamp({ t: 1728146641, i: 1 })
}
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;p&gt;All the commands that were run are shown with the prefix: ++ Executing: ...&lt;/p&gt;

&lt;p&gt;Next, connect to one of the replica set members with mongosh.&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight plaintext"&gt;&lt;code&gt;mongosh nmaharaj-MBP-MMD6T.local:27019
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;p&gt;Now, tear everything down by running the following command. Make sure to wait for all processes to be completely removed.&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight shell"&gt;&lt;code&gt;&lt;span class="nb"&gt;kill&lt;/span&gt; &lt;span class="sb"&gt;`&lt;/span&gt;ps &lt;span class="nt"&gt;-elf&lt;/span&gt; | &lt;span class="nb"&gt;grep &lt;/span&gt;mongo | &lt;span class="nb"&gt;awk&lt;/span&gt; &lt;span class="s1"&gt;'{print $2}'&lt;/span&gt; | &lt;span class="nb"&gt;tr&lt;/span&gt; &lt;span class="s1"&gt;'\n'&lt;/span&gt; &lt;span class="s1"&gt;' '&lt;/span&gt; | &lt;span class="nb"&gt;awk&lt;/span&gt; &lt;span class="s1"&gt;'{$1=$1};1'&lt;/span&gt;&lt;span class="sb"&gt;`&lt;/span&gt; &lt;span class="o"&gt;&amp;gt;&lt;/span&gt; /dev/null 2&amp;gt;&amp;amp;1
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;h3&gt;
  
  
  Sharded Cluster
&lt;/h3&gt;

&lt;p&gt;To create a sharded cluster with a config server replicaset and a mongos router, create a file named &lt;code&gt;mongo-shard-create&lt;/code&gt; with the following contents.&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight shell"&gt;&lt;code&gt;&lt;span class="nv"&gt;PORT&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;&lt;span class="nv"&gt;$1&lt;/span&gt;
&lt;span class="nv"&gt;SHARDS&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;&lt;span class="nv"&gt;$2&lt;/span&gt;
&lt;span class="nv"&gt;LISTNAMES&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;&lt;span class="nv"&gt;$3&lt;/span&gt;
&lt;span class="nv"&gt;DIR&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;~/data/mongos/
&lt;span class="nv"&gt;USAGE&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;&lt;span class="s2"&gt;"Usage:  mongo-shard-create {port} {numShards} { shardName1,shardName2,..}"&lt;/span&gt;
&lt;span class="nv"&gt;GIVEN_HOSTNAME&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;localhost
&lt;span class="nv"&gt;SLEEPTIME&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;1

&lt;span class="k"&gt;if&lt;/span&gt; &lt;span class="o"&gt;[&lt;/span&gt; &lt;span class="nt"&gt;-z&lt;/span&gt; &lt;span class="s2"&gt;"&lt;/span&gt;&lt;span class="nv"&gt;$HOST&lt;/span&gt;&lt;span class="s2"&gt;"&lt;/span&gt; &lt;span class="o"&gt;]&lt;/span&gt;
&lt;span class="k"&gt;then
      &lt;/span&gt;&lt;span class="nb"&gt;echo&lt;/span&gt; &lt;span class="s2"&gt;"&lt;/span&gt;&lt;span class="se"&gt;\$&lt;/span&gt;&lt;span class="s2"&gt;HOST is empty but is optional - will use localhost"&lt;/span&gt;
&lt;span class="k"&gt;else
      &lt;/span&gt;&lt;span class="nv"&gt;GIVEN_HOSTNAME&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;&lt;span class="nv"&gt;$HOST&lt;/span&gt;
&lt;span class="k"&gt;fi


if&lt;/span&gt; &lt;span class="o"&gt;[&lt;/span&gt; &lt;span class="nt"&gt;-z&lt;/span&gt; &lt;span class="s2"&gt;"&lt;/span&gt;&lt;span class="nv"&gt;$1&lt;/span&gt;&lt;span class="s2"&gt;"&lt;/span&gt; &lt;span class="o"&gt;]&lt;/span&gt;
  &lt;span class="k"&gt;then
    &lt;/span&gt;&lt;span class="nb"&gt;echo&lt;/span&gt; &lt;span class="s2"&gt;"No PORT argument supplied - &lt;/span&gt;&lt;span class="nv"&gt;$USAGE&lt;/span&gt;&lt;span class="s2"&gt;"&lt;/span&gt;
    &lt;span class="nb"&gt;exit &lt;/span&gt;0
&lt;span class="k"&gt;fi

if&lt;/span&gt; &lt;span class="o"&gt;[&lt;/span&gt; &lt;span class="nt"&gt;-z&lt;/span&gt; &lt;span class="s2"&gt;"&lt;/span&gt;&lt;span class="nv"&gt;$2&lt;/span&gt;&lt;span class="s2"&gt;"&lt;/span&gt; &lt;span class="o"&gt;]&lt;/span&gt;
  &lt;span class="k"&gt;then
    &lt;/span&gt;&lt;span class="nb"&gt;echo&lt;/span&gt; &lt;span class="s2"&gt;"Number of Shards required - &lt;/span&gt;&lt;span class="nv"&gt;$USAGE&lt;/span&gt;&lt;span class="s2"&gt;"&lt;/span&gt;
    &lt;span class="nb"&gt;exit &lt;/span&gt;0
&lt;span class="k"&gt;fi

if&lt;/span&gt; &lt;span class="o"&gt;[&lt;/span&gt; &lt;span class="nt"&gt;-z&lt;/span&gt; &lt;span class="s2"&gt;"&lt;/span&gt;&lt;span class="nv"&gt;$3&lt;/span&gt;&lt;span class="s2"&gt;"&lt;/span&gt; &lt;span class="o"&gt;]&lt;/span&gt;
  &lt;span class="k"&gt;then
    &lt;/span&gt;&lt;span class="nb"&gt;echo&lt;/span&gt; &lt;span class="s2"&gt;"Name of each shard csv required - &lt;/span&gt;&lt;span class="nv"&gt;$USAGE&lt;/span&gt;&lt;span class="s2"&gt;"&lt;/span&gt;
    &lt;span class="nb"&gt;exit &lt;/span&gt;0
&lt;span class="k"&gt;fi&lt;/span&gt;

&lt;span class="c"&gt;# Split the names for each RS&lt;/span&gt;
&lt;span class="nv"&gt;rs_name_arr&lt;/span&gt;&lt;span class="o"&gt;=(&lt;/span&gt;&lt;span class="si"&gt;$(&lt;/span&gt;&lt;span class="nb"&gt;echo&lt;/span&gt; &lt;span class="s2"&gt;"&lt;/span&gt;&lt;span class="nv"&gt;$3&lt;/span&gt;&lt;span class="s2"&gt;"&lt;/span&gt; | &lt;span class="nb"&gt;sed&lt;/span&gt; &lt;span class="s2"&gt;"s/ //g"&lt;/span&gt; | &lt;span class="nb"&gt;tr&lt;/span&gt; &lt;span class="s1"&gt;','&lt;/span&gt; &lt;span class="s1"&gt;'\n'&lt;/span&gt;&lt;span class="si"&gt;)&lt;/span&gt;&lt;span class="o"&gt;)&lt;/span&gt;
&lt;span class="nv"&gt;count&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;&lt;span class="k"&gt;${#&lt;/span&gt;&lt;span class="nv"&gt;rs_name_arr&lt;/span&gt;&lt;span class="p"&gt;[@]&lt;/span&gt;&lt;span class="k"&gt;}&lt;/span&gt;
&lt;span class="k"&gt;if&lt;/span&gt; &lt;span class="o"&gt;[&lt;/span&gt; &lt;span class="nv"&gt;$count&lt;/span&gt; &lt;span class="nt"&gt;-lt&lt;/span&gt; &lt;span class="nv"&gt;$SHARDS&lt;/span&gt; &lt;span class="o"&gt;]&lt;/span&gt;
  &lt;span class="k"&gt;then
    &lt;/span&gt;&lt;span class="nb"&gt;echo&lt;/span&gt; &lt;span class="s2"&gt;"Shard names(s) count does not match number of Shards required - &lt;/span&gt;&lt;span class="nv"&gt;$USAGE&lt;/span&gt;&lt;span class="s2"&gt;"&lt;/span&gt;
    &lt;span class="nb"&gt;exit &lt;/span&gt;0
&lt;span class="k"&gt;fi

&lt;/span&gt;&lt;span class="nb"&gt;echo&lt;/span&gt; &lt;span class="s2"&gt;"##########################################################################################################"&lt;/span&gt;
&lt;span class="nb"&gt;echo&lt;/span&gt; &lt;span class="s2"&gt;"######################################### Creating Replica Sets ##########################################"&lt;/span&gt;
&lt;span class="nb"&gt;echo&lt;/span&gt; &lt;span class="s2"&gt;"##########################################################################################################"&lt;/span&gt;
&lt;span class="nv"&gt;COUNTER&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;1
  &lt;span class="k"&gt;until&lt;/span&gt; &lt;span class="o"&gt;[&lt;/span&gt;  &lt;span class="nv"&gt;$COUNTER&lt;/span&gt; &lt;span class="nt"&gt;-gt&lt;/span&gt; &lt;span class="nv"&gt;$SHARDS&lt;/span&gt; &lt;span class="o"&gt;]&lt;/span&gt;&lt;span class="p"&gt;;&lt;/span&gt; &lt;span class="k"&gt;do
  &lt;/span&gt;&lt;span class="nv"&gt;name&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;&lt;span class="k"&gt;${&lt;/span&gt;&lt;span class="nv"&gt;rs_name_arr&lt;/span&gt;&lt;span class="p"&gt;[&lt;/span&gt;&lt;span class="nv"&gt;$COUNTER&lt;/span&gt;&lt;span class="p"&gt;-1]&lt;/span&gt;&lt;span class="k"&gt;}&lt;/span&gt;
    &lt;span class="nv"&gt;PC&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;&lt;span class="k"&gt;$((&lt;/span&gt;&lt;span class="nv"&gt;$PORT&lt;/span&gt;&lt;span class="o"&gt;+&lt;/span&gt;&lt;span class="m"&gt;1000&lt;/span&gt;&lt;span class="o"&gt;*&lt;/span&gt;&lt;span class="nv"&gt;$COUNTER&lt;/span&gt;&lt;span class="k"&gt;))&lt;/span&gt;
    &lt;span class="c"&gt;#PC=$(($PORT+17+1000*$COUNTER))&lt;/span&gt;
    mongo-replicaset-create &lt;span class="nv"&gt;$name&lt;/span&gt; &lt;span class="nv"&gt;$PC&lt;/span&gt; 3 &lt;span class="nt"&gt;--shardsvr&lt;/span&gt;
    &lt;span class="nb"&gt;let &lt;/span&gt;COUNTER+&lt;span class="o"&gt;=&lt;/span&gt;1
  &lt;span class="k"&gt;done

&lt;/span&gt;&lt;span class="nb"&gt;echo&lt;/span&gt; &lt;span class="s2"&gt;"##########################################################################################################"&lt;/span&gt;
&lt;span class="nb"&gt;echo&lt;/span&gt; &lt;span class="s2"&gt;"######################################### Creating Config Sets ###########################################"&lt;/span&gt;
&lt;span class="nb"&gt;echo&lt;/span&gt; &lt;span class="s2"&gt;"##########################################################################################################"&lt;/span&gt;
&lt;span class="c"&gt;# Config Servers&lt;/span&gt;
&lt;span class="c"&gt;#PC=$(($PORT+17+$SHARDS*1000+1000))&lt;/span&gt;
&lt;span class="nv"&gt;PC&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;&lt;span class="k"&gt;$((&lt;/span&gt;&lt;span class="nv"&gt;$PORT&lt;/span&gt;&lt;span class="o"&gt;+&lt;/span&gt;&lt;span class="nv"&gt;$SHARDS&lt;/span&gt;&lt;span class="o"&gt;*&lt;/span&gt;&lt;span class="m"&gt;1000&lt;/span&gt;&lt;span class="o"&gt;+&lt;/span&gt;&lt;span class="m"&gt;1000&lt;/span&gt;&lt;span class="k"&gt;))&lt;/span&gt;
mongo-replicaset-create csrs &lt;span class="nv"&gt;$PC&lt;/span&gt; 3 &lt;span class="nt"&gt;--configsvr&lt;/span&gt;

&lt;span class="nb"&gt;echo&lt;/span&gt; &lt;span class="s2"&gt;" *** Adding user: 'admin', pwd: 'password' to Config Server - please change password *** "&lt;/span&gt;
&lt;span class="nv"&gt;CSRS_HOSTS&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;&lt;span class="s2"&gt;"csrs/&lt;/span&gt;&lt;span class="nv"&gt;$GIVEN_HOSTNAME&lt;/span&gt;&lt;span class="s2"&gt;:&lt;/span&gt;&lt;span class="nv"&gt;$PC&lt;/span&gt;&lt;span class="s2"&gt;,&lt;/span&gt;&lt;span class="nv"&gt;$GIVEN_HOSTNAME&lt;/span&gt;&lt;span class="s2"&gt;:&lt;/span&gt;&lt;span class="k"&gt;$((&lt;/span&gt;&lt;span class="nv"&gt;$PC&lt;/span&gt;&lt;span class="o"&gt;+&lt;/span&gt;&lt;span class="m"&gt;1&lt;/span&gt;&lt;span class="k"&gt;))&lt;/span&gt;&lt;span class="s2"&gt;,&lt;/span&gt;&lt;span class="nv"&gt;$GIVEN_HOSTNAME&lt;/span&gt;&lt;span class="s2"&gt;:&lt;/span&gt;&lt;span class="k"&gt;$((&lt;/span&gt;&lt;span class="nv"&gt;$PC&lt;/span&gt;&lt;span class="o"&gt;+&lt;/span&gt;&lt;span class="m"&gt;2&lt;/span&gt;&lt;span class="k"&gt;))&lt;/span&gt;&lt;span class="s2"&gt;"&lt;/span&gt;
mongosh &lt;span class="nt"&gt;--host&lt;/span&gt; &lt;span class="nv"&gt;$CSRS_HOSTS&lt;/span&gt; &lt;span class="nt"&gt;--eval&lt;/span&gt; &lt;span class="s2"&gt;"db.getSiblingDB('admin').createUser({ user: 'admin', pwd: 'password', roles: [ {role: 'root', db: 'admin'} ] })"&lt;/span&gt;


&lt;span class="nb"&gt;echo&lt;/span&gt; &lt;span class="s2"&gt;"##########################################################################################################"&lt;/span&gt;
&lt;span class="nb"&gt;echo&lt;/span&gt; &lt;span class="s2"&gt;"######################################### Creating mongos #### ###########################################"&lt;/span&gt;
&lt;span class="nb"&gt;echo&lt;/span&gt; &lt;span class="s2"&gt;"##########################################################################################################"&lt;/span&gt;
&lt;span class="nv"&gt;LOG_DIR_MONGOS&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;~/data/mongos/
&lt;span class="nb"&gt;mkdir&lt;/span&gt; &lt;span class="nt"&gt;-p&lt;/span&gt; &lt;span class="nv"&gt;$LOG_DIR_MONGOS&lt;/span&gt;
&lt;span class="c"&gt;#echo "[ ++ Executing:  mongos --port $(($PORT+17)) --bind_ip_all --fork --logpath $LOG_DIR_MONGOS/mongos.log --configdb $CSRS_HOSTS ]"&lt;/span&gt;
&lt;span class="c"&gt;#mongos --port $(($PORT+17)) --bind_ip_all --fork --logpath $LOG_DIR_MONGOS/mongos.log --configdb $CSRS_HOSTS&lt;/span&gt;
&lt;span class="nb"&gt;echo&lt;/span&gt; &lt;span class="s2"&gt;"[ ++ Executing:  mongos --port &lt;/span&gt;&lt;span class="k"&gt;$((&lt;/span&gt;&lt;span class="nv"&gt;$PORT&lt;/span&gt;&lt;span class="k"&gt;))&lt;/span&gt;&lt;span class="s2"&gt; --bind_ip_all --fork --logpath &lt;/span&gt;&lt;span class="nv"&gt;$LOG_DIR_MONGOS&lt;/span&gt;&lt;span class="s2"&gt;/mongos.log --configdb &lt;/span&gt;&lt;span class="nv"&gt;$CSRS_HOSTS&lt;/span&gt;&lt;span class="s2"&gt; ]"&lt;/span&gt;
mongos &lt;span class="nt"&gt;--port&lt;/span&gt; &lt;span class="k"&gt;$((&lt;/span&gt;&lt;span class="nv"&gt;$PORT&lt;/span&gt;&lt;span class="k"&gt;))&lt;/span&gt; &lt;span class="nt"&gt;--bind_ip_all&lt;/span&gt; &lt;span class="nt"&gt;--fork&lt;/span&gt; &lt;span class="nt"&gt;--logpath&lt;/span&gt; &lt;span class="nv"&gt;$LOG_DIR_MONGOS&lt;/span&gt;/mongos.log &lt;span class="nt"&gt;--configdb&lt;/span&gt; &lt;span class="nv"&gt;$CSRS_HOSTS&lt;/span&gt;

&lt;span class="nv"&gt;COUNTER&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;1
  &lt;span class="k"&gt;until&lt;/span&gt; &lt;span class="o"&gt;[&lt;/span&gt;  &lt;span class="nv"&gt;$COUNTER&lt;/span&gt; &lt;span class="nt"&gt;-gt&lt;/span&gt; &lt;span class="nv"&gt;$SHARDS&lt;/span&gt; &lt;span class="o"&gt;]&lt;/span&gt;&lt;span class="p"&gt;;&lt;/span&gt; &lt;span class="k"&gt;do
  &lt;/span&gt;&lt;span class="nv"&gt;name&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;&lt;span class="k"&gt;${&lt;/span&gt;&lt;span class="nv"&gt;rs_name_arr&lt;/span&gt;&lt;span class="p"&gt;[&lt;/span&gt;&lt;span class="nv"&gt;$COUNTER&lt;/span&gt;&lt;span class="p"&gt;-1]&lt;/span&gt;&lt;span class="k"&gt;}&lt;/span&gt;
    &lt;span class="c"&gt;#PC=$(($PORT+17+1000*$COUNTER))&lt;/span&gt;
    &lt;span class="c"&gt;#echo "[ ++ Executing: mongosh --port $(($PORT+17)) --eval \"sh.addShard('$name/$GIVEN_HOSTNAME:$PC')\" ]"&lt;/span&gt;
    &lt;span class="c"&gt;#mongosh --port $(($PORT+17)) --eval "sh.addShard('$name/$GIVEN_HOSTNAME:$PC')"&lt;/span&gt;
    &lt;span class="nv"&gt;PC&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;&lt;span class="k"&gt;$((&lt;/span&gt;&lt;span class="nv"&gt;$PORT&lt;/span&gt;&lt;span class="o"&gt;+&lt;/span&gt;&lt;span class="m"&gt;1000&lt;/span&gt;&lt;span class="o"&gt;*&lt;/span&gt;&lt;span class="nv"&gt;$COUNTER&lt;/span&gt;&lt;span class="k"&gt;))&lt;/span&gt;
    &lt;span class="nb"&gt;echo&lt;/span&gt; &lt;span class="s2"&gt;"[ ++ Executing: mongosh --port &lt;/span&gt;&lt;span class="k"&gt;$((&lt;/span&gt;&lt;span class="nv"&gt;$PORT&lt;/span&gt;&lt;span class="k"&gt;))&lt;/span&gt;&lt;span class="s2"&gt; --eval &lt;/span&gt;&lt;span class="se"&gt;\"&lt;/span&gt;&lt;span class="s2"&gt;sh.addShard('&lt;/span&gt;&lt;span class="nv"&gt;$name&lt;/span&gt;&lt;span class="s2"&gt;/&lt;/span&gt;&lt;span class="nv"&gt;$GIVEN_HOSTNAME&lt;/span&gt;&lt;span class="s2"&gt;:&lt;/span&gt;&lt;span class="nv"&gt;$PC&lt;/span&gt;&lt;span class="s2"&gt;')&lt;/span&gt;&lt;span class="se"&gt;\"&lt;/span&gt;&lt;span class="s2"&gt; ]"&lt;/span&gt;
    mongosh &lt;span class="nt"&gt;--port&lt;/span&gt; &lt;span class="k"&gt;$((&lt;/span&gt;&lt;span class="nv"&gt;$PORT&lt;/span&gt;&lt;span class="k"&gt;))&lt;/span&gt; &lt;span class="nt"&gt;--eval&lt;/span&gt; &lt;span class="s2"&gt;"sh.addShard('&lt;/span&gt;&lt;span class="nv"&gt;$name&lt;/span&gt;&lt;span class="s2"&gt;/&lt;/span&gt;&lt;span class="nv"&gt;$GIVEN_HOSTNAME&lt;/span&gt;&lt;span class="s2"&gt;:&lt;/span&gt;&lt;span class="nv"&gt;$PC&lt;/span&gt;&lt;span class="s2"&gt;')"&lt;/span&gt;
    &lt;span class="nb"&gt;let &lt;/span&gt;COUNTER+&lt;span class="o"&gt;=&lt;/span&gt;1
  &lt;span class="k"&gt;done

&lt;/span&gt;&lt;span class="nb"&gt;sleep &lt;/span&gt;5
&lt;span class="c"&gt;#mongosh --port $(($PORT+17)) --eval "sh.status()"&lt;/span&gt;
mongosh &lt;span class="nt"&gt;--port&lt;/span&gt; &lt;span class="k"&gt;$((&lt;/span&gt;&lt;span class="nv"&gt;$PORT&lt;/span&gt;&lt;span class="k"&gt;))&lt;/span&gt; &lt;span class="nt"&gt;--eval&lt;/span&gt; &lt;span class="s2"&gt;"sh.status()"&lt;/span&gt;
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;p&gt;Run the following command to create a MongoDB sharded cluster with 2 shards.&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight shell"&gt;&lt;code&gt;mongo-shard-create 27017 2 sh1,sh2 | &lt;span class="nb"&gt;tee&lt;/span&gt; ~/out.log
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;p&gt;Here is the output; however, we have omitted most of it due to its large size. We have included all the executed commands and the last part of the stdout.&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight plaintext"&gt;&lt;code&gt;# commands executed
cat ~/out.log | grep -e Exec -e pass
[ ++ Executing:  mongod --port 28017 --fork --dbpath /Users/nareshmaharaj/data/sh1/0 --logpath /Users/nareshmaharaj/data/sh1/0/log.out -replSet sh1  --shardsvr ]
[ ++ Executing:  mongod --port 28018 --fork --dbpath /Users/nareshmaharaj/data/sh1/1 --logpath /Users/nareshmaharaj/data/sh1/1/log.out -replSet sh1  --shardsvr ]
[ ++ Executing:  mongod --port 28019 --fork --dbpath /Users/nareshmaharaj/data/sh1/2 --logpath /Users/nareshmaharaj/data/sh1/2/log.out -replSet sh1  --shardsvr ]
[ ++ Executing:  mongosh --port 28017 -eval "rs.initiate()" ]
[ ++ Executing:  mongosh --port 28017 -eval "rs.add('nmaharaj-MBP-MMD6T.local:28018')" ]
[ ++ Executing:  mongosh --port 28017 -eval "rs.add('nmaharaj-MBP-MMD6T.local:28019')" ]
[ ++ Executing:  mongod --port 29017 --fork --dbpath /Users/nareshmaharaj/data/sh2/0 --logpath /Users/nareshmaharaj/data/sh2/0/log.out -replSet sh2  --shardsvr ]
[ ++ Executing:  mongod --port 29018 --fork --dbpath /Users/nareshmaharaj/data/sh2/1 --logpath /Users/nareshmaharaj/data/sh2/1/log.out -replSet sh2  --shardsvr ]
[ ++ Executing:  mongod --port 29019 --fork --dbpath /Users/nareshmaharaj/data/sh2/2 --logpath /Users/nareshmaharaj/data/sh2/2/log.out -replSet sh2  --shardsvr ]
[ ++ Executing:  mongosh --port 29017 -eval "rs.initiate()" ]
[ ++ Executing:  mongosh --port 29017 -eval "rs.add('nmaharaj-MBP-MMD6T.local:29018')" ]
[ ++ Executing:  mongosh --port 29017 -eval "rs.add('nmaharaj-MBP-MMD6T.local:29019')" ]
[ ++ Executing:  mongod --port 30017 --fork --dbpath /Users/nareshmaharaj/data/csrs/0 --logpath /Users/nareshmaharaj/data/csrs/0/log.out -replSet csrs --configsvr  ]
[ ++ Executing:  mongod --port 30018 --fork --dbpath /Users/nareshmaharaj/data/csrs/1 --logpath /Users/nareshmaharaj/data/csrs/1/log.out -replSet csrs --configsvr  ]
[ ++ Executing:  mongod --port 30019 --fork --dbpath /Users/nareshmaharaj/data/csrs/2 --logpath /Users/nareshmaharaj/data/csrs/2/log.out -replSet csrs --configsvr  ]
[ ++ Executing:  mongosh --port 30017 -eval "rs.initiate()" ]
[ ++ Executing:  mongosh --port 30017 -eval "rs.add('nmaharaj-MBP-MMD6T.local:30018')" ]
[ ++ Executing:  mongosh --port 30017 -eval "rs.add('nmaharaj-MBP-MMD6T.local:30019')" ]
 *** Adding user: 'admin', pwd: 'password' to Config Server - please change password ***
[ ++ Executing:  mongos --port 27017 --bind_ip_all --fork --logpath /Users/nareshmaharaj/data/mongos//mongos.log --configdb csrs/nmaharaj-MBP-MMD6T.local:30017,nmaharaj-MBP-MMD6T.local:30018,nmaharaj-MBP-MMD6T.local:30019 ]
[ ++ Executing: mongosh --port 27017 --eval "sh.addShard('sh1/nmaharaj-MBP-MMD6T.local:28017')" ]
[ ++ Executing: mongosh --port 27017 --eval "sh.addShard('sh2/nmaharaj-MBP-MMD6T.local:29017')" ]

# stdout
...
##########################################################################################################
######################################### Creating mongos #### ###########################################
##########################################################################################################
[ ++ Executing:  mongos --port 27017 --bind_ip_all --fork --logpath /Users/nareshmaharaj/data/mongos//mongos.log --configdb csrs/nmaharaj-MBP-MMD6T.local:30017,nmaharaj-MBP-MMD6T.local:30018,nmaharaj-MBP-MMD6T.local:30019 ]
about to fork child process, waiting until server is ready for connections.
forked process: 51366
child process started successfully, parent exiting
[ ++ Executing: mongosh --port 27017 --eval "sh.addShard('sh1/nmaharaj-MBP-MMD6T.local:28017')" ]
{
  shardAdded: 'sh1',
  ok: 1,
  '$clusterTime': {
    clusterTime: Timestamp({ t: 1728147532, i: 6 }),
    signature: {
      hash: Binary.createFromBase64('AAAAAAAAAAAAAAAAAAAAAAAAAAA=', 0),
      keyId: Long('0')
    }
  },
  operationTime: Timestamp({ t: 1728147532, i: 6 })
}
[ ++ Executing: mongosh --port 27017 --eval "sh.addShard('sh2/nmaharaj-MBP-MMD6T.local:29017')" ]
{
  shardAdded: 'sh2',
  ok: 1,
  '$clusterTime': {
    clusterTime: Timestamp({ t: 1728147535, i: 16 }),
    signature: {
      hash: Binary.createFromBase64('AAAAAAAAAAAAAAAAAAAAAAAAAAA=', 0),
      keyId: Long('0')
    }
  },
  operationTime: Timestamp({ t: 1728147535, i: 10 })
}
shardingVersion
{ _id: 1, clusterId: ObjectId('670170310cdeaa5de24f0045') }
---
shards
[
  {
    _id: 'sh1',
    host: 'sh1/nmaharaj-MBP-MMD6T.local:28017,nmaharaj-MBP-MMD6T.local:28018,nmaharaj-MBP-MMD6T.local:28019',
    state: 1,
    topologyTime: Timestamp({ t: 1728147531, i: 6 }),
    replSetConfigVersion: Long('-1')
  },
  {
    _id: 'sh2',
    host: 'sh2/nmaharaj-MBP-MMD6T.local:29017,nmaharaj-MBP-MMD6T.local:29018,nmaharaj-MBP-MMD6T.local:29019',
    state: 1,
    topologyTime: Timestamp({ t: 1728147535, i: 1 }),
    replSetConfigVersion: Long('-1')
  }
]
---
active mongoses
[ { '8.0.0': 1 } ]
---
autosplit
{ 'Currently enabled': 'yes' }
---
balancer
{
  'Currently enabled': 'yes',
  'Currently running': 'no',
  'Failed balancer rounds in last 5 attempts': 0,
  'Migration Results for the last 24 hours': 'No recent migrations'
}
---
databases
[
  {
    database: { _id: 'config', primary: 'config', partitioned: true },
    collections: {}
  }
]
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;p&gt;As you can see, we now have a fully configured sharded cluster, and it only takes about a minute to set up.&lt;/p&gt;

&lt;p&gt;Log into mongos using &lt;code&gt;mongosh&lt;/code&gt; on port 27017 and check the status of the shards.&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight plaintext"&gt;&lt;code&gt;mongosh --port 27017 --eval "sh.status()"
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;p&gt;Note that there are only 2 shards. As of version 8.0, you can transition the config servers to &lt;a href="https://www.mongodb.com/docs/manual/reference/command/transitionFromDedicatedConfigServer/#mongodb-dbcommand-dbcmd.transitionFromDedicatedConfigServer" rel="noopener noreferrer"&gt;data shards&lt;/a&gt;. Let's see how this is done.&lt;/p&gt;

&lt;p&gt;Log in to mongos using &lt;code&gt;mongosh&lt;/code&gt; on port 27017.&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight plaintext"&gt;&lt;code&gt;mongosh --port 27017"
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;p&gt;Run the following command:&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight shell"&gt;&lt;code&gt;db.adminCommand&lt;span class="o"&gt;({&lt;/span&gt;transitionFromDedicatedConfigServer:1&lt;span class="o"&gt;})&lt;/span&gt;
&lt;span class="o"&gt;{&lt;/span&gt;
  ok: 1,
  &lt;span class="s1"&gt;'$clusterTime'&lt;/span&gt;: &lt;span class="o"&gt;{&lt;/span&gt;
    clusterTime: Timestamp&lt;span class="o"&gt;({&lt;/span&gt; t: 1728148222, i: 12 &lt;span class="o"&gt;})&lt;/span&gt;,
    signature: &lt;span class="o"&gt;{&lt;/span&gt;
      &lt;span class="nb"&gt;hash&lt;/span&gt;: Binary.createFromBase64&lt;span class="o"&gt;(&lt;/span&gt;&lt;span class="s1"&gt;'AAAAAAAAAAAAAAAAAAAAAAAAAAA='&lt;/span&gt;, 0&lt;span class="o"&gt;)&lt;/span&gt;,
      keyId: Long&lt;span class="o"&gt;(&lt;/span&gt;&lt;span class="s1"&gt;'0'&lt;/span&gt;&lt;span class="o"&gt;)&lt;/span&gt;
    &lt;span class="o"&gt;}&lt;/span&gt;
  &lt;span class="o"&gt;}&lt;/span&gt;,
  operationTime: Timestamp&lt;span class="o"&gt;({&lt;/span&gt; t: 1728148222, i: 12 &lt;span class="o"&gt;})&lt;/span&gt;
&lt;span class="o"&gt;}&lt;/span&gt;
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;p&gt;Now, check to see how many data shards are present.&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight plaintext"&gt;&lt;code&gt;mongosh --port 27017 --eval "sh.status()"
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;p&gt;You should now see all 3 shards.&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight plaintext"&gt;&lt;code&gt;shards
[
  {
    _id: 'config',
    host: 'csrs/nmaharaj-MBP-MMD6T.local:30017,nmaharaj-MBP-MMD6T.local:30018,nmaharaj-MBP-MMD6T.local:30019',
    state: 1,
    topologyTime: Timestamp({ t: 1728148222, i: 3 }),
    replSetConfigVersion: Long('-1')
  },
  {
    _id: 'sh1',
    host: 'sh1/nmaharaj-MBP-MMD6T.local:28017,nmaharaj-MBP-MMD6T.local:28018,nmaharaj-MBP-MMD6T.local:28019',
    state: 1,
    topologyTime: Timestamp({ t: 1728147531, i: 6 }),
    replSetConfigVersion: Long('-1')
  },
  {
    _id: 'sh2',
    host: 'sh2/nmaharaj-MBP-MMD6T.local:29017,nmaharaj-MBP-MMD6T.local:29018,nmaharaj-MBP-MMD6T.local:29019',
    state: 1,
    topologyTime: Timestamp({ t: 1728147535, i: 1 }),
    replSetConfigVersion: Long('-1')
  }
]
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



</description>
    </item>
    <item>
      <title>VPC Peering, Split Brain with Distributed Cross Region NoSQL-DB</title>
      <dc:creator>Naresh Maharaj</dc:creator>
      <pubDate>Sat, 31 Aug 2024 09:52:39 +0000</pubDate>
      <link>https://dev.to/naresh_maharaj_c4b8fbd4aa/vpc-peering-split-brain-with-distributed-cross-region-nosql-db-4i0c</link>
      <guid>https://dev.to/naresh_maharaj_c4b8fbd4aa/vpc-peering-split-brain-with-distributed-cross-region-nosql-db-4i0c</guid>
      <description>&lt;h1&gt;
  
  
  Network Partitioning - confidence and knowledge read.
&lt;/h1&gt;

&lt;p&gt;&lt;a href="https://media2.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2Fsuz64tljt5sqkgnmqkn7.png" class="article-body-image-wrapper"&gt;&lt;img src="https://media2.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2Fsuz64tljt5sqkgnmqkn7.png" alt="Image description" width="800" height="272"&gt;&lt;/a&gt;&lt;/p&gt;

&lt;h2&gt;
  
  
  Motivation
&lt;/h2&gt;

&lt;p&gt;In this experiment, we knowingly and abruptly create a network separation within a live distributed database. The system is split evenly across two geo-regions in equal proportion. Each region maintains its partition subset. By doing this, we can clearly understand the robustness and behavior of a distributed system under partitioned conditions. Understanding these scenarios can help solution architects design resilient systems that handle various partitioning cases effectively.&lt;/p&gt;

&lt;p&gt;While this article focuses on an equal partition split, it's also crucial to test unequal splits. In a minority-majority scenario, the majority partition will continue to handle operations with quorum, while the minority partition may face availability issues. I will discuss this in detail in a separate article.&lt;/p&gt;

&lt;p&gt;I've structured this blog into a series of five articles, each corresponding to a different phase of the experiment. Each phase will demand a unique set of skills. For instance, in part 1 you'll be exploring Virtual Private Networks and Peering, while in part 2 you'll take on the role of a DBA, with each part introducing you to different challenges.&lt;/p&gt;

&lt;ul&gt;
&lt;li&gt; &lt;strong&gt;Part 1&lt;/strong&gt; - Simple cross region messaging application&lt;/li&gt;
&lt;li&gt; &lt;strong&gt;Part 2&lt;/strong&gt; - Installing an Aerospike NoSQL DB as a Stretch Cluster&lt;/li&gt;
&lt;li&gt; &lt;strong&gt;Part 3&lt;/strong&gt; - Building a Python Application to insert test Data&lt;/li&gt;
&lt;li&gt; &lt;strong&gt;Part 4&lt;/strong&gt; - Split Brain network partitioning&lt;/li&gt;
&lt;li&gt; &lt;strong&gt;Part 5&lt;/strong&gt; - Partition Management Strong Consistency&lt;/li&gt;
&lt;li&gt; &lt;strong&gt;Wrap up&lt;/strong&gt; - What's next&lt;/li&gt;
&lt;/ul&gt;

&lt;h2&gt;
  
  
  Overview
&lt;/h2&gt;

&lt;p&gt;This is what we plan to do over the next 5 parts.&lt;/p&gt;

&lt;ol&gt;
&lt;li&gt;Create 2 unrelated VPCs in AWS, each VPC will be in a different region.&lt;/li&gt;
&lt;li&gt;Establish basic communication using a simple  message app over the private network.&lt;/li&gt;
&lt;li&gt;Demonstrate traffic blocking by splitting the cross-regional connection.&lt;/li&gt;
&lt;li&gt;Install a Distributed Database spanning the 2 regions and treat it as a single system.&lt;/li&gt;
&lt;li&gt;Verify data integrity by enabling strong consistency features and rules.&lt;/li&gt;
&lt;li&gt;Simulate real-world traffic using a simple Python data loader&lt;/li&gt;
&lt;li&gt;Enforce a network partition that will create a well known split brain scenario.&lt;/li&gt;
&lt;li&gt;Evaluate the Results. &lt;/li&gt;
&lt;/ol&gt;


&lt;h2&gt;Part 1: Talking Cross Region&lt;/h2&gt;

&lt;p&gt;Selecting Regions&lt;/p&gt;

&lt;ol&gt;
&lt;li&gt;In a browser navigate to the top right hand corner of the AWS console&lt;/li&gt;
&lt;li&gt;Select a unique region. 

&lt;ol&gt;
&lt;li&gt;For this example we selected &lt;code&gt;eu-west-2&lt;/code&gt;, being London. &lt;/li&gt;
&lt;/ol&gt;
&lt;/li&gt;
&lt;li&gt;Verify you have the correct key pairs downloaded, as you will need these later to log into the host.&lt;/li&gt;
&lt;/ol&gt;

&lt;p&gt;&lt;a href="https://media2.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2Fub9nkrewwer939uc7t7v.png" class="article-body-image-wrapper"&gt;&lt;img src="https://media2.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2Fub9nkrewwer939uc7t7v.png" alt="Image description" width="408" height="122"&gt;&lt;/a&gt;&lt;/p&gt;

&lt;ol&gt;
&lt;li&gt;Open a new tab and select a different region. 

&lt;ol&gt;
&lt;li&gt;For the second region, we will use &lt;code&gt;eu-west-3&lt;/code&gt;, which is Paris. &lt;/li&gt;
&lt;/ol&gt;
&lt;/li&gt;
&lt;li&gt;Again, verify you have the correct key pairs downloaded for logging into the host.&lt;/li&gt;
&lt;/ol&gt;

&lt;p&gt;&lt;a href="https://media2.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2F1mcuwky0gz2uodzd8nl1.png" class="article-body-image-wrapper"&gt;&lt;img src="https://media2.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2F1mcuwky0gz2uodzd8nl1.png" alt="Image description" width="402" height="115"&gt;&lt;/a&gt;&lt;/p&gt;

&lt;p&gt;By following these steps, we will demonstrate the impact of a network split on a distributed, cross-regional NoSQL database. But before that, we will test our cross-regional connections with a simple messaging tool with no application coding required. &lt;/p&gt;
&lt;h4&gt;
  
  
  VPC in Region London 🏴󠁧󠁢󠁥󠁮󠁧󠁿 󠁧󠁢󠁥󠁮󠁧󠁿󠁧󠁢󠁥
&lt;/h4&gt;

&lt;p&gt;From the AWS console, visit the VPC Dashboard and create a new VPC named 'my-vpc-london-2' with the IPv4 CIDR block 172.32.0.0/16.&lt;/p&gt;

&lt;p&gt;Next, we will add subnets for the various availability zones and attach a new internet gateway, linking all of these to a new routing table.&lt;/p&gt;

&lt;p&gt;Create the subnets for each availability zone in the VPC we just created:&lt;/p&gt;

&lt;ul&gt;
&lt;li&gt;
&lt;p&gt;First Availability Zone&lt;/p&gt;

&lt;ul&gt;
&lt;li&gt;Set the IPv4 subnet CIDR block to 172.32.32.0/20.&lt;/li&gt;
&lt;li&gt;Subnet name: my-subnet-2a&lt;/li&gt;
&lt;li&gt;Select the availability zone: eu-west-2a&lt;/li&gt;
&lt;/ul&gt;
&lt;/li&gt;
&lt;li&gt;
&lt;p&gt;Second Availability Zone&lt;/p&gt;

&lt;ul&gt;
&lt;li&gt;Set the IPv4 subnet CIDR block to 172.32.16.0/20.&lt;/li&gt;
&lt;li&gt;Subnet name: my-subnet-2b&lt;/li&gt;
&lt;li&gt;Select the availability zone: eu-west-2b&lt;/li&gt;
&lt;/ul&gt;
&lt;/li&gt;
&lt;li&gt;
&lt;p&gt;Third Availability Zone&lt;/p&gt;

&lt;ul&gt;
&lt;li&gt;Set the IPv4 subnet CIDR block to 172.32.0.0/20.&lt;/li&gt;
&lt;li&gt;Subnet name: my-subnet-2c&lt;/li&gt;
&lt;li&gt;Select the availability zone: eu-west-2c&lt;/li&gt;
&lt;/ul&gt;
&lt;/li&gt;
&lt;/ul&gt;

&lt;p&gt;&lt;a href="https://media2.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2Fimdsciurm35wkq2d3od7.png" class="article-body-image-wrapper"&gt;&lt;img src="https://media2.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2Fimdsciurm35wkq2d3od7.png" alt="Image description" width="800" height="237"&gt;&lt;/a&gt;&lt;/p&gt;

&lt;p&gt;Under Your VPCs → Resource Map, you should now see the subnets added.&lt;/p&gt;

&lt;p&gt;&lt;a href="https://media2.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2Fsr0ao8eg8yd6634l0yhw.png" class="article-body-image-wrapper"&gt;&lt;img src="https://media2.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2Fsr0ao8eg8yd6634l0yhw.png" alt="Image description" width="800" height="227"&gt;&lt;/a&gt;&lt;/p&gt;

&lt;p&gt;Create a new Internet Gateway and then add it to the Routing Table. Check that you can see this in the Resource Map.&lt;/p&gt;
&lt;h4&gt;
  
  
  EC2 Host in Region London 🏴󠁧󠁢󠁥󠁮󠁧󠁿
&lt;/h4&gt;

&lt;p&gt;Launch an EC2 instance with the following settings:&lt;/p&gt;

&lt;div class="table-wrapper-paragraph"&gt;&lt;table&gt;
&lt;thead&gt;
&lt;tr&gt;
&lt;th&gt;Parameter&lt;/th&gt;
&lt;th&gt;Value&lt;/th&gt;
&lt;/tr&gt;
&lt;/thead&gt;
&lt;tbody&gt;
&lt;tr&gt;
&lt;td&gt;Image&lt;/td&gt;
&lt;td&gt;Canonical, Ubuntu, 22.04 LTS, amd64 jammy image built on 2024-07-01&lt;/td&gt;
&lt;/tr&gt;
&lt;tr&gt;
&lt;td&gt;Instance Type&lt;/td&gt;
&lt;td&gt;t2-micro&lt;/td&gt;
&lt;/tr&gt;
&lt;tr&gt;
&lt;td&gt;Key Pair&lt;/td&gt;
&lt;td&gt;Select the key you created earlier and downloaded safely.&lt;/td&gt;
&lt;/tr&gt;
&lt;tr&gt;
&lt;td&gt;VPC&lt;/td&gt;
&lt;td&gt;Select the VPC we created earlier.&lt;/td&gt;
&lt;/tr&gt;
&lt;tr&gt;
&lt;td&gt;Subnet&lt;/td&gt;
&lt;td&gt;Choose the subnet we created earlier for the availability zone this host will be placed in.&lt;/td&gt;
&lt;/tr&gt;
&lt;tr&gt;
&lt;td&gt;Auto Assign Public IP&lt;/td&gt;
&lt;td&gt;In production, you would probably disable this and use a jump box. For simplicity, we will SSH directly using the public IP.&lt;/td&gt;
&lt;/tr&gt;
&lt;tr&gt;
&lt;td&gt;Security Group&lt;/td&gt;
&lt;td&gt;Create a new security group named my-sg-1.&lt;/td&gt;
&lt;/tr&gt;
&lt;tr&gt;
&lt;td&gt;Security Group Rule&lt;/td&gt;
&lt;td&gt;Add a custom TCP rule for ports 3000-3003, source from anywhere.&lt;/td&gt;
&lt;/tr&gt;
&lt;/tbody&gt;
&lt;/table&gt;&lt;/div&gt;

&lt;p&gt;Login using SSH and your associated key to verify step 1 completed successfully:&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight shell"&gt;&lt;code&gt;ssh &lt;span class="nt"&gt;-o&lt;/span&gt; &lt;span class="nv"&gt;IdentitiesOnly&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;&lt;span class="nb"&gt;yes&lt;/span&gt; &lt;span class="nt"&gt;-i&lt;/span&gt; aws-instance-key-london-2.pem ubuntu@35.177.110.209
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;p&gt;&lt;a href="https://media2.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2Frw09zwozt64yhcf27whu.png" class="article-body-image-wrapper"&gt;&lt;img src="https://media2.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2Frw09zwozt64yhcf27whu.png" alt="Image description" width="800" height="293"&gt;&lt;/a&gt;&lt;/p&gt;

&lt;p&gt;Congratulations, your first region is complete. Let's move on to the second region, Paris!&lt;/p&gt;

&lt;h4&gt;
  
  
  VPC in Region Paris 🇫🇷
&lt;/h4&gt;

&lt;p&gt;From the browser tab with the Paris region selected, go to the VPC Dashboard and create a new VPC. Verify the CIDR blocks do not overlap with the London VPC. Use the IPv4 CIDR block 172.33.0.0/16.&lt;/p&gt;

&lt;p&gt;Next, we will add subnets for the various availability zones and attach a new internet gateway, linking all of these to a new routing table, just as we did before.&lt;/p&gt;

&lt;p&gt;Create the subnets for each availability zone in the VPC we just created:&lt;/p&gt;

&lt;ul&gt;
&lt;li&gt;
&lt;p&gt;First Availability Zone&lt;/p&gt;

&lt;ul&gt;
&lt;li&gt;Set the IPv4 subnet CIDR block to 172.33.16.0/20.&lt;/li&gt;
&lt;li&gt;Subnet name: my-subnet-3a&lt;/li&gt;
&lt;li&gt;Select the availability zone: eu-west-3a&lt;/li&gt;
&lt;/ul&gt;


&lt;/li&gt;

&lt;li&gt;

&lt;p&gt;Second Availability Zone&lt;/p&gt;

&lt;ul&gt;
&lt;li&gt;Set the IPv4 subnet CIDR block to 172.33.0.0/20.&lt;/li&gt;
&lt;li&gt;Subnet name: my-subnet-3b&lt;/li&gt;
&lt;li&gt;Select the availability zone: eu-west-3b&lt;/li&gt;
&lt;/ul&gt;


&lt;/li&gt;

&lt;li&gt;

&lt;p&gt;Third Availability Zone&lt;/p&gt;

&lt;ul&gt;
&lt;li&gt;Set the IPv4 subnet CIDR block to 172.33.32.0/20.&lt;/li&gt;
&lt;li&gt;Subnet name: my-subnet-3c&lt;/li&gt;
&lt;li&gt;Select the availability zone: eu-west-3c&lt;/li&gt;
&lt;/ul&gt;


&lt;/li&gt;

&lt;/ul&gt;

&lt;p&gt;&lt;a href="https://media2.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2Fw8wsk2glz0w6p897omhq.png" class="article-body-image-wrapper"&gt;&lt;img src="https://media2.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2Fw8wsk2glz0w6p897omhq.png" alt="Image description" width="800" height="227"&gt;&lt;/a&gt;&lt;/p&gt;

&lt;p&gt;Create a new Internet Gateway and then add it to the Routing Table. Check that you can see this in the Resource Map.&lt;/p&gt;

&lt;p&gt;&lt;a href="https://media2.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2F1efpzf5vgkra1wkbmvzs.png" class="article-body-image-wrapper"&gt;&lt;img src="https://media2.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2F1efpzf5vgkra1wkbmvzs.png" alt="Image description" width="800" height="205"&gt;&lt;/a&gt;&lt;/p&gt;

&lt;h4&gt;
  
  
  EC2 Host in Region Paris 🇫🇷
&lt;/h4&gt;

&lt;p&gt;Launch an EC2 instance with the following settings:&lt;/p&gt;

&lt;div class="table-wrapper-paragraph"&gt;&lt;table&gt;
&lt;thead&gt;
&lt;tr&gt;
&lt;th&gt;Parameter&lt;/th&gt;
&lt;th&gt;Value&lt;/th&gt;
&lt;/tr&gt;
&lt;/thead&gt;
&lt;tbody&gt;
&lt;tr&gt;
&lt;td&gt;Image&lt;/td&gt;
&lt;td&gt;ubuntu/images/hvm-ssd/ubuntu-jammy-22.04-amd64-server-20240701&lt;/td&gt;
&lt;/tr&gt;
&lt;tr&gt;
&lt;td&gt;Instance Type&lt;/td&gt;
&lt;td&gt;t2-micro&lt;/td&gt;
&lt;/tr&gt;
&lt;tr&gt;
&lt;td&gt;Key Pair&lt;/td&gt;
&lt;td&gt;Select the key you created earlier and downloaded safely.&lt;/td&gt;
&lt;/tr&gt;
&lt;tr&gt;
&lt;td&gt;VPC&lt;/td&gt;
&lt;td&gt;Select the VPC we created earlier.&lt;/td&gt;
&lt;/tr&gt;
&lt;tr&gt;
&lt;td&gt;Subnet&lt;/td&gt;
&lt;td&gt;Choose the subnet we created earlier for the availability zone this host will be placed in.&lt;/td&gt;
&lt;/tr&gt;
&lt;tr&gt;
&lt;td&gt;Auto Assign Public IP&lt;/td&gt;
&lt;td&gt;In production, you would probably disable this and use a secure jump box. For simplicity, we will SSH directly using the public IP.&lt;/td&gt;
&lt;/tr&gt;
&lt;tr&gt;
&lt;td&gt;Security Group&lt;/td&gt;
&lt;td&gt;Create a new security group named my-sg-1.&lt;/td&gt;
&lt;/tr&gt;
&lt;tr&gt;
&lt;td&gt;Security Group Rule&lt;/td&gt;
&lt;td&gt;Add a custom TCP rule for ports 3000-3003, source from anywhere.&lt;/td&gt;
&lt;/tr&gt;
&lt;/tbody&gt;
&lt;/table&gt;&lt;/div&gt;

&lt;p&gt;Login using SSH and associated key to verify step 2 completed successfully:&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight shell"&gt;&lt;code&gt;ssh &lt;span class="nt"&gt;-o&lt;/span&gt; &lt;span class="nv"&gt;IdentitiesOnly&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;&lt;span class="nb"&gt;yes&lt;/span&gt; &lt;span class="nt"&gt;-i&lt;/span&gt; aws-instance-key-paris-1.pem ubuntu@13.38.38.248
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;p&gt;&lt;a href="https://media2.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2Ff1wxiyr9h1mfkn8kqwed.png" class="article-body-image-wrapper"&gt;&lt;img src="https://media2.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2Ff1wxiyr9h1mfkn8kqwed.png" alt="Image description" width="800" height="285"&gt;&lt;/a&gt;&lt;/p&gt;

&lt;p&gt;Congratulations, your second region is complete.&lt;/p&gt;

&lt;h4&gt;
  
  
  Network Peering - Stretched Network
&lt;/h4&gt;

&lt;p&gt;The following diagram shows what we intend to achieve with our cross-regional network. We will use AWS's VPC peering to achieve this seamlessly. We will test that we can reach each region with a simple yet powerful networking tool.&lt;/p&gt;

&lt;p&gt;&lt;a href="https://media2.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2Fmrwg0hzrkqmbxrhkcjsf.png" class="article-body-image-wrapper"&gt;&lt;img src="https://media2.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2Fmrwg0hzrkqmbxrhkcjsf.png" alt="Image description" width="800" height="549"&gt;&lt;/a&gt;&lt;/p&gt;

&lt;ul&gt;
&lt;li&gt;
&lt;strong&gt;Paris Region&lt;/strong&gt; 🇫🇷

&lt;ul&gt;
&lt;li&gt;Under Your VPCs → Peering Connections, create a new peering connection.&lt;/li&gt;
&lt;li&gt;Name it 'my-pc-to-london-1'.&lt;/li&gt;
&lt;li&gt;As the VPC ID (Requester), select the VPC we created earlier.&lt;/li&gt;
&lt;li&gt;Select another VPC to peer with in another region; in our example, it's London (eu-west-2). Enter the VPC ID for the VPC in London.&lt;/li&gt;
&lt;li&gt;Go to the Paris VPCs 

&lt;ul&gt;
&lt;li&gt;Update the routing table:&lt;/li&gt;
&lt;li&gt;Target: VPC peering&lt;/li&gt;
&lt;li&gt;Destination: London CIDR 172.32.0.0/16&lt;/li&gt;
&lt;/ul&gt;


&lt;/li&gt;

&lt;/ul&gt;

&lt;/li&gt;

&lt;/ul&gt;

&lt;p&gt;&lt;a href="https://media2.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2F3uw7jter9wec6oizayvy.png" class="article-body-image-wrapper"&gt;&lt;img src="https://media2.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2F3uw7jter9wec6oizayvy.png" alt="Image description" width="800" height="503"&gt;&lt;/a&gt;&lt;/p&gt;

&lt;ul&gt;
&lt;li&gt;
&lt;strong&gt;London Region&lt;/strong&gt; 🏴󠁧󠁢󠁥󠁮󠁧󠁿

&lt;ul&gt;
&lt;li&gt;Go to the London VPCs → Peering Connections and accept the request made from the Paris VPC. You might be prompted to update the routing table. If so, accept it.&lt;/li&gt;
&lt;li&gt;Update the routing table:

&lt;ul&gt;
&lt;li&gt;Target: VPC peering&lt;/li&gt;
&lt;li&gt;Destination: Paris CIDR 172.33.0.0/16&lt;/li&gt;
&lt;/ul&gt;


&lt;/li&gt;

&lt;/ul&gt;

&lt;/li&gt;

&lt;/ul&gt;

&lt;p&gt;&lt;a href="https://media2.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2Fhp1a1nl0sp2vdgubou0y.png" class="article-body-image-wrapper"&gt;&lt;img src="https://media2.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2Fhp1a1nl0sp2vdgubou0y.png" alt="Image description" width="800" height="335"&gt;&lt;/a&gt;&lt;/p&gt;

&lt;h4&gt;
  
  
  Messaging 'chat' (using &lt;code&gt;nc&lt;/code&gt; Netcat)
&lt;/h4&gt;

&lt;p&gt;From the London EC2 instance, start an &lt;code&gt;nc&lt;/code&gt; server on port 3000 with the following switches:&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight shell"&gt;&lt;code&gt;nc &lt;span class="nt"&gt;-l&lt;/span&gt; &lt;span class="nt"&gt;-k&lt;/span&gt; &lt;span class="nt"&gt;-p&lt;/span&gt; 3000
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;p&gt;From the Paris EC2 instance, establish a client connection to the London server:&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight shell"&gt;&lt;code&gt;nc 172.32.34.147 3000
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;p&gt;You can now start chatting — every message you type is sent across the peered connection between the two regions!&lt;/p&gt;

&lt;p&gt;&lt;a href="https://media2.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2Fmshzh0ogftm9t230sagn.png" class="article-body-image-wrapper"&gt;&lt;img src="https://media2.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2Fmshzh0ogftm9t230sagn.png" alt="Image description" width="712" height="302"&gt;&lt;/a&gt;&lt;/p&gt;


&lt;h2&gt;Part 2: Aerospike NoSQL DB Stretch Cluster&lt;/h2&gt;

&lt;p&gt;In this section, we will create a 6-node stretch cluster NoSQL DB, with each region hosting 3 nodes. The following diagram illustrates the stretch cluster configuration, where every node interconnects with each other node. Due to VPC peering, additional latencies may be observed for replica updates, but this is not a concern for this topic.&lt;/p&gt;

&lt;p&gt;&lt;a href="https://media2.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2Fbjqf1hyg2mgzeownfq2k.png" class="article-body-image-wrapper"&gt;&lt;img src="https://media2.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2Fbjqf1hyg2mgzeownfq2k.png" alt="Image description" width="800" height="548"&gt;&lt;/a&gt;&lt;/p&gt;
&lt;h4&gt;
  
  
  Create Database Hosts
&lt;/h4&gt;

&lt;p&gt;For each region, create three nodes. Select the VPC you previously set up, enable public IP assignments, and use the same security group.&lt;/p&gt;
&lt;h4&gt;
  
  
  EC2 Database Hosts Region London 🏴󠁧󠁢󠁥󠁮󠁧󠁿
&lt;/h4&gt;

&lt;p&gt;Launch 3 x EC2 instances with the following settings:&lt;/p&gt;

&lt;div class="table-wrapper-paragraph"&gt;&lt;table&gt;
&lt;thead&gt;
&lt;tr&gt;
&lt;th&gt;Parameter&lt;/th&gt;
&lt;th&gt;Value&lt;/th&gt;
&lt;/tr&gt;
&lt;/thead&gt;
&lt;tbody&gt;
&lt;tr&gt;
&lt;td&gt;Image&lt;/td&gt;
&lt;td&gt;Rocky-8-EC2-Base-8.7-20230215.0.x86_64 (ami-07d2b4d8d9980a125)&lt;/td&gt;
&lt;/tr&gt;
&lt;tr&gt;
&lt;td&gt;Instance Type&lt;/td&gt;
&lt;td&gt;t3a.medium (not what you would use in production)&lt;/td&gt;
&lt;/tr&gt;
&lt;tr&gt;
&lt;td&gt;Key Pair&lt;/td&gt;
&lt;td&gt;Select the key you created earlier and downloaded safely.&lt;/td&gt;
&lt;/tr&gt;
&lt;tr&gt;
&lt;td&gt;VPC&lt;/td&gt;
&lt;td&gt;Select the VPC we created earlier.&lt;/td&gt;
&lt;/tr&gt;
&lt;tr&gt;
&lt;td&gt;Subnet&lt;/td&gt;
&lt;td&gt;Choose the subnet we created earlier for the availability zone this host will be placed in.&lt;/td&gt;
&lt;/tr&gt;
&lt;tr&gt;
&lt;td&gt;Auto Assign Public IP&lt;/td&gt;
&lt;td&gt;In production, you would probably disable this and use a jump box. For simplicity, we will SSH directly using the public IP.&lt;/td&gt;
&lt;/tr&gt;
&lt;tr&gt;
&lt;td&gt;Security Group&lt;/td&gt;
&lt;td&gt;Use the same security group from earlier.&lt;/td&gt;
&lt;/tr&gt;
&lt;tr&gt;
&lt;td&gt;Security Group Rule&lt;/td&gt;
&lt;td&gt;None so far&lt;/td&gt;
&lt;/tr&gt;
&lt;tr&gt;
&lt;td&gt;Volumes&lt;/td&gt;
&lt;td&gt;Root Volume: 1x10GB-gp2, 1x8GB-gp3&lt;/td&gt;
&lt;/tr&gt;
&lt;/tbody&gt;
&lt;/table&gt;&lt;/div&gt;
&lt;h4&gt;
  
  
  EC2 Database Hosts Region Paris 🇫🇷
&lt;/h4&gt;

&lt;p&gt;Launch 3 x EC2 instances with the following settings:&lt;/p&gt;

&lt;div class="table-wrapper-paragraph"&gt;&lt;table&gt;
&lt;thead&gt;
&lt;tr&gt;
&lt;th&gt;Parameter&lt;/th&gt;
&lt;th&gt;Value&lt;/th&gt;
&lt;/tr&gt;
&lt;/thead&gt;
&lt;tbody&gt;
&lt;tr&gt;
&lt;td&gt;Image&lt;/td&gt;
&lt;td&gt;Rocky-8-EC2-LVM-8.7-20230215.0.x86_64 (ami-064a83a6b9c2edb23)&lt;/td&gt;
&lt;/tr&gt;
&lt;tr&gt;
&lt;td&gt;Instance Type&lt;/td&gt;
&lt;td&gt;t3a.medium (not what you would use in production)&lt;/td&gt;
&lt;/tr&gt;
&lt;tr&gt;
&lt;td&gt;Key Pair&lt;/td&gt;
&lt;td&gt;Select the key you created earlier and downloaded safely.&lt;/td&gt;
&lt;/tr&gt;
&lt;tr&gt;
&lt;td&gt;VPC&lt;/td&gt;
&lt;td&gt;Select the VPC we created earlier.&lt;/td&gt;
&lt;/tr&gt;
&lt;tr&gt;
&lt;td&gt;Subnet&lt;/td&gt;
&lt;td&gt;Choose the subnet we created earlier for the availability zone this host will be placed in.&lt;/td&gt;
&lt;/tr&gt;
&lt;tr&gt;
&lt;td&gt;Auto Assign Public IP&lt;/td&gt;
&lt;td&gt;In production, you would probably disable this and use a jump box. For simplicity, we will SSH directly using the public IP.&lt;/td&gt;
&lt;/tr&gt;
&lt;tr&gt;
&lt;td&gt;Security Group&lt;/td&gt;
&lt;td&gt;Use the same security group from earlier.&lt;/td&gt;
&lt;/tr&gt;
&lt;tr&gt;
&lt;td&gt;Security Group Rule&lt;/td&gt;
&lt;td&gt;None so far&lt;/td&gt;
&lt;/tr&gt;
&lt;tr&gt;
&lt;td&gt;Volumes&lt;/td&gt;
&lt;td&gt;Root Volume: 1x10GB-gp2, 1x8GB-gp3&lt;/td&gt;
&lt;/tr&gt;
&lt;/tbody&gt;
&lt;/table&gt;&lt;/div&gt;
&lt;h4&gt;
  
  
  Installing Aerospike NoSQL DB
&lt;/h4&gt;

&lt;p&gt;Log into each host in a single region using SSH and install Aerospike. There are comments in the file below to remind you about specific changes required for each host. Although there are several automation tools available, we will manually configure each of the six nodes to keep things simple and aid learning.&lt;/p&gt;

&lt;p&gt;If you are interested in knowing more about Aerospike, visit the &lt;a href="https://aerospike.com/developer/" rel="noopener noreferrer"&gt;Developer website&lt;/a&gt;.&lt;/p&gt;

&lt;ul&gt;
&lt;li&gt;If you have a license key, also known as a feature file, copy this across to the host.&lt;/li&gt;
&lt;li&gt;SSH into each host machine and run the following.&lt;/li&gt;
&lt;/ul&gt;

&lt;p&gt;EC2 database hosts in Region London 🏴󠁧󠁢󠁥󠁮󠁧󠁿&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight shell"&gt;&lt;code&gt;&lt;span class="nb"&gt;export &lt;/span&gt;&lt;span class="nv"&gt;VER&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;&lt;span class="s2"&gt;"7.1.0.0"&lt;/span&gt;
&lt;span class="nb"&gt;export &lt;/span&gt;&lt;span class="nv"&gt;TOOLS&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;11.0.0
&lt;span class="nb"&gt;export &lt;/span&gt;&lt;span class="nv"&gt;OS&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;el8
&lt;span class="nb"&gt;export &lt;/span&gt;&lt;span class="nv"&gt;ARCH&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;x86_64
&lt;span class="nb"&gt;sudo &lt;/span&gt;yum &lt;span class="nb"&gt;install &lt;/span&gt;java-11-openjdk.x86_64 java-11-openjdk java-11-openjdk-devel python3 openssl-devel wget git gcc maven bind-utils sysstat nc &lt;span class="nt"&gt;-y&lt;/span&gt;
&lt;span class="nv"&gt;SERVER_BIN&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;aerospike-server-enterprise_&lt;span class="k"&gt;${&lt;/span&gt;&lt;span class="nv"&gt;VER&lt;/span&gt;&lt;span class="k"&gt;}&lt;/span&gt;_tools-&lt;span class="k"&gt;${&lt;/span&gt;&lt;span class="nv"&gt;TOOLS&lt;/span&gt;&lt;span class="k"&gt;}&lt;/span&gt;_&lt;span class="k"&gt;${&lt;/span&gt;&lt;span class="nv"&gt;OS&lt;/span&gt;&lt;span class="k"&gt;}&lt;/span&gt;_&lt;span class="k"&gt;${&lt;/span&gt;&lt;span class="nv"&gt;ARCH&lt;/span&gt;&lt;span class="k"&gt;}&lt;/span&gt;
&lt;span class="nv"&gt;LINK&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;https://download.aerospike.com/artifacts/aerospike-server-enterprise/&lt;span class="k"&gt;${&lt;/span&gt;&lt;span class="nv"&gt;VER&lt;/span&gt;&lt;span class="k"&gt;}&lt;/span&gt;/&lt;span class="k"&gt;${&lt;/span&gt;&lt;span class="nv"&gt;SERVER_BIN&lt;/span&gt;&lt;span class="k"&gt;}&lt;/span&gt;.tgz
wget &lt;span class="nt"&gt;-q&lt;/span&gt; &lt;span class="nv"&gt;$LINK&lt;/span&gt;
&lt;span class="nb"&gt;tar&lt;/span&gt; &lt;span class="nt"&gt;-xvf&lt;/span&gt; &lt;span class="k"&gt;${&lt;/span&gt;&lt;span class="nv"&gt;SERVER_BIN&lt;/span&gt;&lt;span class="k"&gt;}&lt;/span&gt;.tgz
&lt;span class="nv"&gt;NS&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;mydata
&lt;span class="nb"&gt;sudo mkdir&lt;/span&gt; &lt;span class="nt"&gt;-p&lt;/span&gt; /var/log/aerospike/
&lt;span class="nb"&gt;sudo mkdir&lt;/span&gt; &lt;span class="nt"&gt;-p&lt;/span&gt; /etc/aerospike/
&lt;span class="c"&gt;# Zero the data disks&lt;/span&gt;
&lt;span class="nb"&gt;ls&lt;/span&gt; &lt;span class="nt"&gt;-l&lt;/span&gt; /dev/nvme1n1 | &lt;span class="nb"&gt;awk&lt;/span&gt; &lt;span class="s1"&gt;'{print $NF}'&lt;/span&gt; | &lt;span class="k"&gt;while &lt;/span&gt;&lt;span class="nb"&gt;read&lt;/span&gt; &lt;span class="nt"&gt;-r&lt;/span&gt; line&lt;span class="p"&gt;;&lt;/span&gt; &lt;span class="k"&gt;do &lt;/span&gt;&lt;span class="nb"&gt;sudo dd &lt;/span&gt;&lt;span class="k"&gt;if&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;/dev/zero &lt;span class="nv"&gt;of&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;&lt;span class="nv"&gt;$line&lt;/span&gt; &lt;span class="nv"&gt;bs&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;1024 &lt;span class="nv"&gt;count&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;8192 &lt;span class="nv"&gt;oflag&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;direct&lt;span class="p"&gt;;&lt;/span&gt; &lt;span class="k"&gt;done

&lt;/span&gt;&lt;span class="nb"&gt;export &lt;/span&gt;&lt;span class="nv"&gt;ID&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;&lt;span class="k"&gt;$((&lt;/span&gt;&lt;span class="m"&gt;1&lt;/span&gt; &lt;span class="o"&gt;+&lt;/span&gt; &lt;span class="nv"&gt;$RANDOM&lt;/span&gt; &lt;span class="o"&gt;%&lt;/span&gt; &lt;span class="m"&gt;1000&lt;/span&gt;&lt;span class="k"&gt;))&lt;/span&gt;
&lt;span class="nb"&gt;export &lt;/span&gt;&lt;span class="nv"&gt;INDEX&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;A&lt;span class="nv"&gt;$ID&lt;/span&gt;
&lt;span class="nb"&gt;export &lt;/span&gt;&lt;span class="nv"&gt;IP&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;&lt;span class="sb"&gt;`&lt;/span&gt;ip a | &lt;span class="nb"&gt;grep &lt;/span&gt;172 | &lt;span class="nb"&gt;awk&lt;/span&gt; &lt;span class="o"&gt;{&lt;/span&gt;&lt;span class="s1"&gt;'print $2'&lt;/span&gt;&lt;span class="o"&gt;}&lt;/span&gt; | &lt;span class="nb"&gt;cut&lt;/span&gt; &lt;span class="nt"&gt;-f1&lt;/span&gt; &lt;span class="nt"&gt;-d&lt;/span&gt;&lt;span class="s1"&gt;'/'&lt;/span&gt;&lt;span class="sb"&gt;`&lt;/span&gt;
&lt;span class="nb"&gt;export &lt;/span&gt;&lt;span class="nv"&gt;PIP&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;&lt;span class="sb"&gt;`&lt;/span&gt;dig +short myip.opendns.com @resolver1.opendns.com&lt;span class="sb"&gt;`&lt;/span&gt;
&lt;span class="nb"&gt;export &lt;/span&gt;&lt;span class="nv"&gt;S1IP&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;172.32.15.231 &lt;span class="c"&gt;# another london node for IP seeding &amp;lt;&amp;lt;--------------- Change this&lt;/span&gt;
&lt;span class="nb"&gt;export &lt;/span&gt;&lt;span class="nv"&gt;S2IP&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;172.33.16.172 &lt;span class="c"&gt;# another paris node for IP seeding &amp;lt;&amp;lt;--------------- Change this&lt;/span&gt;
&lt;span class="nb"&gt;export &lt;/span&gt;&lt;span class="nv"&gt;DEV1&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;/dev/nvme1n1 &lt;span class="c"&gt;# &amp;lt;&amp;lt;--------------- Maybe Change this&lt;/span&gt;

&lt;span class="nb"&gt;cat&lt;/span&gt; &lt;span class="o"&gt;&amp;lt;&amp;lt;&lt;/span&gt;&lt;span class="no"&gt;EOF&lt;/span&gt;&lt;span class="sh"&gt;&amp;gt; aerospike.conf
service {
        proto-fd-max 15000

        node-id &lt;/span&gt;&lt;span class="nv"&gt;$INDEX&lt;/span&gt;&lt;span class="sh"&gt;
        cluster-name test-aerocluster.eu
        transaction-max-ms 1500
        log-local-time true
}

logging {
        file /var/log/aerospike/aerospike.log {
                context any info
        }
}
network {
        service {
               address any
               access-address &lt;/span&gt;&lt;span class="nv"&gt;$IP&lt;/span&gt;&lt;span class="sh"&gt;
               alternate-access-address &lt;/span&gt;&lt;span class="nv"&gt;$PIP&lt;/span&gt;&lt;span class="sh"&gt;
               port 3000
       }

       heartbeat {
              mode mesh
              address &lt;/span&gt;&lt;span class="nv"&gt;$IP&lt;/span&gt;&lt;span class="sh"&gt;
              port 3002 # Heartbeat port for this node.
              mesh-seed-address-port &lt;/span&gt;&lt;span class="nv"&gt;$S1IP&lt;/span&gt;&lt;span class="sh"&gt; 3002
              mesh-seed-address-port &lt;/span&gt;&lt;span class="nv"&gt;$S2IP&lt;/span&gt;&lt;span class="sh"&gt; 3002
              interval 150 # controls how often to send a heartbeat packet
              timeout 10 # number of intervals after which a node is considered to be missing
       }

        fabric {
              port 3001
              channel-meta-recv-threads 8
        }

}
security {
        # enable-security true

        log {
                report-authentication true
                report-sys-admin true
                report-user-admin true
                report-violation true
        }
}

namespace mydata {
        # How many copies of the data
        replication-factor 2

        # How full may the memory become before the server begins eviction
        # (expiring records early)
        evict-sys-memory-pct 50

        # How full may the memory become before the server goes read only
        stop-writes-sys-memory-pct 60

        # How long (in seconds) to keep data after it is written;
        # use 0 to never expire/evict.
        default-ttl 0

  # Specify a percentage of record expiration time, read within this interval of the record’s end of life will generate a touch
        # e.g. with default-ttl of 60s, a read with 12 seconds remaining will touch the record. [ 60 x ( default-read-touch-ttl-pct / 100 ) = 12 ]
        default-read-touch-ttl-pct 20

        # The interval at which the main expiration/eviction thread wakes up,
        # to process the namespace.
        nsup-period 120

        # Disables eviction that may occur at cold start for this namespace only
        disable-cold-start-eviction True

  # Data high availability across racks
        rack-id &lt;/span&gt;&lt;span class="k"&gt;${&lt;/span&gt;&lt;span class="nv"&gt;ID&lt;/span&gt;&lt;span class="k"&gt;}&lt;/span&gt;&lt;span class="sh"&gt;

  # SC Mode
        strong-consistency true

        # (optional) write-block is 8MiB in server 7.0 or later so max-record-size can be used to limit the record size.
        max-record-size 128K

#        storage-engine device {
#                device &lt;/span&gt;&lt;span class="nv"&gt;$DEV1&lt;/span&gt;&lt;span class="sh"&gt;
#
#                post-write-cache 64
#                read-page-cache true
#
#         # How full may the disk become before the server begins eviction
#         # (expiring records early)
#                evict-used-pct 45
#        }
        storage-engine memory {
                file /opt/aerospike/ns1.dat   # Location of a namespace data file on server
                filesize 1G                   # Max size of each file in GiB. Maximum size is 2TiB
                stop-writes-avail-pct 5       # (optional) stop-writes threshold as a percentage of
                                              # devices/files size or data-size.
                stop-writes-used-pct 70       # (optional) stop-writes threshold as a percentage of
                                              # devices/files size, or data-size.
                evict-used-pct 60             # (optional) eviction threshold, as a percentage of
                                              # devices/files size, or data-size.
        }
}
&lt;/span&gt;&lt;span class="no"&gt;EOF
&lt;/span&gt;&lt;span class="nb"&gt;sudo cp &lt;/span&gt;aerospike.conf /etc/aerospike/aerospike.conf


&lt;span class="nb"&gt;cat&lt;/span&gt; &lt;span class="o"&gt;&amp;lt;&amp;lt;&lt;/span&gt;&lt;span class="no"&gt;EOF&lt;/span&gt;&lt;span class="sh"&gt;&amp;gt; aerospike.log.rotation
/var/log/aerospike/aerospike.log {
    daily
    rotate 90
    dateext
    compress
    olddir /var/log/aerospike/
    sharedscripts
    postrotate
        /bin/kill -HUP `pgrep -x asd`
    endscript
}
&lt;/span&gt;&lt;span class="no"&gt;EOF
&lt;/span&gt;&lt;span class="nb"&gt;sudo cp &lt;/span&gt;aerospike.log.rotation /etc/logrotate.d/aerospike

&lt;span class="nb"&gt;sudo cp &lt;/span&gt;features.conf /etc/aerospike/features.conf

&lt;span class="nb"&gt;cd&lt;/span&gt; &lt;span class="nv"&gt;$SERVER_BIN&lt;/span&gt;
&lt;span class="nb"&gt;sudo&lt;/span&gt; ./asinstall

&lt;span class="nb"&gt;sudo &lt;/span&gt;systemctl start aerospike
&lt;span class="nb"&gt;sudo &lt;/span&gt;systemctl status aerospike
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;h4&gt;
  
  
  ACL User Authentication
&lt;/h4&gt;

&lt;p&gt;You can run the following command once on a single host to allow the &lt;code&gt;admin&lt;/code&gt; user to add records to the db. Typically, you would set up different users and roles for such tasks, but for simplicity, we are using the &lt;code&gt;admin&lt;/code&gt; role (which is not recommended for production environments).&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight shell"&gt;&lt;code&gt;asadm &lt;span class="nt"&gt;-Uadmin&lt;/span&gt; &lt;span class="nt"&gt;-Padmin&lt;/span&gt; &lt;span class="nt"&gt;-e&lt;/span&gt; &lt;span class="s2"&gt;"enable; manage acl grant user admin roles read-write"&lt;/span&gt;
asadm &lt;span class="nt"&gt;-Uadmin&lt;/span&gt; &lt;span class="nt"&gt;-Padmin&lt;/span&gt; &lt;span class="nt"&gt;-e&lt;/span&gt; &lt;span class="s2"&gt;"enable; manage acl grant user admin roles sys-admin"&lt;/span&gt;
asadm &lt;span class="nt"&gt;-Uadmin&lt;/span&gt; &lt;span class="nt"&gt;-Padmin&lt;/span&gt; &lt;span class="nt"&gt;-e&lt;/span&gt; &lt;span class="s2"&gt;"enable; show user"&lt;/span&gt;
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;p&gt;To get a view of your current Aerospike cluster, including the nodes you've added, you can run the following command. At this stage, you should have added at least three nodes in the London region.&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight shell"&gt;&lt;code&gt;asadm &lt;span class="nt"&gt;-e&lt;/span&gt; i &lt;span class="nt"&gt;-Uadmin&lt;/span&gt; &lt;span class="nt"&gt;-Padmin&lt;/span&gt;

Seed:        &lt;span class="o"&gt;[(&lt;/span&gt;&lt;span class="s1"&gt;'127.0.0.1'&lt;/span&gt;, 3000, None&lt;span class="o"&gt;)]&lt;/span&gt;
Config_file: /home/rocky/.aerospike/astools.conf, /etc/aerospike/astools.conf
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~Network Information &lt;span class="o"&gt;(&lt;/span&gt;2024-08-12 09:44:44 UTC&lt;span class="o"&gt;)&lt;/span&gt;~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
                                           Node| Node|                IP|    Build|Migrations|~~~~~~~~~~~~~~Cluster~~~~~~~~~~~~~~~|Client|  Uptime
                                               |   ID|                  |         |          |Size|        Key|Integrity|Principal| Conns|
172.32.15.231:3000                             | A352|172.32.15.231:3000|E-7.1.0.0|   0.000  |   3|155A640ADB8|True     |     A600|     7|00:08:16
172.32.4.2:3000                                |&lt;span class="k"&gt;*&lt;/span&gt;A600|172.32.4.2:3000   |E-7.1.0.0|   0.000  |   3|155A640ADB8|True     |     A600|     7|00:07:43
ip-172-32-5-239.eu-west-2.compute.internal:3000| A129|172.32.5.239:3000 |E-7.1.0.0|   0.000  |   3|155A640ADB8|True     |     A600|     7|00:09:38
Number of rows: 3

~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~Namespace Usage Information &lt;span class="o"&gt;(&lt;/span&gt;2024-08-12 09:44:44 UTC&lt;span class="o"&gt;)&lt;/span&gt;~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Namespace|                                           Node|Evictions|  Stop|~System Memory~|~~~~Primary Index~~~~|~~Secondary~~~|~~~~~~~~~~~~~~~~~Storage Engine~~~~~~~~~~~~~~~~~
         |                                               |         |Writes| Avail%| Evict%| Type|    Used|Evict%|~~~~Index~~~~~|  Type|    Used|Used%|Evict%|  Used|Avail%|Avail
         |                                               |         |      |       |       |     |        |      | Type|    Used|      |        |     |      | Stop%|      |Stop%
mydata   |172.32.15.231:3000                             |  0.000  |False |     82|     50|shmem|0.000 B | 0.0 %|shmem|0.000 B |memory|0.000 B |0.0 %|60.0 %|70.0 %|99.0 %|5.0 %
mydata   |172.32.4.2:3000                                |  0.000  |False |     82|     50|shmem|0.000 B | 0.0 %|shmem|0.000 B |memory|0.000 B |0.0 %|60.0 %|70.0 %|99.0 %|5.0 %
mydata   |ip-172-32-5-239.eu-west-2.compute.internal:3000|  0.000  |False |     81|     50|shmem|0.000 B | 0.0 %|shmem|0.000 B |memory|0.000 B |0.0 %|60.0 %|70.0 %|99.0 %|5.0 %
mydata   |                                               |  0.000  |      |       |       |     |0.000 B |      |     |0.000 B |      |0.000 B |0.0 %|      |      |      |
Number of rows: 3

~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~Namespace Object Information &lt;span class="o"&gt;(&lt;/span&gt;2024-08-12 09:44:44 UTC&lt;span class="o"&gt;)&lt;/span&gt;~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Namespace|                                           Node|Rack|  Repl|Expirations|  Total|~~~~~~~~~~Objects~~~~~~~~~~|~~~~~~~~~Tombstones~~~~~~~~|~~~~Pending~~~~
         |                                               |  ID|Factor|           |Records| Master|  Prole|Non-Replica| Master|  Prole|Non-Replica|~~~~Migrates~~~
         |                                               |    |      |           |       |       |       |           |       |       |           |     Tx|     Rx
mydata   |172.32.15.231:3000                             | 352|     0|    0.000  |0.000  |0.000  |0.000  |    0.000  |0.000  |0.000  |    0.000  |0.000  |0.000
mydata   |172.32.4.2:3000                                | 600|     0|    0.000  |0.000  |0.000  |0.000  |    0.000  |0.000  |0.000  |    0.000  |0.000  |0.000
mydata   |ip-172-32-5-239.eu-west-2.compute.internal:3000| 129|     0|    0.000  |0.000  |0.000  |0.000  |    0.000  |0.000  |0.000  |    0.000  |0.000  |0.000
mydata   |                                               |    |      |    0.000  |0.000  |0.000  |0.000  |    0.000  |0.000  |0.000  |    0.000  |0.000  |0.000
Number of rows: 3
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;p&gt;Congratulations! You should have added all six nodes in your stretch cluster. This setup includes the three nodes you configured in the London region and the three nodes in the Paris region.&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight shell"&gt;&lt;code&gt;asadm &lt;span class="nt"&gt;-e&lt;/span&gt; i &lt;span class="nt"&gt;-Uadmin&lt;/span&gt; &lt;span class="nt"&gt;-Padmin&lt;/span&gt;
Seed:        &lt;span class="o"&gt;[(&lt;/span&gt;&lt;span class="s1"&gt;'127.0.0.1'&lt;/span&gt;, 3000, None&lt;span class="o"&gt;)]&lt;/span&gt;
Config_file: /home/rocky/.aerospike/astools.conf, /etc/aerospike/astools.conf
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~Network Information &lt;span class="o"&gt;(&lt;/span&gt;2024-08-12 09:56:34 UTC&lt;span class="o"&gt;)&lt;/span&gt;~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
                                          Node| Node|                IP|    Build|Migrations|~~~~~~~~~~~~~~~Cluster~~~~~~~~~~~~~~~|Client|  Uptime
                                              |   ID|                  |         |          |Size|         Key|Integrity|Principal| Conns|
172.32.15.231:3000                            | A352|172.32.15.231:3000|E-7.1.0.0|   0.000  |   6|ECACBC564992|True     |     A882|     7|00:20:06
172.32.4.2:3000                               | A600|172.32.4.2:3000   |E-7.1.0.0|   0.000  |   6|ECACBC564992|True     |     A882|     7|00:19:33
172.32.5.239:3000                             | A129|172.32.5.239:3000 |E-7.1.0.0|   0.000  |   6|ECACBC564992|True     |     A882|     7|00:21:28
172.33.11.90:3000                             |  A70|172.33.11.90:3000 |E-7.1.0.0|   0.000  |   6|ECACBC564992|True     |     A882|     6|00:00:39
172.33.8.38:3000                              |&lt;span class="k"&gt;*&lt;/span&gt;A882|172.33.8.38:3000  |E-7.1.0.0|   0.000  |   6|ECACBC564992|True     |     A882|     7|00:00:46
ip-172-33-7-44.eu-west-3.compute.internal:3000| A517|172.33.7.44:3000  |E-7.1.0.0|   0.000  |   6|ECACBC564992|True     |     A882|     7|00:00:39
Number of rows: 6

~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~Namespace Usage Information &lt;span class="o"&gt;(&lt;/span&gt;2024-08-12 09:56:34 UTC&lt;span class="o"&gt;)&lt;/span&gt;~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Namespace|                                          Node|Evictions|  Stop|~System Memory~|~~~~Primary Index~~~~|~~Secondary~~~|~~~~~~~~~~~~~~~~~Storage Engine~~~~~~~~~~~~~~~~~
         |                                              |         |Writes| Avail%| Evict%| Type|    Used|Evict%|~~~~Index~~~~~|  Type|    Used|Used%|Evict%|  Used|Avail%|Avail
         |                                              |         |      |       |       |     |        |      | Type|    Used|      |        |     |      | Stop%|      |Stop%
mydata   |172.32.15.231:3000                            |  0.000  |False |     78|     50|shmem|0.000 B | 0.0 %|shmem|0.000 B |memory|0.000 B |0.0 %|60.0 %|70.0 %|99.0 %|5.0 %
mydata   |172.32.4.2:3000                               |  0.000  |False |     78|     50|shmem|0.000 B | 0.0 %|shmem|0.000 B |memory|0.000 B |0.0 %|60.0 %|70.0 %|99.0 %|5.0 %
mydata   |172.32.5.239:3000                             |  0.000  |False |     78|     50|shmem|0.000 B | 0.0 %|shmem|0.000 B |memory|0.000 B |0.0 %|60.0 %|70.0 %|99.0 %|5.0 %
mydata   |172.33.11.90:3000                             |  0.000  |False |     78|     50|shmem|0.000 B | 0.0 %|shmem|0.000 B |memory|0.000 B |0.0 %|60.0 %|70.0 %|99.0 %|5.0 %
mydata   |172.33.8.38:3000                              |  0.000  |False |     78|     50|shmem|0.000 B | 0.0 %|shmem|0.000 B |memory|0.000 B |0.0 %|60.0 %|70.0 %|99.0 %|5.0 %
mydata   |ip-172-33-7-44.eu-west-3.compute.internal:3000|  0.000  |False |     77|     50|shmem|0.000 B | 0.0 %|shmem|0.000 B |memory|0.000 B |0.0 %|60.0 %|70.0 %|99.0 %|5.0 %
mydata   |                                              |  0.000  |      |       |       |     |0.000 B |      |     |0.000 B |      |0.000 B |0.0 %|      |      |      |
Number of rows: 6

~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~Namespace Object Information &lt;span class="o"&gt;(&lt;/span&gt;2024-08-12 09:56:34 UTC&lt;span class="o"&gt;)&lt;/span&gt;~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Namespace|                                          Node|Rack|  Repl|Expirations|  Total|~~~~~~~~~~Objects~~~~~~~~~~|~~~~~~~~~Tombstones~~~~~~~~|~~~~Pending~~~~
         |                                              |  ID|Factor|           |Records| Master|  Prole|Non-Replica| Master|  Prole|Non-Replica|~~~~Migrates~~~
         |                                              |    |      |           |       |       |       |           |       |       |           |     Tx|     Rx
mydata   |172.32.15.231:3000                            | 352|     0|    0.000  |0.000  |0.000  |0.000  |    0.000  |0.000  |0.000  |    0.000  |0.000  |0.000
mydata   |172.32.4.2:3000                               | 600|     0|    0.000  |0.000  |0.000  |0.000  |    0.000  |0.000  |0.000  |    0.000  |0.000  |0.000
mydata   |172.32.5.239:3000                             | 129|     0|    0.000  |0.000  |0.000  |0.000  |    0.000  |0.000  |0.000  |    0.000  |0.000  |0.000
mydata   |172.33.11.90:3000                             |  70|     0|    0.000  |0.000  |0.000  |0.000  |    0.000  |0.000  |0.000  |    0.000  |0.000  |0.000
mydata   |172.33.8.38:3000                              | 882|     0|    0.000  |0.000  |0.000  |0.000  |    0.000  |0.000  |0.000  |    0.000  |0.000  |0.000
mydata   |ip-172-33-7-44.eu-west-3.compute.internal:3000| 517|     0|    0.000  |0.000  |0.000  |0.000  |    0.000  |0.000  |0.000  |    0.000  |0.000  |0.000
mydata   |                                              |    |      |    0.000  |0.000  |0.000  |0.000  |    0.000  |0.000  |0.000  |    0.000  |0.000  |0.000
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;h4&gt;
  
  
  Strong Consistency
&lt;/h4&gt;

&lt;p&gt;To enable Strong Consistency (SC) rules in Aerospike, you will need to run a few administrative commands. This ensures that your database maintains strict consistency across all nodes in the cluster.&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight shell"&gt;&lt;code&gt;&lt;span class="nb"&gt;enable
&lt;/span&gt;manage roster stage observed ns mydata
manage recluster
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;p&gt;Run the following to verify that the nodes are part of the roster&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight shell"&gt;&lt;code&gt;show racks
~Racks &lt;span class="o"&gt;(&lt;/span&gt;2024-08-12 10:00:00 UTC&lt;span class="o"&gt;)&lt;/span&gt;~
Namespace|Rack|Nodes
         |  ID|
mydata   |  70|A70
mydata   | 129|A129
mydata   | 352|A352
mydata   | 517|A517
mydata   | 600|A600
mydata   | 882|A882
Number of rows: 6

show roster
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~Roster &lt;span class="o"&gt;(&lt;/span&gt;2024-08-12 10:00:03 UTC&lt;span class="o"&gt;)&lt;/span&gt;~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
                                          Node| Node|Namespace|                                     Current Roster|                                     Pending Roster|                                     Observed Nodes
                                              |   ID|         |                                                   |                                                   |
172.32.5.239:3000                             |A129 |mydata   |A882@882,A600@600,A517@517,A352@352,A129@129,A70@70|A882@882,A600@600,A517@517,A352@352,A129@129,A70@70|A882@882,A600@600,A517@517,A352@352,A129@129,A70@70
172.32.15.231:3000                            |A352 |mydata   |A882@882,A600@600,A517@517,A352@352,A129@129,A70@70|A882@882,A600@600,A517@517,A352@352,A129@129,A70@70|A882@882,A600@600,A517@517,A352@352,A129@129,A70@70
ip-172-33-7-44.eu-west-3.compute.internal:3000|A517 |mydata   |A882@882,A600@600,A517@517,A352@352,A129@129,A70@70|A882@882,A600@600,A517@517,A352@352,A129@129,A70@70|A882@882,A600@600,A517@517,A352@352,A129@129,A70@70
172.32.4.2:3000                               |A600 |mydata   |A882@882,A600@600,A517@517,A352@352,A129@129,A70@70|A882@882,A600@600,A517@517,A352@352,A129@129,A70@70|A882@882,A600@600,A517@517,A352@352,A129@129,A70@70
172.33.11.90:3000                             |A70  |mydata   |A882@882,A600@600,A517@517,A352@352,A129@129,A70@70|A882@882,A600@600,A517@517,A352@352,A129@129,A70@70|A882@882,A600@600,A517@517,A352@352,A129@129,A70@70
172.33.8.38:3000                              |&lt;span class="k"&gt;*&lt;/span&gt;A882|mydata   |A882@882,A600@600,A517@517,A352@352,A129@129,A70@70|A882@882,A600@600,A517@517,A352@352,A129@129,A70@70|A882@882,A600@600,A517@517,A352@352,A129@129,A70@70
Number of rows: 6
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;h4&gt;
  
  
  Edit RACK-IDs
&lt;/h4&gt;

&lt;p&gt;All six nodes (three in London and three in Paris) should appear in the output, indicating they are part of the current cluster roster and are correctly configured.&lt;/p&gt;

&lt;p&gt;But wait!&lt;/p&gt;

&lt;p&gt;It appears there are currently six racks displayed in your Aerospike cluster configuration, which doesn’t align with your setup of 3 nodes per region and a total of 2 regions. Since all nodes in each region are in a single subnet, they could be grouped into two logical racks.&lt;/p&gt;

&lt;p&gt;To correct this, you need to edit the cluster configuration to consolidate the existing racks into the appropriate number of rack-id(s). See the diagram below.&lt;/p&gt;

&lt;p&gt;&lt;a href="https://media2.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2F4ulei6osn1pytjag8z8g.png" class="article-body-image-wrapper"&gt;&lt;img src="https://media2.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2F4ulei6osn1pytjag8z8g.png" alt="Image description" width="800" height="559"&gt;&lt;/a&gt;&lt;/p&gt;

&lt;p&gt;Action: Edit the cluster so we have only 2 logical racks.&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight shell"&gt;&lt;code&gt;&lt;span class="c"&gt;# Login to the admin tool&lt;/span&gt;
asadm &lt;span class="nt"&gt;-Uadmin&lt;/span&gt; &lt;span class="nt"&gt;-Padmin&lt;/span&gt;

&lt;span class="c"&gt;# show the roster&lt;/span&gt;
show roster

&lt;span class="c"&gt;# Output&lt;/span&gt;
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~Roster &lt;span class="o"&gt;(&lt;/span&gt;2024-08-12 10:00:03 UTC&lt;span class="o"&gt;)&lt;/span&gt;~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
                                          Node| Node|Namespace|                                     Current Roster|                                     Pending Roster|                                     Observed Nodes
                                              |   ID|         |                                                   |                                                   |
172.32.5.239:3000                             |A129 |mydata   |A882@882,A600@600,A517@517,A352@352,A129@129,A70@70|A882@882,A600@600,A517@517,A352@352,A129@129,A70@70|A882@882,A600@600,A517@517,A352@352,A129@129,A70@70
172.32.15.231:3000                            |A352 |mydata   |A882@882,A600@600,A517@517,A352@352,A129@129,A70@70|A882@882,A600@600,A517@517,A352@352,A129@129,A70@70|A882@882,A600@600,A517@517,A352@352,A129@129,A70@70
ip-172-33-7-44.eu-west-3.compute.internal:3000|A517 |mydata   |A882@882,A600@600,A517@517,A352@352,A129@129,A70@70|A882@882,A600@600,A517@517,A352@352,A129@129,A70@70|A882@882,A600@600,A517@517,A352@352,A129@129,A70@70
172.32.4.2:3000                               |A600 |mydata   |A882@882,A600@600,A517@517,A352@352,A129@129,A70@70|A882@882,A600@600,A517@517,A352@352,A129@129,A70@70|A882@882,A600@600,A517@517,A352@352,A129@129,A70@70
172.33.11.90:3000                             |A70  |mydata   |A882@882,A600@600,A517@517,A352@352,A129@129,A70@70|A882@882,A600@600,A517@517,A352@352,A129@129,A70@70|A882@882,A600@600,A517@517,A352@352,A129@129,A70@70
172.33.8.38:3000                              |&lt;span class="k"&gt;*&lt;/span&gt;A882|mydata   |A882@882,A600@600,A517@517,A352@352,A129@129,A70@70|A882@882,A600@600,A517@517,A352@352,A129@129,A70@70|A882@882,A600@600,A517@517,A352@352,A129@129,A70@70
Number of rows: 6

&lt;span class="c"&gt;# remove the n-1 nodes from the cluster&lt;/span&gt;
manage roster remove nodes A882@882 A600@600 A517@517 A352@352 A129@129 ns mydata

&lt;span class="c"&gt;# check the current roster should only be a single node&lt;/span&gt;
show roster

&lt;span class="c"&gt;# Output&lt;/span&gt;
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~Roster &lt;span class="o"&gt;(&lt;/span&gt;2024-08-12 11:25:26 UTC&lt;span class="o"&gt;)&lt;/span&gt;~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
                                           Node| Node|Namespace|Current|Pending|                          Observed Nodes
                                               |   ID|         | Roster| Roster|
ip-172-32-5-239.eu-west-2.compute.internal:3000|A129 |mydata   |A70@70  |A70@70  |A882@882,A600@600,A517@517,A352@352,A129@129,A70@70
172.32.15.231:3000                             |A352 |mydata   |A70@70  |A70@70  |A882@882,A600@600,A517@517,A352@352,A129@129,A70@70
172.33.7.44:3000                               |A517 |mydata   |A70@70  |A70@70  |A882@882,A600@600,A517@517,A352@352,A129@129,A70@70
172.32.4.2:3000                                |A600 |mydata   |A70@70  |A70@70  |A882@882,A600@600,A517@517,A352@352,A129@129,A70@70
172.33.11.90:3000                              |&lt;span class="k"&gt;*&lt;/span&gt;A70 |mydata   |A70@70  |A70@70  |A882@882,A600@600,A517@517,A352@352,A129@129,A70@70
172.33.8.38:3000                               |A882 |mydata   |A70@70  |A70@70  |A882@882,A600@600,A517@517,A352@352,A129@129,A70@70
Number of rows: 6

&lt;span class="c"&gt;# change the rack ids&lt;/span&gt;
manage config namespace mydata param rack-id to 32 with A129 A352 A600
manage recluster
info

manage config namespace mydata param rack-id to 33 with A70 A517 A882
manage recluster
info

manage roster stage observed A882@33,A600@32,A517@33,A352@32,A129@32,A70@33 ns mydata
manage recluster
show roster

&lt;span class="c"&gt;# Output&lt;/span&gt;
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~Roster &lt;span class="o"&gt;(&lt;/span&gt;2024-08-12 11:31:08 UTC&lt;span class="o"&gt;)&lt;/span&gt;~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
                                           Node| Node|Namespace|                                Current Roster|                                Pending Roster|                                Observed Nodes
                                               |   ID|         |                                              |                                              |
ip-172-32-5-239.eu-west-2.compute.internal:3000|A129 |mydata   |A882@33,A600@32,A517@33,A352@32,A129@32,A70@33|A882@33,A600@32,A517@33,A352@32,A129@32,A70@33|A882@33,A600@32,A517@33,A352@32,A129@32,A70@33
172.32.15.231:3000                             |A352 |mydata   |A882@33,A600@32,A517@33,A352@32,A129@32,A70@33|A882@33,A600@32,A517@33,A352@32,A129@32,A70@33|A882@33,A600@32,A517@33,A352@32,A129@32,A70@33
172.33.7.44:3000                               |A517 |mydata   |A882@33,A600@32,A517@33,A352@32,A129@32,A70@33|A882@33,A600@32,A517@33,A352@32,A129@32,A70@33|A882@33,A600@32,A517@33,A352@32,A129@32,A70@33
172.32.4.2:3000                                |A600 |mydata   |A882@33,A600@32,A517@33,A352@32,A129@32,A70@33|A882@33,A600@32,A517@33,A352@32,A129@32,A70@33|A882@33,A600@32,A517@33,A352@32,A129@32,A70@33
172.33.11.90:3000                              |&lt;span class="k"&gt;*&lt;/span&gt;A70 |mydata   |A882@33,A600@32,A517@33,A352@32,A129@32,A70@33|A882@33,A600@32,A517@33,A352@32,A129@32,A70@33|A882@33,A600@32,A517@33,A352@32,A129@32,A70@33
172.33.8.38:3000                               |A882 |mydata   |A882@33,A600@32,A517@33,A352@32,A129@32,A70@33|A882@33,A600@32,A517@33,A352@32,A129@32,A70@33|A882@33,A600@32,A517@33,A352@32,A129@32,A70@33
Number of rows: 6

show racks

&lt;span class="c"&gt;# Output&lt;/span&gt;
~Racks &lt;span class="o"&gt;(&lt;/span&gt;2024-08-12 11:31:34 UTC&lt;span class="o"&gt;)&lt;/span&gt;~
Namespace|Rack|         Nodes
         |  ID|
mydata   |  32|A600,A352,A129
mydata   |  33|A882,A517,A70
Number of rows: 2
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;p&gt;Congratulations! You have successfully updated the rack configuration for your cross-regional Aerospike cluster. The cluster now accurately reflects two logical racks—one for each region. Don’t forget to verify and update the &lt;code&gt;rack-id&lt;/code&gt; values in your Aerospike configuration file to match the revised rack setup. This will make sure that the configuration aligns with your intended architecture.&lt;/p&gt;


&lt;h2&gt;Part 3: Insert some records&lt;/h2&gt;

&lt;p&gt;You will want to verify that data is being written to your Aerospike database while performing the split-brain scenarios. To achieve this, you will create a basic Python application to insert data. This will help you verify the cluster's behavior and data consistency under test conditions. Below is a simple Python script that inserts some data into the Aerospike database. This script uses the &lt;code&gt;aerospike&lt;/code&gt; client library to connect to the cluster and perform data operations.&lt;/p&gt;

&lt;p&gt;When you run the provided Python script to insert data into your Aerospike database, the data should be structured and stored as follows. Here’s an example of how the inserted data might look:&lt;/p&gt;

&lt;p&gt;Create an additional EC2 host in one of the subnets to run your code.&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight plaintext"&gt;&lt;code&gt;aql&amp;gt; select * from mydata.dummy limit 10
+------+------+--------------+------------------------+--------------------------------------------------------------------------------------------------------------------+
| PK   | freq | country_code | logged_by              | report                                                                                                             |
+------+------+--------------+------------------------+--------------------------------------------------------------------------------------------------------------------+
| 134  | 340  | 199          | "FJ4Z50qm1YFKLC5g98T2" | MAP('{"colour-scheme":["purple", "magenta"], "date_mfg":"2024-02-09", "machine":["Surface"], "pixels":524288}')    |
| 3758 | 408  | 121          | "rMyHrqM6eZcfYQCcCQFC" | MAP('{"colour-scheme":["cyan", "brown"], "date_mfg":"2023-10-07", "machine":["Inspiron"], "pixels":65536}')        |
| 2297 | 323  | 81           | "kDeHYVgb4QqzCPj1RkOw" | MAP('{"colour-scheme":["orange", "green"], "date_mfg":"2021-08-18", "machine":["MacBook"], "pixels":16777216}')    |
| 1841 | 833  | 224          | "2bedyAaZll3nPGKyty44" | MAP('{"colour-scheme":["green", "purple"], "date_mfg":"2022-07-02", "machine":["Chromebook"], "pixels":16777216}') |
| 3017 | 898  | 213          | "qwGXGe6BdbUHh8ZBGGit" | MAP('{"colour-scheme":["purple", "cyan"], "date_mfg":"2024-06-22", "machine":["ZenBook"], "pixels":32768}')        |
| 3589 | 250  | 165          | "Od4R4ADltbWCD8budaco" | MAP('{"colour-scheme":["yellow", "green"], "date_mfg":"2018-08-02", "machine":["ThinkPad"], "pixels":65536}')      |
| 2432 | 796  | 133          | "DD1Evor4WGFX9yr9WVuc" | MAP('{"colour-scheme":["brown", "cyan"], "date_mfg":"2022-02-04", "machine":["ThinkPad"], "pixels":4194304}')      |
| 1652 | 623  | 1            | "HTkLNYHIPyYwUqtlZ883" | MAP('{"colour-scheme":["blue", "magenta"], "date_mfg":"2019-08-06", "machine":["Latitude"], "pixels":4096}')       |
| 970  | 348  | 91           | "Cao8qtth9x981pjkpp9M" | MAP('{"colour-scheme":["red", "magenta"], "date_mfg":"2019-09-14", "machine":["Latitude"], "pixels":1048576}')     |
| 2683 | 442  | 12           | "W9U9PBvCWodrTvf59FMz" | MAP('{"colour-scheme":["brown", "blue"], "date_mfg":"2024-01-14", "machine":["Latitude"], "pixels":2097152}')      |
+------+------+--------------+------------------------+--------------------------------------------------------------------------------------------------------------------+
10 rows in set (0.033 secs)
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;p&gt;To control how long your Python application should run and insert data into the Aerospike database, modify the script's execution timeout. This allows you to set a specific duration for the script to run.&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight python"&gt;&lt;code&gt;&lt;span class="c1"&gt;# Set a timeout value in seconds
&lt;/span&gt;&lt;span class="n"&gt;run_for_sec&lt;/span&gt; &lt;span class="o"&gt;=&lt;/span&gt; &lt;span class="mi"&gt;30&lt;/span&gt;  &lt;span class="c1"&gt;# Adjust this value based on your needs
&lt;/span&gt;&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;p&gt;Edit the seed hosts for your Aerospike cluster. I have chosen one node from London and one from Paris.&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight python"&gt;&lt;code&gt;&lt;span class="n"&gt;hosts&lt;/span&gt; &lt;span class="o"&gt;=&lt;/span&gt; &lt;span class="p"&gt;[&lt;/span&gt; &lt;span class="p"&gt;(&lt;/span&gt;&lt;span class="sh"&gt;'&lt;/span&gt;&lt;span class="s"&gt;172.33.7.44&lt;/span&gt;&lt;span class="sh"&gt;'&lt;/span&gt;&lt;span class="p"&gt;,&lt;/span&gt; &lt;span class="mi"&gt;3000&lt;/span&gt;&lt;span class="p"&gt;),&lt;/span&gt; &lt;span class="p"&gt;(&lt;/span&gt;&lt;span class="sh"&gt;'&lt;/span&gt;&lt;span class="s"&gt;172.32.5.239&lt;/span&gt;&lt;span class="sh"&gt;'&lt;/span&gt;&lt;span class="p"&gt;,&lt;/span&gt; &lt;span class="mi"&gt;3000&lt;/span&gt;&lt;span class="p"&gt;)&lt;/span&gt; &lt;span class="p"&gt;]&lt;/span&gt;
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;p&gt;To successfully install the Aerospike Python client library, you need to ensure that certain dependencies are met.&lt;/p&gt;

&lt;ul&gt;
&lt;li&gt;python3-devel 
&lt;/li&gt;
&lt;li&gt;python3.8 
&lt;/li&gt;
&lt;li&gt;make&lt;/li&gt;
&lt;/ul&gt;

&lt;p&gt;For example, use the following command to install these dependencies:&lt;/p&gt;

&lt;blockquote&gt;
&lt;p&gt;sudo yum install python3-devel python3.8 make -y&lt;/p&gt;
&lt;/blockquote&gt;

&lt;p&gt;Next, install the Aerospike client library using pip.&lt;/p&gt;

&lt;blockquote&gt;
&lt;p&gt;sudo pip3.8 install aerospike&lt;/p&gt;
&lt;/blockquote&gt;

&lt;p&gt;Here is the code:&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight python"&gt;&lt;code&gt;&lt;span class="kn"&gt;import&lt;/span&gt; &lt;span class="n"&gt;aerospike&lt;/span&gt;
&lt;span class="kn"&gt;from&lt;/span&gt; &lt;span class="n"&gt;aerospike&lt;/span&gt; &lt;span class="kn"&gt;import&lt;/span&gt; &lt;span class="n"&gt;exception&lt;/span&gt; &lt;span class="k"&gt;as&lt;/span&gt; &lt;span class="n"&gt;ex&lt;/span&gt;
&lt;span class="kn"&gt;import&lt;/span&gt; &lt;span class="n"&gt;sys&lt;/span&gt;
&lt;span class="kn"&gt;import&lt;/span&gt; &lt;span class="n"&gt;random&lt;/span&gt;
&lt;span class="kn"&gt;import&lt;/span&gt; &lt;span class="n"&gt;string&lt;/span&gt;
&lt;span class="kn"&gt;from&lt;/span&gt; &lt;span class="n"&gt;datetime&lt;/span&gt; &lt;span class="kn"&gt;import&lt;/span&gt; &lt;span class="n"&gt;datetime&lt;/span&gt;&lt;span class="p"&gt;,&lt;/span&gt; &lt;span class="n"&gt;timedelta&lt;/span&gt;
&lt;span class="kn"&gt;import&lt;/span&gt; &lt;span class="n"&gt;time&lt;/span&gt;

&lt;span class="n"&gt;hosts&lt;/span&gt; &lt;span class="o"&gt;=&lt;/span&gt; &lt;span class="p"&gt;[&lt;/span&gt; &lt;span class="p"&gt;(&lt;/span&gt;&lt;span class="sh"&gt;'&lt;/span&gt;&lt;span class="s"&gt;172.33.7.44&lt;/span&gt;&lt;span class="sh"&gt;'&lt;/span&gt;&lt;span class="p"&gt;,&lt;/span&gt; &lt;span class="mi"&gt;3000&lt;/span&gt;&lt;span class="p"&gt;),&lt;/span&gt; &lt;span class="p"&gt;(&lt;/span&gt;&lt;span class="sh"&gt;'&lt;/span&gt;&lt;span class="s"&gt;172.32.5.239&lt;/span&gt;&lt;span class="sh"&gt;'&lt;/span&gt;&lt;span class="p"&gt;,&lt;/span&gt; &lt;span class="mi"&gt;3000&lt;/span&gt;&lt;span class="p"&gt;)&lt;/span&gt; &lt;span class="p"&gt;]&lt;/span&gt;
&lt;span class="n"&gt;run_for_sec&lt;/span&gt; &lt;span class="o"&gt;=&lt;/span&gt; &lt;span class="mi"&gt;60&lt;/span&gt;

&lt;span class="c1"&gt;# Sleep function to pause execution for a specified number of milliseconds
&lt;/span&gt;&lt;span class="k"&gt;def&lt;/span&gt; &lt;span class="nf"&gt;sleep_ms&lt;/span&gt;&lt;span class="p"&gt;(&lt;/span&gt;&lt;span class="n"&gt;milliseconds&lt;/span&gt;&lt;span class="p"&gt;):&lt;/span&gt;
    &lt;span class="n"&gt;time&lt;/span&gt;&lt;span class="p"&gt;.&lt;/span&gt;&lt;span class="nf"&gt;sleep&lt;/span&gt;&lt;span class="p"&gt;(&lt;/span&gt;&lt;span class="n"&gt;milliseconds&lt;/span&gt; &lt;span class="o"&gt;/&lt;/span&gt; &lt;span class="mf"&gt;1000.0&lt;/span&gt;&lt;span class="p"&gt;)&lt;/span&gt;

&lt;span class="c1"&gt;# Function to generate a list of random dates within a specified range
&lt;/span&gt;&lt;span class="k"&gt;def&lt;/span&gt; &lt;span class="nf"&gt;generate_random_dates&lt;/span&gt;&lt;span class="p"&gt;(&lt;/span&gt;&lt;span class="n"&gt;num_dates&lt;/span&gt;&lt;span class="p"&gt;):&lt;/span&gt;
    &lt;span class="n"&gt;start_date&lt;/span&gt; &lt;span class="o"&gt;=&lt;/span&gt; &lt;span class="nf"&gt;datetime&lt;/span&gt;&lt;span class="p"&gt;(&lt;/span&gt;&lt;span class="mi"&gt;2018&lt;/span&gt;&lt;span class="p"&gt;,&lt;/span&gt; &lt;span class="mi"&gt;1&lt;/span&gt;&lt;span class="p"&gt;,&lt;/span&gt; &lt;span class="mi"&gt;1&lt;/span&gt;&lt;span class="p"&gt;)&lt;/span&gt;  &lt;span class="c1"&gt;# Start date
&lt;/span&gt;    &lt;span class="n"&gt;end_date&lt;/span&gt; &lt;span class="o"&gt;=&lt;/span&gt; &lt;span class="nf"&gt;datetime&lt;/span&gt;&lt;span class="p"&gt;(&lt;/span&gt;&lt;span class="mi"&gt;2024&lt;/span&gt;&lt;span class="p"&gt;,&lt;/span&gt; &lt;span class="mi"&gt;8&lt;/span&gt;&lt;span class="p"&gt;,&lt;/span&gt; &lt;span class="mi"&gt;31&lt;/span&gt;&lt;span class="p"&gt;)&lt;/span&gt;  &lt;span class="c1"&gt;# End date
&lt;/span&gt;    &lt;span class="n"&gt;date_range&lt;/span&gt; &lt;span class="o"&gt;=&lt;/span&gt; &lt;span class="n"&gt;end_date&lt;/span&gt; &lt;span class="o"&gt;-&lt;/span&gt; &lt;span class="n"&gt;start_date&lt;/span&gt;  &lt;span class="c1"&gt;# Calculate date range
&lt;/span&gt;
    &lt;span class="n"&gt;random_dates&lt;/span&gt; &lt;span class="o"&gt;=&lt;/span&gt; &lt;span class="p"&gt;[]&lt;/span&gt;
    &lt;span class="k"&gt;for&lt;/span&gt; &lt;span class="n"&gt;_&lt;/span&gt; &lt;span class="ow"&gt;in&lt;/span&gt; &lt;span class="nf"&gt;range&lt;/span&gt;&lt;span class="p"&gt;(&lt;/span&gt;&lt;span class="n"&gt;num_dates&lt;/span&gt;&lt;span class="p"&gt;):&lt;/span&gt;
        &lt;span class="n"&gt;random_days&lt;/span&gt; &lt;span class="o"&gt;=&lt;/span&gt; &lt;span class="n"&gt;random&lt;/span&gt;&lt;span class="p"&gt;.&lt;/span&gt;&lt;span class="nf"&gt;randint&lt;/span&gt;&lt;span class="p"&gt;(&lt;/span&gt;&lt;span class="mi"&gt;0&lt;/span&gt;&lt;span class="p"&gt;,&lt;/span&gt; &lt;span class="n"&gt;date_range&lt;/span&gt;&lt;span class="p"&gt;.&lt;/span&gt;&lt;span class="n"&gt;days&lt;/span&gt;&lt;span class="p"&gt;)&lt;/span&gt;  &lt;span class="c1"&gt;# Generate random number of days
&lt;/span&gt;        &lt;span class="n"&gt;random_date&lt;/span&gt; &lt;span class="o"&gt;=&lt;/span&gt; &lt;span class="n"&gt;start_date&lt;/span&gt; &lt;span class="o"&gt;+&lt;/span&gt; &lt;span class="nf"&gt;timedelta&lt;/span&gt;&lt;span class="p"&gt;(&lt;/span&gt;&lt;span class="n"&gt;days&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;&lt;span class="n"&gt;random_days&lt;/span&gt;&lt;span class="p"&gt;)&lt;/span&gt;  &lt;span class="c1"&gt;# Add random days to start date
&lt;/span&gt;        &lt;span class="n"&gt;random_dates&lt;/span&gt;&lt;span class="p"&gt;.&lt;/span&gt;&lt;span class="nf"&gt;append&lt;/span&gt;&lt;span class="p"&gt;(&lt;/span&gt;&lt;span class="n"&gt;random_date&lt;/span&gt;&lt;span class="p"&gt;)&lt;/span&gt;

    &lt;span class="k"&gt;return&lt;/span&gt; &lt;span class="n"&gt;random_dates&lt;/span&gt;

&lt;span class="c1"&gt;# Function to generate a random username of a given length
&lt;/span&gt;&lt;span class="k"&gt;def&lt;/span&gt; &lt;span class="nf"&gt;generate_username&lt;/span&gt;&lt;span class="p"&gt;(&lt;/span&gt;&lt;span class="n"&gt;length&lt;/span&gt;&lt;span class="p"&gt;):&lt;/span&gt;
    &lt;span class="n"&gt;characters&lt;/span&gt; &lt;span class="o"&gt;=&lt;/span&gt; &lt;span class="n"&gt;string&lt;/span&gt;&lt;span class="p"&gt;.&lt;/span&gt;&lt;span class="n"&gt;ascii_letters&lt;/span&gt; &lt;span class="o"&gt;+&lt;/span&gt; &lt;span class="n"&gt;string&lt;/span&gt;&lt;span class="p"&gt;.&lt;/span&gt;&lt;span class="n"&gt;digits&lt;/span&gt;  &lt;span class="c1"&gt;# Pool of characters
&lt;/span&gt;    &lt;span class="n"&gt;username&lt;/span&gt; &lt;span class="o"&gt;=&lt;/span&gt; &lt;span class="sh"&gt;''&lt;/span&gt;&lt;span class="p"&gt;.&lt;/span&gt;&lt;span class="nf"&gt;join&lt;/span&gt;&lt;span class="p"&gt;(&lt;/span&gt;&lt;span class="n"&gt;random&lt;/span&gt;&lt;span class="p"&gt;.&lt;/span&gt;&lt;span class="nf"&gt;choice&lt;/span&gt;&lt;span class="p"&gt;(&lt;/span&gt;&lt;span class="n"&gt;characters&lt;/span&gt;&lt;span class="p"&gt;)&lt;/span&gt; &lt;span class="k"&gt;for&lt;/span&gt; &lt;span class="n"&gt;_&lt;/span&gt; &lt;span class="ow"&gt;in&lt;/span&gt; &lt;span class="nf"&gt;range&lt;/span&gt;&lt;span class="p"&gt;(&lt;/span&gt;&lt;span class="n"&gt;length&lt;/span&gt;&lt;span class="p"&gt;))&lt;/span&gt;
    &lt;span class="k"&gt;return&lt;/span&gt; &lt;span class="n"&gt;username&lt;/span&gt;

&lt;span class="c1"&gt;# Function to generate a list of random colors from a predefined set
&lt;/span&gt;&lt;span class="k"&gt;def&lt;/span&gt; &lt;span class="nf"&gt;generate_random_colors&lt;/span&gt;&lt;span class="p"&gt;(&lt;/span&gt;&lt;span class="n"&gt;num_colors&lt;/span&gt;&lt;span class="p"&gt;):&lt;/span&gt;
    &lt;span class="n"&gt;colors&lt;/span&gt; &lt;span class="o"&gt;=&lt;/span&gt; &lt;span class="p"&gt;[&lt;/span&gt;&lt;span class="sh"&gt;'&lt;/span&gt;&lt;span class="s"&gt;red&lt;/span&gt;&lt;span class="sh"&gt;'&lt;/span&gt;&lt;span class="p"&gt;,&lt;/span&gt; &lt;span class="sh"&gt;'&lt;/span&gt;&lt;span class="s"&gt;blue&lt;/span&gt;&lt;span class="sh"&gt;'&lt;/span&gt;&lt;span class="p"&gt;,&lt;/span&gt; &lt;span class="sh"&gt;'&lt;/span&gt;&lt;span class="s"&gt;green&lt;/span&gt;&lt;span class="sh"&gt;'&lt;/span&gt;&lt;span class="p"&gt;,&lt;/span&gt; &lt;span class="sh"&gt;'&lt;/span&gt;&lt;span class="s"&gt;yellow&lt;/span&gt;&lt;span class="sh"&gt;'&lt;/span&gt;&lt;span class="p"&gt;,&lt;/span&gt; &lt;span class="sh"&gt;'&lt;/span&gt;&lt;span class="s"&gt;orange&lt;/span&gt;&lt;span class="sh"&gt;'&lt;/span&gt;&lt;span class="p"&gt;,&lt;/span&gt; &lt;span class="sh"&gt;'&lt;/span&gt;&lt;span class="s"&gt;purple&lt;/span&gt;&lt;span class="sh"&gt;'&lt;/span&gt;&lt;span class="p"&gt;,&lt;/span&gt; &lt;span class="sh"&gt;'&lt;/span&gt;&lt;span class="s"&gt;pink&lt;/span&gt;&lt;span class="sh"&gt;'&lt;/span&gt;&lt;span class="p"&gt;,&lt;/span&gt; &lt;span class="sh"&gt;'&lt;/span&gt;&lt;span class="s"&gt;brown&lt;/span&gt;&lt;span class="sh"&gt;'&lt;/span&gt;&lt;span class="p"&gt;,&lt;/span&gt; &lt;span class="sh"&gt;'&lt;/span&gt;&lt;span class="s"&gt;cyan&lt;/span&gt;&lt;span class="sh"&gt;'&lt;/span&gt;&lt;span class="p"&gt;,&lt;/span&gt; &lt;span class="sh"&gt;'&lt;/span&gt;&lt;span class="s"&gt;magenta&lt;/span&gt;&lt;span class="sh"&gt;'&lt;/span&gt;&lt;span class="p"&gt;]&lt;/span&gt;
    &lt;span class="n"&gt;random_colors&lt;/span&gt; &lt;span class="o"&gt;=&lt;/span&gt; &lt;span class="n"&gt;random&lt;/span&gt;&lt;span class="p"&gt;.&lt;/span&gt;&lt;span class="nf"&gt;choices&lt;/span&gt;&lt;span class="p"&gt;(&lt;/span&gt;&lt;span class="n"&gt;colors&lt;/span&gt;&lt;span class="p"&gt;,&lt;/span&gt; &lt;span class="n"&gt;k&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;&lt;span class="n"&gt;num_colors&lt;/span&gt;&lt;span class="p"&gt;)&lt;/span&gt;
    &lt;span class="k"&gt;return&lt;/span&gt; &lt;span class="n"&gt;random_colors&lt;/span&gt;

&lt;span class="c1"&gt;# Function to generate a list of random computer names from a predefined set
&lt;/span&gt;&lt;span class="k"&gt;def&lt;/span&gt; &lt;span class="nf"&gt;generate_computer_names&lt;/span&gt;&lt;span class="p"&gt;(&lt;/span&gt;&lt;span class="n"&gt;num_computers&lt;/span&gt;&lt;span class="p"&gt;):&lt;/span&gt;
    &lt;span class="n"&gt;computer_types&lt;/span&gt; &lt;span class="o"&gt;=&lt;/span&gt; &lt;span class="p"&gt;[&lt;/span&gt;&lt;span class="sh"&gt;'&lt;/span&gt;&lt;span class="s"&gt;MacBook&lt;/span&gt;&lt;span class="sh"&gt;'&lt;/span&gt;&lt;span class="p"&gt;,&lt;/span&gt; &lt;span class="sh"&gt;'&lt;/span&gt;&lt;span class="s"&gt;ThinkPad&lt;/span&gt;&lt;span class="sh"&gt;'&lt;/span&gt;&lt;span class="p"&gt;,&lt;/span&gt; &lt;span class="sh"&gt;'&lt;/span&gt;&lt;span class="s"&gt;Chromebook&lt;/span&gt;&lt;span class="sh"&gt;'&lt;/span&gt;&lt;span class="p"&gt;,&lt;/span&gt; &lt;span class="sh"&gt;'&lt;/span&gt;&lt;span class="s"&gt;Surface&lt;/span&gt;&lt;span class="sh"&gt;'&lt;/span&gt;&lt;span class="p"&gt;,&lt;/span&gt; &lt;span class="sh"&gt;'&lt;/span&gt;&lt;span class="s"&gt;Latitude&lt;/span&gt;&lt;span class="sh"&gt;'&lt;/span&gt;&lt;span class="p"&gt;,&lt;/span&gt; &lt;span class="sh"&gt;'&lt;/span&gt;&lt;span class="s"&gt;Surface Book&lt;/span&gt;&lt;span class="sh"&gt;'&lt;/span&gt;&lt;span class="p"&gt;,&lt;/span&gt; &lt;span class="sh"&gt;'&lt;/span&gt;&lt;span class="s"&gt;Alienware&lt;/span&gt;&lt;span class="sh"&gt;'&lt;/span&gt;&lt;span class="p"&gt;,&lt;/span&gt; &lt;span class="sh"&gt;'&lt;/span&gt;&lt;span class="s"&gt;ZenBook&lt;/span&gt;&lt;span class="sh"&gt;'&lt;/span&gt;&lt;span class="p"&gt;,&lt;/span&gt; &lt;span class="sh"&gt;'&lt;/span&gt;&lt;span class="s"&gt;Inspiron&lt;/span&gt;&lt;span class="sh"&gt;'&lt;/span&gt;&lt;span class="p"&gt;,&lt;/span&gt; &lt;span class="sh"&gt;'&lt;/span&gt;&lt;span class="s"&gt;Pavilion&lt;/span&gt;&lt;span class="sh"&gt;'&lt;/span&gt;&lt;span class="p"&gt;]&lt;/span&gt;
    &lt;span class="n"&gt;names&lt;/span&gt; &lt;span class="o"&gt;=&lt;/span&gt; &lt;span class="n"&gt;random&lt;/span&gt;&lt;span class="p"&gt;.&lt;/span&gt;&lt;span class="nf"&gt;sample&lt;/span&gt;&lt;span class="p"&gt;(&lt;/span&gt;&lt;span class="n"&gt;computer_types&lt;/span&gt;&lt;span class="p"&gt;,&lt;/span&gt; &lt;span class="n"&gt;num_computers&lt;/span&gt;&lt;span class="p"&gt;)&lt;/span&gt;
    &lt;span class="k"&gt;return&lt;/span&gt; &lt;span class="n"&gt;names&lt;/span&gt;

&lt;span class="c1"&gt;# Configuration for Aerospike client
&lt;/span&gt;&lt;span class="n"&gt;config&lt;/span&gt; &lt;span class="o"&gt;=&lt;/span&gt; &lt;span class="p"&gt;{&lt;/span&gt;
  &lt;span class="sh"&gt;'&lt;/span&gt;&lt;span class="s"&gt;hosts&lt;/span&gt;&lt;span class="sh"&gt;'&lt;/span&gt;&lt;span class="p"&gt;:&lt;/span&gt; &lt;span class="n"&gt;hosts&lt;/span&gt;&lt;span class="p"&gt;,&lt;/span&gt;  &lt;span class="c1"&gt;# Aerospike cluster hosts
&lt;/span&gt;  &lt;span class="sh"&gt;'&lt;/span&gt;&lt;span class="s"&gt;user&lt;/span&gt;&lt;span class="sh"&gt;'&lt;/span&gt;&lt;span class="p"&gt;:&lt;/span&gt; &lt;span class="sh"&gt;"&lt;/span&gt;&lt;span class="s"&gt;admin&lt;/span&gt;&lt;span class="sh"&gt;"&lt;/span&gt;&lt;span class="p"&gt;,&lt;/span&gt;
  &lt;span class="sh"&gt;'&lt;/span&gt;&lt;span class="s"&gt;password&lt;/span&gt;&lt;span class="sh"&gt;'&lt;/span&gt;&lt;span class="p"&gt;:&lt;/span&gt; &lt;span class="sh"&gt;"&lt;/span&gt;&lt;span class="s"&gt;admin&lt;/span&gt;&lt;span class="sh"&gt;"&lt;/span&gt;
&lt;span class="p"&gt;}&lt;/span&gt;

&lt;span class="n"&gt;namespace&lt;/span&gt; &lt;span class="o"&gt;=&lt;/span&gt; &lt;span class="sh"&gt;'&lt;/span&gt;&lt;span class="s"&gt;mydata&lt;/span&gt;&lt;span class="sh"&gt;'&lt;/span&gt;
&lt;span class="nb"&gt;set&lt;/span&gt; &lt;span class="o"&gt;=&lt;/span&gt; &lt;span class="sh"&gt;'&lt;/span&gt;&lt;span class="s"&gt;dummy&lt;/span&gt;&lt;span class="sh"&gt;'&lt;/span&gt;

&lt;span class="k"&gt;try&lt;/span&gt;&lt;span class="p"&gt;:&lt;/span&gt;
    &lt;span class="c1"&gt;# Connect to Aerospike client
&lt;/span&gt;    &lt;span class="n"&gt;client&lt;/span&gt; &lt;span class="o"&gt;=&lt;/span&gt; &lt;span class="n"&gt;aerospike&lt;/span&gt;&lt;span class="p"&gt;.&lt;/span&gt;&lt;span class="nf"&gt;client&lt;/span&gt;&lt;span class="p"&gt;(&lt;/span&gt;&lt;span class="n"&gt;config&lt;/span&gt;&lt;span class="p"&gt;).&lt;/span&gt;&lt;span class="nf"&gt;connect&lt;/span&gt;&lt;span class="p"&gt;()&lt;/span&gt;
    &lt;span class="nf"&gt;print&lt;/span&gt;&lt;span class="p"&gt;(&lt;/span&gt;&lt;span class="sh"&gt;"&lt;/span&gt;&lt;span class="s"&gt;Connected to Server&lt;/span&gt;&lt;span class="sh"&gt;"&lt;/span&gt;&lt;span class="p"&gt;)&lt;/span&gt;

    &lt;span class="c1"&gt;# Create new write policy
&lt;/span&gt;    &lt;span class="n"&gt;write_policy&lt;/span&gt; &lt;span class="o"&gt;=&lt;/span&gt; &lt;span class="p"&gt;{&lt;/span&gt;&lt;span class="sh"&gt;'&lt;/span&gt;&lt;span class="s"&gt;key&lt;/span&gt;&lt;span class="sh"&gt;'&lt;/span&gt;&lt;span class="p"&gt;:&lt;/span&gt; &lt;span class="n"&gt;aerospike&lt;/span&gt;&lt;span class="p"&gt;.&lt;/span&gt;&lt;span class="n"&gt;POLICY_KEY_SEND&lt;/span&gt;&lt;span class="p"&gt;}&lt;/span&gt;

    &lt;span class="c1"&gt;# Set a timeout value in seconds
&lt;/span&gt;    &lt;span class="n"&gt;timeout&lt;/span&gt; &lt;span class="o"&gt;=&lt;/span&gt; &lt;span class="n"&gt;run_for_sec&lt;/span&gt;  &lt;span class="c1"&gt;# Adjust this value based on your needs
&lt;/span&gt;
    &lt;span class="c1"&gt;# Define the start time
&lt;/span&gt;    &lt;span class="n"&gt;start_time&lt;/span&gt; &lt;span class="o"&gt;=&lt;/span&gt; &lt;span class="n"&gt;time&lt;/span&gt;&lt;span class="p"&gt;.&lt;/span&gt;&lt;span class="nf"&gt;time&lt;/span&gt;&lt;span class="p"&gt;()&lt;/span&gt;
    &lt;span class="n"&gt;count&lt;/span&gt; &lt;span class="o"&gt;=&lt;/span&gt; &lt;span class="mi"&gt;0&lt;/span&gt;
    &lt;span class="k"&gt;while&lt;/span&gt; &lt;span class="bp"&gt;True&lt;/span&gt;&lt;span class="p"&gt;:&lt;/span&gt;
        &lt;span class="c1"&gt;# Generate a random key
&lt;/span&gt;        &lt;span class="n"&gt;key&lt;/span&gt; &lt;span class="o"&gt;=&lt;/span&gt; &lt;span class="p"&gt;(&lt;/span&gt;&lt;span class="n"&gt;namespace&lt;/span&gt;&lt;span class="p"&gt;,&lt;/span&gt; &lt;span class="nb"&gt;set&lt;/span&gt;&lt;span class="p"&gt;,&lt;/span&gt; &lt;span class="n"&gt;random&lt;/span&gt;&lt;span class="p"&gt;.&lt;/span&gt;&lt;span class="nf"&gt;randint&lt;/span&gt;&lt;span class="p"&gt;(&lt;/span&gt;&lt;span class="mi"&gt;0&lt;/span&gt;&lt;span class="p"&gt;,&lt;/span&gt; &lt;span class="mi"&gt;4095&lt;/span&gt;&lt;span class="p"&gt;))&lt;/span&gt;

        &lt;span class="c1"&gt;# Generate random data for bins
&lt;/span&gt;        &lt;span class="n"&gt;number_sightings&lt;/span&gt; &lt;span class="o"&gt;=&lt;/span&gt; &lt;span class="n"&gt;random&lt;/span&gt;&lt;span class="p"&gt;.&lt;/span&gt;&lt;span class="nf"&gt;randint&lt;/span&gt;&lt;span class="p"&gt;(&lt;/span&gt;&lt;span class="mi"&gt;0&lt;/span&gt;&lt;span class="p"&gt;,&lt;/span&gt; &lt;span class="mi"&gt;1000&lt;/span&gt;&lt;span class="p"&gt;)&lt;/span&gt;
        &lt;span class="n"&gt;cc&lt;/span&gt; &lt;span class="o"&gt;=&lt;/span&gt; &lt;span class="n"&gt;random&lt;/span&gt;&lt;span class="p"&gt;.&lt;/span&gt;&lt;span class="nf"&gt;randint&lt;/span&gt;&lt;span class="p"&gt;(&lt;/span&gt;&lt;span class="mi"&gt;0&lt;/span&gt;&lt;span class="p"&gt;,&lt;/span&gt; &lt;span class="mi"&gt;252&lt;/span&gt;&lt;span class="p"&gt;)&lt;/span&gt;
        &lt;span class="n"&gt;user&lt;/span&gt; &lt;span class="o"&gt;=&lt;/span&gt; &lt;span class="nf"&gt;generate_username&lt;/span&gt;&lt;span class="p"&gt;(&lt;/span&gt;&lt;span class="mi"&gt;20&lt;/span&gt;&lt;span class="p"&gt;)&lt;/span&gt;
        &lt;span class="n"&gt;date_made&lt;/span&gt; &lt;span class="o"&gt;=&lt;/span&gt; &lt;span class="nf"&gt;generate_random_dates&lt;/span&gt;&lt;span class="p"&gt;(&lt;/span&gt;&lt;span class="mi"&gt;1&lt;/span&gt;&lt;span class="p"&gt;)&lt;/span&gt;

        &lt;span class="n"&gt;data&lt;/span&gt; &lt;span class="o"&gt;=&lt;/span&gt; &lt;span class="p"&gt;{&lt;/span&gt;
            &lt;span class="sh"&gt;'&lt;/span&gt;&lt;span class="s"&gt;machine&lt;/span&gt;&lt;span class="sh"&gt;'&lt;/span&gt;&lt;span class="p"&gt;:&lt;/span&gt; &lt;span class="nf"&gt;generate_computer_names&lt;/span&gt;&lt;span class="p"&gt;(&lt;/span&gt;&lt;span class="mi"&gt;1&lt;/span&gt;&lt;span class="p"&gt;),&lt;/span&gt;
            &lt;span class="sh"&gt;'&lt;/span&gt;&lt;span class="s"&gt;pixels&lt;/span&gt;&lt;span class="sh"&gt;'&lt;/span&gt;&lt;span class="p"&gt;:&lt;/span&gt; &lt;span class="mi"&gt;2&lt;/span&gt; &lt;span class="o"&gt;**&lt;/span&gt; &lt;span class="n"&gt;random&lt;/span&gt;&lt;span class="p"&gt;.&lt;/span&gt;&lt;span class="nf"&gt;randint&lt;/span&gt;&lt;span class="p"&gt;(&lt;/span&gt;&lt;span class="mi"&gt;12&lt;/span&gt;&lt;span class="p"&gt;,&lt;/span&gt; &lt;span class="mi"&gt;24&lt;/span&gt;&lt;span class="p"&gt;),&lt;/span&gt;
            &lt;span class="sh"&gt;'&lt;/span&gt;&lt;span class="s"&gt;colour-scheme&lt;/span&gt;&lt;span class="sh"&gt;'&lt;/span&gt;&lt;span class="p"&gt;:&lt;/span&gt; &lt;span class="nf"&gt;generate_random_colors&lt;/span&gt;&lt;span class="p"&gt;(&lt;/span&gt;&lt;span class="mi"&gt;2&lt;/span&gt;&lt;span class="p"&gt;),&lt;/span&gt;
            &lt;span class="sh"&gt;'&lt;/span&gt;&lt;span class="s"&gt;date_mfg&lt;/span&gt;&lt;span class="sh"&gt;'&lt;/span&gt;&lt;span class="p"&gt;:&lt;/span&gt; &lt;span class="n"&gt;date_made&lt;/span&gt;&lt;span class="p"&gt;[&lt;/span&gt;&lt;span class="mi"&gt;0&lt;/span&gt;&lt;span class="p"&gt;].&lt;/span&gt;&lt;span class="nf"&gt;strftime&lt;/span&gt;&lt;span class="p"&gt;(&lt;/span&gt;&lt;span class="sh"&gt;"&lt;/span&gt;&lt;span class="s"&gt;%Y-%m-%d&lt;/span&gt;&lt;span class="sh"&gt;"&lt;/span&gt;&lt;span class="p"&gt;)&lt;/span&gt;
        &lt;span class="p"&gt;}&lt;/span&gt;

        &lt;span class="c1"&gt;# Create the bins
&lt;/span&gt;        &lt;span class="n"&gt;bins&lt;/span&gt; &lt;span class="o"&gt;=&lt;/span&gt; &lt;span class="p"&gt;{&lt;/span&gt;
            &lt;span class="sh"&gt;'&lt;/span&gt;&lt;span class="s"&gt;freq&lt;/span&gt;&lt;span class="sh"&gt;'&lt;/span&gt;&lt;span class="p"&gt;:&lt;/span&gt; &lt;span class="n"&gt;number_sightings&lt;/span&gt;&lt;span class="p"&gt;,&lt;/span&gt;
            &lt;span class="sh"&gt;'&lt;/span&gt;&lt;span class="s"&gt;country_code&lt;/span&gt;&lt;span class="sh"&gt;'&lt;/span&gt;&lt;span class="p"&gt;:&lt;/span&gt; &lt;span class="n"&gt;cc&lt;/span&gt;&lt;span class="p"&gt;,&lt;/span&gt;
            &lt;span class="sh"&gt;'&lt;/span&gt;&lt;span class="s"&gt;logged_by&lt;/span&gt;&lt;span class="sh"&gt;'&lt;/span&gt;&lt;span class="p"&gt;:&lt;/span&gt; &lt;span class="n"&gt;user&lt;/span&gt;&lt;span class="p"&gt;,&lt;/span&gt;
            &lt;span class="sh"&gt;'&lt;/span&gt;&lt;span class="s"&gt;report&lt;/span&gt;&lt;span class="sh"&gt;'&lt;/span&gt;&lt;span class="p"&gt;:&lt;/span&gt; &lt;span class="n"&gt;data&lt;/span&gt;&lt;span class="p"&gt;,&lt;/span&gt;
        &lt;span class="p"&gt;}&lt;/span&gt;

        &lt;span class="c1"&gt;# Put the record into the Aerospike database
&lt;/span&gt;        &lt;span class="n"&gt;client&lt;/span&gt;&lt;span class="p"&gt;.&lt;/span&gt;&lt;span class="nf"&gt;put&lt;/span&gt;&lt;span class="p"&gt;(&lt;/span&gt;&lt;span class="n"&gt;key&lt;/span&gt;&lt;span class="p"&gt;,&lt;/span&gt; &lt;span class="n"&gt;bins&lt;/span&gt;&lt;span class="p"&gt;,&lt;/span&gt; &lt;span class="n"&gt;policy&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;&lt;span class="n"&gt;write_policy&lt;/span&gt;&lt;span class="p"&gt;)&lt;/span&gt;
        &lt;span class="n"&gt;count&lt;/span&gt; &lt;span class="o"&gt;=&lt;/span&gt; &lt;span class="n"&gt;count&lt;/span&gt; &lt;span class="o"&gt;+&lt;/span&gt; &lt;span class="mi"&gt;1&lt;/span&gt;
        &lt;span class="c1"&gt;# Check if the current time has exceeded the timeout
&lt;/span&gt;        &lt;span class="k"&gt;if&lt;/span&gt; &lt;span class="n"&gt;time&lt;/span&gt;&lt;span class="p"&gt;.&lt;/span&gt;&lt;span class="nf"&gt;time&lt;/span&gt;&lt;span class="p"&gt;()&lt;/span&gt; &lt;span class="o"&gt;-&lt;/span&gt; &lt;span class="n"&gt;start_time&lt;/span&gt; &lt;span class="o"&gt;&amp;gt;&lt;/span&gt; &lt;span class="n"&gt;timeout&lt;/span&gt;&lt;span class="p"&gt;:&lt;/span&gt;
            &lt;span class="nf"&gt;print&lt;/span&gt;&lt;span class="p"&gt;(&lt;/span&gt;&lt;span class="sh"&gt;"&lt;/span&gt;&lt;span class="s"&gt;Duration reached. Records[r/u]:&lt;/span&gt;&lt;span class="sh"&gt;"&lt;/span&gt;&lt;span class="p"&gt;,&lt;/span&gt;&lt;span class="n"&gt;count&lt;/span&gt;&lt;span class="p"&gt;)&lt;/span&gt;
            &lt;span class="k"&gt;break&lt;/span&gt;

        &lt;span class="c1"&gt;# Sleep for 200 milliseconds
&lt;/span&gt;        &lt;span class="nf"&gt;sleep_ms&lt;/span&gt;&lt;span class="p"&gt;(&lt;/span&gt;&lt;span class="mi"&gt;200&lt;/span&gt;&lt;span class="p"&gt;)&lt;/span&gt;

    &lt;span class="c1"&gt;# Close the client connection
&lt;/span&gt;    &lt;span class="n"&gt;client&lt;/span&gt;&lt;span class="p"&gt;.&lt;/span&gt;&lt;span class="nf"&gt;close&lt;/span&gt;&lt;span class="p"&gt;()&lt;/span&gt;
&lt;span class="k"&gt;except&lt;/span&gt; &lt;span class="n"&gt;ex&lt;/span&gt;&lt;span class="p"&gt;.&lt;/span&gt;&lt;span class="n"&gt;ClientError&lt;/span&gt; &lt;span class="k"&gt;as&lt;/span&gt; &lt;span class="n"&gt;e&lt;/span&gt;&lt;span class="p"&gt;:&lt;/span&gt;
    &lt;span class="c1"&gt;# Handle client errors
&lt;/span&gt;    &lt;span class="nf"&gt;print&lt;/span&gt;&lt;span class="p"&gt;(&lt;/span&gt;&lt;span class="sh"&gt;"&lt;/span&gt;&lt;span class="s"&gt;Error: {0} [{1}]&lt;/span&gt;&lt;span class="sh"&gt;"&lt;/span&gt;&lt;span class="p"&gt;.&lt;/span&gt;&lt;span class="nf"&gt;format&lt;/span&gt;&lt;span class="p"&gt;(&lt;/span&gt;&lt;span class="n"&gt;e&lt;/span&gt;&lt;span class="p"&gt;.&lt;/span&gt;&lt;span class="n"&gt;msg&lt;/span&gt;&lt;span class="p"&gt;,&lt;/span&gt; &lt;span class="n"&gt;e&lt;/span&gt;&lt;span class="p"&gt;.&lt;/span&gt;&lt;span class="n"&gt;code&lt;/span&gt;&lt;span class="p"&gt;))&lt;/span&gt;
    &lt;span class="n"&gt;sys&lt;/span&gt;&lt;span class="p"&gt;.&lt;/span&gt;&lt;span class="nf"&gt;exit&lt;/span&gt;&lt;span class="p"&gt;(&lt;/span&gt;&lt;span class="mi"&gt;1&lt;/span&gt;&lt;span class="p"&gt;)&lt;/span&gt;
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;p&gt;Ensure that your Python application continues to run in the background for an extended period, allowing you to perform tests and simulate various scenarios.&lt;/p&gt;

&lt;blockquote&gt;
&lt;p&gt;python3.8 clientApp.py&lt;/p&gt;
&lt;/blockquote&gt;


&lt;h2&gt;Part 4: Split Brain&lt;/h2&gt;

&lt;p&gt;Here is an illustration of a split brain scenario in a distributed data system. This image depicts how two halves of a data system (Paris Region and London Region) might split up and start operating independently due to network separation. Some might even think that's cool!&lt;/p&gt;

&lt;p&gt;&lt;a href="https://media2.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2Fb1hexmwbka8bwtd7t6zp.png" class="article-body-image-wrapper"&gt;&lt;img src="https://media2.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2Fb1hexmwbka8bwtd7t6zp.png" alt="Image description" width="800" height="457"&gt;&lt;/a&gt;&lt;/p&gt;
&lt;h4&gt;
  
  
  Understanding Strong Consistency in Aerospike
&lt;/h4&gt;

&lt;p&gt;In the context of database systems, &lt;a href="https://aerospike.com/docs/server/guide/consistency#consistency-guarantees" rel="noopener noreferrer"&gt;&lt;strong&gt;Strong Consistency (SC)&lt;/strong&gt;&lt;/a&gt; is a critical concept, especially when dealing with scenarios such as split-brain conditions. A split-brain scenario occurs when a network partition divides the database cluster into two or more partitions, potentially leading to data inconsistency.&lt;/p&gt;

&lt;p&gt;Aerospike's Strong Consistency mode is designed to address such challenges by ensuring that all writes to a single record are applied in a strict, sequential order. This guarantees that no writes are reordered or skipped, and thus, no data is lost.&lt;/p&gt;

&lt;p&gt;Here’s a deeper look at how Strong Consistency works and its importance:&lt;/p&gt;
&lt;h4&gt;
  
  
  Key Features of Strong Consistency
&lt;/h4&gt;

&lt;ol&gt;
&lt;li&gt;&lt;p&gt;&lt;em&gt;Sequential Writes&lt;/em&gt;:&lt;br&gt;
All writes to a single record are applied in the order they are received. This ensures that the state of the record is predictable and consistent across all nodes in the cluster.&lt;/p&gt;&lt;/li&gt;
&lt;li&gt;&lt;p&gt;&lt;em&gt;No Data Loss&lt;/em&gt;: Aerospike SC mode ensures that data is not lost, even in the event of network partitions or node failures. However, there are rare exceptions, such as simultaneous hardware failures on different nodes, which could potentially result in data loss.&lt;/p&gt;&lt;/li&gt;
&lt;li&gt;&lt;p&gt;&lt;em&gt;Quorum-based Commit&lt;/em&gt;: Writes are only considered successful when a quorum of nodes acknowledge the write operation. This means that a majority of nodes must agree on the write, ensuring data consistency even in the presence of node failures or network issues.&lt;/p&gt;&lt;/li&gt;
&lt;li&gt;&lt;p&gt;&lt;em&gt;Immediate Consistency&lt;/em&gt;: As soon as a write operation is confirmed, all subsequent read operations will reflect this write. This contrasts with eventual consistency, where reads might temporarily return stale data.&lt;/p&gt;&lt;/li&gt;
&lt;/ol&gt;
&lt;h4&gt;
  
  
  Evaluating Strong Consistency in Split-Brain Scenarios
&lt;/h4&gt;

&lt;p&gt;During a split-brain scenario, the network partition can lead to isolated clusters of nodes. Evaluating the behavior of Aerospike under such conditions is crucial to understanding the robustness of its SC mode. Here’s how SC mode helps:&lt;/p&gt;

&lt;ol&gt;
&lt;li&gt;&lt;p&gt;&lt;em&gt;Write Operations&lt;/em&gt;: In a split-brain scenario, writes can only be processed by the nodes that form a majority partition. This prevents conflicting writes to the same record from different partitions, maintaining data integrity.&lt;/p&gt;&lt;/li&gt;
&lt;li&gt;&lt;p&gt;&lt;em&gt;Read Operations:&lt;/em&gt; Reads will always return the most recent write that was acknowledged by a majority of nodes. If a node is isolated in a minority partition, it will not serve stale data to clients.&lt;/p&gt;&lt;/li&gt;
&lt;li&gt;&lt;p&gt;&lt;em&gt;Reconciliation Post-Recovery&lt;/em&gt;: Once the network partition is resolved, Aerospike uses SC mode to reconcile any divergent states. The system ensures that the state of records is consistent across all nodes, based on the majority writes that were committed during the partition.&lt;/p&gt;&lt;/li&gt;
&lt;/ol&gt;
&lt;h4&gt;
  
  
  How to create the Split Brain
&lt;/h4&gt;

&lt;p&gt;The image below shows the overall network has been split between the 2 regions and their corresponding subnets. Within each subnet, the subcluster now has its own view of the world. Each subcluster is communicating only with nodes inside its own subnet.&lt;/p&gt;

&lt;p&gt;&lt;a href="https://media2.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2F9cswpf15c4soxb77jlhn.png" class="article-body-image-wrapper"&gt;&lt;img src="https://media2.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2F9cswpf15c4soxb77jlhn.png" alt="Image description" width="800" height="512"&gt;&lt;/a&gt;&lt;/p&gt;

&lt;p&gt;To simulate a network split between regions in AWS, log into the Paris AWS Console. Observe that the source is set to &lt;code&gt;0.0.0.0/0&lt;/code&gt;, which means that connections can be established from anywhere.&lt;br&gt;
This configuration allows traffic from any IP address to access these ports.&lt;/p&gt;

&lt;p&gt;&lt;a href="https://media2.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2Fu5irwiaxfzcadngy3jt2.png" class="article-body-image-wrapper"&gt;&lt;img src="https://media2.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2Fu5irwiaxfzcadngy3jt2.png" alt="Image description" width="800" height="212"&gt;&lt;/a&gt;&lt;/p&gt;

&lt;p&gt;To simulate a network split, you would need to restrict the traffic so that only nodes within the same subnet or region can communicate. For example, you can change the source to the specific CIDR block of your subnet or reference the security group itself. Applying these changes will ensure that:&lt;/p&gt;

&lt;ul&gt;
&lt;li&gt;Nodes in the Paris region can communicate with nodes in both the Paris and London regions.&lt;/li&gt;
&lt;li&gt;Nodes in the London region can only communicate with other nodes within the London region.&lt;/li&gt;
&lt;/ul&gt;

&lt;p&gt;This configuration effectively simulates a network split where the London subcluster is isolated from Paris, while Paris can still interact with both regions.&lt;/p&gt;

&lt;p&gt;&lt;a href="https://media2.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2F2inle1kwpuvlj5dkdbxk.png" class="article-body-image-wrapper"&gt;&lt;img src="https://media2.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2F2inle1kwpuvlj5dkdbxk.png" alt="Image description" width="800" height="162"&gt;&lt;/a&gt;&lt;/p&gt;

&lt;p&gt;By examining these details, it becomes evident that the number of records in London has been reduced by half following the network separation.&lt;/p&gt;

&lt;p&gt;Before 940 records 🏴󠁧󠁢󠁥󠁮󠁧󠁿&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight plaintext"&gt;&lt;code&gt;~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~Namespace Object Information (2024-08-14 15:17:18 UTC)~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Namespace|                                            Node|Rack|  Repl|Expirations|    Total|~~~~~~~~~~~~Objects~~~~~~~~~~~~|~~~~~~~~~Tombstones~~~~~~~~|~~~~Pending~~~~
         |                                                |  ID|Factor|           |  Records|   Master|    Prole|Non-Replica| Master|  Prole|Non-Replica|~~~~Migrates~~~
         |                                                |    |      |           |         |         |         |           |       |       |           |     Tx|     Rx
mydata   |172.32.4.2:3000                                 |  32|     2|    0.000  |165.000  | 92.000  | 73.000  |    0.000  |0.000  |0.000  |    0.000  |0.000  |0.000
mydata   |172.32.5.239:3000                               |  32|     2|    0.000  |162.000  | 88.000  | 74.000  |    0.000  |0.000  |0.000  |    0.000  |0.000  |0.000
mydata   |172.33.11.90:3000                               |  33|     2|    0.000  |148.000  | 77.000  | 71.000  |    0.000  |0.000  |0.000  |    0.000  |0.000  |0.000
mydata   |172.33.7.44:3000                                |  33|     2|    0.000  |152.000  | 65.000  | 87.000  |    0.000  |0.000  |0.000  |    0.000  |0.000  |0.000
mydata   |172.33.8.38:3000                                |  33|     2|    0.000  |170.000  | 82.000  | 88.000  |    0.000  |0.000  |0.000  |    0.000  |0.000  |0.000
mydata   |ip-172-32-15-231.eu-west-2.compute.internal:3000|  32|     2|    0.000  |143.000  | 66.000  | 77.000  |    0.000  |0.000  |0.000  |    0.000  |0.000  |0.000
mydata   |                                                |    |      |    0.000  |940.000  |470.000  |470.000  |    0.000  |0.000  |0.000  |    0.000  |0.000  |0.000
Number of rows: 6
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;p&gt;After 470 records 🏴󠁧󠁢󠁥󠁮󠁧󠁿&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight plaintext"&gt;&lt;code&gt;~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~Namespace Object Information (2024-08-14 15:33:09 UTC)~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Namespace|                                            Node|Rack|  Repl|Expirations|    Total|~~~~~~~~~~~~Objects~~~~~~~~~~~~|~~~~~~~~~Tombstones~~~~~~~~|~~~~Pending~~~~
         |                                                |  ID|Factor|           |  Records|   Master|    Prole|Non-Replica| Master|  Prole|Non-Replica|~~~~Migrates~~~
         |                                                |    |      |           |         |         |         |           |       |       |           |     Tx|     Rx
~~       |172.33.11.90:3000                               |  ~~|    ~~|         ~~|       ~~|       ~~|       ~~|         ~~|     ~~|     ~~|         ~~|     ~~|     ~~
~~       |172.33.7.44:3000                                |  ~~|    ~~|         ~~|       ~~|       ~~|       ~~|         ~~|     ~~|     ~~|         ~~|     ~~|     ~~
~~       |172.33.8.38:3000                                |  ~~|    ~~|         ~~|       ~~|       ~~|       ~~|         ~~|     ~~|     ~~|         ~~|     ~~|     ~~
~~       |                                                |    |      |         ~~|       ~~|       ~~|       ~~|         ~~|     ~~|     ~~|         ~~|     ~~|     ~~
mydata   |172.32.4.2:3000                                 |  32|     2|    0.000  |165.000  | 92.000  | 73.000  |    0.000  |0.000  |0.000  |    0.000  |0.000  |0.000
mydata   |172.32.5.239:3000                               |  32|     2|    0.000  |162.000  | 88.000  | 74.000  |    0.000  |0.000  |0.000  |    0.000  |0.000  |0.000
mydata   |ip-172-32-15-231.eu-west-2.compute.internal:3000|  32|     2|    0.000  |143.000  | 66.000  | 77.000  |    0.000  |0.000  |0.000  |    0.000  |0.000  |0.000
mydata   |                                                |    |      |    0.000  |470.000  |246.000  |224.000  |    0.000  |0.000  |0.000  |    0.000  |0.000  |0.000
Number of rows: 6
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;p&gt;As we can see, the Paris 🇫🇷 nodes have not experienced any changes in the number of records or their status. This is because the London ports are still open to sources &lt;code&gt;0.0.0.0/0&lt;/code&gt;, allowing communication between the regions.&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight plaintext"&gt;&lt;code&gt;~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~Namespace Object Information (2024-08-14 15:53:59 UTC)~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Namespace|                                          Node|Rack|  Repl|Expirations|    Total|~~~~~~~~~~~~Objects~~~~~~~~~~~~|~~~~~~~~~Tombstones~~~~~~~~|~~~~Pending~~~~
         |                                              |  ID|Factor|           |  Records|   Master|    Prole|Non-Replica| Master|  Prole|Non-Replica|~~~~Migrates~~~
         |                                              |    |      |           |         |         |         |           |       |       |           |     Tx|     Rx
mydata   |172.32.15.231:3000                            |  32|     2|    0.000  |143.000  | 66.000  | 77.000  |    0.000  |0.000  |0.000  |    0.000  |0.000  |0.000
mydata   |172.32.4.2:3000                               |  32|     2|    0.000  |165.000  | 92.000  | 73.000  |    0.000  |0.000  |0.000  |    0.000  |0.000  |0.000
mydata   |172.32.5.239:3000                             |  32|     2|    0.000  |162.000  | 88.000  | 74.000  |    0.000  |0.000  |0.000  |    0.000  |0.000  |0.000
mydata   |172.33.11.90:3000                             |  33|     2|    0.000  |148.000  | 77.000  | 71.000  |    0.000  |0.000  |0.000  |    0.000  |0.000  |0.000
mydata   |172.33.8.38:3000                              |  33|     2|    0.000  |170.000  | 82.000  | 88.000  |    0.000  |0.000  |0.000  |    0.000  |0.000  |0.000
mydata   |ip-172-33-7-44.eu-west-3.compute.internal:3000|  33|     2|    0.000  |152.000  | 65.000  | 87.000  |    0.000  |0.000  |0.000  |    0.000  |0.000  |0.000
mydata   |                                              |    |      |    0.000  |940.000  |470.000  |470.000  |    0.000  |0.000  |0.000  |    0.000  |0.000  |0.000
Number of rows: 6

Admin&amp;gt;
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;p&gt;Run your Python application from a subnet in London to simulate more inbound data from another region. Recall our previous data was created from Paris.&lt;/p&gt;

&lt;p&gt;Modify the London security group inbound rules to allow traffic only from the London subnet and block all external traffic, including traffic from Paris. By isolating London from Paris, you have successfully created a full split-brain scenario. This setup helps in understanding how such network partitions affect data distribution and cluster communication in a distributed database environment like Aerospike.&lt;/p&gt;

&lt;p&gt;Let's see the results from &lt;code&gt;asadm&lt;/code&gt;, the Aerospike CLI management tool.&lt;/p&gt;

&lt;p&gt;London 🏴󠁧󠁢󠁥󠁮󠁧󠁿&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight plaintext"&gt;&lt;code&gt;~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~Namespace Object Information (2024-08-14 16:18:29 UTC)~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Namespace|                                            Node|Rack|  Repl|Expirations|    Total|~~~~~~~~~~~~Objects~~~~~~~~~~~~|~~~~~~~~~Tombstones~~~~~~~~|~~~~Pending~~~~
         |                                                |  ID|Factor|           |  Records|   Master|    Prole|Non-Replica| Master|  Prole|Non-Replica|~~~~Migrates~~~
         |                                                |    |      |           |         |         |         |           |       |       |           |     Tx|     Rx
~~       |172.33.11.90:3000                               |  ~~|    ~~|         ~~|       ~~|       ~~|       ~~|         ~~|     ~~|     ~~|         ~~|     ~~|     ~~
~~       |172.33.7.44:3000                                |  ~~|    ~~|         ~~|       ~~|       ~~|       ~~|         ~~|     ~~|     ~~|         ~~|     ~~|     ~~
~~       |172.33.8.38:3000                                |  ~~|    ~~|         ~~|       ~~|       ~~|       ~~|         ~~|     ~~|     ~~|         ~~|     ~~|     ~~
~~       |                                                |    |      |         ~~|       ~~|       ~~|       ~~|         ~~|     ~~|     ~~|         ~~|     ~~|     ~~
mydata   |172.32.4.2:3000                                 |  32|     2|    0.000  |481.000  |155.000  |171.000  |  155.000  |0.000  |0.000  |    0.000  |0.000  |0.000
mydata   |172.32.5.239:3000                               |  32|     2|    0.000  |485.000  |178.000  |149.000  |  158.000  |0.000  |0.000  |    0.000  |0.000  |0.000
mydata   |ip-172-32-15-231.eu-west-2.compute.internal:3000|  32|     2|    0.000  |454.000  |147.000  |160.000  |  147.000  |0.000  |0.000  |    0.000  |0.000  |0.000
mydata   |                                                |    |      |    0.000  |  1.420 K|480.000  |480.000  |  460.000  |0.000  |0.000  |    0.000  |0.000  |0.000
Number of rows: 6
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;p&gt;Paris 🇫🇷&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight plaintext"&gt;&lt;code&gt;~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~Namespace Object Information (2024-08-14 16:18:16 UTC)~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Namespace|                                          Node|Rack|  Repl|Expirations|    Total|~~~~~~~~~~~~Objects~~~~~~~~~~~~|~~~~~~~~~Tombstones~~~~~~~~|~~~~Pending~~~~
         |                                              |  ID|Factor|           |  Records|   Master|    Prole|Non-Replica| Master|  Prole|Non-Replica|~~~~Migrates~~~
         |                                              |    |      |           |         |         |         |           |       |       |           |     Tx|     Rx
~~       |172.32.15.231:3000                            |  ~~|    ~~|         ~~|       ~~|       ~~|       ~~|         ~~|     ~~|     ~~|         ~~|     ~~|     ~~
~~       |172.32.4.2:3000                               |  ~~|    ~~|         ~~|       ~~|       ~~|       ~~|         ~~|     ~~|     ~~|         ~~|     ~~|     ~~
~~       |172.32.5.239:3000                             |  ~~|    ~~|         ~~|       ~~|       ~~|       ~~|         ~~|     ~~|     ~~|         ~~|     ~~|     ~~
~~       |                                              |    |      |         ~~|       ~~|       ~~|       ~~|         ~~|     ~~|     ~~|         ~~|     ~~|     ~~
mydata   |172.33.11.90:3000                             |  33|     2|    0.000  |471.000  |165.000  |156.000  |  150.000  |0.000  |0.000  |    0.000  |0.000  |0.000
mydata   |172.33.8.38:3000                              |  33|     2|    0.000  |463.000  |153.000  |154.000  |  156.000  |0.000  |0.000  |    0.000  |0.000  |0.000
mydata   |ip-172-33-7-44.eu-west-3.compute.internal:3000|  33|     2|    0.000  |466.000  |142.000  |150.000  |  174.000  |0.000  |0.000  |    0.000  |0.000  |0.000
mydata   |                                              |    |      |    0.000  |  1.400 K|460.000  |460.000  |  480.000  |0.000  |0.000  |    0.000  |0.000  |0.000
Number of rows: 6
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;p&gt;Run the client application based in London now that we have a full network partition. Notice how it writes a couple of records for the partitions it has before failing.&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight plaintext"&gt;&lt;code&gt;python3.6 aerospike-client.py
Connected to Server
Error: Node not found for partition mydata:3773 [-8]
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;p&gt;London 🏴󠁧󠁢󠁥󠁮󠁧󠁿&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight plaintext"&gt;&lt;code&gt;~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~Namespace Object Information (2024-08-14 16:33:08 UTC)~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Namespace|                                            Node|Rack|  Repl|Expirations|    Total|~~~~~~~~~~~~Objects~~~~~~~~~~~~|~~~~~~~~~Tombstones~~~~~~~~|~~~~Pending~~~~
         |                                                |  ID|Factor|           |  Records|   Master|    Prole|Non-Replica| Master|  Prole|Non-Replica|~~~~Migrates~~~
         |                                                |    |      |           |         |         |         |           |       |       |           |     Tx|     Rx
~~       |172.33.11.90:3000                               |  ~~|    ~~|         ~~|       ~~|       ~~|       ~~|         ~~|     ~~|     ~~|         ~~|     ~~|     ~~
~~       |172.33.7.44:3000                                |  ~~|    ~~|         ~~|       ~~|       ~~|       ~~|         ~~|     ~~|     ~~|         ~~|     ~~|     ~~
~~       |172.33.8.38:3000                                |  ~~|    ~~|         ~~|       ~~|       ~~|       ~~|         ~~|     ~~|     ~~|         ~~|     ~~|     ~~
~~       |                                                |    |      |         ~~|       ~~|       ~~|       ~~|         ~~|     ~~|     ~~|         ~~|     ~~|     ~~
mydata   |172.32.4.2:3000                                 |  32|     2|    0.000  |483.000  |156.000  |172.000  |  155.000  |0.000  |0.000  |    0.000  |0.000  |0.000
mydata   |172.32.5.239:3000                               |  32|     2|    0.000  |487.000  |179.000  |150.000  |  158.000  |0.000  |0.000  |    0.000  |0.000  |0.000
mydata   |ip-172-32-15-231.eu-west-2.compute.internal:3000|  32|     2|    0.000  |454.000  |147.000  |160.000  |  147.000  |0.000  |0.000  |    0.000  |0.000  |0.000
mydata   |                                                |    |      |    0.000  |  1.424 K|482.000  |482.000  |  460.000  |0.000  |0.000  |    0.000  |0.000  |0.000
Number of rows: 6
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;p&gt;Paris 🇫🇷 - we don't expect any record count changes, as we cannot reach Paris&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight plaintext"&gt;&lt;code&gt;~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~Namespace Object Information (2024-08-14 16:34:00 UTC)~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Namespace|                                          Node|Rack|  Repl|Expirations|    Total|~~~~~~~~~~~~Objects~~~~~~~~~~~~|~~~~~~~~~Tombstones~~~~~~~~|~~~~Pending~~~~
         |                                              |  ID|Factor|           |  Records|   Master|    Prole|Non-Replica| Master|  Prole|Non-Replica|~~~~Migrates~~~
         |                                              |    |      |           |         |         |         |           |       |       |           |     Tx|     Rx
~~       |172.32.15.231:3000                            |  ~~|    ~~|         ~~|       ~~|       ~~|       ~~|         ~~|     ~~|     ~~|         ~~|     ~~|     ~~
~~       |172.32.4.2:3000                               |  ~~|    ~~|         ~~|       ~~|       ~~|       ~~|         ~~|     ~~|     ~~|         ~~|     ~~|     ~~
~~       |172.32.5.239:3000                             |  ~~|    ~~|         ~~|       ~~|       ~~|       ~~|         ~~|     ~~|     ~~|         ~~|     ~~|     ~~
~~       |                                              |    |      |         ~~|       ~~|       ~~|       ~~|         ~~|     ~~|     ~~|         ~~|     ~~|     ~~
mydata   |172.33.11.90:3000                             |  33|     2|    0.000  |471.000  |165.000  |156.000  |  150.000  |0.000  |0.000  |    0.000  |0.000  |0.000
mydata   |172.33.8.38:3000                              |  33|     2|    0.000  |463.000  |153.000  |154.000  |  156.000  |0.000  |0.000  |    0.000  |0.000  |0.000
mydata   |ip-172-33-7-44.eu-west-3.compute.internal:3000|  33|     2|    0.000  |466.000  |142.000  |150.000  |  174.000  |0.000  |0.000  |    0.000  |0.000  |0.000
mydata   |                                              |    |      |    0.000  |  1.400 K|460.000  |460.000  |  480.000  |0.000  |0.000  |    0.000  |0.000  |0.000
Number of rows: 6
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;p&gt;To summarise, we have an even split of nodes in each sub-cluster and each is up and running but only for the partitions it owns.&lt;/p&gt;

&lt;p&gt;&lt;strong&gt;Key Observations:&lt;/strong&gt;&lt;/p&gt;

&lt;ul&gt;
&lt;li&gt;&lt;p&gt;&lt;em&gt;Even Split&lt;/em&gt;: Each region is operating independently, handling only the partitions it owns. &lt;/p&gt;&lt;/li&gt;
&lt;li&gt;&lt;p&gt;&lt;em&gt;Partition Ownership&lt;/em&gt;: Each region manages 50% of the total partitions. This means that Paris handles half of the partitions, and London handles the other half.&lt;/p&gt;&lt;/li&gt;
&lt;/ul&gt;

&lt;p&gt;Paris 🇫🇷&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight plaintext"&gt;&lt;code&gt;Admin&amp;gt; show pmap
~~~~~~~~~~~~~~~~~~~~~~~~~~~~Partition Map Analysis (2024-08-14 16:35:12 UTC)~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Namespace|                                          Node| Cluster Key|~~~~~~~~~~~~Partitions~~~~~~~~~~~~
         |                                              |            |Primary|Secondary|Unavailable|Dead
~~       |172.32.15.231:3000                            |          ~~|     ~~|       ~~|         ~~|  ~~
~~       |172.32.4.2:3000                               |          ~~|     ~~|       ~~|         ~~|  ~~
~~       |172.32.5.239:3000                             |          ~~|     ~~|       ~~|         ~~|  ~~
~~       |                                              |            |     ~~|       ~~|         ~~|  ~~
mydata   |172.33.11.90:3000                             |3CF08B51D327|    682|      700|       2048|   0
mydata   |172.33.8.38:3000                              |3CF08B51D327|    683|      669|       2048|   0
mydata   |ip-172-33-7-44.eu-west-3.compute.internal:3000|3CF08B51D327|    683|      679|       2048|   0
mydata   |                                              |            |   2048|     2048|       6144|   0
Number of rows: 6
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;ul&gt;
&lt;li&gt;&lt;p&gt;&lt;em&gt;Partition Distribution&lt;/em&gt;: The &lt;code&gt;show pmap&lt;/code&gt; command confirms that the partitions are evenly split between the Paris and London regions. Each region’s nodes manage an equal share of the partitions, which aligns with the network isolation we’ve implemented.&lt;/p&gt;&lt;/li&gt;
&lt;li&gt;&lt;p&gt;&lt;em&gt;Cluster Operation&lt;/em&gt;: Both clusters (Paris and London) are fully operational but only for the partitions they currently own. This demonstrates how partition ownership and data distribution are maintained even during a network split.&lt;/p&gt;&lt;/li&gt;
&lt;/ul&gt;

&lt;p&gt;By analyzing this command output, it’s clear that each subcluster is functioning correctly within its partition scope, illustrating the impact of the network partition on the Aerospike database’s partition management.&lt;/p&gt;

&lt;h4&gt;
  
  
  Restoring Network Partition Configuration
&lt;/h4&gt;

&lt;p&gt;To resolve the network partition and restore full connectivity, you need to undo the previous security group rule changes made and set the inbound rules back to allow traffic from all sources (&lt;code&gt;0.0.0.0/0&lt;/code&gt;).&lt;/p&gt;

&lt;ul&gt;
&lt;li&gt;&lt;p&gt;&lt;em&gt;Partition Map&lt;/em&gt;: After removing the restrictions, the &lt;code&gt;show pmap&lt;/code&gt; command should show all 4096 partitions being managed correctly across the cluster, indicating that the data is now fully distributed and accessible.&lt;/p&gt;&lt;/li&gt;
&lt;li&gt;&lt;p&gt;&lt;em&gt;Node Communication&lt;/em&gt;: All nodes should be active and successfully heartbeating with each other.&lt;/p&gt;&lt;/li&gt;
&lt;/ul&gt;

&lt;p&gt;By following these steps, you have restored the Aerospike cluster to its full operational state, ensuring all nodes can communicate and data distribution is consistent across the entire system.&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight plaintext"&gt;&lt;code&gt;Admin&amp;gt; show pmap
~~~~~~~~~~~~~~~~~~~~~~~~~~~~Partition Map Analysis (2024-08-14 16:46:53 UTC)~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Namespace|                                          Node| Cluster Key|~~~~~~~~~~~~Partitions~~~~~~~~~~~~
         |                                              |            |Primary|Secondary|Unavailable|Dead
mydata   |172.32.15.231:3000                            |4A4F30116D58|    683|      682|          0|   0
mydata   |172.32.4.2:3000                               |4A4F30116D58|    683|      683|          0|   0
mydata   |172.32.5.239:3000                             |4A4F30116D58|    682|      683|          0|   0
mydata   |172.33.11.90:3000                             |4A4F30116D58|    682|      683|          0|   0
mydata   |172.33.8.38:3000                              |4A4F30116D58|    683|      683|          0|   0
mydata   |ip-172-33-7-44.eu-west-3.compute.internal:3000|4A4F30116D58|    683|      682|          0|   0
mydata   |                                              |            |   4096|     4096|          0|   0
Number of rows: 6

Admin&amp;gt; i
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~Namespace Object Information (2024-08-14 16:46:58 UTC)~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Namespace|                                          Node|Rack|  Repl|Expirations|    Total|~~~~~~~~~~~~Objects~~~~~~~~~~~~|~~~~~~~~~Tombstones~~~~~~~~|~~Pending Migrates~
         |                                              |  ID|Factor|           |  Records|   Master|    Prole|Non-Replica| Master|  Prole|Non-Replica|       Tx|       Rx
mydata   |172.32.15.231:3000                            |  32|     2|    0.000  |434.000  |147.000  |147.000  |  140.000  |0.000  |0.000  |    0.000  |431.000  |102.000
mydata   |172.32.4.2:3000                               |  32|     2|    0.000  |461.000  |156.000  |155.000  |  150.000  |0.000  |0.000  |    0.000  |423.000  |593.000
mydata   |172.32.5.239:3000                             |  32|     2|    0.000  |434.000  |179.000  |158.000  |   97.000  |0.000  |0.000  |    0.000  |425.000  |593.000
mydata   |172.33.11.90:3000                             |  33|     2|    0.000  |436.000  |165.000  |150.000  |  121.000  |0.000  |0.000  |    0.000  |440.000  |422.000
mydata   |172.33.8.38:3000                              |  33|     2|    0.000  |446.000  |153.000  |156.000  |  137.000  |0.000  |0.000  |    0.000  |426.000  |440.000
mydata   |ip-172-33-7-44.eu-west-3.compute.internal:3000|  33|     2|    0.000  |446.000  |142.000  |174.000  |  130.000  |0.000  |0.000  |    0.000  |425.000  |418.000
mydata   |                                              |    |      |    0.000  |  2.657 K|942.000  |940.000  |  775.000  |0.000  |0.000  |    0.000  |  2.570 K|  2.568 K
Number of rows: 6
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;







&lt;h2&gt;Part 5: Understanding Partition Management in Strong Consistency&lt;/h2&gt;

&lt;p&gt;By understanding how Aerospike maintains partitions under Strong Consistency (SC), application developers and solution architects can design their systems to handle network partitions and maintain data integrity effectively. Here's how this knowledge can be applied:&lt;/p&gt;
&lt;h4&gt;
  
  
  Key Points About Aerospike’s Partition Management
&lt;/h4&gt;

&lt;ol&gt;
&lt;li&gt;
&lt;p&gt;Strong Consistency Guarantees:&lt;/p&gt;

&lt;ul&gt;
&lt;li&gt;Aerospike’s SC mode ensures that all writes to a single record are applied in a specific order, sequentially.&lt;/li&gt;
&lt;li&gt;This means that even in a network partition or split-brain scenario, data consistency is preserved as long as the partition ownership and quorum requirements are met.&lt;/li&gt;
&lt;/ul&gt;
&lt;/li&gt;
&lt;li&gt;
&lt;p&gt;Partition Ownership:&lt;/p&gt;

&lt;ul&gt;
&lt;li&gt;Each node in the Aerospike cluster manages a portion of the partitions. During a network split, nodes in each region will only manage the partitions they own. &lt;/li&gt;
&lt;/ul&gt;
&lt;/li&gt;
&lt;/ol&gt;

&lt;ul&gt;
&lt;li&gt;This partition ownership helps ensure that data is consistently handled within each partition subset.&lt;/li&gt;
&lt;/ul&gt;

&lt;ol&gt;
&lt;li&gt;
&lt;p&gt;Data Distribution:&lt;/p&gt;

&lt;ul&gt;
&lt;li&gt;In a distributed system like Aerospike, data is divided into partitions and distributed across nodes. During a split, nodes in each region will continue to manage and serve the partitions they own. This partition-centric approach helps in maintaining operational continuity even when parts of the network are isolated.&lt;/li&gt;
&lt;/ul&gt;
&lt;/li&gt;
&lt;li&gt;
&lt;p&gt;Handling Network Partitions:&lt;/p&gt;

&lt;ul&gt;
&lt;li&gt;When designing systems with Aerospike, it's important to account for the possibility of network partitions. Understanding how partitions are managed and how strong consistency is maintained allows for better planning and mitigation strategies to handle such scenarios.&lt;/li&gt;
&lt;/ul&gt;
&lt;/li&gt;
&lt;li&gt;
&lt;p&gt;Application Design Considerations:&lt;/p&gt;

&lt;ul&gt;
&lt;li&gt;
&lt;em&gt;Data Redundancy&lt;/em&gt;: Verify that data is replicated across multiple nodes to prevent data loss in case of node or region failures.&lt;/li&gt;
&lt;li&gt;
&lt;em&gt;Quorum Configuration&lt;/em&gt;: Configure the quorum settings to balance between performance and data consistency, considering the potential for network partitions.&lt;/li&gt;
&lt;li&gt;
&lt;em&gt;Monitoring and Alerts&lt;/em&gt;: Implement robust monitoring and alerting mechanisms to detect and respond to network partitions and split-brain scenarios promptly.&lt;/li&gt;
&lt;/ul&gt;
&lt;/li&gt;
&lt;li&gt;
&lt;p&gt;Solution Architecture:&lt;/p&gt;

&lt;ul&gt;
&lt;li&gt;Design the architecture to minimize the impact of network partitions.&lt;/li&gt;
&lt;li&gt;This includes configuring the network and security settings to control access between regions and ensuring that the system can handle partitions gracefully without significant data inconsistencies.&lt;/li&gt;
&lt;/ul&gt;
&lt;/li&gt;
&lt;/ol&gt;

&lt;p&gt;By incorporating these considerations into your application design and solution architecture, you can leverage Aerospike’s strong consistency features to build robust, fault-tolerant systems that maintain data integrity even in complex network conditions. &lt;/p&gt;


&lt;h2&gt;Finally: What's next&lt;/h2&gt;
&lt;h4&gt;
  
  
  Exploring Different Network Partition Scenarios
&lt;/h4&gt;

&lt;p&gt;In this article, we explored how a network split (split brain) can affect a distributed data system. We initially focused on an equal split across two regions, but there are numerous permutations of network partitions that can yield interesting and varied results. &lt;br&gt;
Here, we discuss a few of these scenarios.&lt;/p&gt;

&lt;p&gt;&lt;b&gt;A&lt;/b&gt;: Uneven subnet split within a region&lt;/p&gt;

&lt;ol&gt;
&lt;li&gt;
&lt;p&gt;Configuration:&lt;/p&gt;

&lt;ul&gt;
&lt;li&gt;London Region: 6 nodes (2 in each of 3 subnets)&lt;/li&gt;
&lt;li&gt;Paris Region: 6 nodes (2 in each of 3 subnets)&lt;/li&gt;
&lt;/ul&gt;
&lt;/li&gt;
&lt;li&gt;
&lt;p&gt;Network Partition:&lt;/p&gt;

&lt;ul&gt;
&lt;li&gt;Partition localized to a single subnet within the London region.&lt;/li&gt;
&lt;/ul&gt;
&lt;/li&gt;
&lt;li&gt;
&lt;p&gt;Expected Outcome:&lt;/p&gt;

&lt;ul&gt;
&lt;li&gt;The majority sub-cluster (the nodes outside the partitioned subnet) will operate independently, leading to an uneven split within the region, and will most likely hold all active database partitions.&lt;/li&gt;
&lt;li&gt;With replication factor &amp;gt; 1 and a properly configured logical rack layout, all the database partitions will be available in the sub-cluster of 5 subnets.&lt;/li&gt;
&lt;li&gt;Nodes in the single subnet will continue to function normally but may not have any active database partitions.&lt;/li&gt;
&lt;/ul&gt;
&lt;/li&gt;
&lt;/ol&gt;

&lt;p&gt;&lt;b&gt;B&lt;/b&gt;: Uneven split across regions - 2 racks per region&lt;/p&gt;

&lt;p&gt;Each region contains 2 racks, configured for strong consistency with a replication factor of 3. This replication factor ensures that a copy of the data is always written to the alternate data center. Having more than 1 rack allows for resilience against a data center or rack failure, which is a more realistic scenario.&lt;/p&gt;

&lt;ol&gt;
&lt;li&gt;
&lt;p&gt;Configuration:&lt;/p&gt;

&lt;ul&gt;
&lt;li&gt;London Region: 4 nodes ( 2 racks )&lt;/li&gt;
&lt;li&gt;Paris Region: 3 nodes ( 2 racks )&lt;/li&gt;
&lt;/ul&gt;
&lt;/li&gt;
&lt;li&gt;
&lt;p&gt;Network Partition:&lt;/p&gt;

&lt;ul&gt;
&lt;li&gt;Split between the two regions.&lt;/li&gt;
&lt;/ul&gt;
&lt;/li&gt;
&lt;li&gt;
&lt;p&gt;Expected Outcome:&lt;/p&gt;

&lt;ul&gt;
&lt;li&gt;Data will be at rest in the minority Paris cluster but unavailable.&lt;/li&gt;
&lt;li&gt;With replication factor 3 or more and strong consistency, manually resetting the roster to the available nodes will bring the system back online without data loss.&lt;/li&gt;
&lt;/ul&gt;
&lt;/li&gt;
&lt;/ol&gt;

&lt;p&gt;&lt;a href="https://media2.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2Fxuv63eipfegzx46yvhpo.png" class="article-body-image-wrapper"&gt;&lt;img src="https://media2.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2Fxuv63eipfegzx46yvhpo.png" alt="Image description" width="800" height="684"&gt;&lt;/a&gt;&lt;/p&gt;

&lt;p&gt;Under normal circumstances, when writing a record with a replication factor of 3 (rf:3), the master write and replicas can be distributed across any of the 4 racks. Since each region has only 2 racks, at least one copy is guaranteed to be written to a different rack within the other region.&lt;/p&gt;

&lt;p&gt;See the possible combinations below:&lt;/p&gt;

&lt;blockquote&gt;
&lt;p&gt;[100,101,102]&lt;br&gt;&lt;br&gt;
[100,101,103]&lt;br&gt;&lt;br&gt;
[100,102,103]&lt;br&gt;&lt;br&gt;
[101,102,103]&lt;/p&gt;
&lt;/blockquote&gt;

&lt;p&gt;To bring the minority cluster online, you will need to reassign the 3 nodes in Paris.&lt;/p&gt;

&lt;p&gt;After manually re-rostering to the new remaining nodes, the cluster is now operational as below.&lt;/p&gt;

&lt;p&gt;&lt;a href="https://media2.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2Fcglea0f9rqpk4rnkxqi2.png" class="article-body-image-wrapper"&gt;&lt;img src="https://media2.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2Fcglea0f9rqpk4rnkxqi2.png" alt="Image description" width="800" height="694"&gt;&lt;/a&gt;&lt;/p&gt;

&lt;p&gt;&lt;b&gt;C&lt;/b&gt;: Uneven split across regions&lt;/p&gt;

&lt;p&gt;Consider a catastrophic event where the majority of the cluster — the London region's 4 nodes — goes permanently offline. What would the cluster state look like, and how would client connectivity be affected?&lt;/p&gt;

&lt;ol&gt;
&lt;li&gt;
&lt;p&gt;Configuration:&lt;/p&gt;

&lt;ul&gt;
&lt;li&gt;London Region: 4 nodes&lt;/li&gt;
&lt;li&gt;Paris Region: 3 nodes&lt;/li&gt;
&lt;/ul&gt;
&lt;/li&gt;
&lt;li&gt;
&lt;p&gt;Network Partition:&lt;/p&gt;

&lt;ul&gt;
&lt;li&gt;Split between the two regions.&lt;/li&gt;
&lt;/ul&gt;
&lt;/li&gt;
&lt;li&gt;
&lt;p&gt;Expected Outcome:&lt;/p&gt;

&lt;ul&gt;
&lt;li&gt;Data will remain at rest in the minority Paris cluster but will be unavailable.&lt;/li&gt;
&lt;li&gt;With a replication factor of 2 or more and strong consistency, manually resetting the roster to the available nodes will restore the system without any data loss.&lt;/li&gt;
&lt;/ul&gt;
&lt;/li&gt;
&lt;/ol&gt;

&lt;p&gt;We'll have a detailed focus on this particular scenario as it presents an intriguing challenge. &lt;/p&gt;

&lt;p&gt;By now, you should be familiar with setting up VPCs across two regions and establishing a peering connection between them. If you need a refresher, refer to Part 1 of the series titled "Simple Cross-Region Messaging Application."&lt;/p&gt;

&lt;p&gt;Next, create 4 EC2 instances in each region, but only install Aerospike on 7 of the nodes. This ensures an odd-sized cluster, which is crucial for this experiment. If you need guidance on this step, visit Part 2 - "Installing an Aerospike NoSQL DB as a Stretch Cluster." You'll find everything you need there.&lt;/p&gt;

&lt;p&gt;Once completed, you should have strong consistency enabled with 2 racks numbered 32 and 33.&lt;/p&gt;

&lt;p&gt;Now it's time to insert some test data. We covered this in Part 3, so feel free to reuse the same code, updating only the seed host addresses.&lt;/p&gt;

&lt;p&gt;&lt;a href="https://media2.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2Fzb4ln3n4tpc8qudizg31.png" class="article-body-image-wrapper"&gt;&lt;img src="https://media2.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2Fzb4ln3n4tpc8qudizg31.png" alt="Image description" width="800" height="554"&gt;&lt;/a&gt;&lt;/p&gt;

&lt;p&gt;Before adding any data, verify the current state of the cluster. There should be no data present at this stage.&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight plaintext"&gt;&lt;code&gt;# 4:3 split over 2 racks '32' and '33'

~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~Namespace Object Information (2024-08-21 12:18:31 UTC)~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Namespace|                                           Node|Rack|  Repl|Expirations|  Total|~~~~~~~~~~Objects~~~~~~~~~~|~~~~~~~~~Tombstones~~~~~~~~|~~~~Pending~~~~
         |                                               |  ID|Factor|           |Records| Master|  Prole|Non-Replica| Master|  Prole|Non-Replica|~~~~Migrates~~~
         |                                               |    |      |           |       |       |       |           |       |       |           |     Tx|     Rx
mydata   |172.32.0.79:3000                               |  32|     2|    0.000  |0.000  |0.000  |0.000  |    0.000  |0.000  |0.000  |    0.000  |0.000  |0.000
mydata   |172.32.1.105:3000                              |  32|     2|    0.000  |0.000  |0.000  |0.000  |    0.000  |0.000  |0.000  |    0.000  |0.000  |0.000
mydata   |172.32.7.25:3000                               |  32|     2|    0.000  |0.000  |0.000  |0.000  |    0.000  |0.000  |0.000  |    0.000  |0.000  |0.000
mydata   |172.32.8.190:3000                              |  32|     2|    0.000  |0.000  |0.000  |0.000  |    0.000  |0.000  |0.000  |    0.000  |0.000  |0.000
mydata   |172.33.35.102:3000                             |  33|     2|    0.000  |0.000  |0.000  |0.000  |    0.000  |0.000  |0.000  |    0.000  |0.000  |0.000
mydata   |172.33.42.240:3000                             |  33|     2|    0.000  |0.000  |0.000  |0.000  |    0.000  |0.000  |0.000  |    0.000  |0.000  |0.000
mydata   |ip-172-33-46-58.eu-west-3.compute.internal:3000|  33|     2|    0.000  |0.000  |0.000  |0.000  |    0.000  |0.000  |0.000  |    0.000  |0.000  |0.000
mydata   |          
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;p&gt;Insert some test records using your Python Application&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight shell"&gt;&lt;code&gt;python3.8 app.py
Connected to Server
Timeout reached. Exiting loop.
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;p&gt;For our first run we had 271 records split across all nodes and regions. Interestingly, if you sum up the master records in rack '32' [ 35+40+33+38=146] this should equal the prole records in rack '33' [52+53+41=146]. &lt;/p&gt;

&lt;p&gt;This is because we have 2 racks with a replication factor of 2. We expect under normal circumstances a write will be written to rack rx with a copy written to rack ry, and in strong consistency we guarantee no ambiguity about whether a record is written or not written. (Note: with replication factor 2 and a single rack available with 2 or more nodes, clients will still be able to write data successfully)&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight plaintext"&gt;&lt;code&gt;# 271 master records
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~Namespace Object Information (2024-08-21 12:21:31 UTC)~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Namespace|                                           Node|Rack|  Repl|Expirations|    Total|~~~~~~~~~~~~Objects~~~~~~~~~~~~|~~~~~~~~~Tombstones~~~~~~~~|~~~~Pending~~~~
         |                                               |  ID|Factor|           |  Records|   Master|    Prole|Non-Replica| Master|  Prole|Non-Replica|~~~~Migrates~~~
         |                                               |    |      |           |         |         |         |           |       |       |           |     Tx|     Rx
mydata   |172.32.0.79:3000                               |  32|     2|    0.000  | 71.000  | 35.000  | 36.000  |    0.000  |0.000  |0.000  |    0.000  |0.000  |0.000
mydata   |172.32.1.105:3000                              |  32|     2|    0.000  | 71.000  | 40.000  | 31.000  |    0.000  |0.000  |0.000  |    0.000  |0.000  |0.000
mydata   |172.32.7.25:3000                               |  32|     2|    0.000  | 60.000  | 33.000  | 27.000  |    0.000  |0.000  |0.000  |    0.000  |0.000  |0.000
mydata   |172.32.8.190:3000                              |  32|     2|    0.000  | 69.000  | 38.000  | 31.000  |    0.000  |0.000  |0.000  |    0.000  |0.000  |0.000
mydata   |172.33.35.102:3000                             |  33|     2|    0.000  | 98.000  | 46.000  | 52.000  |    0.000  |0.000  |0.000  |    0.000  |0.000  |0.000
mydata   |172.33.42.240:3000                             |  33|     2|    0.000  | 88.000  | 35.000  | 53.000  |    0.000  |0.000  |0.000  |    0.000  |0.000  |0.000
mydata   |ip-172-33-46-58.eu-west-3.compute.internal:3000|  33|     2|    0.000  | 85.000  | 44.000  | 41.000  |    0.000  |0.000  |0.000  |    0.000  |0.000  |0.000
mydata   |                                               |    |      |    0.000  |542.000  |271.000  |271.000  |    0.000  |0.000  |0.000  |    0.000  |0.000  |0.000
Number of rows: 7
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;p&gt;Go ahead and insert some more data and perhaps try doing some accounting checks like above.&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight plaintext"&gt;&lt;code&gt;python3.8 app.py
Connected to Server
Timeout reached. Exiting loop.
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;





&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight plaintext"&gt;&lt;code&gt;# 517 master records
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~Namespace Object Information (2024-08-21 12:24:13 UTC)~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Namespace|                                           Node|Rack|  Repl|Expirations|    Total|~~~~~~~~~~~~Objects~~~~~~~~~~~~|~~~~~~~~~Tombstones~~~~~~~~|~~~~Pending~~~~
         |                                               |  ID|Factor|           |  Records|   Master|    Prole|Non-Replica| Master|  Prole|Non-Replica|~~~~Migrates~~~
         |                                               |    |      |           |         |         |         |           |       |       |           |     Tx|     Rx
mydata   |172.32.0.79:3000                               |  32|     2|    0.000  |134.000  | 68.000  | 66.000  |    0.000  |0.000  |0.000  |    0.000  |0.000  |0.000
mydata   |172.32.1.105:3000                              |  32|     2|    0.000  |137.000  | 78.000  | 59.000  |    0.000  |0.000  |0.000  |    0.000  |0.000  |0.000
mydata   |172.32.7.25:3000                               |  32|     2|    0.000  |110.000  | 63.000  | 47.000  |    0.000  |0.000  |0.000  |    0.000  |0.000  |0.000
mydata   |172.32.8.190:3000                              |  32|     2|    0.000  |136.000  | 77.000  | 59.000  |    0.000  |0.000  |0.000  |    0.000  |0.000  |0.000
mydata   |172.33.35.102:3000                             |  33|     2|    0.000  |192.000  | 79.000  |113.000  |    0.000  |0.000  |0.000  |    0.000  |0.000  |0.000
mydata   |172.33.42.240:3000                             |  33|     2|    0.000  |160.000  | 67.000  | 93.000  |    0.000  |0.000  |0.000  |    0.000  |0.000  |0.000
mydata   |ip-172-33-46-58.eu-west-3.compute.internal:3000|  33|     2|    0.000  |165.000  | 85.000  | 80.000  |    0.000  |0.000  |0.000  |    0.000  |0.000  |0.000
mydata   |                                               |    |      |    0.000  |  1.034 K|517.000  |517.000  |    0.000  |0.000  |0.000  |    0.000  |0.000  |0.000
Number of rows: 7
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;p&gt;How many master records did you get in rack 32 and how many prole records did you get in rack 33?&lt;/p&gt;

&lt;p&gt;Big bang! Go ahead and shutdown all 4 nodes in the London region. Your &lt;code&gt;asadm&lt;/code&gt; info and pmap commands should resemble below:&lt;/p&gt;

&lt;p&gt;&lt;a href="https://media2.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2Fs3hdj5mz4fn4xevyrvtc.png" class="article-body-image-wrapper"&gt;&lt;img src="https://media2.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2Fs3hdj5mz4fn4xevyrvtc.png" alt="Image description" width="800" height="546"&gt;&lt;/a&gt;&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight plaintext"&gt;&lt;code&gt;Admin+&amp;gt; info
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~Namespace Object Information (2024-08-21 12:26:44 UTC)~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Namespace|                                           Node|Rack|  Repl|Expirations|    Total|~~~~~~~~~~Objects~~~~~~~~~~|~~~~~~~~~Tombstones~~~~~~~~|~~~~Pending~~~~
         |                                               |  ID|Factor|           |  Records| Master|  Prole|Non-Replica| Master|  Prole|Non-Replica|~~~~Migrates~~~
         |                                               |    |      |           |         |       |       |           |       |       |           |     Tx|     Rx
~~       |172.32.0.79:3000                               |  ~~|    ~~|         ~~|       ~~|     ~~|     ~~|         ~~|     ~~|     ~~|         ~~|     ~~|     ~~
~~       |172.32.1.105:3000                              |  ~~|    ~~|         ~~|       ~~|     ~~|     ~~|         ~~|     ~~|     ~~|         ~~|     ~~|     ~~
~~       |172.32.7.25:3000                               |  ~~|    ~~|         ~~|       ~~|     ~~|     ~~|         ~~|     ~~|     ~~|         ~~|     ~~|     ~~
~~       |172.32.8.190:3000                              |  ~~|    ~~|         ~~|       ~~|     ~~|     ~~|         ~~|     ~~|     ~~|         ~~|     ~~|     ~~
~~       |                                               |    |      |         ~~|       ~~|     ~~|     ~~|         ~~|     ~~|     ~~|         ~~|     ~~|     ~~
mydata   |172.33.35.102:3000                             |  33|     2|    0.000  |192.000  |0.000  |0.000  |  192.000  |0.000  |0.000  |    0.000  |0.000  |0.000
mydata   |172.33.42.240:3000                             |  33|     2|    0.000  |160.000  |0.000  |0.000  |  160.000  |0.000  |0.000  |    0.000  |0.000  |0.000
mydata   |ip-172-33-46-58.eu-west-3.compute.internal:3000|  33|     2|    0.000  |165.000  |0.000  |0.000  |  165.000  |0.000  |0.000  |    0.000  |0.000  |0.000
mydata   |                                               |    |      |    0.000  |517.000  |0.000  |0.000  |  517.000  |0.000  |0.000  |    0.000  |0.000  |0.000
Number of rows: 7

Admin+&amp;gt; show pmap
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~Partition Map Analysis (2024-08-21 12:28:14 UTC)~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Namespace|                                            Node| Cluster Key|~~~~~~~~~~~~Partitions~~~~~~~~~~~~
         |                                                |            |Primary|Secondary|Unavailable|Dead
~~       |172.32.0.79:3000                                |          ~~|     ~~|       ~~|         ~~|  ~~
~~       |172.32.1.105:3000                               |          ~~|     ~~|       ~~|         ~~|  ~~
~~       |172.32.7.25:3000                                |          ~~|     ~~|       ~~|         ~~|  ~~
~~       |172.32.8.190:3000                               |          ~~|     ~~|       ~~|         ~~|  ~~
~~       |                                                |            |     ~~|       ~~|         ~~|  ~~
mydata   |172.33.35.102:3000                              |39A2BCE70658|      0|        0|       4096|   0
mydata   |172.33.46.58:3000                               |39A2BCE70658|      0|        0|       4096|   0
mydata   |ip-172-33-42-240.eu-west-3.compute.internal:3000|39A2BCE70658|      0|        0|       4096|   0
mydata   |                                                |            |      0|        0|      12288|   0
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;p&gt;At this stage your client cannot access any partitions. To resolve this, you will need to issue an operator command once you are sure the system is stable. Essentially, remove all nodes on rack '32' from the roster.&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight plaintext"&gt;&lt;code&gt;Admin+&amp;gt; manage roster remove nodes A541@32 A445@32 A333@32 A332@32  ns mydata

Node(s) successfully removed from pending-roster.

Run "manage recluster" for your changes to take affect.
Admin+&amp;gt; manage recluster
Successfully started recluster

Admin+&amp;gt; show roster
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~Roster (2024-08-21 12:29:03 UTC)~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
                                            Node|        Node ID|Namespace|         Current Roster|         Pending Roster|         Observed Nodes
172.32.1.105:3000                               |000000000000000|~~       |~~                     |~~                     |~~
172.32.8.190:3000                               |000000000000000|~~       |~~                     |~~                     |~~
172.32.7.25:3000                                |000000000000000|~~       |~~                     |~~                     |~~
172.32.0.79:3000                                |000000000000000|~~       |~~                     |~~                     |~~
172.33.46.58:3000                               |A250           |mydata   |A829@33,A476@33,A250@33|A829@33,A476@33,A250@33|A829@33,A476@33,A250@33
ip-172-33-42-240.eu-west-3.compute.internal:3000|A476           |mydata   |A829@33,A476@33,A250@33|A829@33,A476@33,A250@33|A829@33,A476@33,A250@33
172.33.35.102:3000                              |*A829          |mydata   |A829@33,A476@33,A250@33|A829@33,A476@33,A250@33|A829@33,A476@33,A250@33
Number of rows: 7

Admin+&amp;gt; show pmap
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~Partition Map Analysis (2024-08-21 12:29:11 UTC)~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Namespace|                                            Node| Cluster Key|~~~~~~~~~~~~Partitions~~~~~~~~~~~~
         |                                                |            |Primary|Secondary|Unavailable|Dead
~~       |172.32.0.79:3000                                |          ~~|     ~~|       ~~|         ~~|  ~~
~~       |172.32.1.105:3000                               |          ~~|     ~~|       ~~|         ~~|  ~~
~~       |172.32.7.25:3000                                |          ~~|     ~~|       ~~|         ~~|  ~~
~~       |172.32.8.190:3000                               |          ~~|     ~~|       ~~|         ~~|  ~~
~~       |                                                |            |     ~~|       ~~|         ~~|  ~~
mydata   |172.33.35.102:3000                              |E6BE8E200C70|   1366|     1365|          0|   0
mydata   |172.33.46.58:3000                               |E6BE8E200C70|   1365|     1365|          0|   0
mydata   |ip-172-33-42-240.eu-west-3.compute.internal:3000|E6BE8E200C70|   1365|     1366|          0|   0
mydata   |                                                |            |   4096|     4096|          0|   0
Number of rows: 7
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;p&gt;At this point your client should be able to write data. However, let's validate the overall record count.&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight plaintext"&gt;&lt;code&gt;# After recluster we have 517 records as before

~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~Namespace Object Information (2024-08-21 12:29:31 UTC)~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Namespace|                                           Node|Rack|  Repl|Expirations|    Total|~~~~~~~~~~~~Objects~~~~~~~~~~~~|~~~~~~~~~Tombstones~~~~~~~~|~~~~Pending~~~~
         |                                               |  ID|Factor|           |  Records|   Master|    Prole|Non-Replica| Master|  Prole|Non-Replica|~~~~Migrates~~~
         |                                               |    |      |           |         |         |         |           |       |       |           |     Tx|     Rx
~~       |172.32.0.79:3000                               |  ~~|    ~~|         ~~|       ~~|       ~~|       ~~|         ~~|     ~~|     ~~|         ~~|     ~~|     ~~
~~       |172.32.1.105:3000                              |  ~~|    ~~|         ~~|       ~~|       ~~|       ~~|         ~~|     ~~|     ~~|         ~~|     ~~|     ~~
~~       |172.32.7.25:3000                               |  ~~|    ~~|         ~~|       ~~|       ~~|       ~~|         ~~|     ~~|     ~~|         ~~|     ~~|     ~~
~~       |172.32.8.190:3000                              |  ~~|    ~~|         ~~|       ~~|       ~~|       ~~|         ~~|     ~~|     ~~|         ~~|     ~~|     ~~
~~       |                                               |    |      |         ~~|       ~~|       ~~|       ~~|         ~~|     ~~|     ~~|         ~~|     ~~|     ~~
mydata   |172.33.35.102:3000                             |  33|     2|    0.000  |364.000  |195.000  |169.000  |    0.000  |0.000  |0.000  |    0.000  |0.000  |0.000
mydata   |172.33.42.240:3000                             |  33|     2|    0.000  |331.000  |158.000  |173.000  |    0.000  |0.000  |0.000  |    0.000  |0.000  |0.000
mydata   |ip-172-33-46-58.eu-west-3.compute.internal:3000|  33|     2|    0.000  |339.000  |164.000  |175.000  |    0.000  |0.000  |0.000  |    0.000  |0.000  |0.000
mydata   |                                               |    |      |    0.000  |  1.034 K|517.000  |517.000  |    0.000  |0.000  |0.000  |    0.000  |0.000  |0.000
Number of rows: 7
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;p&gt;Insert some additional test data before reviving rack '32' in London.&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight plaintext"&gt;&lt;code&gt;python3.8 app.py
Connected to Server
Timeout reached. Exiting loop.
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;





&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight shell"&gt;&lt;code&gt;&lt;span class="c"&gt;# We now have 778 records&lt;/span&gt;
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~Namespace Object Information &lt;span class="o"&gt;(&lt;/span&gt;2024-08-21 12:44:51 UTC&lt;span class="o"&gt;)&lt;/span&gt;~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Namespace|                                           Node|Rack|  Repl|Expirations|    Total|~~~~~~~~~~~~Objects~~~~~~~~~~~~|~~~~~~~~~Tombstones~~~~~~~~|~~~~Pending~~~~
         |                                               |  ID|Factor|           |  Records|   Master|    Prole|Non-Replica| Master|  Prole|Non-Replica|~~~~Migrates~~~
         |                                               |    |      |           |         |         |         |           |       |       |           |     Tx|     Rx
~~       |172.32.0.79:3000                               |  ~~|    ~~|         ~~|       ~~|       ~~|       ~~|         ~~|     ~~|     ~~|         ~~|     ~~|     ~~
~~       |172.32.1.105:3000                              |  ~~|    ~~|         ~~|       ~~|       ~~|       ~~|         ~~|     ~~|     ~~|         ~~|     ~~|     ~~
~~       |172.32.7.25:3000                               |  ~~|    ~~|         ~~|       ~~|       ~~|       ~~|         ~~|     ~~|     ~~|         ~~|     ~~|     ~~
~~       |172.32.8.190:3000                              |  ~~|    ~~|         ~~|       ~~|       ~~|       ~~|         ~~|     ~~|     ~~|         ~~|     ~~|     ~~
~~       |                                               |    |      |         ~~|       ~~|       ~~|       ~~|         ~~|     ~~|     ~~|         ~~|     ~~|     ~~
mydata   |172.33.35.102:3000                             |  33|     2|    0.000  |557.000  |295.000  |262.000  |    0.000  |0.000  |0.000  |    0.000  |0.000  |0.000
mydata   |172.33.42.240:3000                             |  33|     2|    0.000  |498.000  |250.000  |248.000  |    0.000  |0.000  |0.000  |    0.000  |0.000  |0.000
mydata   |ip-172-33-46-58.eu-west-3.compute.internal:3000|  33|     2|    0.000  |501.000  |233.000  |268.000  |    0.000  |0.000  |0.000  |    0.000  |0.000  |0.000
mydata   |                                               |    |      |    0.000  |  1.556 K|778.000  |778.000  |    0.000  |0.000  |0.000  |    0.000  |0.000  |0.000
Number of rows: 7
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;p&gt;Now that you have been reliably informed the outage has been rectified, bring all the nodes in rack '32' back online.&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight plaintext"&gt;&lt;code&gt;Admin+&amp;gt; manage roster stage observed A541@32,A445@32,A333@32,A332@32 ns mydata
Pending roster now contains observed nodes.
Run "manage recluster" for your changes to take affect.

Admin+&amp;gt; manage recluster
Successfully started recluster

Admin+&amp;gt; show racks
~Racks (2024-08-21 12:48:07 UTC)~~
Namespace|Rack|              Nodes
         |  ID|
mydata   |  32|A541,A445,A333,A332
mydata   |  33|A829,A476,A250
Number of rows: 2

Admin+&amp;gt; show pmap
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~Partition Map Analysis (2024-08-21 12:48:13 UTC)~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Namespace|                                            Node| Cluster Key|~~~~~~~~~~~~Partitions~~~~~~~~~~~~
         |                                                |            |Primary|Secondary|Unavailable|Dead
mydata   |172.32.0.79:3000                                |4AC63FB091A3|    109|      915|          0|   0
mydata   |172.32.1.105:3000                               |4AC63FB091A3|    103|      921|          0|   0
mydata   |172.32.7.25:3000                                |4AC63FB091A3|    111|      913|          0|   0
mydata   |172.32.8.190:3000                               |4AC63FB091A3|     99|      925|          0|   0
mydata   |172.33.35.102:3000                              |4AC63FB091A3|   1213|      179|          0|   0
mydata   |172.33.46.58:3000                               |4AC63FB091A3|   1226|      185|          0|   0
mydata   |ip-172-33-42-240.eu-west-3.compute.internal:3000|4AC63FB091A3|   1235|      167|          0|   0
mydata   |                                                |            |   4096|     4205|          0|   0
Number of rows: 7

Admin+&amp;gt; show roster
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~Roster (2024-08-21 12:48:19 UTC)~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
                                            Node| Node|Namespace|                                         Current Roster|                                         Pending Roster|                                         Observed Nodes
                                                |   ID|         |                                                       |                                                       |
172.33.46.58:3000                               |A250 |mydata   |A829@33,A541@32,A476@33,A445@32,A333@32,A332@32,A250@33|A829@33,A541@32,A476@33,A445@32,A333@32,A332@32,A250@33|A829@33,A541@32,A476@33,A445@32,A333@32,A332@32,A250@33
172.32.8.190:3000                               |A332 |mydata   |A829@33,A541@32,A476@33,A445@32,A333@32,A332@32,A250@33|A829@33,A541@32,A476@33,A445@32,A333@32,A332@32,A250@33|A829@33,A541@32,A476@33,A445@32,A333@32,A332@32,A250@33
172.32.1.105:3000                               |A333 |mydata   |A829@33,A541@32,A476@33,A445@32,A333@32,A332@32,A250@33|A829@33,A541@32,A476@33,A445@32,A333@32,A332@32,A250@33|A829@33,A541@32,A476@33,A445@32,A333@32,A332@32,A250@33
172.32.7.25:3000                                |A445 |mydata   |A829@33,A541@32,A476@33,A445@32,A333@32,A332@32,A250@33|A829@33,A541@32,A476@33,A445@32,A333@32,A332@32,A250@33|A829@33,A541@32,A476@33,A445@32,A333@32,A332@32,A250@33
ip-172-33-42-240.eu-west-3.compute.internal:3000|A476 |mydata   |A829@33,A541@32,A476@33,A445@32,A333@32,A332@32,A250@33|A829@33,A541@32,A476@33,A445@32,A333@32,A332@32,A250@33|A829@33,A541@32,A476@33,A445@32,A333@32,A332@32,A250@33
172.32.0.79:3000                                |A541 |mydata   |A829@33,A541@32,A476@33,A445@32,A333@32,A332@32,A250@33|A829@33,A541@32,A476@33,A445@32,A333@32,A332@32,A250@33|A829@33,A541@32,A476@33,A445@32,A333@32,A332@32,A250@33
172.33.35.102:3000                              |*A829|mydata   |A829@33,A541@32,A476@33,A445@32,A333@32,A332@32,A250@33|A829@33,A541@32,A476@33,A445@32,A333@32,A332@32,A250@33|A829@33,A541@32,A476@33,A445@32,A333@32,A332@32,A250@33
Number of rows: 7


# 778 records
Admin+&amp;gt; info
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~Namespace Object Information (2024-08-21 12:49:17 UTC)~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Namespace|                                           Node|Rack|  Repl|Expirations|    Total|~~~~~~~~~~~~Objects~~~~~~~~~~~~|~~~~~~~~~Tombstones~~~~~~~~|~~~~Pending~~~~
         |                                               |  ID|Factor|           |  Records|   Master|    Prole|Non-Replica| Master|  Prole|Non-Replica|~~~~Migrates~~~
         |                                               |    |      |           |         |         |         |           |       |       |           |     Tx|     Rx
mydata   |172.32.0.79:3000                               |  32|     2|    0.000  |195.000  |100.000  | 95.000  |    0.000  |0.000  |0.000  |    0.000  |0.000  |0.000
mydata   |172.32.1.105:3000                              |  32|     2|    0.000  |209.000  |115.000  | 94.000  |    0.000  |0.000  |0.000  |    0.000  |0.000  |0.000
mydata   |172.32.7.25:3000                               |  32|     2|    0.000  |176.000  |102.000  | 74.000  |    0.000  |0.000  |0.000  |    0.000  |0.000  |0.000
mydata   |172.32.8.190:3000                              |  32|     2|    0.000  |198.000  |108.000  | 90.000  |    0.000  |0.000  |0.000  |    0.000  |0.000  |0.000
mydata   |172.33.35.102:3000                             |  33|     2|    0.000  |291.000  |126.000  |165.000  |    0.000  |0.000  |0.000  |    0.000  |0.000  |0.000
mydata   |172.33.42.240:3000                             |  33|     2|    0.000  |252.000  |104.000  |148.000  |    0.000  |0.000  |0.000  |    0.000  |0.000  |0.000
mydata   |ip-172-33-46-58.eu-west-3.compute.internal:3000|  33|     2|    0.000  |235.000  |123.000  |112.000  |    0.000  |0.000  |0.000  |    0.000  |0.000  |0.000
mydata   |                                               |    |      |    0.000  |  1.556 K|778.000  |778.000  |    0.000  |0.000  |0.000  |    0.000  |0.000  |0.000
Number of rows: 7
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;p&gt;Voila! - Looks good to me. We have all 778 records that we expected.&lt;/p&gt;

&lt;h4&gt;
  
  
  Automating Network Partition Scenarios
&lt;/h4&gt;

&lt;p&gt;Automating these scenarios can save time and reduce the potential for human error. You can use scripts and tools like AWS CLI or Terraform to create and manage these network partitions.&lt;/p&gt;

&lt;h4&gt;
  
  
  Future: Service Mesh and Kubernetes
&lt;/h4&gt;

&lt;p&gt;In the next article, we will explore creating a similar network partition scenario using a service mesh and stretching a database cluster in Kubernetes with the Aerospike Kubernetes Operator.&lt;/p&gt;

&lt;h4&gt;
  
  
  Contact and Feedback
&lt;/h4&gt;

&lt;p&gt;If you have any questions or suggestions, feel free to drop me a line at &lt;a href="mailto:icloud.nkm@gmail.com"&gt;icloud.nkm@gmail.com&lt;/a&gt;.&lt;/p&gt;

&lt;h4&gt;
  
  
  Conclusion
&lt;/h4&gt;

&lt;p&gt;We have discussed various network partition scenarios and their potential impacts on a distributed data system. By understanding and testing these scenarios, you can design more resilient and robust systems.&lt;/p&gt;




&lt;p&gt;Hope you have enjoyed reading this article and learned something new.&lt;/p&gt;

</description>
    </item>
    <item>
      <title>How to use the Kubernetes Operator to establish connectivity between clusters</title>
      <dc:creator>Naresh Maharaj</dc:creator>
      <pubDate>Fri, 09 Feb 2024 16:32:21 +0000</pubDate>
      <link>https://dev.to/aerospike/how-to-use-the-kubernetes-operator-to-establish-connectivity-between-clusters-443</link>
      <guid>https://dev.to/aerospike/how-to-use-the-kubernetes-operator-to-establish-connectivity-between-clusters-443</guid>
      <description>&lt;p&gt;&lt;a href="https://media2.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdeveloper-hub.s3.us-west-1.amazonaws.com%2Fnaresh-maharaj%2Fhow-to-use-kubernetes-operator-to-establish-connectivity_1707420652860.webp" class="article-body-image-wrapper"&gt;&lt;img src="https://media2.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdeveloper-hub.s3.us-west-1.amazonaws.com%2Fnaresh-maharaj%2Fhow-to-use-kubernetes-operator-to-establish-connectivity_1707420652860.webp" width="800" height="450"&gt;&lt;/a&gt;&lt;/p&gt;

&lt;p&gt;Discover how Aerospike's Cross Datacenter Replication feature (XDR) transfers data between clusters effortlessly.&lt;/p&gt;

&lt;p&gt;This post outlines how to establish connectivity between two Aerospike clusters by leveraging Aerospike's &lt;a href="https://aerospike.com/products/features/cross-datacenter-replication-xdr/" rel="noopener noreferrer"&gt;Cross Datacenter Replication&lt;/a&gt; (XDR) feature to seamlessly transmit data from a source cluster to a destination cluster, providing high availability for critical data systems. &lt;/p&gt;

&lt;p&gt;Ensuring network visibility for Aerospike service ports in the remote data center from a source cluster is crucial. However, this can pose challenges, especially in a Kubernetes environment. Fortunately, there is a solution: deploying a proxy server in front of the private Kubernetes destination cluster. To showcase this solution, we recommend installing the &lt;a href="https://aerospike.com/products/kubernetes-operator/" rel="noopener noreferrer"&gt;Kubernetes Operator&lt;/a&gt; to facilitate the creation and scheduling of the source and destination databases. While the demonstration focuses on setting up replication in one direction, it's worth noting that Aerospike supports &lt;a href="https://aerospike.com/docs/server/architecture/active-active" rel="noopener noreferrer"&gt;active-active replication&lt;/a&gt;. This is accompanied by a &lt;a href="https://aerospike.com/docs/server/architecture/xdr#bin-convergence-in-mesh-topology" rel="noopener noreferrer"&gt;conflict resolution&lt;/a&gt; mechanism to handle update clashes. The &lt;a href="https://aerospike.com/docs/server/architecture/xdr" rel="noopener noreferrer"&gt;XDR&lt;/a&gt; proxy supports this feature as well.&lt;/p&gt;

&lt;p&gt;Let's begin the process.&lt;/p&gt;

&lt;p&gt;&lt;a href="https://media2.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2Fqmhy58692gtxhxdam7in.png" class="article-body-image-wrapper"&gt;&lt;img src="https://media2.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2Fqmhy58692gtxhxdam7in.png" width="800" height="450"&gt;&lt;/a&gt;&lt;br&gt;
&lt;em&gt;Fig. 1 Aerospike Kubernetes Operator&lt;/em&gt;&lt;/p&gt;

&lt;p&gt;The following Kubernetes nodes have been created using EKS. &lt;/p&gt;

&lt;p&gt;First, display the private and public IP addresses by listing the nodes with the kubectl command.&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight shell"&gt;&lt;code&gt;kubectl get nodes &lt;span class="nt"&gt;-o&lt;/span&gt; wide

NAME                             STATUS   ROLES    AGE     VERSION                INTERNAL-IP      EXTERNAL-IP      OS-IMAGE         KERNEL-VERSION                 CONTAINER-RUNTIME
ip-192-168-11-132.ec2.internal   Ready    &amp;lt;none&amp;gt;   2m53s   v1.22.15-eks-fb459a0   192.168.11.132   44.201.67.177    Amazon Linux 2   5.4.219-126.411.amzn2.x86_64   docker://20.10.17
ip-192-168-31-131.ec2.internal   Ready    &amp;lt;none&amp;gt;   2m52s   v1.22.15-eks-fb459a0   192.168.31.131   44.192.83.79     Amazon Linux 2   5.4.219-126.411.amzn2.x86_64   docker://20.10.17
ip-192-168-41-140.ec2.internal   Ready    &amp;lt;none&amp;gt;   2m51s   v1.22.15-eks-fb459a0   192.168.41.140   18.208.222.35    Amazon Linux 2   5.4.219-126.411.amzn2.x86_64   docker://20.10.17
ip-192-168-41-63.ec2.internal    Ready    &amp;lt;none&amp;gt;   2m51s   v1.22.15-eks-fb459a0   192.168.41.63    54.173.138.131   Amazon Linux 2   5.4.219-126.411.amzn2.x86_64   docker://20.10.17
ip-192-168-59-220.ec2.internal   Ready    &amp;lt;none&amp;gt;   2m52s   v1.22.15-eks-fb459a0   192.168.59.220   54.227.122.222   Amazon Linux 2   5.4.219-126.411.amzn2.x86_64   docker://20.10.17
ip-192-168-6-124.ec2.internal    Ready    &amp;lt;none&amp;gt;   2m51s   v1.22.15-eks-fb459a0   192.168.6.124    35.174.60.1      Amazon Linux 2   5.4.219-126.411.amzn2.x86_64   docker://20.10.1
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;p&gt;Now, get a copy of the Aerospike git repo for the Kubernetes Operator.&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight shell"&gt;&lt;code&gt;git clone &lt;span class="se"&gt;\ &lt;/span&gt;https://github.com/aerospike/aerospike-kubernetes-operator.git
&lt;span class="nb"&gt;cp &lt;/span&gt;features.conf &lt;span class="se"&gt;\ &lt;/span&gt;aerospike-kubernetes-operator/config/samples/secrets/.
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;h2&gt;
  
  
  Setup
&lt;/h2&gt;

&lt;p&gt;Run the following commands in the order specified. Wait for the CSV “Succeeded phase” to appear after running this line. Initially, it might take between 30 seconds and a minute to show up.&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight shell"&gt;&lt;code&gt;kubectl get csv &lt;span class="nt"&gt;-n&lt;/span&gt; operators &lt;span class="nt"&gt;-w&lt;/span&gt;

&lt;span class="nb"&gt;cd &lt;/span&gt;aerospike-kubernetes-operator/
kubectl apply &lt;span class="nt"&gt;-f&lt;/span&gt; config/samples/storage/eks_ssd_storage_class.yaml
curl &lt;span class="nt"&gt;-sL&lt;/span&gt; &lt;span class="se"&gt;\&lt;/span&gt;
https://github.com/operator-framework/operator-lifecycle-manager/releases/download/v0.22.0/install.sh | bash &lt;span class="nt"&gt;-s&lt;/span&gt; v0.22.0
kubectl create &lt;span class="nt"&gt;-f&lt;/span&gt; &lt;span class="se"&gt;\&lt;/span&gt;
https://operatorhub.io/install/aerospike-kubernetes-operator.yaml
kubectl get csv &lt;span class="nt"&gt;-n&lt;/span&gt; operators &lt;span class="nt"&gt;-w&lt;/span&gt;
cd ..
git clone &lt;span class="se"&gt;\ &lt;/span&gt;https://github.com/nareshmaharaj-consultant/kubernetes-anything
&lt;span class="nb"&gt;cd &lt;/span&gt;kubernetes-anything
./createNamespace.sh aerospike
&lt;span class="nb"&gt;cd&lt;/span&gt; ../aerospike-kubernetes-operator/
kubectl &lt;span class="nt"&gt;-n&lt;/span&gt; aerospike create secret generic aerospike-secret &lt;span class="se"&gt;\&lt;/span&gt;
&lt;span class="nt"&gt;--from-file&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;config/samples/secrets
kubectl &lt;span class="nt"&gt;-n&lt;/span&gt; aerospike create secret generic auth-secret &lt;span class="se"&gt;\&lt;/span&gt;
&lt;span class="nt"&gt;--from-literal&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;&lt;span class="nv"&gt;password&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;&lt;span class="s1"&gt;'admin123'&lt;/span&gt;
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;h3&gt;
  
  
  Destination Cluster
&lt;/h3&gt;

&lt;p&gt;Use the following YAML configuration file for our destination cluster. Save the file and name it ssd1_xdr_dest_6.1_cluster_cr.yaml:&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight yaml"&gt;&lt;code&gt;&lt;span class="na"&gt;apiVersion&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="s"&gt;asdb.aerospike.com/v1beta1&lt;/span&gt;
&lt;span class="na"&gt;kind&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="s"&gt;AerospikeCluster&lt;/span&gt;
&lt;span class="na"&gt;metadata&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt;
  &lt;span class="na"&gt;name&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="s"&gt;aerocluster-dest-xdr&lt;/span&gt;
  &lt;span class="na"&gt;namespace&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="s"&gt;aerospike&lt;/span&gt;

&lt;span class="na"&gt;spec&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt;
  &lt;span class="na"&gt;size&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="m"&gt;1&lt;/span&gt;
  &lt;span class="na"&gt;image&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="s"&gt;aerospike/aerospike-server-enterprise:6.1.0.2&lt;/span&gt;

  &lt;span class="na"&gt;storage&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt;
    &lt;span class="na"&gt;filesystemVolumePolicy&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt;
      &lt;span class="na"&gt;initMethod&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="s"&gt;deleteFiles&lt;/span&gt;
      &lt;span class="na"&gt;cascadeDelete&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="kc"&gt;true&lt;/span&gt;
    &lt;span class="na"&gt;blockVolumePolicy&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt;
      &lt;span class="na"&gt;cascadeDelete&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="kc"&gt;true&lt;/span&gt;
    &lt;span class="na"&gt;volumes&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt;
      &lt;span class="pi"&gt;-&lt;/span&gt; &lt;span class="na"&gt;name&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="s"&gt;workdir&lt;/span&gt;
        &lt;span class="na"&gt;aerospike&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt;
          &lt;span class="na"&gt;path&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="s"&gt;/opt/aerospike&lt;/span&gt;
        &lt;span class="na"&gt;source&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt;
          &lt;span class="na"&gt;persistentVolume&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt;
            &lt;span class="na"&gt;storageClass&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="s"&gt;ssd&lt;/span&gt;
            &lt;span class="na"&gt;volumeMode&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="s"&gt;Filesystem&lt;/span&gt;
            &lt;span class="na"&gt;size&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="s"&gt;1Gi&lt;/span&gt;
      &lt;span class="pi"&gt;-&lt;/span&gt; &lt;span class="na"&gt;name&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="s"&gt;ns&lt;/span&gt;
        &lt;span class="na"&gt;aerospike&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt;
          &lt;span class="na"&gt;path&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="s"&gt;/opt/aerospike/data/&lt;/span&gt;
        &lt;span class="na"&gt;source&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt;
          &lt;span class="na"&gt;persistentVolume&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt;
            &lt;span class="na"&gt;storageClass&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="s"&gt;ssd&lt;/span&gt;
            &lt;span class="na"&gt;volumeMode&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="s"&gt;Filesystem&lt;/span&gt;
            &lt;span class="na"&gt;size&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="s"&gt;1Gi&lt;/span&gt;
      &lt;span class="pi"&gt;-&lt;/span&gt; &lt;span class="na"&gt;name&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="s"&gt;aerospike-config-secret&lt;/span&gt;
        &lt;span class="na"&gt;source&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt;
          &lt;span class="na"&gt;secret&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt;
            &lt;span class="na"&gt;secretName&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="s"&gt;aerospike-secret&lt;/span&gt;
        &lt;span class="na"&gt;aerospike&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt;
          &lt;span class="na"&gt;path&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="s"&gt;/etc/aerospike/secret&lt;/span&gt;

  &lt;span class="na"&gt;podSpec&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt;
    &lt;span class="na"&gt;multiPodPerHost&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="kc"&gt;true&lt;/span&gt;

  &lt;span class="na"&gt;aerospikeAccessControl&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt;
    &lt;span class="na"&gt;roles&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt;
      &lt;span class="pi"&gt;-&lt;/span&gt; &lt;span class="na"&gt;name&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="s"&gt;writer&lt;/span&gt;
        &lt;span class="na"&gt;privileges&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt;
        &lt;span class="pi"&gt;-&lt;/span&gt; &lt;span class="s"&gt;read-write&lt;/span&gt;
      &lt;span class="pi"&gt;-&lt;/span&gt; &lt;span class="na"&gt;name&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="s"&gt;reader&lt;/span&gt;
        &lt;span class="na"&gt;privileges&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt;
        &lt;span class="pi"&gt;-&lt;/span&gt; &lt;span class="s"&gt;read&lt;/span&gt;
    &lt;span class="na"&gt;users&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt;
      &lt;span class="pi"&gt;-&lt;/span&gt; &lt;span class="na"&gt;name&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="s"&gt;admin&lt;/span&gt;
        &lt;span class="na"&gt;secretName&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="s"&gt;auth-secret&lt;/span&gt;
        &lt;span class="na"&gt;roles&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt;
          &lt;span class="pi"&gt;-&lt;/span&gt; &lt;span class="s"&gt;sys-admin&lt;/span&gt;
          &lt;span class="pi"&gt;-&lt;/span&gt; &lt;span class="s"&gt;user-admin&lt;/span&gt;
          &lt;span class="pi"&gt;-&lt;/span&gt; &lt;span class="s"&gt;read-write&lt;/span&gt;
      &lt;span class="pi"&gt;-&lt;/span&gt; &lt;span class="na"&gt;name&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="s"&gt;xdr-writer&lt;/span&gt;
        &lt;span class="na"&gt;secretName&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="s"&gt;xdr-user-auth-secret&lt;/span&gt;
        &lt;span class="na"&gt;roles&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt;
          &lt;span class="pi"&gt;-&lt;/span&gt; &lt;span class="s"&gt;writer&lt;/span&gt;

  &lt;span class="na"&gt;aerospikeConfig&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt;
    &lt;span class="na"&gt;service&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt;
      &lt;span class="na"&gt;feature-key-file&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="s"&gt;/etc/aerospike/secret/features.conf&lt;/span&gt;
    &lt;span class="na"&gt;security&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="pi"&gt;{}&lt;/span&gt;
    &lt;span class="na"&gt;network&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt;
      &lt;span class="na"&gt;service&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt;
        &lt;span class="na"&gt;port&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="m"&gt;3000&lt;/span&gt;
      &lt;span class="na"&gt;fabric&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt;
        &lt;span class="na"&gt;port&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="m"&gt;3001&lt;/span&gt;
      &lt;span class="na"&gt;heartbeat&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt;
        &lt;span class="na"&gt;port&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="m"&gt;3002&lt;/span&gt;
    &lt;span class="na"&gt;namespaces&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt;
      &lt;span class="pi"&gt;-&lt;/span&gt; &lt;span class="na"&gt;name&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="s"&gt;test&lt;/span&gt;
        &lt;span class="na"&gt;memory-size&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="m"&gt;134217728&lt;/span&gt;
        &lt;span class="na"&gt;replication-factor&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="m"&gt;1&lt;/span&gt;
        &lt;span class="na"&gt;storage-engine&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt;
          &lt;span class="na"&gt;type&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="s"&gt;device&lt;/span&gt;
          &lt;span class="na"&gt;files&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt;
            &lt;span class="pi"&gt;-&lt;/span&gt; &lt;span class="s"&gt;/opt/aerospike/data/test.dat&lt;/span&gt;
          &lt;span class="na"&gt;filesize&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="m"&gt;1073741824&lt;/span&gt;
          &lt;span class="na"&gt;data-in-memory&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="kc"&gt;true&lt;/span&gt;
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;p&gt;Next, create the following Kubernetes resources for our Aerospike destination cluster:&lt;/p&gt;

&lt;ul&gt;
&lt;li&gt;XDR destination database user login credentials, as a Kubernetes secret&lt;/li&gt;
&lt;li&gt;Destination database cluster using our YAML file named ssd1_xdr_dest_6.1_cluster_cr.yaml
&lt;/li&gt;
&lt;/ul&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight shell"&gt;&lt;code&gt;&lt;span class="nb"&gt;export &lt;/span&gt;&lt;span class="nv"&gt;secret_auth_name&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;xdr-user-auth-secret
&lt;span class="nb"&gt;export &lt;/span&gt;&lt;span class="nv"&gt;password_secret&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;admin123
kubectl &lt;span class="nt"&gt;-n&lt;/span&gt; aerospike create secret generic &lt;span class="nv"&gt;$secret_auth_name&lt;/span&gt; &lt;span class="se"&gt;\&lt;/span&gt;
&lt;span class="nt"&gt;--from-literal&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;&lt;span class="nv"&gt;password&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;&lt;span class="nv"&gt;$password_secret&lt;/span&gt;
kubectl create &lt;span class="nt"&gt;-f&lt;/span&gt; ssd1_xdr_dest_6.1_cluster_cr.yaml
kubectl &lt;span class="nt"&gt;-n&lt;/span&gt; aerospike get po &lt;span class="nt"&gt;-w&lt;/span&gt;
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;p&gt;You should see the database pods up and running successfully.&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight shell"&gt;&lt;code&gt;kubectl get po &lt;span class="nt"&gt;-n&lt;/span&gt; aerospike &lt;span class="nt"&gt;-w&lt;/span&gt;

NAME                       READY   STATUS     RESTARTS   AGE
aerocluster-dest-xdr-0-0   0/1     Init:0/1   0          13s
aerocluster-dest-xdr-0-0   0/1     Init:0/1   0          18s
aerocluster-dest-xdr-0-0   0/1     PodInitializing   0          19s
aerocluster-dest-xdr-0-0   1/1     Running           0          24s
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;h3&gt;
  
  
  XDR-Proxy
&lt;/h3&gt;

&lt;p&gt;Next, set up the xdr-proxy. Take a brief look at &lt;em&gt;Fig. 1 Aerospike Kubernetes Operator&lt;/em&gt;, and you’ll notice that we are working from the right to the left in that order.&lt;/p&gt;

&lt;h4&gt;
  
  
  Configuration
&lt;/h4&gt;

&lt;p&gt;Create the following xdr-proxy configuration file. Replace the seed address with a fully qualified domain name (FQDN) for the destination database pod(s) you created earlier. Multiple seed addresses may be added (hint: recommended in production).&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight shell"&gt;&lt;code&gt;&lt;span class="nb"&gt;cd&lt;/span&gt; ..
&lt;span class="nb"&gt;mkdir&lt;/span&gt; &lt;span class="nt"&gt;-p&lt;/span&gt; xdr-cfg/etc/auth
&lt;span class="nb"&gt;cd &lt;/span&gt;xdr-cfg/etc/

&lt;span class="nb"&gt;cat&lt;/span&gt; &lt;span class="o"&gt;&amp;lt;&amp;lt;&lt;/span&gt;&lt;span class="no"&gt;EOF&lt;/span&gt;&lt;span class="sh"&gt;&amp;gt; aerospike-xdr-proxy.yml
# Change the configuration for your use case.
# Naresh Maharaj
# Refer to https://www.aerospike.com/docs/connectors/enterprise/xdr-proxy/configuration/index.html
# for details.

# The connector's listening ports, manage service, TLS, and network interface.
service:
  port: 8901
  # Aerospike Enterprise Server &amp;gt;= 5.0
  manage:
    address: 0.0.0.0
    port: 8902

# The destination aerospike cluster.
aerospike:
  seeds:
    - aerocluster-dest-xdr-0-0.aerospike.svc.cluster.local:
        port: 3000
  credentials:
    username: xdr-writer
    password-file: /etc/aerospike-xdr-proxy/auth/password_DC1.txt
    auth-mode: internal

# The logging config
logging:
  enable-console-logging: true
  file: /var/log/aerospike-xdr-proxy/aerospike-xdr-proxy.log
  levels:
    root: debug
    record-parser: debug
    server: debug
    com.aerospike.connect: debug
  # Ticker log interval in seconds
  ticker-interval: 3600
&lt;/span&gt;&lt;span class="no"&gt;EOF

&lt;/span&gt;&lt;span class="nb"&gt;sudo tee &lt;/span&gt;auth/password_DC1.txt &lt;span class="o"&gt;&amp;lt;&amp;lt;&lt;/span&gt;&lt;span class="no"&gt;EOF&lt;/span&gt;&lt;span class="sh"&gt;
admin123
&lt;/span&gt;&lt;span class="no"&gt;EOF
&lt;/span&gt;&lt;span class="nb"&gt;cd&lt;/span&gt; ..

kubectl &lt;span class="nt"&gt;-n&lt;/span&gt; aerospike create configmap xdr-proxy-cfg &lt;span class="nt"&gt;--from-file&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;etc/
kubectl &lt;span class="nt"&gt;-n&lt;/span&gt; aerospike create secret generic xdr-proxy-auth-secret &lt;span class="se"&gt;\&lt;/span&gt;
&lt;span class="nt"&gt;--from-file&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;etc/auth
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;h4&gt;
  
  
  Deployment
&lt;/h4&gt;

&lt;p&gt;Now that you have the xdr-proxy configuration file created, produce the Kubernetes deployment YAML file for the xdr-proxy. The following YAML file is used to deploy the xdr-proxy pods, ideally in the same data center or location where the destination databases will be hosted. &lt;/p&gt;

&lt;p&gt;&lt;em&gt;Tip&lt;/em&gt;: Remember that your xdr-proxy configuration differs from the Kubernetes xdr-proxy deployment file.&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight shell"&gt;&lt;code&gt;&lt;span class="nb"&gt;cat&lt;/span&gt; &lt;span class="o"&gt;&amp;lt;&amp;lt;&lt;/span&gt;&lt;span class="no"&gt;EOF&lt;/span&gt;&lt;span class="sh"&gt;&amp;gt; xdr-proxy-deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: xdr-proxy
  namespace: aerospike
  labels:
    app: xdr-proxy
spec:
  replicas: 2
  selector:
    matchLabels:
      app: xdr-proxy
  template:
    metadata:
      labels:
        app: xdr-proxy
    spec:
      containers:
      - name: xdr-proxy
        image: aerospike/aerospike-xdr-proxy:2.1.0
        volumeMounts:
        - name: xdr-proxy-dir
          mountPath: "/etc/aerospike-xdr-proxy/"
          readOnly: true
        - name: xdr-auth-dir
          mountPath: "/etc/aerospike-xdr-proxy/auth"
          readOnly: true
        ports:
          - name: xdr-proxy-main
            containerPort: 8901
          - name: xdr-proxy-mng
            containerPort: 8902
      volumes:
      - name: xdr-proxy-dir
        configMap:
          name: xdr-proxy-cfg
          optional: false
      - name: xdr-auth-dir
        secret:
          secretName: xdr-proxy-auth-secret
          optional: false
---
apiVersion: v1
kind: Service
metadata:
  name: xdr-proxy
  namespace: aerospike
spec:
  selector:
    app: xdr-proxy
  ports:
  - name: main
    protocol: TCP
    port: 8901
    targetPort: xdr-proxy-main
  - name: manage
    protocol: TCP
    port: 8902
    targetPort: xdr-proxy-mng
&lt;/span&gt;&lt;span class="no"&gt;EOF

&lt;/span&gt;kubectl create &lt;span class="nt"&gt;-f&lt;/span&gt; xdr-proxy-deployment.yaml
kubectl get po &lt;span class="nt"&gt;-n&lt;/span&gt; aerospike &lt;span class="nt"&gt;-w&lt;/span&gt;
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;p&gt;The following command shows the current pods are running successfully. So far, so good!&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight shell"&gt;&lt;code&gt;kubectl get po &lt;span class="nt"&gt;-n&lt;/span&gt; aerospike &lt;span class="nt"&gt;-w&lt;/span&gt;
NAME                         READY   STATUS    RESTARTS   AGE
aerocluster-dest-xdr-0-0     1/1     Running   0          77m
xdr-proxy-7d9fccd6c8-g5mjt   1/1     Running   0          2m26s
xdr-proxy-7d9fccd6c8-mjxp4   1/1     Running   0          2m26s
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;h4&gt;
  
  
  Source cluster
&lt;/h4&gt;

&lt;p&gt;Create the Aerospike source cluster using the following configuration. We will insert our sample messages here.&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight shell"&gt;&lt;code&gt;&lt;span class="nb"&gt;cd&lt;/span&gt; ../aerospike-kubernetes-operator/

&lt;span class="nb"&gt;cat&lt;/span&gt; &lt;span class="o"&gt;&amp;lt;&amp;lt;&lt;/span&gt;&lt;span class="no"&gt;EOF&lt;/span&gt;&lt;span class="sh"&gt;&amp;gt; ssd1_xdr_src_6.1_cluster_cr.yaml
apiVersion: asdb.aerospike.com/v1beta1
kind: AerospikeCluster
metadata:
  name: aerocluster-source-xdr
  namespace: aerospike

spec:
  size: 1
  image: aerospike/aerospike-server-enterprise:6.1.0.2

  storage:
    filesystemVolumePolicy:
      initMethod: deleteFiles
      cascadeDelete: true
    blockVolumePolicy:
      cascadeDelete: true
    volumes:
      - name: workdir
        aerospike:
          path: /opt/aerospike
        source:
          persistentVolume:
            storageClass: ssd
            volumeMode: Filesystem
            size: 1Gi
      - name: ns
        aerospike:
          path: /opt/aerospike/data/
        source:
          persistentVolume:
            storageClass: ssd
            volumeMode: Filesystem
            size: 1Gi
      - name: aerospike-config-secret
        source:
          secret:
            secretName: aerospike-secret
        aerospike:
          path: /etc/aerospike/secret

  podSpec:
    multiPodPerHost: true

  aerospikeAccessControl:
    roles:
      - name: writer
        privileges:
        - read-write
      - name: reader
        privileges:
        - read
    users:
      - name: admin
        secretName: auth-secret
        roles:
          - sys-admin
          - user-admin
          - read-write

  aerospikeConfig:
    service:
      feature-key-file: /etc/aerospike/secret/features.conf
    security: {}
    network:
      service:
        port: 3000
      fabric:
        port: 3001
      heartbeat:
        port: 3002
    xdr:
      dcs:
        - name: DC2
          connector: true
          node-address-ports:
            - xdr-proxy.aerospike.svc.cluster.local 8901
          namespaces:
            - name: test
    namespaces:
      - name: test
        memory-size: 134217728
        replication-factor: 1
        storage-engine:
          type: device
          files:
            - /opt/aerospike/data/test.dat
          filesize: 1073741824
          data-in-memory: true
&lt;/span&gt;&lt;span class="no"&gt;EOF

&lt;/span&gt;kubectl create &lt;span class="nt"&gt;-f&lt;/span&gt; ssd1_xdr_src_6.1_cluster_cr.yaml
kubectl get po &lt;span class="nt"&gt;-n&lt;/span&gt; aerospike &lt;span class="nt"&gt;-w&lt;/span&gt;
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;p&gt;From the source cluster, confirm the XDR component has connected to the xdr-proxy by filtering the Kubernetes log file as shown in the following kubectl command.&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight shell"&gt;&lt;code&gt;kubectl &lt;span class="nt"&gt;-n&lt;/span&gt; aerospike logs aerocluster-source-xdr-0-0 &lt;span class="nt"&gt;-c&lt;/span&gt; aerospike-server | &lt;span class="nb"&gt;grep &lt;/span&gt;xdr | &lt;span class="nb"&gt;grep &lt;/span&gt;conn
Dec 08 2022 13:49:21 GMT: INFO &lt;span class="o"&gt;(&lt;/span&gt;xdr&lt;span class="o"&gt;)&lt;/span&gt;: &lt;span class="o"&gt;(&lt;/span&gt;dc.c:581&lt;span class="o"&gt;)&lt;/span&gt; DC DC2 connected Oct 10 2022 13:57:53 GMT: INFO &lt;span class="o"&gt;(&lt;/span&gt;xdr&lt;span class="o"&gt;)&lt;/span&gt;: &lt;span class="o"&gt;(&lt;/span&gt;dc.c:581&lt;span class="o"&gt;)&lt;/span&gt; DC DC2 connected
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;h2&gt;
  
  
  Simple message test
&lt;/h2&gt;

&lt;p&gt;Add some sample messages to the source database and confirm they are being received in the destination database cluster. Start by getting the source database service address and connect using Aerospike's command line tool -  &lt;a href="https://aerospike.com/docs/tools/aql" rel="noopener noreferrer"&gt;AQL&lt;/a&gt;.&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight shell"&gt;&lt;code&gt;kubectl get svc &lt;span class="nt"&gt;-n&lt;/span&gt; aerospike

NAME                         TYPE        CLUSTER-IP       EXTERNAL-IP   PORT&lt;span class="o"&gt;(&lt;/span&gt;S&lt;span class="o"&gt;)&lt;/span&gt;             AGE
aerocluster-dest-xdr         ClusterIP   None             &amp;lt;none&amp;gt;        3000/TCP            3h38m
aerocluster-dest-xdr-0-0     NodePort    10.100.226.179   &amp;lt;none&amp;gt;        3000:30168/TCP      3h38m
aerocluster-source-xdr       ClusterIP   None             &amp;lt;none&amp;gt;        3000/TCP            33m
aerocluster-source-xdr-0-0   NodePort    10.100.116.173   &amp;lt;none&amp;gt;        3000:31999/TCP      33m
xdr-proxy                    ClusterIP   10.100.44.96     &amp;lt;none&amp;gt;        8901/TCP,8902/TCP   41m
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;p&gt;Insert a source record using the following command in AQL&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight shell"&gt;&lt;code&gt;kubectl run &lt;span class="nt"&gt;-it&lt;/span&gt; &lt;span class="nt"&gt;--rm&lt;/span&gt; &lt;span class="nt"&gt;--restart&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;Never aerospike-tool &lt;span class="nt"&gt;-n&lt;/span&gt; aerospike &lt;span class="nt"&gt;--image&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;aerospike/aerospike-tools:latest &lt;span class="nt"&gt;--&lt;/span&gt; aql &lt;span class="nt"&gt;-U&lt;/span&gt; admin &lt;span class="nt"&gt;-P&lt;/span&gt; admin123 &lt;span class="nt"&gt;-h&lt;/span&gt; aerocluster-source-xdr-0-0

insert into test.a1 &lt;span class="o"&gt;(&lt;/span&gt;PK,a,b,c,d&lt;span class="o"&gt;)&lt;/span&gt; values&lt;span class="o"&gt;(&lt;/span&gt;1,&lt;span class="s2"&gt;"A"&lt;/span&gt;,&lt;span class="s2"&gt;"B"&lt;/span&gt;,&lt;span class="s2"&gt;"C"&lt;/span&gt;,&lt;span class="s2"&gt;"D"&lt;/span&gt;&lt;span class="o"&gt;)&lt;/span&gt;
OK, 1 record affected.

aql&amp;gt; &lt;span class="k"&gt;select&lt;/span&gt; &lt;span class="k"&gt;*&lt;/span&gt; from &lt;span class="nb"&gt;test&lt;/span&gt;
+----+-----+-----+-----+-----+
| PK | a   | b   | c   | d   |
+----+-----+-----+-----+-----+
| 1  | &lt;span class="s2"&gt;"A"&lt;/span&gt; | &lt;span class="s2"&gt;"B"&lt;/span&gt; | &lt;span class="s2"&gt;"C"&lt;/span&gt; | &lt;span class="s2"&gt;"D"&lt;/span&gt; |
+----+-----+-----+-----+-----+
1 row &lt;span class="k"&gt;in &lt;/span&gt;&lt;span class="nb"&gt;set&lt;/span&gt; &lt;span class="o"&gt;(&lt;/span&gt;0.023 secs&lt;span class="o"&gt;)&lt;/span&gt;
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;p&gt;Now, run the following select query in the destination cluster.&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight shell"&gt;&lt;code&gt;kubectl run &lt;span class="nt"&gt;-it&lt;/span&gt; &lt;span class="nt"&gt;--rm&lt;/span&gt; &lt;span class="nt"&gt;--restart&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;Never aerospike-tool &lt;span class="nt"&gt;-n&lt;/span&gt; aerospike &lt;span class="nt"&gt;--image&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;aerospike/aerospike-tools:latest &lt;span class="nt"&gt;--&lt;/span&gt; aql &lt;span class="nt"&gt;-U&lt;/span&gt; admin &lt;span class="nt"&gt;-P&lt;/span&gt; admin123 &lt;span class="nt"&gt;-h&lt;/span&gt; aerocluster-dest-xdr-0-0

aql&amp;gt; &lt;span class="k"&gt;select&lt;/span&gt; &lt;span class="k"&gt;*&lt;/span&gt; from &lt;span class="nb"&gt;test&lt;/span&gt;
+----+-----+-----+-----+-----+
| PK | a   | b   | c   | d   |
+----+-----+-----+-----+-----+
| 1  | &lt;span class="s2"&gt;"A"&lt;/span&gt; | &lt;span class="s2"&gt;"B"&lt;/span&gt; | &lt;span class="s2"&gt;"C"&lt;/span&gt; | &lt;span class="s2"&gt;"D"&lt;/span&gt; |
+----+-----+-----+-----+-----+
1 row &lt;span class="k"&gt;in &lt;/span&gt;&lt;span class="nb"&gt;set&lt;/span&gt; &lt;span class="o"&gt;(&lt;/span&gt;0.031 secs&lt;span class="o"&gt;)&lt;/span&gt;

OK
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;h2&gt;
  
  
  Interim summary
&lt;/h2&gt;

&lt;p&gt;At this point, it's confirmed that xdr-proxy is doing exactly what it should do.&lt;/p&gt;

&lt;p&gt;If you review the log file for the initial two xdr-proxies scheduled, you should see userKey=1.&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight shell"&gt;&lt;code&gt;kubectl logs xdr-proxy-7d9fccd6c8-5q7gn &lt;span class="nt"&gt;-n&lt;/span&gt; aerospike | &lt;span class="nb"&gt;grep &lt;/span&gt;record-parser
2022-12-08 14:53:50.607 GMT DEBUG record-parser - parsed message fields: &lt;span class="nv"&gt;key&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;Key&lt;span class="o"&gt;(&lt;/span&gt;&lt;span class="nv"&gt;namespace&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;&lt;span class="s1"&gt;'test'&lt;/span&gt;, &lt;span class="nb"&gt;set&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;a1, &lt;span class="nv"&gt;digest&lt;/span&gt;&lt;span class="o"&gt;=[&lt;/span&gt;120, 48, &lt;span class="nt"&gt;-23&lt;/span&gt;, &lt;span class="nt"&gt;-90&lt;/span&gt;, 110, 126, 84, &lt;span class="nt"&gt;-1&lt;/span&gt;, 114, &lt;span class="nt"&gt;-116&lt;/span&gt;, &lt;span class="nt"&gt;-9&lt;/span&gt;, &lt;span class="nt"&gt;-21&lt;/span&gt;, 28, 75, 126, &lt;span class="nt"&gt;-68&lt;/span&gt;, &lt;span class="nt"&gt;-51&lt;/span&gt;, 83, 31, &lt;span class="nt"&gt;-117&lt;/span&gt;&lt;span class="o"&gt;]&lt;/span&gt;, &lt;span class="nv"&gt;userKey&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;1, &lt;span class="nv"&gt;lastUpdateTimeMs&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;1670511230565, &lt;span class="nv"&gt;userKeyString&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;1, &lt;span class="nv"&gt;digestString&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;&lt;span class="s1"&gt;'eDDppm5+VP9yjPfrHEt+vM1TH4s='&lt;/span&gt;&lt;span class="o"&gt;)&lt;/span&gt;

kubectl logs xdr-proxy-7d9fccd6c8-f5zkt &lt;span class="nt"&gt;-n&lt;/span&gt; aerospike | &lt;span class="nb"&gt;grep&lt;/span&gt; &lt;span class="se"&gt;\&lt;/span&gt;
record-parser
&lt;span class="o"&gt;(&lt;/span&gt;none&lt;span class="o"&gt;)&lt;/span&gt;
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;h2&gt;
  
  
  Scaling the XDR-Proxies
&lt;/h2&gt;

&lt;p&gt;Go ahead and scale up the xdr-proxy to six pods by editing the file xdr-proxy-deployment.yaml and then apply the changes.&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight shell"&gt;&lt;code&gt;apiVersion: apps/v1
kind: Deployment
metadata:
  name: xdr-proxy
  namespace: aerospike
  labels:
    app: xdr-proxy
spec:
  replicas: 6
  selector:
    matchLabels:
      app: xdr-proxy
...
kubectl apply &lt;span class="nt"&gt;-f&lt;/span&gt; xdr-proxy-deployment.yaml
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;p&gt;You can achieve exactly the same by running the following command.&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight shell"&gt;&lt;code&gt;kubectl scale deploy xdr-proxy &lt;span class="nt"&gt;-n&lt;/span&gt; aerospike &lt;span class="nt"&gt;--replicas&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;6
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;p&gt;You should now have six instances of the xdr-proxies running.&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight shell"&gt;&lt;code&gt;kubectl get po &lt;span class="nt"&gt;-n&lt;/span&gt; aerospike &lt;span class="nt"&gt;-w&lt;/span&gt;
NAME                         READY   STATUS    RESTARTS   AGE
aerocluster-dest-xdr-0-0     1/1     Running   0          3h50m
aerocluster-source-xdr-0-0   1/1     Running   0          75m
xdr-proxy-7d9fccd6c8-49ttl   1/1     Running   0          7s
xdr-proxy-7d9fccd6c8-5q7gn   1/1     Running   0          83m
xdr-proxy-7d9fccd6c8-c4j7k   1/1     Running   0          7s
xdr-proxy-7d9fccd6c8-f5zkt   1/1     Running   0          83m
xdr-proxy-7d9fccd6c8-lscbg   1/1     Running   0          7s
xdr-proxy-7d9fccd6c8-r56vs   1/1     Running   0          7s
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;p&gt;Add some sample messages in the source cluster with primary keys 5, 6, and 7. Note that in Aerospike, a primary key serves as a unique identifier for a record within a specific set in a specific namespace. This key is a unique identifier for addressing the record for any operation.&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight shell"&gt;&lt;code&gt;aql&amp;gt; insert into &lt;span class="nb"&gt;test&lt;/span&gt; &lt;span class="o"&gt;(&lt;/span&gt;PK,a,b&lt;span class="o"&gt;)&lt;/span&gt; values &lt;span class="o"&gt;(&lt;/span&gt;5,&lt;span class="s2"&gt;"A"&lt;/span&gt;,&lt;span class="s2"&gt;"B"&lt;/span&gt;&lt;span class="o"&gt;)&lt;/span&gt;
OK, 1 record affected.
aql&amp;gt; insert into &lt;span class="nb"&gt;test&lt;/span&gt; &lt;span class="o"&gt;(&lt;/span&gt;PK,a,b&lt;span class="o"&gt;)&lt;/span&gt; values &lt;span class="o"&gt;(&lt;/span&gt;6,&lt;span class="s2"&gt;"A"&lt;/span&gt;,&lt;span class="s2"&gt;"B"&lt;/span&gt;&lt;span class="o"&gt;)&lt;/span&gt;
OK, 1 record affected.
aql&amp;gt; insert into &lt;span class="nb"&gt;test&lt;/span&gt; &lt;span class="o"&gt;(&lt;/span&gt;PK,a,b&lt;span class="o"&gt;)&lt;/span&gt; values &lt;span class="o"&gt;(&lt;/span&gt;7,&lt;span class="s2"&gt;"A"&lt;/span&gt;,&lt;span class="s2"&gt;"B"&lt;/span&gt;&lt;span class="o"&gt;)&lt;/span&gt;
OK, 1 record affected.
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;p&gt;Notice how userKey=5, userKey=6, and userKey=7 have been shipped across to the newly scaled xdr-proxies. Run the commands below to see the same.&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight shell"&gt;&lt;code&gt;kubectl logs xdr-proxy-7d9fccd6c8-5q7gn &lt;span class="nt"&gt;-n&lt;/span&gt; aerospike | &lt;span class="nb"&gt;grep&lt;/span&gt; &lt;span class="se"&gt;\&lt;/span&gt;
record-parser
2022-12-08 14:53:50.607 GMT DEBUG record-parser - parsed message fields: &lt;span class="nv"&gt;key&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;Key&lt;span class="o"&gt;(&lt;/span&gt;&lt;span class="nv"&gt;namespace&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;&lt;span class="s1"&gt;'test'&lt;/span&gt;, &lt;span class="nb"&gt;set&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;a1, &lt;span class="nv"&gt;digest&lt;/span&gt;&lt;span class="o"&gt;=[&lt;/span&gt;120, 48, &lt;span class="nt"&gt;-23&lt;/span&gt;, &lt;span class="nt"&gt;-90&lt;/span&gt;, 110, 126, 84, &lt;span class="nt"&gt;-1&lt;/span&gt;, 114, &lt;span class="nt"&gt;-116&lt;/span&gt;, &lt;span class="nt"&gt;-9&lt;/span&gt;, &lt;span class="nt"&gt;-21&lt;/span&gt;, 28, 75, 126, &lt;span class="nt"&gt;-68&lt;/span&gt;, &lt;span class="nt"&gt;-51&lt;/span&gt;, 83, 31, &lt;span class="nt"&gt;-117&lt;/span&gt;&lt;span class="o"&gt;]&lt;/span&gt;, &lt;span class="nv"&gt;userKey&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;1, &lt;span class="nv"&gt;lastUpdateTimeMs&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;1670511230565, &lt;span class="nv"&gt;userKeyString&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;1, &lt;span class="nv"&gt;digestString&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;&lt;span class="s1"&gt;'eDDppm5+VP9yjPfrHEt+vM1TH4s='&lt;/span&gt;&lt;span class="o"&gt;)&lt;/span&gt;

kubectl logs xdr-proxy-7d9fccd6c8-f5zkt &lt;span class="nt"&gt;-n&lt;/span&gt; aerospike | &lt;span class="nb"&gt;grep&lt;/span&gt; &lt;span class="se"&gt;\&lt;/span&gt;
record-parser
&lt;span class="o"&gt;(&lt;/span&gt;none&lt;span class="o"&gt;)&lt;/span&gt;

kubectl logs xdr-proxy-7d9fccd6c8-49ttl &lt;span class="nt"&gt;-n&lt;/span&gt; aerospike | &lt;span class="nb"&gt;grep&lt;/span&gt; &lt;span class="se"&gt;\&lt;/span&gt;
record-parser
&lt;span class="o"&gt;(&lt;/span&gt;none&lt;span class="o"&gt;)&lt;/span&gt;

kubectl logs xdr-proxy-7d9fccd6c8-c4j7k &lt;span class="nt"&gt;-n&lt;/span&gt; aerospike | &lt;span class="nb"&gt;grep&lt;/span&gt; &lt;span class="se"&gt;\&lt;/span&gt;
record-parser
2022-12-08 15:05:37.511 GMT DEBUG record-parser - parsed message fields: &lt;span class="nv"&gt;key&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;Key&lt;span class="o"&gt;(&lt;/span&gt;&lt;span class="nv"&gt;namespace&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;&lt;span class="s1"&gt;'test'&lt;/span&gt;, &lt;span class="nb"&gt;set&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;null, &lt;span class="nv"&gt;digest&lt;/span&gt;&lt;span class="o"&gt;=[&lt;/span&gt;&lt;span class="nt"&gt;-88&lt;/span&gt;, 104, 104, &lt;span class="nt"&gt;-114&lt;/span&gt;, 19, &lt;span class="nt"&gt;-44&lt;/span&gt;, &lt;span class="nt"&gt;-19&lt;/span&gt;, 29, &lt;span class="nt"&gt;-15&lt;/span&gt;, 18, 118, 72, &lt;span class="nt"&gt;-117&lt;/span&gt;, &lt;span class="nt"&gt;-106&lt;/span&gt;, &lt;span class="nt"&gt;-28&lt;/span&gt;, 21, &lt;span class="nt"&gt;-48&lt;/span&gt;, 50, 26, 113], &lt;span class="nv"&gt;userKey&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;7, &lt;span class="nv"&gt;lastUpdateTimeMs&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;1670511937250, &lt;span class="nv"&gt;userKeyString&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;7, &lt;span class="nv"&gt;digestString&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;&lt;span class="s1"&gt;'qGhojhPU7R3xEnZIi5bkFdAyGnE='&lt;/span&gt;&lt;span class="o"&gt;)&lt;/span&gt;

kubectl logs xdr-proxy-7d9fccd6c8-lscbg &lt;span class="nt"&gt;-n&lt;/span&gt; aerospike | &lt;span class="nb"&gt;grep &lt;/span&gt;record-parser
2022-12-08 15:05:27.548 GMT DEBUG record-parser - parsed message fields: &lt;span class="nv"&gt;key&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;Key&lt;span class="o"&gt;(&lt;/span&gt;&lt;span class="nv"&gt;namespace&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;&lt;span class="s1"&gt;'test'&lt;/span&gt;, &lt;span class="nb"&gt;set&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;null, &lt;span class="nv"&gt;digest&lt;/span&gt;&lt;span class="o"&gt;=[&lt;/span&gt;68, 4, &lt;span class="nt"&gt;-94&lt;/span&gt;, &lt;span class="nt"&gt;-44&lt;/span&gt;, &lt;span class="nt"&gt;-75&lt;/span&gt;, 112, &lt;span class="nt"&gt;-102&lt;/span&gt;, 73, &lt;span class="nt"&gt;-120&lt;/span&gt;, 41, &lt;span class="nt"&gt;-101&lt;/span&gt;, &lt;span class="nt"&gt;-120&lt;/span&gt;, 33, &lt;span class="nt"&gt;-111&lt;/span&gt;, 15, &lt;span class="nt"&gt;-114&lt;/span&gt;, &lt;span class="nt"&gt;-85&lt;/span&gt;, 46, &lt;span class="nt"&gt;-2&lt;/span&gt;, 80], &lt;span class="nv"&gt;userKey&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;5, &lt;span class="nv"&gt;lastUpdateTimeMs&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;1670511927465, &lt;span class="nv"&gt;userKeyString&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;5, &lt;span class="nv"&gt;digestString&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;&lt;span class="s1"&gt;'RASi1LVwmkmIKZuIIZEPjqsu/lA='&lt;/span&gt;&lt;span class="o"&gt;)&lt;/span&gt;
2022-12-08 15:05:32.300 GMT DEBUG record-parser - parsed message fields: &lt;span class="nv"&gt;key&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;Key&lt;span class="o"&gt;(&lt;/span&gt;&lt;span class="nv"&gt;namespace&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;&lt;span class="s1"&gt;'test'&lt;/span&gt;, &lt;span class="nb"&gt;set&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;null, &lt;span class="nv"&gt;digest&lt;/span&gt;&lt;span class="o"&gt;=[&lt;/span&gt;33, &lt;span class="nt"&gt;-100&lt;/span&gt;, 127, 120, 17, 45, &lt;span class="nt"&gt;-79&lt;/span&gt;, 115, &lt;span class="nt"&gt;-40&lt;/span&gt;, 53, &lt;span class="nt"&gt;-70&lt;/span&gt;, &lt;span class="nt"&gt;-57&lt;/span&gt;, 120, 73, 20, &lt;span class="nt"&gt;-50&lt;/span&gt;, &lt;span class="nt"&gt;-99&lt;/span&gt;, &lt;span class="nt"&gt;-98&lt;/span&gt;, &lt;span class="nt"&gt;-104&lt;/span&gt;, 85], &lt;span class="nv"&gt;userKey&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;6, &lt;span class="nv"&gt;lastUpdateTimeMs&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;1670511932288, &lt;span class="nv"&gt;userKeyString&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;6, &lt;span class="nv"&gt;digestString&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;&lt;span class="s1"&gt;'IZx/eBEtsXPYNbrHeEkUzp2emFU='&lt;/span&gt;&lt;span class="o"&gt;)&lt;/span&gt;

kubectl logs xdr-proxy-7d9fccd6c8-r56vs &lt;span class="nt"&gt;-n&lt;/span&gt; aerospike | &lt;span class="nb"&gt;grep &lt;/span&gt;record-parser
&lt;span class="o"&gt;(&lt;/span&gt;none&lt;span class="o"&gt;)&lt;/span&gt;
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;p&gt;However, we need to consider one critical factor. When data is actively flowing between source and destination clusters, the existing cached list of xdr-proxy connections from the source cluster will not be refreshed just because you added a bunch of new xdr-proxies. Consequently, the newly scaled xdr-proxies you have just scheduled will not be utilized immediately.&lt;/p&gt;

&lt;p&gt;To demonstrate this, let's add data to the source cluster using Aerospike's benchmark tool. At the same time, we will scale the xdr-proxies on the destination side and observe the results. &lt;/p&gt;

&lt;h2&gt;
  
  
  Add data to the source cluster
&lt;/h2&gt;

&lt;p&gt;Before you begin, reduce the xdr-proxy server count to one (1) to clarify the observations. In this example, I use my local machine, which has the benchmark tool installed, to send data to the source EC2 instances. To do this, you’ll need to obtain the external IP address of the source cluster’s Kubernetes service. You can download the benchmark tool from &lt;a href="https://aerospike.com/docs/tools/install" rel="noopener noreferrer"&gt;https://aerospike.com/docs/tools/install&lt;/a&gt;.&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight shell"&gt;&lt;code&gt;kubectl get AerospikeCluster aerocluster-source-xdr &lt;span class="nt"&gt;-n&lt;/span&gt; aerospike  &lt;span class="nt"&gt;-o&lt;/span&gt; yaml

...
pods:
    aerocluster-source-xdr-0-0:
      aerospike:
        accessEndpoints:
        - 192.168.41.63:31999
        alternateAccessEndpoints:
        - 54.173.138.131:31999
        clusterName: aerocluster-source-xdr
        nodeID: 0a0
        tlsAccessEndpoints: &lt;span class="o"&gt;[]&lt;/span&gt;
        tlsAlternateAccessEndpoints: &lt;span class="o"&gt;[]&lt;/span&gt;
        tlsName: &lt;span class="s2"&gt;""&lt;/span&gt;
      aerospikeConfigHash: 4aacb9809beaa01d99a9f00293c9f7dc141845f8
      hostExternalIP: 54.173.138.131
      hostInternalIP: 192.168.41.63
      image: aerospike/aerospike-server-enterprise:6.1.0.2
      initializedVolumes:
      - workdir
      - ns
      networkPolicyHash: acbbfab3668e1fceeed201139d1173f00095667e
      podIP: 192.168.50.203
      podPort: 3000
      podSpecHash: 972dc2a779fe9ab407212b547d54d3a72ecef259
      servicePort: 31999
...
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;p&gt;You may need to add a firewall rule to allow traffic into the Kubernetes service. Connect the asbenchmark tool to start writing traffic using the public IP address for the NodePort Service.&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight shell"&gt;&lt;code&gt;asbenchmark &lt;span class="nt"&gt;-h&lt;/span&gt; 54.173.138.131:31999 &lt;span class="nt"&gt;-Uadmin&lt;/span&gt; &lt;span class="nt"&gt;-Padmin123&lt;/span&gt; &lt;span class="nt"&gt;-z&lt;/span&gt; 10 &lt;span class="nt"&gt;-servicesAlternate&lt;/span&gt; &lt;span class="nt"&gt;-w&lt;/span&gt; RU,0 &lt;span class="nt"&gt;-o&lt;/span&gt; B256
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;p&gt;Scale up the xdr-proxy servers from one to two and check the logs of both proxies to see what messages have been received. In a production environment, you should always disable unnecessary logging.&lt;/p&gt;

&lt;p&gt;Notice how no data has passed through the new xdr-proxy instance xdr-proxy-7d9fccd6c8-s2tzt.&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight shell"&gt;&lt;code&gt;kubectl &lt;span class="nt"&gt;-n&lt;/span&gt; aerospike logs xdr-proxy-7d9fccd6c8-s2tzt | &lt;span class="nb"&gt;grep&lt;/span&gt; &lt;span class="se"&gt;\&lt;/span&gt;
record-parser
&lt;span class="o"&gt;(&lt;/span&gt;none&lt;span class="o"&gt;)&lt;/span&gt;

kubectl &lt;span class="nt"&gt;-n&lt;/span&gt; aerospike logs xdr-proxy-7d9fccd6c8-5q7gn | &lt;span class="nb"&gt;grep&lt;/span&gt; &lt;span class="se"&gt;\&lt;/span&gt;
record-parser
...
2022-12-08 18:21:36.158 GMT DEBUG record-parser - parsed message fields: &lt;span class="nv"&gt;key&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;Key&lt;span class="o"&gt;(&lt;/span&gt;&lt;span class="nv"&gt;namespace&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;&lt;span class="s1"&gt;'test'&lt;/span&gt;, &lt;span class="nb"&gt;set&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;testset, &lt;span class="nv"&gt;digest&lt;/span&gt;&lt;span class="o"&gt;=[&lt;/span&gt;51, &lt;span class="nt"&gt;-120&lt;/span&gt;, 114, &lt;span class="nt"&gt;-17&lt;/span&gt;, &lt;span class="nt"&gt;-44&lt;/span&gt;, 72, 123, 125, 50, 92, 3, 110, &lt;span class="nt"&gt;-21&lt;/span&gt;, &lt;span class="nt"&gt;-38&lt;/span&gt;, 74, 25, 42, 35, 117, 72], &lt;span class="nv"&gt;userKey&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;null, &lt;span class="nv"&gt;lastUpdateTimeMs&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;1670523696059, &lt;span class="nv"&gt;userKeyString&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;null, &lt;span class="nv"&gt;digestString&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;&lt;span class="s1"&gt;'M4hy79RIe30yXANu69pKGSojdUg='&lt;/span&gt;&lt;span class="o"&gt;)&lt;/span&gt;
2022-12-08 18:21:36.158 GMT DEBUG record-parser - parsed message fields: &lt;span class="nv"&gt;key&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;Key&lt;span class="o"&gt;(&lt;/span&gt;&lt;span class="nv"&gt;namespace&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;&lt;span class="s1"&gt;'test'&lt;/span&gt;, &lt;span class="nb"&gt;set&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;testset, &lt;span class="nv"&gt;digest&lt;/span&gt;&lt;span class="o"&gt;=[&lt;/span&gt;124, &lt;span class="nt"&gt;-6&lt;/span&gt;, &lt;span class="nt"&gt;-70&lt;/span&gt;, &lt;span class="nt"&gt;-23&lt;/span&gt;, 44, 41, &lt;span class="nt"&gt;-19&lt;/span&gt;, 40, &lt;span class="nt"&gt;-11&lt;/span&gt;, &lt;span class="nt"&gt;-16&lt;/span&gt;, 126, 120, 81, &lt;span class="nt"&gt;-113&lt;/span&gt;, &lt;span class="nt"&gt;-112&lt;/span&gt;, &lt;span class="nt"&gt;-79&lt;/span&gt;, 66, 77, &lt;span class="nt"&gt;-99&lt;/span&gt;, &lt;span class="nt"&gt;-6&lt;/span&gt;&lt;span class="o"&gt;]&lt;/span&gt;, &lt;span class="nv"&gt;userKey&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;null, &lt;span class="nv"&gt;lastUpdateTimeMs&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;1670523696059, &lt;span class="nv"&gt;userKeyString&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;null, &lt;span class="nv"&gt;digestString&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;&lt;span class="s1"&gt;'fPq66Swp7Sj18H54UY+QsUJNnfo='&lt;/span&gt;&lt;span class="o"&gt;)&lt;/span&gt;
2022-12-08 18:21:36.158 GMT DEBUG record-parser - parsed message fields: &lt;span class="nv"&gt;key&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;Key&lt;span class="o"&gt;(&lt;/span&gt;&lt;span class="nv"&gt;namespace&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;&lt;span class="s1"&gt;'test'&lt;/span&gt;, &lt;span class="nb"&gt;set&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;testset, &lt;span class="nv"&gt;digest&lt;/span&gt;&lt;span class="o"&gt;=[&lt;/span&gt;&lt;span class="nt"&gt;-127&lt;/span&gt;, &lt;span class="nt"&gt;-118&lt;/span&gt;, 63, &lt;span class="nt"&gt;-32&lt;/span&gt;, &lt;span class="nt"&gt;-74&lt;/span&gt;, 60, &lt;span class="nt"&gt;-60&lt;/span&gt;, 86, 31, &lt;span class="nt"&gt;-119&lt;/span&gt;, &lt;span class="nt"&gt;-1&lt;/span&gt;, &lt;span class="nt"&gt;-105&lt;/span&gt;, &lt;span class="nt"&gt;-108&lt;/span&gt;, &lt;span class="nt"&gt;-59&lt;/span&gt;, 111, 48, &lt;span class="nt"&gt;-34&lt;/span&gt;, &lt;span class="nt"&gt;-61&lt;/span&gt;, &lt;span class="nt"&gt;-108&lt;/span&gt;, &lt;span class="nt"&gt;-5&lt;/span&gt;&lt;span class="o"&gt;]&lt;/span&gt;, &lt;span class="nv"&gt;userKey&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;null, &lt;span class="nv"&gt;lastUpdateTimeMs&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;1670523696105, &lt;span class="nv"&gt;userKeyString&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;null, &lt;span class="nv"&gt;digestString&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;&lt;span class="s1"&gt;'gYo/4LY8xFYfif+XlMVvMN7DlPs='&lt;/span&gt;&lt;span class="o"&gt;)&lt;/span&gt;
2022-12-08 18:21:36.256 GMT DEBUG record-parser - parsed message fields: &lt;span class="nv"&gt;key&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;Key&lt;span class="o"&gt;(&lt;/span&gt;&lt;span class="nv"&gt;namespace&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;&lt;span class="s1"&gt;'test'&lt;/span&gt;, &lt;span class="nb"&gt;set&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;testset, &lt;span class="nv"&gt;digest&lt;/span&gt;&lt;span class="o"&gt;=[&lt;/span&gt;99, 1, 80, 2, 76, &lt;span class="nt"&gt;-43&lt;/span&gt;, 125, 77, 47, 8, 6, 35, 49, 117, &lt;span class="nt"&gt;-35&lt;/span&gt;, 54, 120, &lt;span class="nt"&gt;-29&lt;/span&gt;, 118, &lt;span class="nt"&gt;-72&lt;/span&gt;&lt;span class="o"&gt;]&lt;/span&gt;, &lt;span class="nv"&gt;userKey&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;null, &lt;span class="nv"&gt;lastUpdateTimeMs&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;1670523696178, &lt;span class="nv"&gt;userKeyString&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;null, &lt;span class="nv"&gt;digestString&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;&lt;span class="s1"&gt;'YwFQAkzVfU0vCAYjMXXdNnjjdrg='&lt;/span&gt;&lt;span class="o"&gt;)&lt;/span&gt;
2022-12-08 18:21:36.257 GMT DEBUG record-parser - parsed message fields: &lt;span class="nv"&gt;key&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;Key&lt;span class="o"&gt;(&lt;/span&gt;&lt;span class="nv"&gt;namespace&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;&lt;span class="s1"&gt;'test'&lt;/span&gt;, &lt;span class="nb"&gt;set&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;testset, &lt;span class="nv"&gt;digest&lt;/span&gt;&lt;span class="o"&gt;=[&lt;/span&gt;&lt;span class="nt"&gt;-88&lt;/span&gt;, &lt;span class="nt"&gt;-46&lt;/span&gt;, &lt;span class="nt"&gt;-48&lt;/span&gt;, &lt;span class="nt"&gt;-33&lt;/span&gt;, 77, &lt;span class="nt"&gt;-120&lt;/span&gt;, 123, &lt;span class="nt"&gt;-101&lt;/span&gt;, &lt;span class="nt"&gt;-70&lt;/span&gt;, &lt;span class="nt"&gt;-20&lt;/span&gt;, &lt;span class="nt"&gt;-96&lt;/span&gt;, &lt;span class="nt"&gt;-104&lt;/span&gt;, &lt;span class="nt"&gt;-51&lt;/span&gt;, &lt;span class="nt"&gt;-90&lt;/span&gt;, 28, &lt;span class="nt"&gt;-15&lt;/span&gt;, 70, 11, 118, 83], &lt;span class="nv"&gt;userKey&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;null, &lt;span class="nv"&gt;lastUpdateTimeMs&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;1670523696202, &lt;span class="nv"&gt;userKeyString&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;null, &lt;span class="nv"&gt;digestString&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;&lt;span class="s1"&gt;'qNLQ302Ie5u67KCYzaYc8UYLdlM='&lt;/span&gt;&lt;span class="o"&gt;)&lt;/span&gt;
2022-12-08 18:21:36.257 GMT DEBUG record-parser - parsed message fields: &lt;span class="nv"&gt;key&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;Key&lt;span class="o"&gt;(&lt;/span&gt;&lt;span class="nv"&gt;namespace&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;&lt;span class="s1"&gt;'test'&lt;/span&gt;, &lt;span class="nb"&gt;set&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;testset, &lt;span class="nv"&gt;digest&lt;/span&gt;&lt;span class="o"&gt;=[&lt;/span&gt;&lt;span class="nt"&gt;-97&lt;/span&gt;, 18, 28, &lt;span class="nt"&gt;-43&lt;/span&gt;, 75, 42, &lt;span class="nt"&gt;-22&lt;/span&gt;, &lt;span class="nt"&gt;-126&lt;/span&gt;, &lt;span class="nt"&gt;-61&lt;/span&gt;, &lt;span class="nt"&gt;-108&lt;/span&gt;, &lt;span class="nt"&gt;-36&lt;/span&gt;, 118, &lt;span class="nt"&gt;-86&lt;/span&gt;, &lt;span class="nt"&gt;-105&lt;/span&gt;, &lt;span class="nt"&gt;-52&lt;/span&gt;, 119, &lt;span class="nt"&gt;-39&lt;/span&gt;, &lt;span class="nt"&gt;-33&lt;/span&gt;, &lt;span class="nt"&gt;-127&lt;/span&gt;, &lt;span class="nt"&gt;-76&lt;/span&gt;&lt;span class="o"&gt;]&lt;/span&gt;, &lt;span class="nv"&gt;userKey&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;null, &lt;span class="nv"&gt;lastUpdateTimeMs&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;1670523696175, &lt;span class="nv"&gt;userKeyString&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;null, &lt;span class="nv"&gt;digestString&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;&lt;span class="s1"&gt;'nxIc1Usq6oLDlNx2qpfMd9nfgbQ='&lt;/span&gt;&lt;span class="o"&gt;)&lt;/span&gt;
2022-12-08 18:21:36.257 GMT DEBUG record-parser - parsed message fields: &lt;span class="nv"&gt;key&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;Key&lt;span class="o"&gt;(&lt;/span&gt;&lt;span class="nv"&gt;namespace&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;&lt;span class="s1"&gt;'test'&lt;/span&gt;, &lt;span class="nb"&gt;set&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;testset, &lt;span class="nv"&gt;digest&lt;/span&gt;&lt;span class="o"&gt;=[&lt;/span&gt;&lt;span class="nt"&gt;-120&lt;/span&gt;, 103, &lt;span class="nt"&gt;-51&lt;/span&gt;, 57, &lt;span class="nt"&gt;-71&lt;/span&gt;, &lt;span class="nt"&gt;-106&lt;/span&gt;, 13, &lt;span class="nt"&gt;-48&lt;/span&gt;, 100, 28, 59, &lt;span class="nt"&gt;-3&lt;/span&gt;, &lt;span class="nt"&gt;-39&lt;/span&gt;, &lt;span class="nt"&gt;-56&lt;/span&gt;, &lt;span class="nt"&gt;-67&lt;/span&gt;, &lt;span class="nt"&gt;-103&lt;/span&gt;, 29, 36, 75, 119], &lt;span class="nv"&gt;userKey&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;null, &lt;span class="nv"&gt;lastUpdateTimeMs&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;1670523696191, &lt;span class="nv"&gt;userKeyString&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;null, &lt;span class="nv"&gt;digestString&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;&lt;span class="s1"&gt;'iGfNObmWDdBkHDv92ci9mR0kS3c='&lt;/span&gt;&lt;span class="o"&gt;)&lt;/span&gt;
2022-12-08 18:21:36.257 GMT DEBUG record-parser - parsed message fields: &lt;span class="nv"&gt;key&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;Key&lt;span class="o"&gt;(&lt;/span&gt;&lt;span class="nv"&gt;namespace&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;&lt;span class="s1"&gt;'test'&lt;/span&gt;, &lt;span class="nb"&gt;set&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;testset, &lt;span class="nv"&gt;digest&lt;/span&gt;&lt;span class="o"&gt;=[&lt;/span&gt;&lt;span class="nt"&gt;-47&lt;/span&gt;, 88, &lt;span class="nt"&gt;-13&lt;/span&gt;, 13, &lt;span class="nt"&gt;-35&lt;/span&gt;, 77, 24, 22, &lt;span class="nt"&gt;-40&lt;/span&gt;, &lt;span class="nt"&gt;-61&lt;/span&gt;, &lt;span class="nt"&gt;-118&lt;/span&gt;, &lt;span class="nt"&gt;-115&lt;/span&gt;, 82, 13, 127, &lt;span class="nt"&gt;-125&lt;/span&gt;, 53, 66, &lt;span class="nt"&gt;-22&lt;/span&gt;, &lt;span class="nt"&gt;-8&lt;/span&gt;&lt;span class="o"&gt;]&lt;/span&gt;, &lt;span class="nv"&gt;userKey&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;null, &lt;span class="nv"&gt;lastUpdateTimeMs&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;1670523696233, &lt;span class="nv"&gt;userKeyString&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;null, &lt;span class="nv"&gt;digestString&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;&lt;span class="s1"&gt;'0VjzDd1NGBbYw4qNUg1/gzVC6vg='&lt;/span&gt;&lt;span class="o"&gt;)&lt;/span&gt;
2022-12-08 18:21:36.258 GMT DEBUG record-parser - parsed message fields: &lt;span class="nv"&gt;key&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;Key&lt;span class="o"&gt;(&lt;/span&gt;&lt;span class="nv"&gt;namespace&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;&lt;span class="s1"&gt;'test'&lt;/span&gt;, &lt;span class="nb"&gt;set&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;testset, &lt;span class="nv"&gt;digest&lt;/span&gt;&lt;span class="o"&gt;=[&lt;/span&gt;&lt;span class="nt"&gt;-63&lt;/span&gt;, &lt;span class="nt"&gt;-11&lt;/span&gt;, 93, &lt;span class="nt"&gt;-90&lt;/span&gt;, 47, 29, &lt;span class="nt"&gt;-63&lt;/span&gt;, 36, 12, 53, &lt;span class="nt"&gt;-86&lt;/span&gt;, 84, 57, &lt;span class="nt"&gt;-125&lt;/span&gt;, 16, 43, &lt;span class="nt"&gt;-18&lt;/span&gt;, 93, 56, 9], &lt;span class="nv"&gt;userKey&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;null, &lt;span class="nv"&gt;lastUpdateTimeMs&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;1670523696186, &lt;span class="nv"&gt;userKeyString&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;null, &lt;span class="nv"&gt;digestString&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;&lt;span class="s1"&gt;'wfVdpi8dwSQMNapUOYMQK+5dOAk='&lt;/span&gt;&lt;span class="o"&gt;)&lt;/span&gt;
2022-12-08 18:21:36.258 GMT DEBUG record-parser - parsed message fields: &lt;span class="nv"&gt;key&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;Key&lt;span class="o"&gt;(&lt;/span&gt;&lt;span class="nv"&gt;namespace&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;&lt;span class="s1"&gt;'test'&lt;/span&gt;, &lt;span class="nb"&gt;set&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;testset, &lt;span class="nv"&gt;digest&lt;/span&gt;&lt;span class="o"&gt;=[&lt;/span&gt;&lt;span class="nt"&gt;-23&lt;/span&gt;, 101, &lt;span class="nt"&gt;-114&lt;/span&gt;, &lt;span class="nt"&gt;-87&lt;/span&gt;, &lt;span class="nt"&gt;-52&lt;/span&gt;, 107, 36, 113, 101, 33, &lt;span class="nt"&gt;-16&lt;/span&gt;, 82, &lt;span class="nt"&gt;-95&lt;/span&gt;, 97, 34, &lt;span class="nt"&gt;-121&lt;/span&gt;, 82, &lt;span class="nt"&gt;-97&lt;/span&gt;, 40, 59], &lt;span class="nv"&gt;userKey&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;null, &lt;span class="nv"&gt;lastUpdateTimeMs&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;1670523696145, &lt;span class="nv"&gt;userKeyString&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;null, &lt;span class="nv"&gt;digestString&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;&lt;span class="s1"&gt;'6WWOqcxrJHFlIfBSoWEih1KfKDs='&lt;/span&gt;&lt;span class="o"&gt;)&lt;/span&gt;
2022-12-08 18:21:36.258 GMT DEBUG record-parser - parsed message fields: &lt;span class="nv"&gt;key&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;Key&lt;span class="o"&gt;(&lt;/span&gt;&lt;span class="nv"&gt;namespace&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;&lt;span class="s1"&gt;'test'&lt;/span&gt;, &lt;span class="nb"&gt;set&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;testset, &lt;span class="nv"&gt;digest&lt;/span&gt;&lt;span class="o"&gt;=[&lt;/span&gt;&lt;span class="nt"&gt;-77&lt;/span&gt;, &lt;span class="nt"&gt;-118&lt;/span&gt;, 49, 4, &lt;span class="nt"&gt;-75&lt;/span&gt;, 123, 81, 2, &lt;span class="nt"&gt;-103&lt;/span&gt;, &lt;span class="nt"&gt;-73&lt;/span&gt;, 42, &lt;span class="nt"&gt;-70&lt;/span&gt;, &lt;span class="nt"&gt;-54&lt;/span&gt;, 95, 98, 23, 73, 66, &lt;span class="nt"&gt;-86&lt;/span&gt;, 7], &lt;span class="nv"&gt;userKey&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;null, &lt;span class="nv"&gt;lastUpdateTimeMs&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;1670523696230, &lt;span class="nv"&gt;userKeyString&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;null, &lt;span class="nv"&gt;digestString&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;&lt;span class="s1"&gt;'s4oxBLV7UQKZtyq6yl9iF0lCqgc='&lt;/span&gt;&lt;span class="o"&gt;)&lt;/span&gt;
2022-12-08 18:21:36.258 GMT DEBUG record-parser - parsed message fields: &lt;span class="nv"&gt;key&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;Key&lt;span class="o"&gt;(&lt;/span&gt;&lt;span class="nv"&gt;namespace&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;&lt;span class="s1"&gt;'test'&lt;/span&gt;, &lt;span class="nb"&gt;set&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;testset, &lt;span class="nv"&gt;digest&lt;/span&gt;&lt;span class="o"&gt;=[&lt;/span&gt;50, &lt;span class="nt"&gt;-38&lt;/span&gt;, &lt;span class="nt"&gt;-31&lt;/span&gt;, &lt;span class="nt"&gt;-54&lt;/span&gt;, &lt;span class="nt"&gt;-122&lt;/span&gt;, &lt;span class="nt"&gt;-113&lt;/span&gt;, &lt;span class="nt"&gt;-38&lt;/span&gt;, 88, 15, 7, 96, 51, &lt;span class="nt"&gt;-92&lt;/span&gt;, &lt;span class="nt"&gt;-25&lt;/span&gt;, 60, &lt;span class="nt"&gt;-104&lt;/span&gt;, 26, 113, &lt;span class="nt"&gt;-117&lt;/span&gt;, &lt;span class="nt"&gt;-82&lt;/span&gt;&lt;span class="o"&gt;]&lt;/span&gt;, &lt;span class="nv"&gt;userKey&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;null, &lt;span class="nv"&gt;lastUpdateTimeMs&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;1670523696157, &lt;span class="nv"&gt;userKeyString&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;null, &lt;span class="nv"&gt;digestString&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;&lt;span class="s1"&gt;'MtrhyoaP2lgPB2AzpOc8mBpxi64='&lt;/span&gt;&lt;span class="o"&gt;)&lt;/span&gt;
2022-12-08 18:21:36.258 GMT DEBUG record-parser - parsed message fields: &lt;span class="nv"&gt;key&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;Key&lt;span class="o"&gt;(&lt;/span&gt;&lt;span class="nv"&gt;namespace&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;&lt;span class="s1"&gt;'test'&lt;/span&gt;, &lt;span class="nb"&gt;set&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;testset, &lt;span class="nv"&gt;digest&lt;/span&gt;&lt;span class="o"&gt;=[&lt;/span&gt;&lt;span class="nt"&gt;-77&lt;/span&gt;, 31, 67, &lt;span class="nt"&gt;-18&lt;/span&gt;, &lt;span class="nt"&gt;-52&lt;/span&gt;, &lt;span class="nt"&gt;-114&lt;/span&gt;, 42, &lt;span class="nt"&gt;-18&lt;/span&gt;, 36, &lt;span class="nt"&gt;-111&lt;/span&gt;, 89, 62, 109, 114, &lt;span class="nt"&gt;-54&lt;/span&gt;, 54, &lt;span class="nt"&gt;-121&lt;/span&gt;, &lt;span class="nt"&gt;-110&lt;/span&gt;, &lt;span class="nt"&gt;-88&lt;/span&gt;, &lt;span class="nt"&gt;-108&lt;/span&gt;&lt;span class="o"&gt;]&lt;/span&gt;, &lt;span class="nv"&gt;userKey&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;null, &lt;span class="nv"&gt;lastUpdateTimeMs&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;1670523696206, &lt;span class="nv"&gt;userKeyString&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;null, &lt;span class="nv"&gt;digestString&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;&lt;span class="s1"&gt;'sx9D7syOKu4kkVk+bXLKNoeSqJQ='&lt;/span&gt;&lt;span class="o"&gt;)&lt;/span&gt;
2022-12-08 18:21:36.258 GMT DEBUG record-parser - parsed message fields: &lt;span class="nv"&gt;key&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;Key&lt;span class="o"&gt;(&lt;/span&gt;&lt;span class="nv"&gt;namespace&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;&lt;span class="s1"&gt;'test'&lt;/span&gt;, &lt;span class="nb"&gt;set&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;testset, &lt;span class="nv"&gt;digest&lt;/span&gt;&lt;span class="o"&gt;=[&lt;/span&gt;73, 11, 98, &lt;span class="nt"&gt;-50&lt;/span&gt;, 32, 12, 0, &lt;span class="nt"&gt;-50&lt;/span&gt;, 22, &lt;span class="nt"&gt;-101&lt;/span&gt;, &lt;span class="nt"&gt;-108&lt;/span&gt;, 18, 38, 7, &lt;span class="nt"&gt;-65&lt;/span&gt;, 6, &lt;span class="nt"&gt;-58&lt;/span&gt;, 60, &lt;span class="nt"&gt;-6&lt;/span&gt;, &lt;span class="nt"&gt;-33&lt;/span&gt;&lt;span class="o"&gt;]&lt;/span&gt;, &lt;span class="nv"&gt;userKey&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;null, &lt;span class="nv"&gt;lastUpdateTimeMs&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;1670523696171, &lt;span class="nv"&gt;userKeyString&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;null, &lt;span class="nv"&gt;digestString&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;&lt;span class="s1"&gt;'SQtiziAMAM4Wm5QSJge/BsY8+t8='&lt;/span&gt;&lt;span class="o"&gt;)&lt;/span&gt;
2022-12-08 18:21:36.258 GMT DEBUG record-parser - parsed message fields: &lt;span class="nv"&gt;key&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;Key&lt;span class="o"&gt;(&lt;/span&gt;&lt;span class="nv"&gt;namespace&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;&lt;span class="s1"&gt;'test'&lt;/span&gt;, &lt;span class="nb"&gt;set&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;testset, &lt;span class="nv"&gt;digest&lt;/span&gt;&lt;span class="o"&gt;=[&lt;/span&gt;109, &lt;span class="nt"&gt;-82&lt;/span&gt;, 24, 53, 35, 89, &lt;span class="nt"&gt;-72&lt;/span&gt;, &lt;span class="nt"&gt;-117&lt;/span&gt;, &lt;span class="nt"&gt;-22&lt;/span&gt;, 79, 119, &lt;span class="nt"&gt;-89&lt;/span&gt;, 56, &lt;span class="nt"&gt;-5&lt;/span&gt;, 0, &lt;span class="nt"&gt;-103&lt;/span&gt;, &lt;span class="nt"&gt;-54&lt;/span&gt;, 51, 25, 126], &lt;span class="nv"&gt;userKey&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;null, &lt;span class="nv"&gt;lastUpdateTimeMs&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;1670523696146, &lt;span class="nv"&gt;userKeyString&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;null, &lt;span class="nv"&gt;digestString&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;&lt;span class="s1"&gt;'ba4YNSNZuIvqT3enOPsAmcozGX4='&lt;/span&gt;&lt;span class="o"&gt;)&lt;/span&gt;
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;h2&gt;
  
  
  Achieve dependable resiliency with Aerospike
&lt;/h2&gt;

&lt;p&gt;Aerospike's XDR feature provides a reliable solution for mitigating the risk of cluster unavailability. By asynchronously replicating data between data centers, users can ensure availability. This step-by-step walkthrough demonstrates how to accomplish this seamlessly in a Kubernetes environment using the xdr-proxy. With the Aerospike Kubernetes Operator, you can effortlessly avoid network complications and achieve optimal performance with minimal effort.&lt;/p&gt;

&lt;p&gt;Share your experience! Your feedback is important to us. &lt;a href="https://discord.com/invite/NfC93wJEJU" rel="noopener noreferrer"&gt;Join our Aerospike community&lt;/a&gt;!&lt;/p&gt;

</description>
      <category>k8s</category>
      <category>kubernetes</category>
      <category>xdr</category>
    </item>
    <item>
      <title>Using AWS IAM for client authentication</title>
      <dc:creator>Naresh Maharaj</dc:creator>
      <pubDate>Wed, 20 Dec 2023 20:11:36 +0000</pubDate>
      <link>https://dev.to/aerospike/using-aws-iam-for-client-authentication-47nf</link>
      <guid>https://dev.to/aerospike/using-aws-iam-for-client-authentication-47nf</guid>
      <description>&lt;p&gt;&lt;a href="https://media2.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdeveloper-hub.s3.us-west-1.amazonaws.com%2Fnaresh-maharaj%2Fmsk%2Flearn-how-to-use-aws-iam-to-authenticate-clients-aerospike-blog.webp" class="article-body-image-wrapper"&gt;&lt;img src="https://media2.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdeveloper-hub.s3.us-west-1.amazonaws.com%2Fnaresh-maharaj%2Fmsk%2Flearn-how-to-use-aws-iam-to-authenticate-clients-aerospike-blog.webp" alt="heroimg" width="800" height="450"&gt;&lt;/a&gt;&lt;/p&gt;

&lt;p&gt;In this blog post, we detail how to create an Amazon Managed Streaming for Apache Kafka (Amazon MSK) resource using &lt;a href="https://aws.amazon.com/iam/getting-started/" rel="noopener noreferrer"&gt;AWS Identity and Access Management&lt;/a&gt; (AWS IAM) in roles and policies to authenticate user access. In the initial step, we establish an &lt;a href="https://aerospike.com/developer/blog/improving-in-memory-performance-aerospike-database-7" rel="noopener noreferrer"&gt;Aerospike Database&lt;/a&gt; cluster and insert sample messages into the database. Subsequently, we observe in real time how these messages are streamed to Amazon MSK using Aerospike's Kafka Source Connector. &amp;lt;!--truncate--&amp;gt;Below we provide a comprehensive, step-by-step guide for users to successfully implement this process.&lt;/p&gt;

&lt;p&gt;&lt;a href="https://media2.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2Fe5qk6nh8ke5cp2p6r2cu.png" class="article-body-image-wrapper"&gt;&lt;img src="https://media2.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2Fe5qk6nh8ke5cp2p6r2cu.png" alt="image" width="800" height="239"&gt;&lt;/a&gt;&lt;/p&gt;

&lt;h2&gt;
  
  
  AWS MSK Kafka
&lt;/h2&gt;

&lt;p&gt;In this section, you will set up a simple three-node Kafka cluster. &lt;/p&gt;

&lt;ol&gt;
&lt;li&gt;Visit the AWS console and select &lt;strong&gt;MSK service&lt;/strong&gt;. &lt;/li&gt;
&lt;/ol&gt;

&lt;p&gt;&lt;a href="https://media2.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2Fr8naj8wvike4csec7d4l.png" class="article-body-image-wrapper"&gt;&lt;img src="https://media2.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2Fr8naj8wvike4csec7d4l.png" alt="image" width="800" height="141"&gt;&lt;/a&gt;&lt;/p&gt;

&lt;ol&gt;
&lt;li&gt;Create a new cluster by selecting &lt;strong&gt;Create Cluster&lt;/strong&gt; → &lt;strong&gt;Quick Create&lt;/strong&gt;.&lt;/li&gt;
&lt;/ol&gt;

&lt;p&gt;&lt;a href="https://media2.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2Fg5e3zzv53e8o378andsb.png" class="article-body-image-wrapper"&gt;&lt;img src="https://media2.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2Fg5e3zzv53e8o378andsb.png" alt="image" width="800" height="222"&gt;&lt;/a&gt;&lt;/p&gt;

&lt;ol&gt;
&lt;li&gt;Select the provisioned cluster and instance type of kafka.t3.small.&lt;/li&gt;
&lt;/ol&gt;

&lt;p&gt;&lt;a href="https://media2.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2Ftx79xiu82srhc7zd1aj1.png" class="article-body-image-wrapper"&gt;&lt;img src="https://media2.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2Ftx79xiu82srhc7zd1aj1.png" alt="image" width="800" height="476"&gt;&lt;/a&gt;&lt;/p&gt;

&lt;ol&gt;
&lt;li&gt;Select the EBS storage type per broker of 10 GB.&lt;/li&gt;
&lt;/ol&gt;

&lt;p&gt;&lt;a href="https://media2.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2Fr9uzaheevg9853ont9se.png" class="article-body-image-wrapper"&gt;&lt;img src="https://media2.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2Fr9uzaheevg9853ont9se.png" alt="image" width="800" height="118"&gt;&lt;/a&gt;&lt;/p&gt;

&lt;p&gt;&lt;strong&gt;NOTE&lt;/strong&gt;: Take note of the VPC, subnets, and security group ID, as you will require these details later in the article.&lt;/p&gt;

&lt;p&gt;The next step is the critical step where you will create the AWS IAM policy and roles. This setup ensures that the Aerospike Database authenticates using AWS IAM to write data to MSK.&lt;/p&gt;

&lt;ol&gt;
&lt;li&gt;From the AWS Console, select the AWS IAM service.&lt;/li&gt;
&lt;/ol&gt;

&lt;p&gt;&lt;a href="https://media2.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2F6o7dpkwsz3wyyrhqdsex.png" class="article-body-image-wrapper"&gt;&lt;img src="https://media2.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2F6o7dpkwsz3wyyrhqdsex.png" alt="image" width="800" height="240"&gt;&lt;/a&gt;&lt;/p&gt;

&lt;ol&gt;
&lt;li&gt;To create a new AWS IAM policy, copy the following JSON and paste it in the JSON tab. Replace &lt;code&gt;region:Account-ID&lt;/code&gt; with your own region and AWS account ID.&lt;/li&gt;
&lt;/ol&gt;

&lt;p&gt;&lt;a href="https://media2.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2Fwc3c2xe0ijvz1tp5j7un.png" class="article-body-image-wrapper"&gt;&lt;img src="https://media2.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2Fwc3c2xe0ijvz1tp5j7un.png" alt="image" width="800" height="144"&gt;&lt;/a&gt;&lt;/p&gt;

&lt;ol&gt;
&lt;li&gt;Save the policy and name it msk-tutorial-policy.
&lt;/li&gt;
&lt;/ol&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight plaintext"&gt;&lt;code&gt;{
    "Version": "2012-10-17",
    "Statement": [
        {
            "Effect": "Allow",
            "Action": [
                "kafka-cluster:Connect",
                "kafka-cluster:AlterCluster",
                "kafka-cluster:DescribeCluster"
            ],
            "Resource": [
                "arn:aws:kafka:region:Account-ID:cluster/MSKTutorialCluster/*"
            ]
        },
        {
            "Effect": "Allow",
            "Action": [
                "kafka-cluster:*Topic*",
                "kafka-cluster:WriteData",
                "kafka-cluster:ReadData"
            ],
            "Resource": [
                "arn:aws:kafka:region:Account-ID:topic/MSKTutorialCluster/*"
            ]
        },
        {
            "Effect": "Allow",
            "Action": [
                "kafka-cluster:AlterGroup",
                "kafka-cluster:DescribeGroup"
            ],
            "Resource": [
                "arn:aws:kafka:region:Account-ID:group/MSKTutorialCluster/*"
            ]
        }
    ]
}
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;ol&gt;
&lt;li&gt;Create the IAM role.&lt;/li&gt;
&lt;/ol&gt;

&lt;p&gt;&lt;a href="https://media2.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2Feovpb69036ilbkwkqpag.png" class="article-body-image-wrapper"&gt;&lt;img src="https://media2.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2Feovpb69036ilbkwkqpag.png" alt="image" width="800" height="233"&gt;&lt;/a&gt;&lt;/p&gt;

&lt;ol&gt;
&lt;li&gt;Under &lt;strong&gt;Common Use Cases&lt;/strong&gt;, select &lt;strong&gt;EC2&lt;/strong&gt; and then &lt;strong&gt;Next&lt;/strong&gt;.&lt;/li&gt;
&lt;/ol&gt;

&lt;p&gt;&lt;a href="https://media2.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2Fe2sfq0vdp33msh58qgmq.png" class="article-body-image-wrapper"&gt;&lt;img src="https://media2.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2Fe2sfq0vdp33msh58qgmq.png" alt="image" width="800" height="189"&gt;&lt;/a&gt;&lt;/p&gt;

&lt;ol&gt;
&lt;li&gt;Under &lt;strong&gt;Permissions&lt;/strong&gt;, select the policy named &lt;strong&gt;msk-tutorial-policy&lt;/strong&gt; and then &lt;strong&gt;Next&lt;/strong&gt;.&lt;/li&gt;
&lt;/ol&gt;

&lt;p&gt;&lt;a href="https://media2.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2Ff2qsoiyy7wim19bynp2t.png" class="article-body-image-wrapper"&gt;&lt;img src="https://media2.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2Ff2qsoiyy7wim19bynp2t.png" alt="image" width="800" height="304"&gt;&lt;/a&gt;&lt;/p&gt;

&lt;ol&gt;
&lt;li&gt;Give the role a name like &lt;strong&gt;msk-tutorial-role&lt;/strong&gt; and click the &lt;strong&gt;Create Role&lt;/strong&gt; button.&lt;/li&gt;
&lt;/ol&gt;

&lt;h2&gt;
  
  
  Kafka client machine
&lt;/h2&gt;

&lt;p&gt;Next, create a client machine to install the Kafka tools necessary to access our MSK cluster.&lt;/p&gt;

&lt;ol&gt;
&lt;li&gt;Create a new ec2 instance using type &lt;code&gt;t2.micro&lt;/code&gt;
&lt;/li&gt;
&lt;/ol&gt;

&lt;p&gt;&lt;a href="https://media2.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2F411olph76r7l0r4rzxye.png" class="article-body-image-wrapper"&gt;&lt;img src="https://media2.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2F411olph76r7l0r4rzxye.png" alt="image" width="800" height="289"&gt;&lt;/a&gt;&lt;/p&gt;

&lt;ol&gt;
&lt;li&gt;Use the default AMI: &lt;code&gt;Amazon Linux 2023&lt;/code&gt;
&lt;/li&gt;
&lt;/ol&gt;

&lt;p&gt;&lt;a href="https://media2.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2F002nbnvqt0txvcs98iiy.png" class="article-body-image-wrapper"&gt;&lt;img src="https://media2.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2F002nbnvqt0txvcs98iiy.png" alt="image" width="800" height="515"&gt;&lt;/a&gt;&lt;br&gt;
&lt;em&gt;The AMI may be different depending on your region&lt;/em&gt;&lt;/p&gt;

&lt;ol&gt;
&lt;li&gt;Create a key-pair if required. I am using an already existing key-pair.&lt;/li&gt;
&lt;/ol&gt;

&lt;p&gt;&lt;a href="https://media2.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2Fe8ob38zxcyu93ithnbrx.png" class="article-body-image-wrapper"&gt;&lt;img src="https://media2.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2Fe8ob38zxcyu93ithnbrx.png" alt="image" width="800" height="242"&gt;&lt;/a&gt;&lt;/p&gt;

&lt;ol&gt;
&lt;li&gt;Under &lt;strong&gt;Advanced Options.IAM instance profile&lt;/strong&gt;, select the IAM role created earlier.&lt;/li&gt;
&lt;/ol&gt;

&lt;p&gt;&lt;a href="https://media2.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2Fz33qc7474ozht078q2e2.png" class="article-body-image-wrapper"&gt;&lt;img src="https://media2.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2Fz33qc7474ozht078q2e2.png" alt="image" width="800" height="455"&gt;&lt;/a&gt;&lt;/p&gt;

&lt;ol&gt;
&lt;li&gt;&lt;p&gt;Launch the instance!&lt;/p&gt;&lt;/li&gt;
&lt;li&gt;&lt;p&gt;Under instances launched, choose the instance you just created. Click on the ‘Security’ tab and note the security group associated with this instance.&lt;br&gt;
e.g., &lt;code&gt;sg-0914e6271c97ae4c9 (launch-wizard-1)&lt;/code&gt;&lt;/p&gt;&lt;/li&gt;
&lt;li&gt;&lt;p&gt;Navigate to the &lt;a href="https://console.aws.amazon.com/vpc/" rel="noopener noreferrer"&gt;VPC section&lt;/a&gt; and select &lt;strong&gt;Security Groups&lt;/strong&gt; from the left-hand menu. Locate the security group associated with the MSK cluster, such as &lt;code&gt;sg-e5f51dfb&lt;/code&gt;, and choose &lt;strong&gt;Edit Inbound Rules&lt;/strong&gt;.&lt;/p&gt;&lt;/li&gt;
&lt;li&gt;&lt;p&gt;Create a new rule to allow all traffic from the new ec2 instance.&lt;/p&gt;&lt;/li&gt;
&lt;/ol&gt;

&lt;p&gt;&lt;a href="https://media2.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2Fgmh3x19wme2u6p8gldrf.png" class="article-body-image-wrapper"&gt;&lt;img src="https://media2.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2Fgmh3x19wme2u6p8gldrf.png" alt="image" width="800" height="62"&gt;&lt;/a&gt;&lt;/p&gt;
&lt;h2&gt;
  
  
  Kafka topics
&lt;/h2&gt;

&lt;p&gt;After successfully establishing your initial Kafka cluster and Kafka client machine, proceed to conduct testing. Verify the functionality by accessing the MSK cluster, creating a topic, producing and consuming sample messages, and ensuring that everything operates as anticipated.&lt;/p&gt;

&lt;ol&gt;
&lt;li&gt;&lt;p&gt;From the MSK Cluster, note the Kafka version being used. This example uses 2.8.1.&lt;/p&gt;&lt;/li&gt;
&lt;li&gt;&lt;p&gt;From the Kafka client machine, install Java 11+.&lt;br&gt;
&lt;/p&gt;&lt;/li&gt;
&lt;/ol&gt;
&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight shell"&gt;&lt;code&gt;&lt;span class="nb"&gt;sudo &lt;/span&gt;yum &lt;span class="nt"&gt;-y&lt;/span&gt; &lt;span class="nb"&gt;install &lt;/span&gt;java-11
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;


&lt;ol&gt;
&lt;li&gt;Download Apache Kafka using wget, then extract the archive using tar.
&lt;/li&gt;
&lt;/ol&gt;
&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight shell"&gt;&lt;code&gt;wget https://archive.apache.org/dist/kafka/2.8.1/kafka_2.12-2.8.1.tgz
&lt;span class="nb"&gt;tar&lt;/span&gt; &lt;span class="nt"&gt;-xzf&lt;/span&gt; kafka_2.12-2.8.1.tgz
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;


&lt;ol&gt;
&lt;li&gt;To use IAM, you will need the MSK IAM Auth jar file. Download the jar to the Kafka libs folder you just extracted.
&lt;/li&gt;
&lt;/ol&gt;
&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight shell"&gt;&lt;code&gt;&lt;span class="nb"&gt;cd &lt;/span&gt;kafka_2.12-2.8.1/libs/
wget https://github.com/aws/aws-msk-iam-auth/releases/download/v1.1.1/aws-msk-iam-auth-1.1.1-all.jar
&lt;span class="nb"&gt;cd&lt;/span&gt; ../bin/
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;


&lt;ol&gt;
&lt;li&gt;Create a file called client.properties to use when authenticating to MSK. It will define the SASL mechanism to use and reference the Java class file that will handle your IAM callbacks.
&lt;/li&gt;
&lt;/ol&gt;
&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight shell"&gt;&lt;code&gt;&lt;span class="nb"&gt;cat&lt;/span&gt; &lt;span class="o"&gt;&amp;lt;&amp;lt;&lt;/span&gt;&lt;span class="no"&gt;EOF&lt;/span&gt;&lt;span class="sh"&gt;&amp;gt; client.properties
security.protocol=SASL_SSL
sasl.mechanism=AWS_MSK_IAM
sasl.jaas.config=software.amazon.msk.auth.iam.IAMLoginModule required;
sasl.client.callback.handler.class=software.amazon.msk.auth.iam.IAMClientCallbackHandler
&lt;/span&gt;&lt;span class="no"&gt;EOF
&lt;/span&gt;&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;

&lt;h2&gt;
  
  
  Creating topics
&lt;/h2&gt;

&lt;ol&gt;
&lt;li&gt;Go to the AWS Console and view the MSK Cluster Client Information. There will be three endpoints to choose from, but you only require one.&lt;/li&gt;
&lt;/ol&gt;

&lt;p&gt;Example choose:&lt;/p&gt;

&lt;p&gt;&lt;code&gt;b-2.msktutorialcluster.450050.c11.kafka.us-east-1.amazonaws.com:9098&lt;/code&gt;&lt;/p&gt;

&lt;p&gt;&lt;a href="https://media2.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2Fawiwhojnwxb0jaeiculk.png" class="article-body-image-wrapper"&gt;&lt;img src="https://media2.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2Fawiwhojnwxb0jaeiculk.png" alt="image" width="800" height="274"&gt;&lt;/a&gt;&lt;/p&gt;

&lt;ol&gt;
&lt;li&gt;From the &lt;code&gt;kafka/bin&lt;/code&gt; folder, run the command to create a topic. Let's call it &lt;code&gt;aerospike-airforce-1&lt;/code&gt;.
&lt;/li&gt;
&lt;/ol&gt;
&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight shell"&gt;&lt;code&gt;&lt;span class="nb"&gt;export &lt;/span&gt;&lt;span class="nv"&gt;BootstrapServerString&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;&lt;span class="s2"&gt;"b-2.msktutorialcluster.450050.c11.kafka.us-east-1.amazonaws.com:9098"&lt;/span&gt;
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight shell"&gt;&lt;code&gt;./kafka-topics.sh &lt;span class="nt"&gt;--create&lt;/span&gt; &lt;span class="nt"&gt;--bootstrap-server&lt;/span&gt; &lt;span class="nv"&gt;$BootstrapServerString&lt;/span&gt; &lt;span class="nt"&gt;--command-config&lt;/span&gt; client.properties &lt;span class="nt"&gt;--replication-factor&lt;/span&gt; 3 &lt;span class="nt"&gt;--partitions&lt;/span&gt; 1 &lt;span class="nt"&gt;--topic&lt;/span&gt; aerospike-airforce-1
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;

&lt;h2&gt;
  
  
  Listing topics
&lt;/h2&gt;

&lt;p&gt;To list the topics, use the following command. Notice our latest topic, called aerospike-airforce-1, just showed up.&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight shell"&gt;&lt;code&gt;./kafka-topics.sh &lt;span class="nt"&gt;--bootstrap-server&lt;/span&gt; &lt;span class="nv"&gt;$BootstrapServerString&lt;/span&gt; &lt;span class="nt"&gt;--command-config&lt;/span&gt; client.properties &lt;span class="nt"&gt;--list&lt;/span&gt;

MSKTutorialTopic
__amazon_msk_canary
__consumer_offsets
aerospike
aerospike-airforce-1
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;h2&gt;
  
  
  Producer and consumer
&lt;/h2&gt;

&lt;p&gt;I agree that this is more of a Kafka-101 rather than a straightforward Hello-World scenario. Nonetheless, it is essential to test our configuration by sending and receiving messages from the designated Kafka topic before proceeding further.&lt;/p&gt;

&lt;p&gt;Produce some messages by opening a new window and running the following Kafka producer command. Type three or four messages, hitting the ‘Return’ key after each message.&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight shell"&gt;&lt;code&gt;./kafka-console-producer.sh &lt;span class="nt"&gt;--broker-list&lt;/span&gt; &lt;span class="nv"&gt;$BootstrapServerString&lt;/span&gt; &lt;span class="nt"&gt;--producer&lt;/span&gt;.config client.properties &lt;span class="nt"&gt;--topic&lt;/span&gt; aerospike-airforce-1
&lt;span class="o"&gt;&amp;gt;&lt;/span&gt;Instrument Check
&lt;span class="o"&gt;&amp;gt;&lt;/span&gt;Pre flight checks confirmed
&lt;span class="o"&gt;&amp;gt;&lt;/span&gt;Ready &lt;span class="k"&gt;for &lt;/span&gt;takeoff
&lt;span class="o"&gt;&amp;gt;&lt;/span&gt;Full throttle, flaps
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;p&gt;You're now ready to start a client consumer application. Open a new window and run the consumer. You should now see the same messages you published earlier.&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight shell"&gt;&lt;code&gt;./kafka-console-consumer.sh &lt;span class="nt"&gt;--bootstrap-server&lt;/span&gt; &lt;span class="nv"&gt;$BootstrapServerString&lt;/span&gt; &lt;span class="nt"&gt;--consumer&lt;/span&gt;.config client.properties &lt;span class="nt"&gt;--topic&lt;/span&gt; aerospike-airforce-1 &lt;span class="nt"&gt;--from-beginning&lt;/span&gt;
Instrument Check
Pre flight checks confirmed
Ready &lt;span class="k"&gt;for &lt;/span&gt;takeoff
Full throttle, flaps
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;h2&gt;
  
  
  Database source
&lt;/h2&gt;

&lt;p&gt;Let's review your achievements thus far. You've established a 3-node Kafka cluster in AWS utilizing MSK, incorporating IAM roles and permissions. Additionally, you have successfully created topics and demonstrated the production and consumption of messages using the IAM credentials established during the setup.&lt;/p&gt;

&lt;p&gt;The next phase of your journey involves installing the Aerospike Database, inserting messages, and configuring a simple XDR component. XDR is a Cross Datacenter Replication tool and is crucial for transmitting data from the Aerospike Database to the Aerospike Kafka Source Connector allowing us to subsequently forward messages to Amazon MSK.&lt;/p&gt;

&lt;h2&gt;
  
  
  Create the Aerospike Database
&lt;/h2&gt;

&lt;ol&gt;
&lt;li&gt;Start by creating a new ec2 instance. For this demo, you can use Rocky Linux 8&lt;/li&gt;
&lt;/ol&gt;

&lt;p&gt;Rocky 8 AMI: ami-043ceee68871e0bb5 ( us-east-1 )&lt;/p&gt;

&lt;p&gt;&lt;a href="https://media2.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2Flr4iw9ejogzrfi3crskt.png" class="article-body-image-wrapper"&gt;&lt;img src="https://media2.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2Flr4iw9ejogzrfi3crskt.png" alt="image" width="448" height="81"&gt;&lt;/a&gt;&lt;/p&gt;

&lt;ol&gt;
&lt;li&gt;Select the instance type as &lt;code&gt;t2.medium&lt;/code&gt;.&lt;/li&gt;
&lt;/ol&gt;

&lt;p&gt;&lt;a href="https://media2.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2Fdylf351ft9ivfjbzamed.png" class="article-body-image-wrapper"&gt;&lt;img src="https://media2.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2Fdylf351ft9ivfjbzamed.png" alt="image" width="764" height="226"&gt;&lt;/a&gt;&lt;/p&gt;

&lt;ol&gt;
&lt;li&gt;Add the extra volume for the Aerospike data storage layer. EBS volume is all that is required for now.&lt;/li&gt;
&lt;/ol&gt;

&lt;p&gt;&lt;a href="https://media2.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2Fzhm22ywwvlg4g798t99x.png" class="article-body-image-wrapper"&gt;&lt;img src="https://media2.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2Fzhm22ywwvlg4g798t99x.png" alt="image" width="768" height="329"&gt;&lt;/a&gt;&lt;/p&gt;

&lt;ol&gt;
&lt;li&gt;Launch the instance and connect to the host using ssh. If you have an Aerospike license feature file, upload it to the instance.&lt;/li&gt;
&lt;/ol&gt;

&lt;h2&gt;
  
  
  Install the Aerospike Database server
&lt;/h2&gt;

&lt;ol&gt;
&lt;li&gt;Run the following to install the Aerospike Database Server.
&lt;/li&gt;
&lt;/ol&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight shell"&gt;&lt;code&gt;&lt;span class="nb"&gt;export &lt;/span&gt;&lt;span class="nv"&gt;VER&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;&lt;span class="s2"&gt;"6.1.0.2"&lt;/span&gt;
&lt;span class="nb"&gt;sudo &lt;/span&gt;yum &lt;span class="nb"&gt;install &lt;/span&gt;java python3 openssl-devel wget git gcc maven bind-utils sysstat nc &lt;span class="nt"&gt;-y&lt;/span&gt;
wget &lt;span class="nt"&gt;-O&lt;/span&gt; aerospike-tools.tgz &lt;span class="s1"&gt;'https://www.aerospike.com/download/tools/latest/artifact/el8'&lt;/span&gt;
&lt;span class="nb"&gt;tar&lt;/span&gt; &lt;span class="nt"&gt;-xvf&lt;/span&gt; aerospike-tools.tgz
&lt;span class="nb"&gt;cd &lt;/span&gt;aerospike-tools_&lt;span class="k"&gt;*&lt;/span&gt;
&lt;span class="nb"&gt;sudo&lt;/span&gt; ./dep-check
&lt;span class="nb"&gt;sudo&lt;/span&gt; ./asinstall
wget &lt;span class="nt"&gt;-O&lt;/span&gt; aerospike.tgz https://enterprise.aerospike.com/enterprise/download/server/&lt;span class="nv"&gt;$VER&lt;/span&gt;/artifact/el8
&lt;span class="nb"&gt;tar&lt;/span&gt; &lt;span class="nt"&gt;-xvf&lt;/span&gt; aerospike.tgz
&lt;span class="nb"&gt;cd &lt;/span&gt;aerospike-server-enterprise-&lt;span class="nv"&gt;$VER&lt;/span&gt;&lt;span class="nt"&gt;-el8&lt;/span&gt;
&lt;span class="nb"&gt;sudo&lt;/span&gt; ./asinstall
&lt;span class="nb"&gt;sudo mkdir&lt;/span&gt; &lt;span class="nt"&gt;-p&lt;/span&gt; /var/log/aerospike/
&lt;span class="nb"&gt;sudo &lt;/span&gt;systemctl &lt;span class="nb"&gt;enable &lt;/span&gt;aerospike
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;ol&gt;
&lt;li&gt;Confirm the storage disk for Aerospike.
&lt;/li&gt;
&lt;/ol&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight shell"&gt;&lt;code&gt;lsblk
NAME    MAJ:MIN RM SIZE RO TYPE MOUNTPOINT
xvda    202:0    0  10G  0 disk
└─xvda1 202:1    0  10G  0 part /
xvdb    202:16   0  10G  0 disk   &amp;lt;&amp;lt;&lt;span class="nt"&gt;-----------------&lt;/span&gt; This one!
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;ol&gt;
&lt;li&gt;Once the instance and its data disk are available, replace the Aerospike configuration file under /etc/aerospike/aerospike.conf with the configuration file listed below, also replacing the following lines:

&lt;ul&gt;
&lt;li&gt;Under &lt;code&gt;heartbeat.address&lt;/code&gt; add in your internal 172.x.x.x address&lt;/li&gt;
&lt;li&gt;For &lt;code&gt;xdr.dc.node-address-port&lt;/code&gt; enter the {kafka-client-machine-address}:8080&lt;/li&gt;
&lt;/ul&gt;
&lt;/li&gt;
&lt;/ol&gt;

&lt;h2&gt;
  
  
  Aerospike Database configuration file for use with &lt;code&gt;systemd&lt;/code&gt;
&lt;/h2&gt;



&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight plaintext"&gt;&lt;code&gt;service {
  # paxos-single-replica-limit 1 # Number of nodes where the replica count is automatically
  proto-fd-max 15000
  service-threads 10
  feature-key-file /etc/aerospike/features.conf
  node-id A1
  cluster-name CLA
}

logging {
  file /var/log/aerospike/aerospike.log {
    context any info
  }
}

# public and private addresses
network {
  service {
    address any
    port 3000
  }

  heartbeat {
    mode mesh
    address 172.31.94.201
    port 3002 # Heartbeat port for this node.
    interval 150 # controls how often to send a heartbeat packet
    timeout 10 # number of intervals after which a node is considered to be missing
  }

  fabric {
    port 3001
  }

  info {
    port 3003
  }
}

namespace test {
  replication-factor 2
  memory-size 40G
  default-ttl 0
  index-type shmem
  high-water-disk-pct 50
  high-water-memory-pct 60
  stop-writes-pct 90
  nsup-period 0

  storage-engine device {
    device /dev/xvdb
    data-in-memory false
    write-block-size 128K
    min-avail-pct 5
  }
}

xdr {
  # Change notification XDR block that round-robins between two connector nodes
  dc aerospike-kafka-source {
    connector true
    node-address-port 172.31.58.190 8080
    namespace test {
    }
  }
}
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;h2&gt;
  
  
  Start the Aerospike service
&lt;/h2&gt;

&lt;ol&gt;
&lt;li&gt;Copy the license feature file to the aerospike configuration directory.
&lt;/li&gt;
&lt;/ol&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight shell"&gt;&lt;code&gt;&lt;span class="nb"&gt;sudo cp &lt;/span&gt;features.conf /etc/aerospike/
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;ol&gt;
&lt;li&gt;Start the Aerospike server and check the logs to ensure there are no errors.
&lt;/li&gt;
&lt;/ol&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight shell"&gt;&lt;code&gt;&lt;span class="nb"&gt;sudo &lt;/span&gt;systemctl start aerospike
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;





&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight shell"&gt;&lt;code&gt;&lt;span class="nb"&gt;sudo &lt;/span&gt;systemctl status aerospike
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;h2&gt;
  
  
  Aerospike Kafka Source Connector
&lt;/h2&gt;

&lt;p&gt;The seamless flow of data from Aerospike Database Enterprise Edition to Apache Kafka hinges on the utilization of the Aerospike Kafka source (outbound) connector. This connector subscribes to change notifications. Upon receiving these notifications, the connector converts them into messages, which are dispatched to Kafka topics. Going back to the ec2 instance you created earlier with our Kafka client configured, go ahead and &lt;a href="https://docs.aerospike.com/connect/kafka/from-asdb/installing#installing-on-linux" rel="noopener noreferrer"&gt;install&lt;/a&gt; the &lt;a href="https://aerospike.com/resources/solution-brief/kafka-connector-data-streams/" rel="noopener noreferrer"&gt;Aerospike Kafka Source Connector&lt;/a&gt;. This is your outbound connector to send data from the Aerospike to MSK.&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight shell"&gt;&lt;code&gt;&lt;span class="nb"&gt;sudo &lt;/span&gt;yum &lt;span class="nb"&gt;install &lt;/span&gt;java &lt;span class="c"&gt;#( install 11+ JDK )&lt;/span&gt;
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;





&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight shell"&gt;&lt;code&gt;wget https://enterprise.aerospike.com/artifacts/enterprise/aerospike-kafka-outbound/5.0.1/aerospike-kafka-outbound-5.0.1-1.noarch.rpm
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;





&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight shell"&gt;&lt;code&gt;&lt;span class="nb"&gt;sudo &lt;/span&gt;rpm &lt;span class="nt"&gt;-i&lt;/span&gt; aerospike-kafka-outbound-5.0.0-1.noarch.rpm
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;h2&gt;
  
  
  Configure the outbound connector
&lt;/h2&gt;

&lt;p&gt;The terms “outbound” and “source connector” are used interchangeably in this article.&lt;/p&gt;

&lt;ol&gt;
&lt;li&gt;&lt;p&gt;Locate the following file on the Kafka client box: &lt;code&gt;/etc/aerospike-kafka-outbound/aerospike-kafka-outbound.yml&lt;/code&gt;. &lt;/p&gt;&lt;/li&gt;
&lt;li&gt;&lt;p&gt;Replace the broker address for one of the node addresses in the MSK Kafka cluster &lt;code&gt;producer-props.bootstrap.servers&lt;/code&gt;.&lt;/p&gt;&lt;/li&gt;
&lt;li&gt;&lt;p&gt;Then add the following contents to the file with the changes that have been outlined.&lt;br&gt;
&lt;/p&gt;&lt;/li&gt;
&lt;/ol&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight plaintext"&gt;&lt;code&gt;# Change the configuration for your use case.
#
# Refer to https://www.aerospike.com/docs/connectors/enterprise/kafka/outbound/configuration/index.html
# for details.

# The connector's listening ports, TLS, and network interface.
service:
  port: 8080

# Format of the Kafka destination message.
format:
  mode: flat-json
  metadata-key: metadata

# Aerospike record routing to a Kafka destination.
routing:
  mode: static
  destination: aerospike

# Kafka producer initialization properties.
producer-props:
  bootstrap.servers:
    - b-3.msktutorialcluster.450050.c11.kafka.us-east-1.amazonaws.com:9098
  ssl.truststore.location: /etc/aerospike-kafka-outbound/kafka.client.truststore.jks
  ssl.truststore.password: changeit
  security.protocol: SASL_SSL
  sasl.mechanism: AWS_MSK_IAM
  sasl.jaas.config: software.amazon.msk.auth.iam.IAMLoginModule required awsProfileName=default;
  sasl.client.callback.handler.class: software.amazon.msk.auth.iam.IAMClientCallbackHandler

# The logging properties.
logging:
  file: /var/log/aerospike-kafka-outbound/aerospike-kafka-outbound.log
  enable-console-logging: true
  levels:
    root: debug
    record-parser: debug
    server: debug
    com.aerospike.connect: debug
  ticker-interval: 3600
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;ol&gt;
&lt;li&gt;Create the CA certificate trust store for use in the Kafka Outbound Connector config. You can see the SSL trust store location referenced in the file above as &lt;code&gt;ssl.truststore.location&lt;/code&gt;.
&lt;/li&gt;
&lt;/ol&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight shell"&gt;&lt;code&gt;&lt;span class="nb"&gt;sudo cp&lt;/span&gt; /usr/lib/jvm/java-11-amazon-corretto/lib/security/cacerts /etc/aerospike-kafka-outbound/kafka.client.truststore.jks
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;





&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight shell"&gt;&lt;code&gt;&lt;span class="nb"&gt;sudo chmod &lt;/span&gt;755 /etc/aerospike-kafka-outbound/kafka.client.truststore.jks
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;ol&gt;
&lt;li&gt;Finally, make the AWS IAM Kafka Auth Jar file available to the Aerospike Outbound Kafka Connector. This is the same jar file that you downloaded and added to the kafka/libs folder.
&lt;/li&gt;
&lt;/ol&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight shell"&gt;&lt;code&gt;&lt;span class="nb"&gt;sudo cp &lt;/span&gt;kafka_2.12-2.8.1/libs/aws-msk-iam-auth-1.1.1-all.jar /opt/aerospike-kafka-outbound/lib/aws-msk-iam-auth-1.1.1-all.jar
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;ol&gt;
&lt;li&gt;Start the service.
&lt;/li&gt;
&lt;/ol&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight shell"&gt;&lt;code&gt;&lt;span class="nb"&gt;sudo &lt;/span&gt;systemctl &lt;span class="nb"&gt;enable &lt;/span&gt;aerospike-kafka-outbound
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;





&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight shell"&gt;&lt;code&gt;&lt;span class="nb"&gt;sudo &lt;/span&gt;systemctl start aerospike-kafka-outbound
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;h2&gt;
  
  
  Send data from Aerospike to Kafka
&lt;/h2&gt;

&lt;ol&gt;
&lt;li&gt;Open a separate window so you can list all messages on the Aerospike Kafka topic. Start by adding one of the private endpoint bootstrap servers as an environment variable for ease of use.
&lt;/li&gt;
&lt;/ol&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight shell"&gt;&lt;code&gt;&lt;span class="nb"&gt;export &lt;/span&gt;&lt;span class="nv"&gt;BootstrapServerString&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;&lt;span class="s2"&gt;"b-3.msktutorialcluster.450050.c11.kafka.us-east-1.amazonaws.com:9098"&lt;/span&gt;
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;ol&gt;
&lt;li&gt;Run the consumer client as follows:
&lt;/li&gt;
&lt;/ol&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight shell"&gt;&lt;code&gt;./kafka-console-consumer.sh &lt;span class="nt"&gt;--bootstrap-server&lt;/span&gt; &lt;span class="nv"&gt;$BootstrapServerString&lt;/span&gt; &lt;span class="nt"&gt;--consumer&lt;/span&gt;.config client.properties &lt;span class="nt"&gt;--topic&lt;/span&gt; aerospike &lt;span class="nt"&gt;--from-beginning&lt;/span&gt;
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;ol&gt;
&lt;li&gt;In a new window, start &lt;a href="https://aerospike.com/docs/tools/aql" rel="noopener noreferrer"&gt;AQL&lt;/a&gt;, the Aerospike command line client which connects to your Aerospike Database.
&lt;/li&gt;
&lt;/ol&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight sql"&gt;&lt;code&gt;&lt;span class="n"&gt;aql&lt;/span&gt; &lt;span class="o"&gt;-&lt;/span&gt;&lt;span class="n"&gt;U&lt;/span&gt; &lt;span class="n"&gt;auser&lt;/span&gt; &lt;span class="o"&gt;-&lt;/span&gt;&lt;span class="n"&gt;P&lt;/span&gt; &lt;span class="n"&gt;a&lt;/span&gt;&lt;span class="o"&gt;-&lt;/span&gt;&lt;span class="n"&gt;secret&lt;/span&gt;&lt;span class="o"&gt;-&lt;/span&gt;&lt;span class="n"&gt;pwd&lt;/span&gt;
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;ol&gt;
&lt;li&gt;Insert some data
&lt;/li&gt;
&lt;/ol&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight sql"&gt;&lt;code&gt;&lt;span class="k"&gt;insert&lt;/span&gt; &lt;span class="k"&gt;into&lt;/span&gt; &lt;span class="n"&gt;test&lt;/span&gt; &lt;span class="p"&gt;(&lt;/span&gt;&lt;span class="n"&gt;pk&lt;/span&gt;&lt;span class="p"&gt;,&lt;/span&gt; &lt;span class="n"&gt;a&lt;/span&gt;&lt;span class="p"&gt;)&lt;/span&gt; &lt;span class="k"&gt;values&lt;/span&gt;&lt;span class="p"&gt;(&lt;/span&gt;&lt;span class="mi"&gt;400&lt;/span&gt;&lt;span class="p"&gt;,&lt;/span&gt; &lt;span class="nv"&gt;"Your winning lottery ticket awaits you"&lt;/span&gt;&lt;span class="p"&gt;)&lt;/span&gt;
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;ol&gt;
&lt;li&gt;Check to see if the message appears in the Kafka consumer window
&lt;/li&gt;
&lt;/ol&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight plaintext"&gt;&lt;code&gt;{"metadata":{"namespace":"test","userKey":400,"digest":"W7eGav2hKfOU00xx7mnOPYa2uCo=","msg":"write","gen":1,"lut":1681488437767,"exp":0},"a":"Your winning lottery ticket awaits you"}
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;h2&gt;
  
  
  Conclusion
&lt;/h2&gt;

&lt;p&gt;You've just discovered how straightforward it is to transmit data from Aerospike to AWS MSK Kafka while ensuring client authentication through AWS IAM permissions! From establishing an Aerospike Database from scratch to configuring the AWS MSK Kafka cluster and employing the Aerospike Outbound Kafka Connector, you've effortlessly constructed a real-time streaming data pipeline. Congratulations on this accomplishment!&lt;/p&gt;

&lt;p&gt;Share your experience! Your feedback is important to us. &lt;a href="https://discord.com/invite/NfC93wJEJU" rel="noopener noreferrer"&gt;Join our Aerospike community&lt;/a&gt;! &lt;/p&gt;

</description>
      <category>aerospike</category>
      <category>iam</category>
      <category>kafka</category>
      <category>java</category>
    </item>
    <item>
      <title>Aerospike Kubernetes - Talking Cluster to Cluster using XDR-Proxy</title>
      <dc:creator>Naresh Maharaj</dc:creator>
      <pubDate>Thu, 19 Jan 2023 09:18:49 +0000</pubDate>
      <link>https://dev.to/naresh_maharaj_c4b8fbd4aa/aerospike-kubernetes-talking-cluster-to-cluster-using-xdr-proxy-1dfn</link>
      <guid>https://dev.to/naresh_maharaj_c4b8fbd4aa/aerospike-kubernetes-talking-cluster-to-cluster-using-xdr-proxy-1dfn</guid>
      <description>&lt;p&gt;&lt;a href="https://media2.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2Ffzu1gvv2znbqlymhc4y2.png" class="article-body-image-wrapper"&gt;&lt;img src="https://media2.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2Ffzu1gvv2znbqlymhc4y2.png" width="751" height="441"&gt;&lt;/a&gt;&lt;/p&gt;

&lt;p&gt;In this article we focus on establishing connectivity between 2 Aerospike clusters. The goal is to use Aerospike's Cross Data Center Replication feature ( XDR ) to seamlessly send data from a source cluster to a destination cluster. The source cluster needs network visibility of all Aerospike service ports in the remote cluster, and this can present problems, particularly in a Kubernetes environment. Placing a proxy server in front of the private Kubernetes destination cluster can overcome this problem and achieve the desired goal. To demonstrate the solution we start by installing the Kubernetes Operator that will schedule our source and destination databases. In this example, we set up our replication in one direction. Aerospike is capable of supporting 'master/master' replication and provides a conflict resolution &lt;a href="https://docs.aerospike.com/server/architecture/xdr#bin-convergence-in-mesh-topology" rel="noopener noreferrer"&gt;mechanism&lt;/a&gt; in the event of update clashes. This too could be supported using the XDR proxy.&lt;/p&gt;

&lt;h2&gt;
  
  
  Aerospike Kubernetes Operator
&lt;/h2&gt;

&lt;p&gt;The following Kubernetes nodes have been created using EKS. You can display the following private and public IP addresses from listing the nodes with the kubectl command.&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight shell"&gt;&lt;code&gt;kubectl get nodes &lt;span class="nt"&gt;-o&lt;/span&gt; wide
NAME                             STATUS   ROLES    AGE     VERSION                INTERNAL-IP      EXTERNAL-IP      OS-IMAGE         KERNEL-VERSION                 CONTAINER-RUNTIME
ip-192-168-11-132.ec2.internal   Ready    &amp;lt;none&amp;gt;   2m53s   v1.22.15-eks-fb459a0   192.168.11.132   44.201.67.177    Amazon Linux 2   5.4.219-126.411.amzn2.x86_64   docker://20.10.17
ip-192-168-31-131.ec2.internal   Ready    &amp;lt;none&amp;gt;   2m52s   v1.22.15-eks-fb459a0   192.168.31.131   44.192.83.79     Amazon Linux 2   5.4.219-126.411.amzn2.x86_64   docker://20.10.17
ip-192-168-41-140.ec2.internal   Ready    &amp;lt;none&amp;gt;   2m51s   v1.22.15-eks-fb459a0   192.168.41.140   18.208.222.35    Amazon Linux 2   5.4.219-126.411.amzn2.x86_64   docker://20.10.17
ip-192-168-41-63.ec2.internal    Ready    &amp;lt;none&amp;gt;   2m51s   v1.22.15-eks-fb459a0   192.168.41.63    54.173.138.131   Amazon Linux 2   5.4.219-126.411.amzn2.x86_64   docker://20.10.17
ip-192-168-59-220.ec2.internal   Ready    &amp;lt;none&amp;gt;   2m52s   v1.22.15-eks-fb459a0   192.168.59.220   54.227.122.222   Amazon Linux 2   5.4.219-126.411.amzn2.x86_64   docker://20.10.17
ip-192-168-6-124.ec2.internal    Ready    &amp;lt;none&amp;gt;   2m51s   v1.22.15-eks-fb459a0   192.168.6.124    35.174.60.1      Amazon Linux 2   5.4.219-126.411.amzn2.x86_64   docker://20.10.1
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;p&gt;Start by getting a copy of the Aerospike git repo for the Kubernetes Operator.&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight shell"&gt;&lt;code&gt;git clone https://github.com/aerospike/aerospike-kubernetes-operator.git
&lt;span class="nb"&gt;cp &lt;/span&gt;features.conf aerospike-kubernetes-operator/config/samples/secrets/.
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;h3&gt;
  
  
  Setup
&lt;/h3&gt;

&lt;p&gt;Run the following commands in the order specified. Wait for the csv "Succeeded phase" to show up after running this line. Initially it might take between 30 seconds and a minute to show up.&lt;br&gt;
&lt;em&gt;kubectl get csv -n operators -w&lt;/em&gt;&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight shell"&gt;&lt;code&gt;&lt;span class="nb"&gt;cd &lt;/span&gt;aerospike-kubernetes-operator/
kubectl apply &lt;span class="nt"&gt;-f&lt;/span&gt; config/samples/storage/eks_ssd_storage_class.yaml
curl &lt;span class="nt"&gt;-sL&lt;/span&gt; https://github.com/operator-framework/operator-lifecycle-manager/releases/download/v0.22.0/install.sh | bash &lt;span class="nt"&gt;-s&lt;/span&gt; v0.22.0
kubectl create &lt;span class="nt"&gt;-f&lt;/span&gt; https://operatorhub.io/install/aerospike-kubernetes-operator.yaml
kubectl get csv &lt;span class="nt"&gt;-n&lt;/span&gt; operators &lt;span class="nt"&gt;-w&lt;/span&gt;
cd ..
git clone https://github.com/nareshmaharaj-consultant/kubernetes-anything
&lt;span class="nb"&gt;cd &lt;/span&gt;kubernetes-anything
./createNamespace.sh aerospike
&lt;span class="nb"&gt;cd&lt;/span&gt; ../aerospike-kubernetes-operator/
kubectl &lt;span class="nt"&gt;-n&lt;/span&gt; aerospike create secret generic aerospike-secret &lt;span class="nt"&gt;--from-file&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;config/samples/secrets
kubectl &lt;span class="nt"&gt;-n&lt;/span&gt; aerospike create secret generic auth-secret &lt;span class="nt"&gt;--from-literal&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;&lt;span class="nv"&gt;password&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;&lt;span class="s1"&gt;'admin123'&lt;/span&gt;
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;h2&gt;
  
  
  Destination Cluster
&lt;/h2&gt;

&lt;p&gt;Use the following yaml configuration file for our &lt;strong&gt;destination&lt;/strong&gt; cluster. Save the file and name it ssd1_xdr_dest_6.1_cluster_cr.yaml:&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight yaml"&gt;&lt;code&gt;&lt;span class="na"&gt;apiVersion&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="s"&gt;asdb.aerospike.com/v1beta1&lt;/span&gt;
&lt;span class="na"&gt;kind&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="s"&gt;AerospikeCluster&lt;/span&gt;
&lt;span class="na"&gt;metadata&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt;
  &lt;span class="na"&gt;name&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="s"&gt;aerocluster-dest-xdr&lt;/span&gt;
  &lt;span class="na"&gt;namespace&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="s"&gt;aerospike&lt;/span&gt;

&lt;span class="na"&gt;spec&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt;
  &lt;span class="na"&gt;size&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="m"&gt;1&lt;/span&gt;
  &lt;span class="na"&gt;image&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="s"&gt;aerospike/aerospike-server-enterprise:6.1.0.2&lt;/span&gt;

  &lt;span class="na"&gt;storage&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt;
    &lt;span class="na"&gt;filesystemVolumePolicy&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt;
      &lt;span class="na"&gt;initMethod&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="s"&gt;deleteFiles&lt;/span&gt;
      &lt;span class="na"&gt;cascadeDelete&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="kc"&gt;true&lt;/span&gt;
    &lt;span class="na"&gt;blockVolumePolicy&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt;
      &lt;span class="na"&gt;cascadeDelete&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="kc"&gt;true&lt;/span&gt;
    &lt;span class="na"&gt;volumes&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt;
      &lt;span class="pi"&gt;-&lt;/span&gt; &lt;span class="na"&gt;name&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="s"&gt;workdir&lt;/span&gt;
        &lt;span class="na"&gt;aerospike&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt;
          &lt;span class="na"&gt;path&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="s"&gt;/opt/aerospike&lt;/span&gt;
        &lt;span class="na"&gt;source&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt;
          &lt;span class="na"&gt;persistentVolume&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt;
            &lt;span class="na"&gt;storageClass&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="s"&gt;ssd&lt;/span&gt;
            &lt;span class="na"&gt;volumeMode&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="s"&gt;Filesystem&lt;/span&gt;
            &lt;span class="na"&gt;size&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="s"&gt;1Gi&lt;/span&gt;
      &lt;span class="pi"&gt;-&lt;/span&gt; &lt;span class="na"&gt;name&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="s"&gt;ns&lt;/span&gt;
        &lt;span class="na"&gt;aerospike&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt;
          &lt;span class="na"&gt;path&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="s"&gt;/opt/aerospike/data/&lt;/span&gt;
        &lt;span class="na"&gt;source&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt;
          &lt;span class="na"&gt;persistentVolume&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt;
            &lt;span class="na"&gt;storageClass&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="s"&gt;ssd&lt;/span&gt;
            &lt;span class="na"&gt;volumeMode&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="s"&gt;Filesystem&lt;/span&gt;
            &lt;span class="na"&gt;size&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="s"&gt;1Gi&lt;/span&gt;
      &lt;span class="pi"&gt;-&lt;/span&gt; &lt;span class="na"&gt;name&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="s"&gt;aerospike-config-secret&lt;/span&gt;
        &lt;span class="na"&gt;source&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt;
          &lt;span class="na"&gt;secret&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt;
            &lt;span class="na"&gt;secretName&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="s"&gt;aerospike-secret&lt;/span&gt;
        &lt;span class="na"&gt;aerospike&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt;
          &lt;span class="na"&gt;path&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="s"&gt;/etc/aerospike/secret&lt;/span&gt;

  &lt;span class="na"&gt;podSpec&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt;
    &lt;span class="na"&gt;multiPodPerHost&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="kc"&gt;true&lt;/span&gt;

  &lt;span class="na"&gt;aerospikeAccessControl&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt;
    &lt;span class="na"&gt;roles&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt;
      &lt;span class="pi"&gt;-&lt;/span&gt; &lt;span class="na"&gt;name&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="s"&gt;writer&lt;/span&gt;
        &lt;span class="na"&gt;privileges&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt;
        &lt;span class="pi"&gt;-&lt;/span&gt; &lt;span class="s"&gt;read-write&lt;/span&gt;
      &lt;span class="pi"&gt;-&lt;/span&gt; &lt;span class="na"&gt;name&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="s"&gt;reader&lt;/span&gt;
        &lt;span class="na"&gt;privileges&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt;
        &lt;span class="pi"&gt;-&lt;/span&gt; &lt;span class="s"&gt;read&lt;/span&gt;
    &lt;span class="na"&gt;users&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt;
      &lt;span class="pi"&gt;-&lt;/span&gt; &lt;span class="na"&gt;name&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="s"&gt;admin&lt;/span&gt;
        &lt;span class="na"&gt;secretName&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="s"&gt;auth-secret&lt;/span&gt;
        &lt;span class="na"&gt;roles&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt;
          &lt;span class="pi"&gt;-&lt;/span&gt; &lt;span class="s"&gt;sys-admin&lt;/span&gt;
          &lt;span class="pi"&gt;-&lt;/span&gt; &lt;span class="s"&gt;user-admin&lt;/span&gt;
          &lt;span class="pi"&gt;-&lt;/span&gt; &lt;span class="s"&gt;read-write&lt;/span&gt;
      &lt;span class="pi"&gt;-&lt;/span&gt; &lt;span class="na"&gt;name&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="s"&gt;xdr-writer&lt;/span&gt;
        &lt;span class="na"&gt;secretName&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="s"&gt;xdr-user-auth-secret&lt;/span&gt;
        &lt;span class="na"&gt;roles&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt;
          &lt;span class="pi"&gt;-&lt;/span&gt; &lt;span class="s"&gt;writer&lt;/span&gt;

  &lt;span class="na"&gt;aerospikeConfig&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt;
    &lt;span class="na"&gt;service&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt;
      &lt;span class="na"&gt;feature-key-file&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="s"&gt;/etc/aerospike/secret/features.conf&lt;/span&gt;
    &lt;span class="na"&gt;security&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="pi"&gt;{}&lt;/span&gt;
    &lt;span class="na"&gt;network&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt;
      &lt;span class="na"&gt;service&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt;
        &lt;span class="na"&gt;port&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="m"&gt;3000&lt;/span&gt;
      &lt;span class="na"&gt;fabric&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt;
        &lt;span class="na"&gt;port&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="m"&gt;3001&lt;/span&gt;
      &lt;span class="na"&gt;heartbeat&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt;
        &lt;span class="na"&gt;port&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="m"&gt;3002&lt;/span&gt;
    &lt;span class="na"&gt;namespaces&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt;
      &lt;span class="pi"&gt;-&lt;/span&gt; &lt;span class="na"&gt;name&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="s"&gt;test&lt;/span&gt;
        &lt;span class="na"&gt;memory-size&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="m"&gt;134217728&lt;/span&gt;
        &lt;span class="na"&gt;replication-factor&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="m"&gt;1&lt;/span&gt;
        &lt;span class="na"&gt;storage-engine&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt;
          &lt;span class="na"&gt;type&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="s"&gt;device&lt;/span&gt;
          &lt;span class="na"&gt;files&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt;
            &lt;span class="pi"&gt;-&lt;/span&gt; &lt;span class="s"&gt;/opt/aerospike/data/test.dat&lt;/span&gt;
          &lt;span class="na"&gt;filesize&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="m"&gt;1073741824&lt;/span&gt;
          &lt;span class="na"&gt;data-in-memory&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="kc"&gt;true&lt;/span&gt;
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;p&gt;Create the following Kubernetes resources for our Aerospike destination cluster:&lt;/p&gt;

&lt;ul&gt;
&lt;li&gt;xdr destination database user login credentials, as a Kubernetes secret&lt;/li&gt;
&lt;li&gt;destination database cluster using our yaml file named ssd1_xdr_dest_6.1_cluster_cr.yaml
&lt;/li&gt;
&lt;/ul&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight shell"&gt;&lt;code&gt;&lt;span class="nb"&gt;export &lt;/span&gt;&lt;span class="nv"&gt;secret_auth_name&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;xdr-user-auth-secret
&lt;span class="nb"&gt;export &lt;/span&gt;&lt;span class="nv"&gt;password_secret&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;admin123
kubectl &lt;span class="nt"&gt;-n&lt;/span&gt; aerospike create secret generic &lt;span class="nv"&gt;$secret_auth_name&lt;/span&gt; &lt;span class="nt"&gt;--from-literal&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;&lt;span class="nv"&gt;password&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;&lt;span class="nv"&gt;$password_secret&lt;/span&gt;
kubectl create &lt;span class="nt"&gt;-f&lt;/span&gt; ssd1_xdr_dest_6.1_cluster_cr.yaml
kubectl &lt;span class="nt"&gt;-n&lt;/span&gt; aerospike get po &lt;span class="nt"&gt;-w&lt;/span&gt;
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;p&gt;You should see the database pods running successfully.&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight shell"&gt;&lt;code&gt;kubectl get po &lt;span class="nt"&gt;-n&lt;/span&gt; aerospike &lt;span class="nt"&gt;-w&lt;/span&gt;
NAME                       READY   STATUS     RESTARTS   AGE
aerocluster-dest-xdr-0-0   0/1     Init:0/1   0          13s
aerocluster-dest-xdr-0-0   0/1     Init:0/1   0          18s
aerocluster-dest-xdr-0-0   0/1     PodInitializing   0          19s
aerocluster-dest-xdr-0-0   1/1     Running           0          24s
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;h2&gt;
  
  
  XDR-Proxy
&lt;/h2&gt;

&lt;p&gt;Next we set up the &lt;strong&gt;&lt;em&gt;xdr-proxy&lt;/em&gt;&lt;/strong&gt;. If we look back at the main title image above, you will notice that we are working from the RIGHT hand side to the LEFT hand side in that order.&lt;/p&gt;

&lt;h3&gt;
  
  
  Configuration
&lt;/h3&gt;

&lt;p&gt;Create the following &lt;strong&gt;&lt;em&gt;xdr-proxy&lt;/em&gt;&lt;/strong&gt; configuration file. Replace the seeds address with the fully qualified domain name (FQDN) of the destination database pod(s) you created earlier. Multiple seed addresses may be added (recommended in production).&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight yaml"&gt;&lt;code&gt;&lt;span class="s"&gt;cd ..&lt;/span&gt;
&lt;span class="s"&gt;mkdir -p xdr-cfg/etc/auth&lt;/span&gt;
&lt;span class="s"&gt;cd xdr-cfg/etc/&lt;/span&gt;

&lt;span class="s"&gt;cat &amp;lt;&amp;lt;EOF&amp;gt; aerospike-xdr-proxy.yml&lt;/span&gt;
&lt;span class="c1"&gt;# Change the configuration for your use case.&lt;/span&gt;
&lt;span class="c1"&gt;# Naresh Maharaj&lt;/span&gt;
&lt;span class="c1"&gt;# Refer to https://www.aerospike.com/docs/connectors/enterprise/xdr-proxy/configuration/index.html&lt;/span&gt;
&lt;span class="c1"&gt;# for details.&lt;/span&gt;

&lt;span class="c1"&gt;# The connector's listening ports, manage service, TLS and network interface.&lt;/span&gt;
&lt;span class="na"&gt;service&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt;
  &lt;span class="na"&gt;port&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="m"&gt;8901&lt;/span&gt;
  &lt;span class="c1"&gt;# Aerospike Enterprise Server &amp;gt;= 5.0&lt;/span&gt;
  &lt;span class="na"&gt;manage&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt;
    &lt;span class="na"&gt;address&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="s"&gt;0.0.0.0&lt;/span&gt;
    &lt;span class="na"&gt;port&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="m"&gt;8902&lt;/span&gt;

&lt;span class="c1"&gt;# The destination aerospike cluster.&lt;/span&gt;
&lt;span class="na"&gt;aerospike&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt;
  &lt;span class="na"&gt;seeds&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt;
    &lt;span class="pi"&gt;-&lt;/span&gt; &lt;span class="na"&gt;aerocluster-dest-xdr-0-0.aerospike.svc.cluster.local&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt;
        &lt;span class="na"&gt;port&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="m"&gt;3000&lt;/span&gt;
  &lt;span class="na"&gt;credentials&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt;
    &lt;span class="na"&gt;username&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="s"&gt;xdr-writer&lt;/span&gt;
    &lt;span class="na"&gt;password-file&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="s"&gt;/etc/aerospike-xdr-proxy/auth/password_DC1.txt&lt;/span&gt;
    &lt;span class="na"&gt;auth-mode&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="s"&gt;internal&lt;/span&gt;

&lt;span class="c1"&gt;# The logging config&lt;/span&gt;
&lt;span class="na"&gt;logging&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt;
  &lt;span class="na"&gt;enable-console-logging&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="kc"&gt;true&lt;/span&gt;
  &lt;span class="na"&gt;file&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="s"&gt;/var/log/aerospike-xdr-proxy/aerospike-xdr-proxy.log&lt;/span&gt;
  &lt;span class="na"&gt;levels&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt;
    &lt;span class="na"&gt;root&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="s"&gt;debug&lt;/span&gt;
    &lt;span class="na"&gt;record-parser&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="s"&gt;debug&lt;/span&gt;
    &lt;span class="na"&gt;server&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="s"&gt;debug&lt;/span&gt;
    &lt;span class="na"&gt;com.aerospike.connect&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="s"&gt;debug&lt;/span&gt;
  &lt;span class="c1"&gt;# Ticker log interval in seconds&lt;/span&gt;
  &lt;span class="na"&gt;ticker-interval&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="m"&gt;3600&lt;/span&gt;
&lt;span class="s"&gt;EOF&lt;/span&gt;

&lt;span class="s"&gt;sudo tee auth/password_DC1.txt &amp;lt;&amp;lt;EOF&lt;/span&gt;
&lt;span class="s"&gt;admin123&lt;/span&gt;
&lt;span class="s"&gt;EOF&lt;/span&gt;
&lt;span class="s"&gt;cd ..&lt;/span&gt;

&lt;span class="s"&gt;kubectl -n aerospike create configmap xdr-proxy-cfg --from-file=etc/&lt;/span&gt;
&lt;span class="s"&gt;kubectl -n aerospike create secret generic xdr-proxy-auth-secret --from-file=etc/auth&lt;/span&gt;
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;h3&gt;
  
  
  Deployment
&lt;/h3&gt;

&lt;p&gt;Now that we have our xdr-proxy config file created, we can produce the &lt;strong&gt;&lt;em&gt;kubernetes deployment&lt;/em&gt;&lt;/strong&gt; yaml file for the xdr-proxy itself. The following yaml file is used to deploy our xdr-proxy ideally in the same data centre or location where the destination databases will be hosted.&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight yaml"&gt;&lt;code&gt;&lt;span class="s"&gt;cat &amp;lt;&amp;lt;EOF&amp;gt; xdr-proxy-deployment.yaml&lt;/span&gt;
&lt;span class="na"&gt;apiVersion&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="s"&gt;apps/v1&lt;/span&gt;
&lt;span class="na"&gt;kind&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="s"&gt;Deployment&lt;/span&gt;
&lt;span class="na"&gt;metadata&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt;
  &lt;span class="na"&gt;name&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="s"&gt;xdr-proxy&lt;/span&gt;
  &lt;span class="na"&gt;namespace&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="s"&gt;aerospike&lt;/span&gt;
  &lt;span class="na"&gt;labels&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt;
    &lt;span class="na"&gt;app&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="s"&gt;xdr-proxy&lt;/span&gt;
&lt;span class="na"&gt;spec&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt;
  &lt;span class="na"&gt;replicas&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="m"&gt;2&lt;/span&gt;
  &lt;span class="na"&gt;selector&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt;
    &lt;span class="na"&gt;matchLabels&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt;
      &lt;span class="na"&gt;app&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="s"&gt;xdr-proxy&lt;/span&gt;
  &lt;span class="na"&gt;template&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt;
    &lt;span class="na"&gt;metadata&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt;
      &lt;span class="na"&gt;labels&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt;
        &lt;span class="na"&gt;app&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="s"&gt;xdr-proxy&lt;/span&gt;
    &lt;span class="na"&gt;spec&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt;
      &lt;span class="na"&gt;containers&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt;
      &lt;span class="pi"&gt;-&lt;/span&gt; &lt;span class="na"&gt;name&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="s"&gt;xdr-proxy&lt;/span&gt;
        &lt;span class="na"&gt;image&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="s"&gt;aerospike/aerospike-xdr-proxy:2.1.0&lt;/span&gt;
        &lt;span class="na"&gt;volumeMounts&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt;
        &lt;span class="pi"&gt;-&lt;/span&gt; &lt;span class="na"&gt;name&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="s"&gt;xdr-proxy-dir&lt;/span&gt;
          &lt;span class="na"&gt;mountPath&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="s2"&gt;"&lt;/span&gt;&lt;span class="s"&gt;/etc/aerospike-xdr-proxy/"&lt;/span&gt;
          &lt;span class="na"&gt;readOnly&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="kc"&gt;true&lt;/span&gt;
        &lt;span class="pi"&gt;-&lt;/span&gt; &lt;span class="na"&gt;name&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="s"&gt;xdr-auth-dir&lt;/span&gt;
          &lt;span class="na"&gt;mountPath&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="s2"&gt;"&lt;/span&gt;&lt;span class="s"&gt;/etc/aerospike-xdr-proxy/auth"&lt;/span&gt;
          &lt;span class="na"&gt;readOnly&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="kc"&gt;true&lt;/span&gt;
        &lt;span class="na"&gt;ports&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt;
          &lt;span class="pi"&gt;-&lt;/span&gt; &lt;span class="na"&gt;name&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="s"&gt;xdr-proxy-main&lt;/span&gt;
            &lt;span class="na"&gt;containerPort&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="m"&gt;8901&lt;/span&gt;
          &lt;span class="pi"&gt;-&lt;/span&gt; &lt;span class="na"&gt;name&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="s"&gt;xdr-proxy-mng&lt;/span&gt;
            &lt;span class="na"&gt;containerPort&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="m"&gt;8902&lt;/span&gt;
      &lt;span class="na"&gt;volumes&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt;
      &lt;span class="pi"&gt;-&lt;/span&gt; &lt;span class="na"&gt;name&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="s"&gt;xdr-proxy-dir&lt;/span&gt;
        &lt;span class="na"&gt;configMap&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt;
          &lt;span class="na"&gt;name&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="s"&gt;xdr-proxy-cfg&lt;/span&gt;
          &lt;span class="na"&gt;optional&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="kc"&gt;false&lt;/span&gt;
      &lt;span class="pi"&gt;-&lt;/span&gt; &lt;span class="na"&gt;name&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="s"&gt;xdr-auth-dir&lt;/span&gt;
        &lt;span class="na"&gt;secret&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt;
          &lt;span class="na"&gt;secretName&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="s"&gt;xdr-proxy-auth-secret&lt;/span&gt;
          &lt;span class="na"&gt;optional&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="kc"&gt;false&lt;/span&gt;
&lt;span class="nn"&gt;---&lt;/span&gt;
&lt;span class="na"&gt;apiVersion&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="s"&gt;v1&lt;/span&gt;
&lt;span class="na"&gt;kind&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="s"&gt;Service&lt;/span&gt;
&lt;span class="na"&gt;metadata&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt;
  &lt;span class="na"&gt;name&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="s"&gt;xdr-proxy&lt;/span&gt;
  &lt;span class="na"&gt;namespace&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="s"&gt;aerospike&lt;/span&gt;
&lt;span class="na"&gt;spec&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt;
  &lt;span class="na"&gt;selector&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt;
    &lt;span class="na"&gt;app&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="s"&gt;xdr-proxy&lt;/span&gt;
  &lt;span class="na"&gt;ports&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt;
  &lt;span class="pi"&gt;-&lt;/span&gt; &lt;span class="na"&gt;name&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="s"&gt;main&lt;/span&gt;
    &lt;span class="na"&gt;protocol&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="s"&gt;TCP&lt;/span&gt;
    &lt;span class="na"&gt;port&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="m"&gt;8901&lt;/span&gt;
    &lt;span class="na"&gt;targetPort&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="s"&gt;xdr-proxy-main&lt;/span&gt;
  &lt;span class="pi"&gt;-&lt;/span&gt; &lt;span class="na"&gt;name&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="s"&gt;manage&lt;/span&gt;
    &lt;span class="na"&gt;protocol&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="s"&gt;TCP&lt;/span&gt;
    &lt;span class="na"&gt;port&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="m"&gt;8902&lt;/span&gt;
    &lt;span class="na"&gt;targetPort&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="s"&gt;xdr-proxy-mng&lt;/span&gt;
&lt;span class="s"&gt;EOF&lt;/span&gt;

&lt;span class="s"&gt;kubectl create -f xdr-proxy-deployment.yaml&lt;/span&gt;
&lt;span class="s"&gt;kubectl get po -n aerospike -w&lt;/span&gt;
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;p&gt;The following shows the current scheduled pods. So far, so good.&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight shell"&gt;&lt;code&gt;kubectl get po &lt;span class="nt"&gt;-n&lt;/span&gt; aerospike &lt;span class="nt"&gt;-w&lt;/span&gt;
NAME                         READY   STATUS    RESTARTS   AGE
aerocluster-dest-xdr-0-0     1/1     Running   0          77m
xdr-proxy-7d9fccd6c8-g5mjt   1/1     Running   0          2m26s
xdr-proxy-7d9fccd6c8-mjxp4   1/1     Running   0          2m26s
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;h2&gt;
  
  
  Source Cluster
&lt;/h2&gt;

&lt;p&gt;Create the &lt;strong&gt;&lt;em&gt;source&lt;/em&gt;&lt;/strong&gt; Aerospike cluster using the following configuration:&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight yaml"&gt;&lt;code&gt;&lt;span class="s"&gt;cd ../aerospike-kubernetes-operator/&lt;/span&gt;

&lt;span class="s"&gt;cat &amp;lt;&amp;lt;EOF&amp;gt; ssd1_xdr_src_6.1_cluster_cr.yaml&lt;/span&gt;
&lt;span class="na"&gt;apiVersion&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="s"&gt;asdb.aerospike.com/v1beta1&lt;/span&gt;
&lt;span class="na"&gt;kind&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="s"&gt;AerospikeCluster&lt;/span&gt;
&lt;span class="na"&gt;metadata&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt;
  &lt;span class="na"&gt;name&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="s"&gt;aerocluster-source-xdr&lt;/span&gt;
  &lt;span class="na"&gt;namespace&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="s"&gt;aerospike&lt;/span&gt;

&lt;span class="na"&gt;spec&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt;
  &lt;span class="na"&gt;size&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="m"&gt;1&lt;/span&gt;
  &lt;span class="na"&gt;image&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="s"&gt;aerospike/aerospike-server-enterprise:6.1.0.2&lt;/span&gt;

  &lt;span class="na"&gt;storage&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt;
    &lt;span class="na"&gt;filesystemVolumePolicy&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt;
      &lt;span class="na"&gt;initMethod&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="s"&gt;deleteFiles&lt;/span&gt;
      &lt;span class="na"&gt;cascadeDelete&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="kc"&gt;true&lt;/span&gt;
    &lt;span class="na"&gt;blockVolumePolicy&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt;
      &lt;span class="na"&gt;cascadeDelete&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="kc"&gt;true&lt;/span&gt;
    &lt;span class="na"&gt;volumes&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt;
      &lt;span class="pi"&gt;-&lt;/span&gt; &lt;span class="na"&gt;name&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="s"&gt;workdir&lt;/span&gt;
        &lt;span class="na"&gt;aerospike&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt;
          &lt;span class="na"&gt;path&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="s"&gt;/opt/aerospike&lt;/span&gt;
        &lt;span class="na"&gt;source&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt;
          &lt;span class="na"&gt;persistentVolume&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt;
            &lt;span class="na"&gt;storageClass&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="s"&gt;ssd&lt;/span&gt;
            &lt;span class="na"&gt;volumeMode&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="s"&gt;Filesystem&lt;/span&gt;
            &lt;span class="na"&gt;size&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="s"&gt;1Gi&lt;/span&gt;
      &lt;span class="pi"&gt;-&lt;/span&gt; &lt;span class="na"&gt;name&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="s"&gt;ns&lt;/span&gt;
        &lt;span class="na"&gt;aerospike&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt;
          &lt;span class="na"&gt;path&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="s"&gt;/opt/aerospike/data/&lt;/span&gt;
        &lt;span class="na"&gt;source&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt;
          &lt;span class="na"&gt;persistentVolume&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt;
            &lt;span class="na"&gt;storageClass&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="s"&gt;ssd&lt;/span&gt;
            &lt;span class="na"&gt;volumeMode&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="s"&gt;Filesystem&lt;/span&gt;
            &lt;span class="na"&gt;size&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="s"&gt;1Gi&lt;/span&gt;
      &lt;span class="pi"&gt;-&lt;/span&gt; &lt;span class="na"&gt;name&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="s"&gt;aerospike-config-secret&lt;/span&gt;
        &lt;span class="na"&gt;source&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt;
          &lt;span class="na"&gt;secret&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt;
            &lt;span class="na"&gt;secretName&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="s"&gt;aerospike-secret&lt;/span&gt;
        &lt;span class="na"&gt;aerospike&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt;
          &lt;span class="na"&gt;path&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="s"&gt;/etc/aerospike/secret&lt;/span&gt;

  &lt;span class="na"&gt;podSpec&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt;
    &lt;span class="na"&gt;multiPodPerHost&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="kc"&gt;true&lt;/span&gt;

  &lt;span class="na"&gt;aerospikeAccessControl&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt;
    &lt;span class="na"&gt;roles&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt;
      &lt;span class="pi"&gt;-&lt;/span&gt; &lt;span class="na"&gt;name&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="s"&gt;writer&lt;/span&gt;
        &lt;span class="na"&gt;privileges&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt;
        &lt;span class="pi"&gt;-&lt;/span&gt; &lt;span class="s"&gt;read-write&lt;/span&gt;
      &lt;span class="pi"&gt;-&lt;/span&gt; &lt;span class="na"&gt;name&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="s"&gt;reader&lt;/span&gt;
        &lt;span class="na"&gt;privileges&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt;
        &lt;span class="pi"&gt;-&lt;/span&gt; &lt;span class="s"&gt;read&lt;/span&gt;
    &lt;span class="na"&gt;users&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt;
      &lt;span class="pi"&gt;-&lt;/span&gt; &lt;span class="na"&gt;name&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="s"&gt;admin&lt;/span&gt;
        &lt;span class="na"&gt;secretName&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="s"&gt;auth-secret&lt;/span&gt;
        &lt;span class="na"&gt;roles&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt;
          &lt;span class="pi"&gt;-&lt;/span&gt; &lt;span class="s"&gt;sys-admin&lt;/span&gt;
          &lt;span class="pi"&gt;-&lt;/span&gt; &lt;span class="s"&gt;user-admin&lt;/span&gt;
          &lt;span class="pi"&gt;-&lt;/span&gt; &lt;span class="s"&gt;read-write&lt;/span&gt;

  &lt;span class="na"&gt;aerospikeConfig&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt;
    &lt;span class="na"&gt;service&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt;
      &lt;span class="na"&gt;feature-key-file&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="s"&gt;/etc/aerospike/secret/features.conf&lt;/span&gt;
    &lt;span class="na"&gt;security&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="pi"&gt;{}&lt;/span&gt;
    &lt;span class="na"&gt;network&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt;
      &lt;span class="na"&gt;service&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt;
        &lt;span class="na"&gt;port&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="m"&gt;3000&lt;/span&gt;
      &lt;span class="na"&gt;fabric&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt;
        &lt;span class="na"&gt;port&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="m"&gt;3001&lt;/span&gt;
      &lt;span class="na"&gt;heartbeat&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt;
        &lt;span class="na"&gt;port&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="m"&gt;3002&lt;/span&gt;
    &lt;span class="na"&gt;xdr&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt;
      &lt;span class="na"&gt;dcs&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt;
        &lt;span class="pi"&gt;-&lt;/span&gt; &lt;span class="na"&gt;name&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="s"&gt;DC2&lt;/span&gt;
          &lt;span class="na"&gt;connector&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="kc"&gt;true&lt;/span&gt;
          &lt;span class="na"&gt;node-address-ports&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt;
            &lt;span class="pi"&gt;-&lt;/span&gt; &lt;span class="s"&gt;xdr-proxy.aerospike.svc.cluster.local &lt;/span&gt;&lt;span class="m"&gt;8901&lt;/span&gt;
          &lt;span class="na"&gt;namespaces&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt;
            &lt;span class="pi"&gt;-&lt;/span&gt; &lt;span class="na"&gt;name&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="s"&gt;test&lt;/span&gt;
    &lt;span class="na"&gt;namespaces&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt;
      &lt;span class="pi"&gt;-&lt;/span&gt; &lt;span class="na"&gt;name&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="s"&gt;test&lt;/span&gt;
        &lt;span class="na"&gt;memory-size&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="m"&gt;134217728&lt;/span&gt;
        &lt;span class="na"&gt;replication-factor&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="m"&gt;1&lt;/span&gt;
        &lt;span class="na"&gt;storage-engine&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt;
          &lt;span class="na"&gt;type&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="s"&gt;device&lt;/span&gt;
          &lt;span class="na"&gt;files&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt;
            &lt;span class="pi"&gt;-&lt;/span&gt; &lt;span class="s"&gt;/opt/aerospike/data/test.dat&lt;/span&gt;
          &lt;span class="na"&gt;filesize&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="m"&gt;1073741824&lt;/span&gt;
          &lt;span class="na"&gt;data-in-memory&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="kc"&gt;true&lt;/span&gt;
&lt;span class="s"&gt;EOF&lt;/span&gt;

&lt;span class="s"&gt;kubectl create -f ssd1_xdr_src_6.1_cluster_cr.yaml&lt;/span&gt;
&lt;span class="s"&gt;kubectl get po -n aerospike -w&lt;/span&gt;
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;p&gt;From the source cluster, confirm the xdr component has made a connection to the xdr-proxy by filtering the Kubernetes log file as shown in the following kubectl command.&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight shell"&gt;&lt;code&gt;kubectl &lt;span class="nt"&gt;-n&lt;/span&gt; aerospike logs aerocluster-source-xdr-0-0 &lt;span class="nt"&gt;-c&lt;/span&gt; aerospike-server | &lt;span class="nb"&gt;grep &lt;/span&gt;xdr | &lt;span class="nb"&gt;grep &lt;/span&gt;conn
Dec 08 2022 13:49:21 GMT: INFO &lt;span class="o"&gt;(&lt;/span&gt;xdr&lt;span class="o"&gt;)&lt;/span&gt;: &lt;span class="o"&gt;(&lt;/span&gt;dc.c:581&lt;span class="o"&gt;)&lt;/span&gt; DC DC2 connected
Oct 10 2022 13:57:53 GMT: INFO &lt;span class="o"&gt;(&lt;/span&gt;xdr&lt;span class="o"&gt;)&lt;/span&gt;: &lt;span class="o"&gt;(&lt;/span&gt;dc.c:581&lt;span class="o"&gt;)&lt;/span&gt; DC DC2 connected
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;h3&gt;
  
  
  Simple Message Test
&lt;/h3&gt;

&lt;p&gt;Add some data to the source database and confirm it is being received in the destination cluster. Start by getting the source database service address and connecting using Aerospike's command-line tool, aql.&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight shell"&gt;&lt;code&gt;kubectl get svc &lt;span class="nt"&gt;-n&lt;/span&gt; aerospike
NAME                         TYPE        CLUSTER-IP       EXTERNAL-IP   PORT&lt;span class="o"&gt;(&lt;/span&gt;S&lt;span class="o"&gt;)&lt;/span&gt;             AGE
aerocluster-dest-xdr         ClusterIP   None             &amp;lt;none&amp;gt;        3000/TCP            3h38m
aerocluster-dest-xdr-0-0     NodePort    10.100.226.179   &amp;lt;none&amp;gt;        3000:30168/TCP      3h38m
aerocluster-source-xdr       ClusterIP   None             &amp;lt;none&amp;gt;        3000/TCP            33m
aerocluster-source-xdr-0-0   NodePort    10.100.116.173   &amp;lt;none&amp;gt;        3000:31999/TCP      33m
xdr-proxy                    ClusterIP   10.100.44.96     &amp;lt;none&amp;gt;        8901/TCP,8902/TCP   41m
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;





&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight shell"&gt;&lt;code&gt;kubectl run &lt;span class="nt"&gt;-it&lt;/span&gt; &lt;span class="nt"&gt;--rm&lt;/span&gt; &lt;span class="nt"&gt;--restart&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;Never aerospike-tool &lt;span class="nt"&gt;-n&lt;/span&gt; aerospike &lt;span class="nt"&gt;--image&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;aerospike/aerospike-tools:latest &lt;span class="nt"&gt;--&lt;/span&gt; aql &lt;span class="nt"&gt;-U&lt;/span&gt; admin &lt;span class="nt"&gt;-P&lt;/span&gt; admin123 &lt;span class="nt"&gt;-h&lt;/span&gt; aerocluster-source-xdr-0-0
Insert a &lt;span class="nb"&gt;source &lt;/span&gt;record using the following &lt;span class="nb"&gt;command &lt;/span&gt;&lt;span class="k"&gt;in &lt;/span&gt;aql
insert into test.a1 &lt;span class="o"&gt;(&lt;/span&gt;PK,a,b,c,d&lt;span class="o"&gt;)&lt;/span&gt; values&lt;span class="o"&gt;(&lt;/span&gt;1,&lt;span class="s2"&gt;"A"&lt;/span&gt;,&lt;span class="s2"&gt;"B"&lt;/span&gt;,&lt;span class="s2"&gt;"C"&lt;/span&gt;,&lt;span class="s2"&gt;"D"&lt;/span&gt;&lt;span class="o"&gt;)&lt;/span&gt;
OK, 1 record affected.

aql&amp;gt; &lt;span class="k"&gt;select&lt;/span&gt; &lt;span class="k"&gt;*&lt;/span&gt; from &lt;span class="nb"&gt;test&lt;/span&gt;
+----+-----+-----+-----+-----+
| PK | a   | b   | c   | d   |
+----+-----+-----+-----+-----+
| 1  | &lt;span class="s2"&gt;"A"&lt;/span&gt; | &lt;span class="s2"&gt;"B"&lt;/span&gt; | &lt;span class="s2"&gt;"C"&lt;/span&gt; | &lt;span class="s2"&gt;"D"&lt;/span&gt; |
+----+-----+-----+-----+-----+
1 row &lt;span class="k"&gt;in &lt;/span&gt;&lt;span class="nb"&gt;set&lt;/span&gt; &lt;span class="o"&gt;(&lt;/span&gt;0.023 secs&lt;span class="o"&gt;)&lt;/span&gt;
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;p&gt;Connect to the destination cluster in the same way and confirm data has successfully arrived.&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight shell"&gt;&lt;code&gt;kubectl run &lt;span class="nt"&gt;-it&lt;/span&gt; &lt;span class="nt"&gt;--rm&lt;/span&gt; &lt;span class="nt"&gt;--restart&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;Never aerospike-tool &lt;span class="nt"&gt;-n&lt;/span&gt; aerospike &lt;span class="nt"&gt;--image&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;aerospike/aerospike-tools:latest &lt;span class="nt"&gt;--&lt;/span&gt; aql &lt;span class="nt"&gt;-U&lt;/span&gt; admin &lt;span class="nt"&gt;-P&lt;/span&gt; admin123 &lt;span class="nt"&gt;-h&lt;/span&gt; aerocluster-dest-xdr-0-0
Run the following &lt;span class="k"&gt;select &lt;/span&gt;query &lt;span class="k"&gt;in &lt;/span&gt;the destination cluster.
aql&amp;gt; &lt;span class="k"&gt;select&lt;/span&gt; &lt;span class="k"&gt;*&lt;/span&gt; from &lt;span class="nb"&gt;test&lt;/span&gt;
+----+-----+-----+-----+-----+
| PK | a   | b   | c   | d   |
+----+-----+-----+-----+-----+
| 1  | &lt;span class="s2"&gt;"A"&lt;/span&gt; | &lt;span class="s2"&gt;"B"&lt;/span&gt; | &lt;span class="s2"&gt;"C"&lt;/span&gt; | &lt;span class="s2"&gt;"D"&lt;/span&gt; |
+----+-----+-----+-----+-----+
1 row &lt;span class="k"&gt;in &lt;/span&gt;&lt;span class="nb"&gt;set&lt;/span&gt; &lt;span class="o"&gt;(&lt;/span&gt;0.031 secs&lt;span class="o"&gt;)&lt;/span&gt;

OK
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;h2&gt;
  
  
  Interim summary
&lt;/h2&gt;

&lt;p&gt;At this point, it's confirmed that the xdr-proxy is doing exactly what we need it to do.&lt;/p&gt;

&lt;p&gt;If you now review the logs for the initial 2 xdr-proxies that were scheduled you should indeed see userKey=1.&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight shell"&gt;&lt;code&gt;kubectl logs xdr-proxy-7d9fccd6c8-5q7gn &lt;span class="nt"&gt;-n&lt;/span&gt; aerospike | &lt;span class="nb"&gt;grep &lt;/span&gt;record-parser
2022-12-08 14:53:50.607 GMT DEBUG record-parser - parsed message fields: &lt;span class="nv"&gt;key&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;Key&lt;span class="o"&gt;(&lt;/span&gt;&lt;span class="nv"&gt;namespace&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;&lt;span class="s1"&gt;'test'&lt;/span&gt;, &lt;span class="nb"&gt;set&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;a1, &lt;span class="nv"&gt;digest&lt;/span&gt;&lt;span class="o"&gt;=[&lt;/span&gt;120, 48, &lt;span class="nt"&gt;-23&lt;/span&gt;, &lt;span class="nt"&gt;-90&lt;/span&gt;, 110, 126, 84, &lt;span class="nt"&gt;-1&lt;/span&gt;, 114, &lt;span class="nt"&gt;-116&lt;/span&gt;, &lt;span class="nt"&gt;-9&lt;/span&gt;, &lt;span class="nt"&gt;-21&lt;/span&gt;, 28, 75, 126, &lt;span class="nt"&gt;-68&lt;/span&gt;, &lt;span class="nt"&gt;-51&lt;/span&gt;, 83, 31, &lt;span class="nt"&gt;-117&lt;/span&gt;&lt;span class="o"&gt;]&lt;/span&gt;, &lt;span class="nv"&gt;userKey&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;1, &lt;span class="nv"&gt;lastUpdateTimeMs&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;1670511230565, &lt;span class="nv"&gt;userKeyString&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;1, &lt;span class="nv"&gt;digestString&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;&lt;span class="s1"&gt;'eDDppm5+VP9yjPfrHEt+vM1TH4s='&lt;/span&gt;&lt;span class="o"&gt;)&lt;/span&gt;

kubectl logs xdr-proxy-7d9fccd6c8-f5zkt &lt;span class="nt"&gt;-n&lt;/span&gt; aerospike | &lt;span class="nb"&gt;grep &lt;/span&gt;record-parser
&lt;span class="o"&gt;(&lt;/span&gt;none&lt;span class="o"&gt;)&lt;/span&gt;
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;h2&gt;
  
  
  Scaling the XDR-Proxies
&lt;/h2&gt;

&lt;p&gt;Go ahead and scale up the xdr-proxy to 6 pods by editing the file xdr-proxy-deployment.yaml and then applying the changes.&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight yaml"&gt;&lt;code&gt;&lt;span class="na"&gt;apiVersion&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="s"&gt;apps/v1&lt;/span&gt;
&lt;span class="na"&gt;kind&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="s"&gt;Deployment&lt;/span&gt;
&lt;span class="na"&gt;metadata&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt;
  &lt;span class="na"&gt;name&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="s"&gt;xdr-proxy&lt;/span&gt;
  &lt;span class="na"&gt;namespace&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="s"&gt;aerospike&lt;/span&gt;
  &lt;span class="na"&gt;labels&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt;
    &lt;span class="na"&gt;app&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="s"&gt;xdr-proxy&lt;/span&gt;
&lt;span class="na"&gt;spec&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt;
  &lt;span class="na"&gt;replicas&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="m"&gt;6&lt;/span&gt;
  &lt;span class="na"&gt;selector&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt;
    &lt;span class="na"&gt;matchLabels&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt;
      &lt;span class="na"&gt;app&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="s"&gt;xdr-proxy&lt;/span&gt;
&lt;span class="nn"&gt;...&lt;/span&gt;
&lt;span class="s"&gt;kubectl apply -f xdr-proxy-deployment.yaml&lt;/span&gt;
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;p&gt;You can also achieve the same result by running the following command:&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight shell"&gt;&lt;code&gt;kubectl scale deploy xdr-proxy &lt;span class="nt"&gt;-n&lt;/span&gt; aerospike &lt;span class="nt"&gt;--replicas&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;6
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;p&gt;You should now have 6 instances of the xdr-proxies running.&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight shell"&gt;&lt;code&gt;kubectl get po &lt;span class="nt"&gt;-n&lt;/span&gt; aerospike &lt;span class="nt"&gt;-w&lt;/span&gt;
NAME                         READY   STATUS    RESTARTS   AGE
aerocluster-dest-xdr-0-0     1/1     Running   0          3h50m
aerocluster-source-xdr-0-0   1/1     Running   0          75m
xdr-proxy-7d9fccd6c8-49ttl   1/1     Running   0          7s
xdr-proxy-7d9fccd6c8-5q7gn   1/1     Running   0          83m
xdr-proxy-7d9fccd6c8-c4j7k   1/1     Running   0          7s
xdr-proxy-7d9fccd6c8-f5zkt   1/1     Running   0          83m
xdr-proxy-7d9fccd6c8-lscbg   1/1     Running   0          7s
xdr-proxy-7d9fccd6c8-r56vs   1/1     Running   0          7s
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;p&gt;Let's add some sample records to our source XDR cluster with primary keys 5, 6, and 7.&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight shell"&gt;&lt;code&gt;aql&amp;gt; insert into &lt;span class="nb"&gt;test&lt;/span&gt; &lt;span class="o"&gt;(&lt;/span&gt;PK,a,b&lt;span class="o"&gt;)&lt;/span&gt; values &lt;span class="o"&gt;(&lt;/span&gt;5,&lt;span class="s2"&gt;"A"&lt;/span&gt;,&lt;span class="s2"&gt;"B"&lt;/span&gt;&lt;span class="o"&gt;)&lt;/span&gt;
OK, 1 record affected.
aql&amp;gt; insert into &lt;span class="nb"&gt;test&lt;/span&gt; &lt;span class="o"&gt;(&lt;/span&gt;PK,a,b&lt;span class="o"&gt;)&lt;/span&gt; values &lt;span class="o"&gt;(&lt;/span&gt;6,&lt;span class="s2"&gt;"A"&lt;/span&gt;,&lt;span class="s2"&gt;"B"&lt;/span&gt;&lt;span class="o"&gt;)&lt;/span&gt;
OK, 1 record affected.
aql&amp;gt; insert into &lt;span class="nb"&gt;test&lt;/span&gt; &lt;span class="o"&gt;(&lt;/span&gt;PK,a,b&lt;span class="o"&gt;)&lt;/span&gt; values &lt;span class="o"&gt;(&lt;/span&gt;7,&lt;span class="s2"&gt;"A"&lt;/span&gt;,&lt;span class="s2"&gt;"B"&lt;/span&gt;&lt;span class="o"&gt;)&lt;/span&gt;
OK, 1 record affected.
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;p&gt;Notice how we have userKey=5, userKey=6 and userKey=7 across the newly scaled xdr-proxies.&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight shell"&gt;&lt;code&gt;kubectl logs xdr-proxy-7d9fccd6c8-5q7gn &lt;span class="nt"&gt;-n&lt;/span&gt; aerospike | &lt;span class="nb"&gt;grep &lt;/span&gt;record-parser
2022-12-08 14:53:50.607 GMT DEBUG record-parser - parsed message fields: &lt;span class="nv"&gt;key&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;Key&lt;span class="o"&gt;(&lt;/span&gt;&lt;span class="nv"&gt;namespace&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;&lt;span class="s1"&gt;'test'&lt;/span&gt;, &lt;span class="nb"&gt;set&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;a1, &lt;span class="nv"&gt;digest&lt;/span&gt;&lt;span class="o"&gt;=[&lt;/span&gt;120, 48, &lt;span class="nt"&gt;-23&lt;/span&gt;, &lt;span class="nt"&gt;-90&lt;/span&gt;, 110, 126, 84, &lt;span class="nt"&gt;-1&lt;/span&gt;, 114, &lt;span class="nt"&gt;-116&lt;/span&gt;, &lt;span class="nt"&gt;-9&lt;/span&gt;, &lt;span class="nt"&gt;-21&lt;/span&gt;, 28, 75, 126, &lt;span class="nt"&gt;-68&lt;/span&gt;, &lt;span class="nt"&gt;-51&lt;/span&gt;, 83, 31, &lt;span class="nt"&gt;-117&lt;/span&gt;&lt;span class="o"&gt;]&lt;/span&gt;, &lt;span class="nv"&gt;userKey&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;1, &lt;span class="nv"&gt;lastUpdateTimeMs&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;1670511230565, &lt;span class="nv"&gt;userKeyString&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;1, &lt;span class="nv"&gt;digestString&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;&lt;span class="s1"&gt;'eDDppm5+VP9yjPfrHEt+vM1TH4s='&lt;/span&gt;&lt;span class="o"&gt;)&lt;/span&gt;

kubectl logs xdr-proxy-7d9fccd6c8-f5zkt &lt;span class="nt"&gt;-n&lt;/span&gt; aerospike | &lt;span class="nb"&gt;grep &lt;/span&gt;record-parser
&lt;span class="o"&gt;(&lt;/span&gt;none&lt;span class="o"&gt;)&lt;/span&gt;

kubectl logs xdr-proxy-7d9fccd6c8-49ttl &lt;span class="nt"&gt;-n&lt;/span&gt; aerospike | &lt;span class="nb"&gt;grep &lt;/span&gt;record-parser
&lt;span class="o"&gt;(&lt;/span&gt;none&lt;span class="o"&gt;)&lt;/span&gt;

kubectl logs xdr-proxy-7d9fccd6c8-c4j7k &lt;span class="nt"&gt;-n&lt;/span&gt; aerospike | &lt;span class="nb"&gt;grep &lt;/span&gt;record-parser
2022-12-08 15:05:37.511 GMT DEBUG record-parser - parsed message fields: &lt;span class="nv"&gt;key&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;Key&lt;span class="o"&gt;(&lt;/span&gt;&lt;span class="nv"&gt;namespace&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;&lt;span class="s1"&gt;'test'&lt;/span&gt;, &lt;span class="nb"&gt;set&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;null, &lt;span class="nv"&gt;digest&lt;/span&gt;&lt;span class="o"&gt;=[&lt;/span&gt;&lt;span class="nt"&gt;-88&lt;/span&gt;, 104, 104, &lt;span class="nt"&gt;-114&lt;/span&gt;, 19, &lt;span class="nt"&gt;-44&lt;/span&gt;, &lt;span class="nt"&gt;-19&lt;/span&gt;, 29, &lt;span class="nt"&gt;-15&lt;/span&gt;, 18, 118, 72, &lt;span class="nt"&gt;-117&lt;/span&gt;, &lt;span class="nt"&gt;-106&lt;/span&gt;, &lt;span class="nt"&gt;-28&lt;/span&gt;, 21, &lt;span class="nt"&gt;-48&lt;/span&gt;, 50, 26, 113], &lt;span class="nv"&gt;userKey&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;7, &lt;span class="nv"&gt;lastUpdateTimeMs&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;1670511937250, &lt;span class="nv"&gt;userKeyString&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;7, &lt;span class="nv"&gt;digestString&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;&lt;span class="s1"&gt;'qGhojhPU7R3xEnZIi5bkFdAyGnE='&lt;/span&gt;&lt;span class="o"&gt;)&lt;/span&gt;

kubectl logs xdr-proxy-7d9fccd6c8-lscbg &lt;span class="nt"&gt;-n&lt;/span&gt; aerospike | &lt;span class="nb"&gt;grep &lt;/span&gt;record-parser
2022-12-08 15:05:27.548 GMT DEBUG record-parser - parsed message fields: &lt;span class="nv"&gt;key&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;Key&lt;span class="o"&gt;(&lt;/span&gt;&lt;span class="nv"&gt;namespace&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;&lt;span class="s1"&gt;'test'&lt;/span&gt;, &lt;span class="nb"&gt;set&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;null, &lt;span class="nv"&gt;digest&lt;/span&gt;&lt;span class="o"&gt;=[&lt;/span&gt;68, 4, &lt;span class="nt"&gt;-94&lt;/span&gt;, &lt;span class="nt"&gt;-44&lt;/span&gt;, &lt;span class="nt"&gt;-75&lt;/span&gt;, 112, &lt;span class="nt"&gt;-102&lt;/span&gt;, 73, &lt;span class="nt"&gt;-120&lt;/span&gt;, 41, &lt;span class="nt"&gt;-101&lt;/span&gt;, &lt;span class="nt"&gt;-120&lt;/span&gt;, 33, &lt;span class="nt"&gt;-111&lt;/span&gt;, 15, &lt;span class="nt"&gt;-114&lt;/span&gt;, &lt;span class="nt"&gt;-85&lt;/span&gt;, 46, &lt;span class="nt"&gt;-2&lt;/span&gt;, 80], &lt;span class="nv"&gt;userKey&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;5, &lt;span class="nv"&gt;lastUpdateTimeMs&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;1670511927465, &lt;span class="nv"&gt;userKeyString&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;5, &lt;span class="nv"&gt;digestString&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;&lt;span class="s1"&gt;'RASi1LVwmkmIKZuIIZEPjqsu/lA='&lt;/span&gt;&lt;span class="o"&gt;)&lt;/span&gt;
2022-12-08 15:05:32.300 GMT DEBUG record-parser - parsed message fields: &lt;span class="nv"&gt;key&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;Key&lt;span class="o"&gt;(&lt;/span&gt;&lt;span class="nv"&gt;namespace&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;&lt;span class="s1"&gt;'test'&lt;/span&gt;, &lt;span class="nb"&gt;set&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;null, &lt;span class="nv"&gt;digest&lt;/span&gt;&lt;span class="o"&gt;=[&lt;/span&gt;33, &lt;span class="nt"&gt;-100&lt;/span&gt;, 127, 120, 17, 45, &lt;span class="nt"&gt;-79&lt;/span&gt;, 115, &lt;span class="nt"&gt;-40&lt;/span&gt;, 53, &lt;span class="nt"&gt;-70&lt;/span&gt;, &lt;span class="nt"&gt;-57&lt;/span&gt;, 120, 73, 20, &lt;span class="nt"&gt;-50&lt;/span&gt;, &lt;span class="nt"&gt;-99&lt;/span&gt;, &lt;span class="nt"&gt;-98&lt;/span&gt;, &lt;span class="nt"&gt;-104&lt;/span&gt;, 85], &lt;span class="nv"&gt;userKey&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;6, &lt;span class="nv"&gt;lastUpdateTimeMs&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;1670511932288, &lt;span class="nv"&gt;userKeyString&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;6, &lt;span class="nv"&gt;digestString&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;&lt;span class="s1"&gt;'IZx/eBEtsXPYNbrHeEkUzp2emFU='&lt;/span&gt;&lt;span class="o"&gt;)&lt;/span&gt;

kubectl logs xdr-proxy-7d9fccd6c8-r56vs &lt;span class="nt"&gt;-n&lt;/span&gt; aerospike | &lt;span class="nb"&gt;grep &lt;/span&gt;record-parser
&lt;span class="o"&gt;(&lt;/span&gt;none&lt;span class="o"&gt;)&lt;/span&gt;
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;p&gt;When data is actively flowing between source and destination clusters, the existing cached list of xdr-proxy connections from the source cluster is not refreshed, so the newly scaled xdr-proxies we have just scheduled will not be utilized immediately.&lt;/p&gt;

&lt;p&gt;To demonstrate, let's add data to the source cluster using Aerospike's benchmark tool. At the same time, we will scale the xdr-proxies on the destination side and observe the results. Before we start, we reduce the xdr-proxy server count to 1 to make the observations clear.&lt;/p&gt;

&lt;p&gt;I use my own local machine, which already has the benchmark tool installed, to send data to the source EC2 instances. In order to do this, we need to get the external IP address of the service.&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight shell"&gt;&lt;code&gt;kubectl get AerospikeCluster aerocluster-source-xdr &lt;span class="nt"&gt;-n&lt;/span&gt; aerospike  &lt;span class="nt"&gt;-o&lt;/span&gt; yaml

...
pods:
    aerocluster-source-xdr-0-0:
      aerospike:
        accessEndpoints:
        - 192.168.41.63:31999
        alternateAccessEndpoints:
        - 54.173.138.131:31999
        clusterName: aerocluster-source-xdr
        nodeID: 0a0
        tlsAccessEndpoints: &lt;span class="o"&gt;[]&lt;/span&gt;
        tlsAlternateAccessEndpoints: &lt;span class="o"&gt;[]&lt;/span&gt;
        tlsName: &lt;span class="s2"&gt;""&lt;/span&gt;
      aerospikeConfigHash: 4aacb9809beaa01d99a9f00293c9f7dc141845f8
      hostExternalIP: 54.173.138.131
      hostInternalIP: 192.168.41.63
      image: aerospike/aerospike-server-enterprise:6.1.0.2
      initializedVolumes:
      - workdir
      - ns
      networkPolicyHash: acbbfab3668e1fceeed201139d1173f00095667e
      podIP: 192.168.50.203
      podPort: 3000
      podSpecHash: 972dc2a779fe9ab407212b547d54d3a72ecef259
      servicePort: 31999
...
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;p&gt;Add the AWS firewall rule to allow traffic into the Kubernetes service. Connect the &lt;code&gt;asbenchmark&lt;/code&gt; tool to start writing traffic using the public IP address for the NodePort Service.&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight shell"&gt;&lt;code&gt;asbenchmark &lt;span class="nt"&gt;-h&lt;/span&gt; 54.173.138.131:31999 &lt;span class="nt"&gt;-Uadmin&lt;/span&gt; &lt;span class="nt"&gt;-Padmin123&lt;/span&gt; &lt;span class="nt"&gt;-z&lt;/span&gt; 10 &lt;span class="nt"&gt;-servicesAlternate&lt;/span&gt; &lt;span class="nt"&gt;-w&lt;/span&gt; RU,0 &lt;span class="nt"&gt;-o&lt;/span&gt; B256
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;p&gt;Scale up the xdr-proxy servers from 1 to 2, and check the logs of both proxies to see what messages they received. In a production environment, you should disable the logging.&lt;/p&gt;

&lt;p&gt;Notice that no data has passed through the new xdr-proxy instance &lt;code&gt;xdr-proxy-7d9fccd6c8-s2tzt&lt;/code&gt;.&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight shell"&gt;&lt;code&gt;kubectl &lt;span class="nt"&gt;-n&lt;/span&gt; aerospike logs xdr-proxy-7d9fccd6c8-s2tzt | &lt;span class="nb"&gt;grep &lt;/span&gt;record-parser
&lt;span class="o"&gt;(&lt;/span&gt;none&lt;span class="o"&gt;)&lt;/span&gt;

kubectl &lt;span class="nt"&gt;-n&lt;/span&gt; aerospike logs xdr-proxy-7d9fccd6c8-5q7gn | &lt;span class="nb"&gt;grep &lt;/span&gt;record-parser
...
2022-12-08 18:21:36.158 GMT DEBUG record-parser - parsed message fields: &lt;span class="nv"&gt;key&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;Key&lt;span class="o"&gt;(&lt;/span&gt;&lt;span class="nv"&gt;namespace&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;&lt;span class="s1"&gt;'test'&lt;/span&gt;, &lt;span class="nb"&gt;set&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;testset, &lt;span class="nv"&gt;digest&lt;/span&gt;&lt;span class="o"&gt;=[&lt;/span&gt;51, &lt;span class="nt"&gt;-120&lt;/span&gt;, 114, &lt;span class="nt"&gt;-17&lt;/span&gt;, &lt;span class="nt"&gt;-44&lt;/span&gt;, 72, 123, 125, 50, 92, 3, 110, &lt;span class="nt"&gt;-21&lt;/span&gt;, &lt;span class="nt"&gt;-38&lt;/span&gt;, 74, 25, 42, 35, 117, 72], &lt;span class="nv"&gt;userKey&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;null, &lt;span class="nv"&gt;lastUpdateTimeMs&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;1670523696059, &lt;span class="nv"&gt;userKeyString&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;null, &lt;span class="nv"&gt;digestString&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;&lt;span class="s1"&gt;'M4hy79RIe30yXANu69pKGSojdUg='&lt;/span&gt;&lt;span class="o"&gt;)&lt;/span&gt;
2022-12-08 18:21:36.158 GMT DEBUG record-parser - parsed message fields: &lt;span class="nv"&gt;key&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;Key&lt;span class="o"&gt;(&lt;/span&gt;&lt;span class="nv"&gt;namespace&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;&lt;span class="s1"&gt;'test'&lt;/span&gt;, &lt;span class="nb"&gt;set&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;testset, &lt;span class="nv"&gt;digest&lt;/span&gt;&lt;span class="o"&gt;=[&lt;/span&gt;124, &lt;span class="nt"&gt;-6&lt;/span&gt;, &lt;span class="nt"&gt;-70&lt;/span&gt;, &lt;span class="nt"&gt;-23&lt;/span&gt;, 44, 41, &lt;span class="nt"&gt;-19&lt;/span&gt;, 40, &lt;span class="nt"&gt;-11&lt;/span&gt;, &lt;span class="nt"&gt;-16&lt;/span&gt;, 126, 120, 81, &lt;span class="nt"&gt;-113&lt;/span&gt;, &lt;span class="nt"&gt;-112&lt;/span&gt;, &lt;span class="nt"&gt;-79&lt;/span&gt;, 66, 77, &lt;span class="nt"&gt;-99&lt;/span&gt;, &lt;span class="nt"&gt;-6&lt;/span&gt;&lt;span class="o"&gt;]&lt;/span&gt;, &lt;span class="nv"&gt;userKey&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;null, &lt;span class="nv"&gt;lastUpdateTimeMs&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;1670523696059, &lt;span class="nv"&gt;userKeyString&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;null, &lt;span class="nv"&gt;digestString&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;&lt;span class="s1"&gt;'fPq66Swp7Sj18H54UY+QsUJNnfo='&lt;/span&gt;&lt;span class="o"&gt;)&lt;/span&gt;
2022-12-08 18:21:36.158 GMT DEBUG record-parser - parsed message fields: &lt;span class="nv"&gt;key&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;Key&lt;span class="o"&gt;(&lt;/span&gt;&lt;span class="nv"&gt;namespace&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;&lt;span class="s1"&gt;'test'&lt;/span&gt;, &lt;span class="nb"&gt;set&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;testset, &lt;span class="nv"&gt;digest&lt;/span&gt;&lt;span class="o"&gt;=[&lt;/span&gt;&lt;span class="nt"&gt;-127&lt;/span&gt;, &lt;span class="nt"&gt;-118&lt;/span&gt;, 63, &lt;span class="nt"&gt;-32&lt;/span&gt;, &lt;span class="nt"&gt;-74&lt;/span&gt;, 60, &lt;span class="nt"&gt;-60&lt;/span&gt;, 86, 31, &lt;span class="nt"&gt;-119&lt;/span&gt;, &lt;span class="nt"&gt;-1&lt;/span&gt;, &lt;span class="nt"&gt;-105&lt;/span&gt;, &lt;span class="nt"&gt;-108&lt;/span&gt;, &lt;span class="nt"&gt;-59&lt;/span&gt;, 111, 48, &lt;span class="nt"&gt;-34&lt;/span&gt;, &lt;span class="nt"&gt;-61&lt;/span&gt;, &lt;span class="nt"&gt;-108&lt;/span&gt;, &lt;span class="nt"&gt;-5&lt;/span&gt;&lt;span class="o"&gt;]&lt;/span&gt;, &lt;span class="nv"&gt;userKey&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;null, &lt;span class="nv"&gt;lastUpdateTimeMs&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;1670523696105, &lt;span class="nv"&gt;userKeyString&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;null, &lt;span class="nv"&gt;digestString&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;&lt;span class="s1"&gt;'gYo/4LY8xFYfif+XlMVvMN7DlPs='&lt;/span&gt;&lt;span class="o"&gt;)&lt;/span&gt;
2022-12-08 18:21:36.256 GMT DEBUG record-parser - parsed message fields: &lt;span class="nv"&gt;key&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;Key&lt;span class="o"&gt;(&lt;/span&gt;&lt;span class="nv"&gt;namespace&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;&lt;span class="s1"&gt;'test'&lt;/span&gt;, &lt;span class="nb"&gt;set&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;testset, &lt;span class="nv"&gt;digest&lt;/span&gt;&lt;span class="o"&gt;=[&lt;/span&gt;99, 1, 80, 2, 76, &lt;span class="nt"&gt;-43&lt;/span&gt;, 125, 77, 47, 8, 6, 35, 49, 117, &lt;span class="nt"&gt;-35&lt;/span&gt;, 54, 120, &lt;span class="nt"&gt;-29&lt;/span&gt;, 118, &lt;span class="nt"&gt;-72&lt;/span&gt;&lt;span class="o"&gt;]&lt;/span&gt;, &lt;span class="nv"&gt;userKey&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;null, &lt;span class="nv"&gt;lastUpdateTimeMs&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;1670523696178, &lt;span class="nv"&gt;userKeyString&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;null, &lt;span class="nv"&gt;digestString&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;&lt;span class="s1"&gt;'YwFQAkzVfU0vCAYjMXXdNnjjdrg='&lt;/span&gt;&lt;span class="o"&gt;)&lt;/span&gt;
2022-12-08 18:21:36.257 GMT DEBUG record-parser - parsed message fields: &lt;span class="nv"&gt;key&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;Key&lt;span class="o"&gt;(&lt;/span&gt;&lt;span class="nv"&gt;namespace&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;&lt;span class="s1"&gt;'test'&lt;/span&gt;, &lt;span class="nb"&gt;set&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;testset, &lt;span class="nv"&gt;digest&lt;/span&gt;&lt;span class="o"&gt;=[&lt;/span&gt;&lt;span class="nt"&gt;-88&lt;/span&gt;, &lt;span class="nt"&gt;-46&lt;/span&gt;, &lt;span class="nt"&gt;-48&lt;/span&gt;, &lt;span class="nt"&gt;-33&lt;/span&gt;, 77, &lt;span class="nt"&gt;-120&lt;/span&gt;, 123, &lt;span class="nt"&gt;-101&lt;/span&gt;, &lt;span class="nt"&gt;-70&lt;/span&gt;, &lt;span class="nt"&gt;-20&lt;/span&gt;, &lt;span class="nt"&gt;-96&lt;/span&gt;, &lt;span class="nt"&gt;-104&lt;/span&gt;, &lt;span class="nt"&gt;-51&lt;/span&gt;, &lt;span class="nt"&gt;-90&lt;/span&gt;, 28, &lt;span class="nt"&gt;-15&lt;/span&gt;, 70, 11, 118, 83], &lt;span class="nv"&gt;userKey&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;null, &lt;span class="nv"&gt;lastUpdateTimeMs&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;1670523696202, &lt;span class="nv"&gt;userKeyString&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;null, &lt;span class="nv"&gt;digestString&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;&lt;span class="s1"&gt;'qNLQ302Ie5u67KCYzaYc8UYLdlM='&lt;/span&gt;&lt;span class="o"&gt;)&lt;/span&gt;
2022-12-08 18:21:36.257 GMT DEBUG record-parser - parsed message fields: &lt;span class="nv"&gt;key&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;Key&lt;span class="o"&gt;(&lt;/span&gt;&lt;span class="nv"&gt;namespace&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;&lt;span class="s1"&gt;'test'&lt;/span&gt;, &lt;span class="nb"&gt;set&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;testset, &lt;span class="nv"&gt;digest&lt;/span&gt;&lt;span class="o"&gt;=[&lt;/span&gt;&lt;span class="nt"&gt;-97&lt;/span&gt;, 18, 28, &lt;span class="nt"&gt;-43&lt;/span&gt;, 75, 42, &lt;span class="nt"&gt;-22&lt;/span&gt;, &lt;span class="nt"&gt;-126&lt;/span&gt;, &lt;span class="nt"&gt;-61&lt;/span&gt;, &lt;span class="nt"&gt;-108&lt;/span&gt;, &lt;span class="nt"&gt;-36&lt;/span&gt;, 118, &lt;span class="nt"&gt;-86&lt;/span&gt;, &lt;span class="nt"&gt;-105&lt;/span&gt;, &lt;span class="nt"&gt;-52&lt;/span&gt;, 119, &lt;span class="nt"&gt;-39&lt;/span&gt;, &lt;span class="nt"&gt;-33&lt;/span&gt;, &lt;span class="nt"&gt;-127&lt;/span&gt;, &lt;span class="nt"&gt;-76&lt;/span&gt;&lt;span class="o"&gt;]&lt;/span&gt;, &lt;span class="nv"&gt;userKey&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;null, &lt;span class="nv"&gt;lastUpdateTimeMs&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;1670523696175, &lt;span class="nv"&gt;userKeyString&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;null, &lt;span class="nv"&gt;digestString&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;&lt;span class="s1"&gt;'nxIc1Usq6oLDlNx2qpfMd9nfgbQ='&lt;/span&gt;&lt;span class="o"&gt;)&lt;/span&gt;
2022-12-08 18:21:36.257 GMT DEBUG record-parser - parsed message fields: &lt;span class="nv"&gt;key&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;Key&lt;span class="o"&gt;(&lt;/span&gt;&lt;span class="nv"&gt;namespace&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;&lt;span class="s1"&gt;'test'&lt;/span&gt;, &lt;span class="nb"&gt;set&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;testset, &lt;span class="nv"&gt;digest&lt;/span&gt;&lt;span class="o"&gt;=[&lt;/span&gt;&lt;span class="nt"&gt;-120&lt;/span&gt;, 103, &lt;span class="nt"&gt;-51&lt;/span&gt;, 57, &lt;span class="nt"&gt;-71&lt;/span&gt;, &lt;span class="nt"&gt;-106&lt;/span&gt;, 13, &lt;span class="nt"&gt;-48&lt;/span&gt;, 100, 28, 59, &lt;span class="nt"&gt;-3&lt;/span&gt;, &lt;span class="nt"&gt;-39&lt;/span&gt;, &lt;span class="nt"&gt;-56&lt;/span&gt;, &lt;span class="nt"&gt;-67&lt;/span&gt;, &lt;span class="nt"&gt;-103&lt;/span&gt;, 29, 36, 75, 119], &lt;span class="nv"&gt;userKey&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;null, &lt;span class="nv"&gt;lastUpdateTimeMs&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;1670523696191, &lt;span class="nv"&gt;userKeyString&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;null, &lt;span class="nv"&gt;digestString&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;&lt;span class="s1"&gt;'iGfNObmWDdBkHDv92ci9mR0kS3c='&lt;/span&gt;&lt;span class="o"&gt;)&lt;/span&gt;
2022-12-08 18:21:36.257 GMT DEBUG record-parser - parsed message fields: &lt;span class="nv"&gt;key&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;Key&lt;span class="o"&gt;(&lt;/span&gt;&lt;span class="nv"&gt;namespace&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;&lt;span class="s1"&gt;'test'&lt;/span&gt;, &lt;span class="nb"&gt;set&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;testset, &lt;span class="nv"&gt;digest&lt;/span&gt;&lt;span class="o"&gt;=[&lt;/span&gt;&lt;span class="nt"&gt;-47&lt;/span&gt;, 88, &lt;span class="nt"&gt;-13&lt;/span&gt;, 13, &lt;span class="nt"&gt;-35&lt;/span&gt;, 77, 24, 22, &lt;span class="nt"&gt;-40&lt;/span&gt;, &lt;span class="nt"&gt;-61&lt;/span&gt;, &lt;span class="nt"&gt;-118&lt;/span&gt;, &lt;span class="nt"&gt;-115&lt;/span&gt;, 82, 13, 127, &lt;span class="nt"&gt;-125&lt;/span&gt;, 53, 66, &lt;span class="nt"&gt;-22&lt;/span&gt;, &lt;span class="nt"&gt;-8&lt;/span&gt;&lt;span class="o"&gt;]&lt;/span&gt;, &lt;span class="nv"&gt;userKey&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;null, &lt;span class="nv"&gt;lastUpdateTimeMs&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;1670523696233, &lt;span class="nv"&gt;userKeyString&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;null, &lt;span class="nv"&gt;digestString&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;&lt;span class="s1"&gt;'0VjzDd1NGBbYw4qNUg1/gzVC6vg='&lt;/span&gt;&lt;span class="o"&gt;)&lt;/span&gt;
2022-12-08 18:21:36.258 GMT DEBUG record-parser - parsed message fields: &lt;span class="nv"&gt;key&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;Key&lt;span class="o"&gt;(&lt;/span&gt;&lt;span class="nv"&gt;namespace&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;&lt;span class="s1"&gt;'test'&lt;/span&gt;, &lt;span class="nb"&gt;set&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;testset, &lt;span class="nv"&gt;digest&lt;/span&gt;&lt;span class="o"&gt;=[&lt;/span&gt;&lt;span class="nt"&gt;-63&lt;/span&gt;, &lt;span class="nt"&gt;-11&lt;/span&gt;, 93, &lt;span class="nt"&gt;-90&lt;/span&gt;, 47, 29, &lt;span class="nt"&gt;-63&lt;/span&gt;, 36, 12, 53, &lt;span class="nt"&gt;-86&lt;/span&gt;, 84, 57, &lt;span class="nt"&gt;-125&lt;/span&gt;, 16, 43, &lt;span class="nt"&gt;-18&lt;/span&gt;, 93, 56, 9], &lt;span class="nv"&gt;userKey&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;null, &lt;span class="nv"&gt;lastUpdateTimeMs&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;1670523696186, &lt;span class="nv"&gt;userKeyString&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;null, &lt;span class="nv"&gt;digestString&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;&lt;span class="s1"&gt;'wfVdpi8dwSQMNapUOYMQK+5dOAk='&lt;/span&gt;&lt;span class="o"&gt;)&lt;/span&gt;
2022-12-08 18:21:36.258 GMT DEBUG record-parser - parsed message fields: &lt;span class="nv"&gt;key&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;Key&lt;span class="o"&gt;(&lt;/span&gt;&lt;span class="nv"&gt;namespace&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;&lt;span class="s1"&gt;'test'&lt;/span&gt;, &lt;span class="nb"&gt;set&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;testset, &lt;span class="nv"&gt;digest&lt;/span&gt;&lt;span class="o"&gt;=[&lt;/span&gt;&lt;span class="nt"&gt;-23&lt;/span&gt;, 101, &lt;span class="nt"&gt;-114&lt;/span&gt;, &lt;span class="nt"&gt;-87&lt;/span&gt;, &lt;span class="nt"&gt;-52&lt;/span&gt;, 107, 36, 113, 101, 33, &lt;span class="nt"&gt;-16&lt;/span&gt;, 82, &lt;span class="nt"&gt;-95&lt;/span&gt;, 97, 34, &lt;span class="nt"&gt;-121&lt;/span&gt;, 82, &lt;span class="nt"&gt;-97&lt;/span&gt;, 40, 59], &lt;span class="nv"&gt;userKey&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;null, &lt;span class="nv"&gt;lastUpdateTimeMs&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;1670523696145, &lt;span class="nv"&gt;userKeyString&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;null, &lt;span class="nv"&gt;digestString&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;&lt;span class="s1"&gt;'6WWOqcxrJHFlIfBSoWEih1KfKDs='&lt;/span&gt;&lt;span class="o"&gt;)&lt;/span&gt;
2022-12-08 18:21:36.258 GMT DEBUG record-parser - parsed message fields: &lt;span class="nv"&gt;key&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;Key&lt;span class="o"&gt;(&lt;/span&gt;&lt;span class="nv"&gt;namespace&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;&lt;span class="s1"&gt;'test'&lt;/span&gt;, &lt;span class="nb"&gt;set&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;testset, &lt;span class="nv"&gt;digest&lt;/span&gt;&lt;span class="o"&gt;=[&lt;/span&gt;&lt;span class="nt"&gt;-77&lt;/span&gt;, &lt;span class="nt"&gt;-118&lt;/span&gt;, 49, 4, &lt;span class="nt"&gt;-75&lt;/span&gt;, 123, 81, 2, &lt;span class="nt"&gt;-103&lt;/span&gt;, &lt;span class="nt"&gt;-73&lt;/span&gt;, 42, &lt;span class="nt"&gt;-70&lt;/span&gt;, &lt;span class="nt"&gt;-54&lt;/span&gt;, 95, 98, 23, 73, 66, &lt;span class="nt"&gt;-86&lt;/span&gt;, 7], &lt;span class="nv"&gt;userKey&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;null, &lt;span class="nv"&gt;lastUpdateTimeMs&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;1670523696230, &lt;span class="nv"&gt;userKeyString&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;null, &lt;span class="nv"&gt;digestString&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;&lt;span class="s1"&gt;'s4oxBLV7UQKZtyq6yl9iF0lCqgc='&lt;/span&gt;&lt;span class="o"&gt;)&lt;/span&gt;
2022-12-08 18:21:36.258 GMT DEBUG record-parser - parsed message fields: &lt;span class="nv"&gt;key&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;Key&lt;span class="o"&gt;(&lt;/span&gt;&lt;span class="nv"&gt;namespace&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;&lt;span class="s1"&gt;'test'&lt;/span&gt;, &lt;span class="nb"&gt;set&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;testset, &lt;span class="nv"&gt;digest&lt;/span&gt;&lt;span class="o"&gt;=[&lt;/span&gt;50, &lt;span class="nt"&gt;-38&lt;/span&gt;, &lt;span class="nt"&gt;-31&lt;/span&gt;, &lt;span class="nt"&gt;-54&lt;/span&gt;, &lt;span class="nt"&gt;-122&lt;/span&gt;, &lt;span class="nt"&gt;-113&lt;/span&gt;, &lt;span class="nt"&gt;-38&lt;/span&gt;, 88, 15, 7, 96, 51, &lt;span class="nt"&gt;-92&lt;/span&gt;, &lt;span class="nt"&gt;-25&lt;/span&gt;, 60, &lt;span class="nt"&gt;-104&lt;/span&gt;, 26, 113, &lt;span class="nt"&gt;-117&lt;/span&gt;, &lt;span class="nt"&gt;-82&lt;/span&gt;&lt;span class="o"&gt;]&lt;/span&gt;, &lt;span class="nv"&gt;userKey&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;null, &lt;span class="nv"&gt;lastUpdateTimeMs&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;1670523696157, &lt;span class="nv"&gt;userKeyString&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;null, &lt;span class="nv"&gt;digestString&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;&lt;span class="s1"&gt;'MtrhyoaP2lgPB2AzpOc8mBpxi64='&lt;/span&gt;&lt;span class="o"&gt;)&lt;/span&gt;
2022-12-08 18:21:36.258 GMT DEBUG record-parser - parsed message fields: &lt;span class="nv"&gt;key&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;Key&lt;span class="o"&gt;(&lt;/span&gt;&lt;span class="nv"&gt;namespace&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;&lt;span class="s1"&gt;'test'&lt;/span&gt;, &lt;span class="nb"&gt;set&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;testset, &lt;span class="nv"&gt;digest&lt;/span&gt;&lt;span class="o"&gt;=[&lt;/span&gt;&lt;span class="nt"&gt;-77&lt;/span&gt;, 31, 67, &lt;span class="nt"&gt;-18&lt;/span&gt;, &lt;span class="nt"&gt;-52&lt;/span&gt;, &lt;span class="nt"&gt;-114&lt;/span&gt;, 42, &lt;span class="nt"&gt;-18&lt;/span&gt;, 36, &lt;span class="nt"&gt;-111&lt;/span&gt;, 89, 62, 109, 114, &lt;span class="nt"&gt;-54&lt;/span&gt;, 54, &lt;span class="nt"&gt;-121&lt;/span&gt;, &lt;span class="nt"&gt;-110&lt;/span&gt;, &lt;span class="nt"&gt;-88&lt;/span&gt;, &lt;span class="nt"&gt;-108&lt;/span&gt;&lt;span class="o"&gt;]&lt;/span&gt;, &lt;span class="nv"&gt;userKey&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;null, &lt;span class="nv"&gt;lastUpdateTimeMs&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;1670523696206, &lt;span class="nv"&gt;userKeyString&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;null, &lt;span class="nv"&gt;digestString&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;&lt;span class="s1"&gt;'sx9D7syOKu4kkVk+bXLKNoeSqJQ='&lt;/span&gt;&lt;span class="o"&gt;)&lt;/span&gt;
2022-12-08 18:21:36.258 GMT DEBUG record-parser - parsed message fields: &lt;span class="nv"&gt;key&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;Key&lt;span class="o"&gt;(&lt;/span&gt;&lt;span class="nv"&gt;namespace&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;&lt;span class="s1"&gt;'test'&lt;/span&gt;, &lt;span class="nb"&gt;set&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;testset, &lt;span class="nv"&gt;digest&lt;/span&gt;&lt;span class="o"&gt;=[&lt;/span&gt;73, 11, 98, &lt;span class="nt"&gt;-50&lt;/span&gt;, 32, 12, 0, &lt;span class="nt"&gt;-50&lt;/span&gt;, 22, &lt;span class="nt"&gt;-101&lt;/span&gt;, &lt;span class="nt"&gt;-108&lt;/span&gt;, 18, 38, 7, &lt;span class="nt"&gt;-65&lt;/span&gt;, 6, &lt;span class="nt"&gt;-58&lt;/span&gt;, 60, &lt;span class="nt"&gt;-6&lt;/span&gt;, &lt;span class="nt"&gt;-33&lt;/span&gt;&lt;span class="o"&gt;]&lt;/span&gt;, &lt;span class="nv"&gt;userKey&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;null, &lt;span class="nv"&gt;lastUpdateTimeMs&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;1670523696171, &lt;span class="nv"&gt;userKeyString&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;null, &lt;span class="nv"&gt;digestString&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;&lt;span class="s1"&gt;'SQtiziAMAM4Wm5QSJge/BsY8+t8='&lt;/span&gt;&lt;span class="o"&gt;)&lt;/span&gt;
2022-12-08 18:21:36.258 GMT DEBUG record-parser - parsed message fields: &lt;span class="nv"&gt;key&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;Key&lt;span class="o"&gt;(&lt;/span&gt;&lt;span class="nv"&gt;namespace&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;&lt;span class="s1"&gt;'test'&lt;/span&gt;, &lt;span class="nb"&gt;set&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;testset, &lt;span class="nv"&gt;digest&lt;/span&gt;&lt;span class="o"&gt;=[&lt;/span&gt;109, &lt;span class="nt"&gt;-82&lt;/span&gt;, 24, 53, 35, 89, &lt;span class="nt"&gt;-72&lt;/span&gt;, &lt;span class="nt"&gt;-117&lt;/span&gt;, &lt;span class="nt"&gt;-22&lt;/span&gt;, 79, 119, &lt;span class="nt"&gt;-89&lt;/span&gt;, 56, &lt;span class="nt"&gt;-5&lt;/span&gt;, 0, &lt;span class="nt"&gt;-103&lt;/span&gt;, &lt;span class="nt"&gt;-54&lt;/span&gt;, 51, 25, 126], &lt;span class="nv"&gt;userKey&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;null, &lt;span class="nv"&gt;lastUpdateTimeMs&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;1670523696146, &lt;span class="nv"&gt;userKeyString&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;null, &lt;span class="nv"&gt;digestString&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;&lt;span class="s1"&gt;'ba4YNSNZuIvqT3enOPsAmcozGX4='&lt;/span&gt;&lt;span class="o"&gt;)&lt;/span&gt;
...
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;h2&gt;
  
  
  Conclusion
&lt;/h2&gt;

&lt;p&gt;Dependable resiliency is a core part of the Aerospike value proposition. Aerospike's XDR feature ensures that users can mitigate the risk of a cluster becoming unavailable by replicating data asynchronously from one data center to another. We show here how you can achieve this in a Kubernetes environment by using the xdr-proxy, allowing you to avoid network complications. All this has been achieved with minimum effort thanks to the Aerospike Kubernetes Operator.&lt;/p&gt;

</description>
      <category>programming</category>
      <category>webdev</category>
      <category>react</category>
      <category>redux</category>
    </item>
    <item>
      <title>Aerospike Kubernetes Talking Cluster to Cluster</title>
      <dc:creator>Naresh Maharaj</dc:creator>
      <pubDate>Mon, 16 Jan 2023 20:03:05 +0000</pubDate>
      <link>https://dev.to/naresh_maharaj_c4b8fbd4aa/aerospike-kubernetes-talking-cluster-to-cluster-27c0</link>
      <guid>https://dev.to/naresh_maharaj_c4b8fbd4aa/aerospike-kubernetes-talking-cluster-to-cluster-27c0</guid>
      <description>&lt;p&gt;&lt;a href="https://media2.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2Fb0n19ptasvz30knuwii6.png" class="article-body-image-wrapper"&gt;&lt;img src="https://media2.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2Fb0n19ptasvz30knuwii6.png" alt="Hello World" width="800" height="454"&gt;&lt;/a&gt;&lt;/p&gt;

&lt;p&gt;In this article we focus on establishing 2 separate Kubernetes clusters, each hosting the Aerospike low latency database. The goal is to be able to use Aerospike's Cross Data centre Replication tool ( XDR ) to send data from a source cluster to the destination cluster seamlessly. The problem sometimes encountered in exposing a cluster to the outside world is the organisation's security restriction policies. Placing a proxy server in front of the private Kubernetes destination cluster can overcome this problem and achieve the desired goal. To demonstrate the solution we will start by installing the Kubernetes operator that will schedule our source and destination databases.&lt;/p&gt;

&lt;h3&gt;
  
  
  Aerospike Kubernetes Operator
&lt;/h3&gt;

&lt;p&gt;The following Kubernetes nodes have been created using EKS. You can see the following private and public IP addresses from listing the nodes with the kubectl command.&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight shell"&gt;&lt;code&gt;kubectl get nodes &lt;span class="nt"&gt;-o&lt;/span&gt; wide
NAME                             STATUS   ROLES    AGE     VERSION                INTERNAL-IP      EXTERNAL-IP      OS-IMAGE         KERNEL-VERSION                 CONTAINER-RUNTIME
ip-192-168-11-132.ec2.internal   Ready    &amp;lt;none&amp;gt;   2m53s   v1.22.15-eks-fb459a0   192.168.11.132   44.201.67.177    Amazon Linux 2   5.4.219-126.411.amzn2.x86_64   docker://20.10.17
ip-192-168-31-131.ec2.internal   Ready    &amp;lt;none&amp;gt;   2m52s   v1.22.15-eks-fb459a0   192.168.31.131   44.192.83.79     Amazon Linux 2   5.4.219-126.411.amzn2.x86_64   docker://20.10.17
ip-192-168-41-140.ec2.internal   Ready    &amp;lt;none&amp;gt;   2m51s   v1.22.15-eks-fb459a0   192.168.41.140   18.208.222.35    Amazon Linux 2   5.4.219-126.411.amzn2.x86_64   docker://20.10.17
ip-192-168-41-63.ec2.internal    Ready    &amp;lt;none&amp;gt;   2m51s   v1.22.15-eks-fb459a0   192.168.41.63    54.173.138.131   Amazon Linux 2   5.4.219-126.411.amzn2.x86_64   docker://20.10.17
ip-192-168-59-220.ec2.internal   Ready    &amp;lt;none&amp;gt;   2m52s   v1.22.15-eks-fb459a0   192.168.59.220   54.227.122.222   Amazon Linux 2   5.4.219-126.411.amzn2.x86_64   docker://20.10.17
ip-192-168-6-124.ec2.internal    Ready    &amp;lt;none&amp;gt;   2m51s   v1.22.15-eks-fb459a0   192.168.6.124    35.174.60.1      Amazon Linux 2   5.4.219-126.411.amzn2.x86_64   docker://20.10.1
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;p&gt;Start by getting a copy of the Aerospike git repo for the k8s operator.&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight shell"&gt;&lt;code&gt;git clone https://github.com/aerospike/aerospike-kubernetes-operator.git
&lt;span class="nb"&gt;cp &lt;/span&gt;features.conf aerospike-kubernetes-operator/config/samples/secrets/.
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;p&gt;Install the Aerospike Kubernetes Operator.&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight shell"&gt;&lt;code&gt;&lt;span class="nb"&gt;cd &lt;/span&gt;aerospike-kubernetes-operator/
kubectl apply &lt;span class="nt"&gt;-f&lt;/span&gt; config/samples/storage/eks_ssd_storage_class.yaml
kubectl apply &lt;span class="nt"&gt;-f&lt;/span&gt; config/samples/storage/local_storage_class.yaml
curl &lt;span class="nt"&gt;-sL&lt;/span&gt; https://github.com/operator-framework/operator-lifecycle-manager/releases/download/v0.22.0/install.sh | bash &lt;span class="nt"&gt;-s&lt;/span&gt; v0.22.0
kubectl create &lt;span class="nt"&gt;-f&lt;/span&gt; https://operatorhub.io/install/aerospike-kubernetes-operator.yaml
kubectl get csv &lt;span class="nt"&gt;-n&lt;/span&gt; operators &lt;span class="nt"&gt;-w&lt;/span&gt;
cd ..
git clone https://github.com/nareshmaharaj-consultant/kubernetes-anything
&lt;span class="nb"&gt;cd &lt;/span&gt;kubernetes-anything
./createNamespace.sh aerospike
&lt;span class="nb"&gt;cd&lt;/span&gt; ../aerospike-kubernetes-operator/
kubectl &lt;span class="nt"&gt;-n&lt;/span&gt; aerospike create secret generic aerospike-secret &lt;span class="nt"&gt;--from-file&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;config/samples/secrets
kubectl &lt;span class="nt"&gt;-n&lt;/span&gt; aerospike create secret generic auth-secret &lt;span class="nt"&gt;--from-literal&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;&lt;span class="nv"&gt;password&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;&lt;span class="s1"&gt;'admin123'&lt;/span&gt;
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;p&gt;Install the &lt;strong&gt;&lt;em&gt;destination&lt;/em&gt;&lt;/strong&gt; cluster first by using the following configuration&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight yaml"&gt;&lt;code&gt;&lt;span class="na"&gt;apiVersion&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="s"&gt;asdb.aerospike.com/v1beta1&lt;/span&gt;
&lt;span class="na"&gt;kind&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="s"&gt;AerospikeCluster&lt;/span&gt;
&lt;span class="na"&gt;metadata&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt;
  &lt;span class="na"&gt;name&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="s"&gt;aerocluster-dest-xdr&lt;/span&gt;
  &lt;span class="na"&gt;namespace&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="s"&gt;aerospike&lt;/span&gt;

&lt;span class="na"&gt;spec&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt;
  &lt;span class="na"&gt;size&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="m"&gt;1&lt;/span&gt;
  &lt;span class="na"&gt;image&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="s"&gt;aerospike/aerospike-server-enterprise:6.1.0.2&lt;/span&gt;

  &lt;span class="na"&gt;storage&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt;
    &lt;span class="na"&gt;filesystemVolumePolicy&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt;
      &lt;span class="na"&gt;initMethod&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="s"&gt;deleteFiles&lt;/span&gt;
      &lt;span class="na"&gt;cascadeDelete&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="kc"&gt;true&lt;/span&gt;
    &lt;span class="na"&gt;blockVolumePolicy&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt;
      &lt;span class="na"&gt;cascadeDelete&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="kc"&gt;true&lt;/span&gt;
    &lt;span class="na"&gt;volumes&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt;
      &lt;span class="pi"&gt;-&lt;/span&gt; &lt;span class="na"&gt;name&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="s"&gt;workdir&lt;/span&gt;
        &lt;span class="na"&gt;aerospike&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt;
          &lt;span class="na"&gt;path&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="s"&gt;/opt/aerospike&lt;/span&gt;
        &lt;span class="na"&gt;source&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt;
          &lt;span class="na"&gt;persistentVolume&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt;
            &lt;span class="na"&gt;storageClass&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="s"&gt;ssd&lt;/span&gt;
            &lt;span class="na"&gt;volumeMode&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="s"&gt;Filesystem&lt;/span&gt;
            &lt;span class="na"&gt;size&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="s"&gt;1Gi&lt;/span&gt;
      &lt;span class="pi"&gt;-&lt;/span&gt; &lt;span class="na"&gt;name&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="s"&gt;ns&lt;/span&gt;
        &lt;span class="na"&gt;aerospike&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt;
          &lt;span class="na"&gt;path&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="s"&gt;/opt/aerospike/data/&lt;/span&gt;
        &lt;span class="na"&gt;source&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt;
          &lt;span class="na"&gt;persistentVolume&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt;
            &lt;span class="na"&gt;storageClass&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="s"&gt;ssd&lt;/span&gt;
            &lt;span class="na"&gt;volumeMode&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="s"&gt;Filesystem&lt;/span&gt;
            &lt;span class="na"&gt;size&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="s"&gt;1Gi&lt;/span&gt;
      &lt;span class="pi"&gt;-&lt;/span&gt; &lt;span class="na"&gt;name&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="s"&gt;aerospike-config-secret&lt;/span&gt;
        &lt;span class="na"&gt;source&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt;
          &lt;span class="na"&gt;secret&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt;
            &lt;span class="na"&gt;secretName&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="s"&gt;aerospike-secret&lt;/span&gt;
        &lt;span class="na"&gt;aerospike&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt;
          &lt;span class="na"&gt;path&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="s"&gt;/etc/aerospike/secret&lt;/span&gt;

  &lt;span class="na"&gt;podSpec&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt;
    &lt;span class="na"&gt;multiPodPerHost&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="kc"&gt;true&lt;/span&gt;

  &lt;span class="na"&gt;aerospikeAccessControl&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt;
    &lt;span class="na"&gt;roles&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt;
      &lt;span class="pi"&gt;-&lt;/span&gt; &lt;span class="na"&gt;name&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="s"&gt;writer&lt;/span&gt;
        &lt;span class="na"&gt;privileges&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt;
        &lt;span class="pi"&gt;-&lt;/span&gt; &lt;span class="s"&gt;read-write&lt;/span&gt;
      &lt;span class="pi"&gt;-&lt;/span&gt; &lt;span class="na"&gt;name&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="s"&gt;reader&lt;/span&gt;
        &lt;span class="na"&gt;privileges&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt;
        &lt;span class="pi"&gt;-&lt;/span&gt; &lt;span class="s"&gt;read&lt;/span&gt;
    &lt;span class="na"&gt;users&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt;
      &lt;span class="pi"&gt;-&lt;/span&gt; &lt;span class="na"&gt;name&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="s"&gt;admin&lt;/span&gt;
        &lt;span class="na"&gt;secretName&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="s"&gt;auth-secret&lt;/span&gt;
        &lt;span class="na"&gt;roles&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt;
          &lt;span class="pi"&gt;-&lt;/span&gt; &lt;span class="s"&gt;sys-admin&lt;/span&gt;
          &lt;span class="pi"&gt;-&lt;/span&gt; &lt;span class="s"&gt;user-admin&lt;/span&gt;
          &lt;span class="pi"&gt;-&lt;/span&gt; &lt;span class="s"&gt;read-write&lt;/span&gt;
      &lt;span class="pi"&gt;-&lt;/span&gt; &lt;span class="na"&gt;name&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="s"&gt;xdr-writer&lt;/span&gt;
        &lt;span class="na"&gt;secretName&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="s"&gt;xdr-user-auth-secret&lt;/span&gt;
        &lt;span class="na"&gt;roles&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt;
          &lt;span class="pi"&gt;-&lt;/span&gt; &lt;span class="s"&gt;writer&lt;/span&gt;

  &lt;span class="na"&gt;aerospikeConfig&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt;
    &lt;span class="na"&gt;service&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt;
      &lt;span class="na"&gt;feature-key-file&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="s"&gt;/etc/aerospike/secret/features.conf&lt;/span&gt;
    &lt;span class="na"&gt;security&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="pi"&gt;{}&lt;/span&gt;
    &lt;span class="na"&gt;network&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt;
      &lt;span class="na"&gt;service&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt;
        &lt;span class="na"&gt;port&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="m"&gt;3000&lt;/span&gt;
      &lt;span class="na"&gt;fabric&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt;
        &lt;span class="na"&gt;port&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="m"&gt;3001&lt;/span&gt;
      &lt;span class="na"&gt;heartbeat&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt;
        &lt;span class="na"&gt;port&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="m"&gt;3002&lt;/span&gt;
    &lt;span class="na"&gt;namespaces&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt;
      &lt;span class="pi"&gt;-&lt;/span&gt; &lt;span class="na"&gt;name&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="s"&gt;test&lt;/span&gt;
        &lt;span class="na"&gt;memory-size&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="m"&gt;134217728&lt;/span&gt;
        &lt;span class="na"&gt;replication-factor&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="m"&gt;1&lt;/span&gt;
        &lt;span class="na"&gt;storage-engine&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt;
          &lt;span class="na"&gt;type&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="s"&gt;device&lt;/span&gt;
          &lt;span class="na"&gt;files&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt;
            &lt;span class="pi"&gt;-&lt;/span&gt; &lt;span class="s"&gt;/opt/aerospike/data/test.dat&lt;/span&gt;
          &lt;span class="na"&gt;filesize&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="m"&gt;1073741824&lt;/span&gt;
          &lt;span class="na"&gt;data-in-memory&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="kc"&gt;true&lt;/span&gt;
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;p&gt;Create the Aerospike destination:&lt;/p&gt;

&lt;ul&gt;
&lt;li&gt;database login credentials using k8s secrets&lt;/li&gt;
&lt;li&gt;schedule the destination single node database cluster.
&lt;/li&gt;
&lt;/ul&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight shell"&gt;&lt;code&gt;&lt;span class="nb"&gt;export &lt;/span&gt;&lt;span class="nv"&gt;secret_auth_name&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;xdr-user-auth-secret
&lt;span class="nb"&gt;export &lt;/span&gt;&lt;span class="nv"&gt;password_secret&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;admin123
kubectl &lt;span class="nt"&gt;-n&lt;/span&gt; aerospike create secret generic &lt;span class="nv"&gt;$secret_auth_name&lt;/span&gt; &lt;span class="nt"&gt;--from-literal&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;&lt;span class="nv"&gt;password&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;&lt;span class="nv"&gt;$password_secret&lt;/span&gt;
kubectl create &lt;span class="nt"&gt;-f&lt;/span&gt; ssd1_xdr_dest_6.1_cluster_cr.yaml
kubectl &lt;span class="nt"&gt;-n&lt;/span&gt; aerospike get po &lt;span class="nt"&gt;-w&lt;/span&gt;
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;p&gt;You should see the database pods successfully running.&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight shell"&gt;&lt;code&gt;kubectl get po &lt;span class="nt"&gt;-n&lt;/span&gt; aerospike &lt;span class="nt"&gt;-w&lt;/span&gt;
NAME                       READY   STATUS     RESTARTS   AGE
aerocluster-dest-xdr-0-0   0/1     Init:0/1   0          13s
aerocluster-dest-xdr-0-0   0/1     Init:0/1   0          18s
aerocluster-dest-xdr-0-0   0/1     PodInitializing   0          19s
aerocluster-dest-xdr-0-0   1/1     Running           0          24s
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;p&gt;Next we set up the xdr-proxy. If we look back at the main title image above, you will notice that we are working from the right-hand side to the left-hand side. &lt;/p&gt;

&lt;p&gt;Go ahead and create the following &lt;strong&gt;&lt;em&gt;xdr-proxy&lt;/em&gt;&lt;/strong&gt; configuration file. Replace the seed address with the fully qualified domain name (FQDN) of the destination database pod(s) you created earlier. If you have more than one seed address, now is the time to add them to the yaml list.&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight yaml"&gt;&lt;code&gt;&lt;span class="s"&gt;cd ..&lt;/span&gt;
&lt;span class="s"&gt;mkdir -p xdr-cfg/etc/auth&lt;/span&gt;
&lt;span class="s"&gt;cd xdr-cfg/etc/&lt;/span&gt;

&lt;span class="s"&gt;cat &amp;lt;&amp;lt;EOF&amp;gt; aerospike-xdr-proxy.yml&lt;/span&gt;
&lt;span class="c1"&gt;# Change the configuration for your use case.&lt;/span&gt;
&lt;span class="c1"&gt;# Naresh Maharaj&lt;/span&gt;
&lt;span class="c1"&gt;# Refer to https://www.aerospike.com/docs/connectors/enterprise/xdr-proxy/configuration/index.html&lt;/span&gt;
&lt;span class="c1"&gt;# for details.&lt;/span&gt;

&lt;span class="c1"&gt;# The connector's listening ports, manage service, TLS and network interface.&lt;/span&gt;
&lt;span class="na"&gt;service&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt;
  &lt;span class="na"&gt;port&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="m"&gt;8901&lt;/span&gt;
  &lt;span class="c1"&gt;# Aerospike Enterprise Server &amp;gt;= 5.0&lt;/span&gt;
  &lt;span class="na"&gt;manage&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt;
    &lt;span class="na"&gt;address&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="s"&gt;0.0.0.0&lt;/span&gt;
    &lt;span class="na"&gt;port&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="m"&gt;8902&lt;/span&gt;

&lt;span class="c1"&gt;# The destination aerospike cluster.&lt;/span&gt;
&lt;span class="na"&gt;aerospike&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt;
  &lt;span class="na"&gt;seeds&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt;
    &lt;span class="pi"&gt;-&lt;/span&gt; &lt;span class="na"&gt;aerocluster-dest-xdr-0-0.aerospike.svc.cluster.local&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt;
        &lt;span class="na"&gt;port&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="m"&gt;3000&lt;/span&gt;
  &lt;span class="na"&gt;credentials&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt;
    &lt;span class="na"&gt;username&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="s"&gt;admin&lt;/span&gt;
    &lt;span class="na"&gt;password-file&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="s"&gt;/etc/aerospike-xdr-proxy/auth/password_DC1.txt&lt;/span&gt;
    &lt;span class="na"&gt;auth-mode&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="s"&gt;internal&lt;/span&gt;

&lt;span class="c1"&gt;# The logging config&lt;/span&gt;
&lt;span class="na"&gt;logging&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt;
  &lt;span class="na"&gt;enable-console-logging&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="kc"&gt;true&lt;/span&gt;
  &lt;span class="na"&gt;file&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="s"&gt;/var/log/aerospike-xdr-proxy/aerospike-xdr-proxy.log&lt;/span&gt;
  &lt;span class="na"&gt;levels&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt;
    &lt;span class="na"&gt;root&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="s"&gt;debug&lt;/span&gt;
    &lt;span class="na"&gt;record-parser&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="s"&gt;debug&lt;/span&gt;
    &lt;span class="na"&gt;server&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="s"&gt;debug&lt;/span&gt;
    &lt;span class="na"&gt;com.aerospike.connect&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="s"&gt;debug&lt;/span&gt;
  &lt;span class="c1"&gt;# Ticker log interval in seconds&lt;/span&gt;
  &lt;span class="na"&gt;ticker-interval&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="m"&gt;3600&lt;/span&gt;
&lt;span class="s"&gt;EOF&lt;/span&gt;

&lt;span class="s"&gt;sudo tee auth/password_DC1.txt &amp;lt;&amp;lt;EOF&lt;/span&gt;
&lt;span class="s"&gt;admin123&lt;/span&gt;
&lt;span class="s"&gt;EOF&lt;/span&gt;

&lt;span class="s"&gt;kubectl -n aerospike create configmap xdr-proxy-cfg --from-file=etc/&lt;/span&gt;
&lt;span class="s"&gt;kubectl -n aerospike create secret generic xdr-proxy-auth-secret --from-file=etc/auth&lt;/span&gt;
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;p&gt;Now that we have our xdr-proxy config file created, we can produce the &lt;strong&gt;&lt;em&gt;kubernetes deployment&lt;/em&gt;&lt;/strong&gt; yaml file for the xdr-proxy itself. The following yaml file is used to deploy our xdr-proxy, ideally in the same data centre or location where the destination databases will be hosted.&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight yaml"&gt;&lt;code&gt;&lt;span class="s"&gt;cat &amp;lt;&amp;lt;EOF&amp;gt; xdr-proxy-deployment.yaml&lt;/span&gt;
&lt;span class="na"&gt;apiVersion&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="s"&gt;apps/v1&lt;/span&gt;
&lt;span class="na"&gt;kind&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="s"&gt;Deployment&lt;/span&gt;
&lt;span class="na"&gt;metadata&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt;
  &lt;span class="na"&gt;name&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="s"&gt;xdr-proxy&lt;/span&gt;
  &lt;span class="na"&gt;namespace&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="s"&gt;aerospike&lt;/span&gt;
  &lt;span class="na"&gt;labels&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt;
    &lt;span class="na"&gt;app&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="s"&gt;xdr-proxy&lt;/span&gt;
&lt;span class="na"&gt;spec&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt;
  &lt;span class="na"&gt;replicas&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="m"&gt;2&lt;/span&gt;
  &lt;span class="na"&gt;selector&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt;
    &lt;span class="na"&gt;matchLabels&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt;
      &lt;span class="na"&gt;app&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="s"&gt;xdr-proxy&lt;/span&gt;
  &lt;span class="na"&gt;template&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt;
    &lt;span class="na"&gt;metadata&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt;
      &lt;span class="na"&gt;labels&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt;
        &lt;span class="na"&gt;app&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="s"&gt;xdr-proxy&lt;/span&gt;
    &lt;span class="na"&gt;spec&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt;
      &lt;span class="na"&gt;containers&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt;
      &lt;span class="pi"&gt;-&lt;/span&gt; &lt;span class="na"&gt;name&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="s"&gt;xdr-proxy&lt;/span&gt;
        &lt;span class="na"&gt;image&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="s"&gt;aerospike/aerospike-xdr-proxy:2.1.0&lt;/span&gt;
        &lt;span class="na"&gt;volumeMounts&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt;
        &lt;span class="pi"&gt;-&lt;/span&gt; &lt;span class="na"&gt;name&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="s"&gt;xdr-proxy-dir&lt;/span&gt;
          &lt;span class="na"&gt;mountPath&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="s2"&gt;"&lt;/span&gt;&lt;span class="s"&gt;/etc/aerospike-xdr-proxy/"&lt;/span&gt;
          &lt;span class="na"&gt;readOnly&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="kc"&gt;true&lt;/span&gt;
        &lt;span class="pi"&gt;-&lt;/span&gt; &lt;span class="na"&gt;name&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="s"&gt;xdr-auth-dir&lt;/span&gt;
          &lt;span class="na"&gt;mountPath&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="s2"&gt;"&lt;/span&gt;&lt;span class="s"&gt;/etc/aerospike-xdr-proxy/auth"&lt;/span&gt;
          &lt;span class="na"&gt;readOnly&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="kc"&gt;true&lt;/span&gt;
        &lt;span class="na"&gt;ports&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt;
          &lt;span class="pi"&gt;-&lt;/span&gt; &lt;span class="na"&gt;name&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="s"&gt;xdr-proxy-main&lt;/span&gt;
            &lt;span class="na"&gt;containerPort&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="m"&gt;8901&lt;/span&gt;
          &lt;span class="pi"&gt;-&lt;/span&gt; &lt;span class="na"&gt;name&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="s"&gt;xdr-proxy-mng&lt;/span&gt;
            &lt;span class="na"&gt;containerPort&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="m"&gt;8902&lt;/span&gt;
      &lt;span class="na"&gt;volumes&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt;
      &lt;span class="pi"&gt;-&lt;/span&gt; &lt;span class="na"&gt;name&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="s"&gt;xdr-proxy-dir&lt;/span&gt;
        &lt;span class="na"&gt;configMap&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt;
          &lt;span class="na"&gt;name&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="s"&gt;xdr-proxy-cfg&lt;/span&gt;
          &lt;span class="na"&gt;optional&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="kc"&gt;false&lt;/span&gt;
      &lt;span class="pi"&gt;-&lt;/span&gt; &lt;span class="na"&gt;name&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="s"&gt;xdr-auth-dir&lt;/span&gt;
        &lt;span class="na"&gt;secret&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt;
          &lt;span class="na"&gt;secretName&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="s"&gt;xdr-proxy-auth-secret&lt;/span&gt;
          &lt;span class="na"&gt;optional&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="kc"&gt;false&lt;/span&gt;
&lt;span class="nn"&gt;---&lt;/span&gt;
&lt;span class="na"&gt;apiVersion&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="s"&gt;v1&lt;/span&gt;
&lt;span class="na"&gt;kind&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="s"&gt;Service&lt;/span&gt;
&lt;span class="na"&gt;metadata&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt;
  &lt;span class="na"&gt;name&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="s"&gt;xdr-proxy&lt;/span&gt;
  &lt;span class="na"&gt;namespace&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="s"&gt;aerospike&lt;/span&gt;
&lt;span class="na"&gt;spec&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt;
  &lt;span class="na"&gt;selector&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt;
    &lt;span class="na"&gt;app&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="s"&gt;xdr-proxy&lt;/span&gt;
  &lt;span class="na"&gt;ports&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt;
  &lt;span class="pi"&gt;-&lt;/span&gt; &lt;span class="na"&gt;name&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="s"&gt;main&lt;/span&gt;
    &lt;span class="na"&gt;protocol&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="s"&gt;TCP&lt;/span&gt;
    &lt;span class="na"&gt;port&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="m"&gt;8901&lt;/span&gt;
    &lt;span class="na"&gt;targetPort&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="s"&gt;xdr-proxy-main&lt;/span&gt;
  &lt;span class="pi"&gt;-&lt;/span&gt; &lt;span class="na"&gt;name&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="s"&gt;manage&lt;/span&gt;
    &lt;span class="na"&gt;protocol&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="s"&gt;TCP&lt;/span&gt;
    &lt;span class="na"&gt;port&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="m"&gt;8902&lt;/span&gt;
    &lt;span class="na"&gt;targetPort&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="s"&gt;xdr-proxy-mng&lt;/span&gt;
&lt;span class="s"&gt;EOF&lt;/span&gt;

&lt;span class="s"&gt;kubectl create -f xdr-proxy-deployment.yaml&lt;/span&gt;
&lt;span class="s"&gt;kubectl get po -n aerospike -w&lt;/span&gt;
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;p&gt;The following shows the current scheduled pods. So far, so good.&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight shell"&gt;&lt;code&gt;kubectl get po &lt;span class="nt"&gt;-n&lt;/span&gt; aerospike &lt;span class="nt"&gt;-w&lt;/span&gt;
NAME                         READY   STATUS    RESTARTS   AGE
aerocluster-dest-xdr-0-0     1/1     Running   0          77m
xdr-proxy-7d9fccd6c8-g5mjt   1/1     Running   0          2m26s
xdr-proxy-7d9fccd6c8-mjxp4   1/1     Running   0          2m26s
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;p&gt;Create the &lt;strong&gt;&lt;em&gt;source&lt;/em&gt;&lt;/strong&gt; cluster using the following configuration&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight yaml"&gt;&lt;code&gt;&lt;span class="s"&gt;cd ../aerospike-kubernetes-operator/&lt;/span&gt;

&lt;span class="s"&gt;cat &amp;lt;&amp;lt;EOF&amp;gt; ssd1_xdr_src_6.1_cluster_cr.yaml&lt;/span&gt;
&lt;span class="na"&gt;apiVersion&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="s"&gt;asdb.aerospike.com/v1beta1&lt;/span&gt;
&lt;span class="na"&gt;kind&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="s"&gt;AerospikeCluster&lt;/span&gt;
&lt;span class="na"&gt;metadata&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt;
  &lt;span class="na"&gt;name&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="s"&gt;aerocluster-source-xdr&lt;/span&gt;
  &lt;span class="na"&gt;namespace&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="s"&gt;aerospike&lt;/span&gt;

&lt;span class="na"&gt;spec&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt;
  &lt;span class="na"&gt;size&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="m"&gt;1&lt;/span&gt;
  &lt;span class="na"&gt;image&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="s"&gt;aerospike/aerospike-server-enterprise:6.1.0.2&lt;/span&gt;

  &lt;span class="na"&gt;storage&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt;
    &lt;span class="na"&gt;filesystemVolumePolicy&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt;
      &lt;span class="na"&gt;initMethod&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="s"&gt;deleteFiles&lt;/span&gt;
      &lt;span class="na"&gt;cascadeDelete&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="kc"&gt;true&lt;/span&gt;
    &lt;span class="na"&gt;blockVolumePolicy&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt;
      &lt;span class="na"&gt;cascadeDelete&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="kc"&gt;true&lt;/span&gt;
    &lt;span class="na"&gt;volumes&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt;
      &lt;span class="pi"&gt;-&lt;/span&gt; &lt;span class="na"&gt;name&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="s"&gt;workdir&lt;/span&gt;
        &lt;span class="na"&gt;aerospike&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt;
          &lt;span class="na"&gt;path&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="s"&gt;/opt/aerospike&lt;/span&gt;
        &lt;span class="na"&gt;source&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt;
          &lt;span class="na"&gt;persistentVolume&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt;
            &lt;span class="na"&gt;storageClass&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="s"&gt;ssd&lt;/span&gt;
            &lt;span class="na"&gt;volumeMode&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="s"&gt;Filesystem&lt;/span&gt;
            &lt;span class="na"&gt;size&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="s"&gt;1Gi&lt;/span&gt;
      &lt;span class="pi"&gt;-&lt;/span&gt; &lt;span class="na"&gt;name&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="s"&gt;ns&lt;/span&gt;
        &lt;span class="na"&gt;aerospike&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt;
          &lt;span class="na"&gt;path&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="s"&gt;/opt/aerospike/data/&lt;/span&gt;
        &lt;span class="na"&gt;source&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt;
          &lt;span class="na"&gt;persistentVolume&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt;
            &lt;span class="na"&gt;storageClass&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="s"&gt;ssd&lt;/span&gt;
            &lt;span class="na"&gt;volumeMode&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="s"&gt;Filesystem&lt;/span&gt;
            &lt;span class="na"&gt;size&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="s"&gt;1Gi&lt;/span&gt;
      &lt;span class="pi"&gt;-&lt;/span&gt; &lt;span class="na"&gt;name&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="s"&gt;aerospike-config-secret&lt;/span&gt;
        &lt;span class="na"&gt;source&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt;
          &lt;span class="na"&gt;secret&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt;
            &lt;span class="na"&gt;secretName&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="s"&gt;aerospike-secret&lt;/span&gt;
        &lt;span class="na"&gt;aerospike&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt;
          &lt;span class="na"&gt;path&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="s"&gt;/etc/aerospike/secret&lt;/span&gt;

  &lt;span class="na"&gt;podSpec&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt;
    &lt;span class="na"&gt;multiPodPerHost&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="kc"&gt;true&lt;/span&gt;

  &lt;span class="na"&gt;aerospikeAccessControl&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt;
    &lt;span class="na"&gt;roles&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt;
      &lt;span class="pi"&gt;-&lt;/span&gt; &lt;span class="na"&gt;name&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="s"&gt;writer&lt;/span&gt;
        &lt;span class="na"&gt;privileges&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt;
        &lt;span class="pi"&gt;-&lt;/span&gt; &lt;span class="s"&gt;read-write&lt;/span&gt;
      &lt;span class="pi"&gt;-&lt;/span&gt; &lt;span class="na"&gt;name&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="s"&gt;reader&lt;/span&gt;
        &lt;span class="na"&gt;privileges&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt;
        &lt;span class="pi"&gt;-&lt;/span&gt; &lt;span class="s"&gt;read&lt;/span&gt;
    &lt;span class="na"&gt;users&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt;
      &lt;span class="pi"&gt;-&lt;/span&gt; &lt;span class="na"&gt;name&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="s"&gt;admin&lt;/span&gt;
        &lt;span class="na"&gt;secretName&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="s"&gt;auth-secret&lt;/span&gt;
        &lt;span class="na"&gt;roles&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt;
          &lt;span class="pi"&gt;-&lt;/span&gt; &lt;span class="s"&gt;sys-admin&lt;/span&gt;
          &lt;span class="pi"&gt;-&lt;/span&gt; &lt;span class="s"&gt;user-admin&lt;/span&gt;
          &lt;span class="pi"&gt;-&lt;/span&gt; &lt;span class="s"&gt;read-write&lt;/span&gt;

  &lt;span class="na"&gt;aerospikeConfig&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt;
    &lt;span class="na"&gt;service&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt;
      &lt;span class="na"&gt;feature-key-file&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="s"&gt;/etc/aerospike/secret/features.conf&lt;/span&gt;
    &lt;span class="na"&gt;security&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="pi"&gt;{}&lt;/span&gt;
    &lt;span class="na"&gt;network&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt;
      &lt;span class="na"&gt;service&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt;
        &lt;span class="na"&gt;port&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="m"&gt;3000&lt;/span&gt;
      &lt;span class="na"&gt;fabric&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt;
        &lt;span class="na"&gt;port&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="m"&gt;3001&lt;/span&gt;
      &lt;span class="na"&gt;heartbeat&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt;
        &lt;span class="na"&gt;port&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="m"&gt;3002&lt;/span&gt;
    &lt;span class="na"&gt;xdr&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt;
      &lt;span class="na"&gt;dcs&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt;
        &lt;span class="pi"&gt;-&lt;/span&gt; &lt;span class="na"&gt;name&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="s"&gt;DC2&lt;/span&gt;
          &lt;span class="na"&gt;connector&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="kc"&gt;true&lt;/span&gt;
          &lt;span class="na"&gt;node-address-ports&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt;
            &lt;span class="pi"&gt;-&lt;/span&gt; &lt;span class="s"&gt;xdr-proxy.aerospike.svc.cluster.local &lt;/span&gt;&lt;span class="m"&gt;8901&lt;/span&gt;
          &lt;span class="na"&gt;namespaces&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt;
            &lt;span class="pi"&gt;-&lt;/span&gt; &lt;span class="na"&gt;name&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="s"&gt;test&lt;/span&gt;
    &lt;span class="na"&gt;namespaces&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt;
      &lt;span class="pi"&gt;-&lt;/span&gt; &lt;span class="na"&gt;name&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="s"&gt;test&lt;/span&gt;
        &lt;span class="na"&gt;memory-size&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="m"&gt;134217728&lt;/span&gt;
        &lt;span class="na"&gt;replication-factor&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="m"&gt;1&lt;/span&gt;
        &lt;span class="na"&gt;storage-engine&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt;
          &lt;span class="na"&gt;type&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="s"&gt;device&lt;/span&gt;
          &lt;span class="na"&gt;files&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt;
            &lt;span class="pi"&gt;-&lt;/span&gt; &lt;span class="s"&gt;/opt/aerospike/data/test.dat&lt;/span&gt;
          &lt;span class="na"&gt;filesize&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="m"&gt;1073741824&lt;/span&gt;
          &lt;span class="na"&gt;data-in-memory&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="kc"&gt;true&lt;/span&gt;
&lt;span class="s"&gt;EOF&lt;/span&gt;

&lt;span class="s"&gt;kubectl create -f ssd1_xdr_src_6.1_cluster_cr.yaml&lt;/span&gt;
&lt;span class="s"&gt;kubectl get po -n aerospike -w&lt;/span&gt;
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;p&gt;From the source cluster, confirm the xdr component has made a connection to the xdr-proxy by filtering the k8s log file as shown in the following kubectl command.&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight shell"&gt;&lt;code&gt;kubectl &lt;span class="nt"&gt;-n&lt;/span&gt; aerospike logs aerocluster-source-xdr-0-0 &lt;span class="nt"&gt;-c&lt;/span&gt; aerospike-server | &lt;span class="nb"&gt;grep &lt;/span&gt;xdr | &lt;span class="nb"&gt;grep &lt;/span&gt;conn
Dec 08 2022 13:49:21 GMT: INFO &lt;span class="o"&gt;(&lt;/span&gt;xdr&lt;span class="o"&gt;)&lt;/span&gt;: &lt;span class="o"&gt;(&lt;/span&gt;dc.c:581&lt;span class="o"&gt;)&lt;/span&gt; DC DC2 connected
Oct 10 2022 13:57:53 GMT: INFO &lt;span class="o"&gt;(&lt;/span&gt;xdr&lt;span class="o"&gt;)&lt;/span&gt;: &lt;span class="o"&gt;(&lt;/span&gt;dc.c:581&lt;span class="o"&gt;)&lt;/span&gt; DC DC2 connected
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;p&gt;Add some data to the source database and confirm it is being received in the destination cluster. Start by getting the source database service address and connect using Aerospike's command-line tool, aql.&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight shell"&gt;&lt;code&gt;kubectl get svc &lt;span class="nt"&gt;-n&lt;/span&gt; aerospike
NAME                         TYPE        CLUSTER-IP       EXTERNAL-IP   PORT&lt;span class="o"&gt;(&lt;/span&gt;S&lt;span class="o"&gt;)&lt;/span&gt;             AGE
aerocluster-dest-xdr         ClusterIP   None             &amp;lt;none&amp;gt;        3000/TCP            3h38m
aerocluster-dest-xdr-0-0     NodePort    10.100.226.179   &amp;lt;none&amp;gt;        3000:30168/TCP      3h38m
aerocluster-source-xdr       ClusterIP   None             &amp;lt;none&amp;gt;        3000/TCP            33m
aerocluster-source-xdr-0-0   NodePort    10.100.116.173   &amp;lt;none&amp;gt;        3000:31999/TCP      33m
xdr-proxy                    ClusterIP   10.100.44.96     &amp;lt;none&amp;gt;        8901/TCP,8902/TCP   41m
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;





&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight shell"&gt;&lt;code&gt;kubectl run &lt;span class="nt"&gt;-it&lt;/span&gt; &lt;span class="nt"&gt;--rm&lt;/span&gt; &lt;span class="nt"&gt;--restart&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;Never aerospike-tool &lt;span class="nt"&gt;-n&lt;/span&gt; aerospike &lt;span class="nt"&gt;--image&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;aerospike/aerospike-tools:latest &lt;span class="nt"&gt;--&lt;/span&gt; aql &lt;span class="nt"&gt;-U&lt;/span&gt; admin &lt;span class="nt"&gt;-P&lt;/span&gt; admin123 &lt;span class="nt"&gt;-h&lt;/span&gt; aerocluster-source-xdr-0-0
Insert a &lt;span class="nb"&gt;source &lt;/span&gt;record using the following &lt;span class="nb"&gt;command &lt;/span&gt;&lt;span class="k"&gt;in &lt;/span&gt;aql
insert into test.a1 &lt;span class="o"&gt;(&lt;/span&gt;PK,a,b,c,d&lt;span class="o"&gt;)&lt;/span&gt; values&lt;span class="o"&gt;(&lt;/span&gt;1,&lt;span class="s2"&gt;"A"&lt;/span&gt;,&lt;span class="s2"&gt;"B"&lt;/span&gt;,&lt;span class="s2"&gt;"C"&lt;/span&gt;,&lt;span class="s2"&gt;"D"&lt;/span&gt;&lt;span class="o"&gt;)&lt;/span&gt;
OK, 1 record affected.

aql&amp;gt; &lt;span class="k"&gt;select&lt;/span&gt; &lt;span class="k"&gt;*&lt;/span&gt; from &lt;span class="nb"&gt;test&lt;/span&gt;
+----+-----+-----+-----+-----+
| PK | a   | b   | c   | d   |
+----+-----+-----+-----+-----+
| 1  | &lt;span class="s2"&gt;"A"&lt;/span&gt; | &lt;span class="s2"&gt;"B"&lt;/span&gt; | &lt;span class="s2"&gt;"C"&lt;/span&gt; | &lt;span class="s2"&gt;"D"&lt;/span&gt; |
+----+-----+-----+-----+-----+
1 row &lt;span class="k"&gt;in &lt;/span&gt;&lt;span class="nb"&gt;set&lt;/span&gt; &lt;span class="o"&gt;(&lt;/span&gt;0.023 secs&lt;span class="o"&gt;)&lt;/span&gt;
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;p&gt;Connect to the destination cluster in the same way and confirm data has successfully arrived.&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight shell"&gt;&lt;code&gt;kubectl get svc &lt;span class="nt"&gt;-n&lt;/span&gt; aerospike
NAME                         TYPE        CLUSTER-IP       EXTERNAL-IP   PORT&lt;span class="o"&gt;(&lt;/span&gt;S&lt;span class="o"&gt;)&lt;/span&gt;             AGE
aerocluster-dest-xdr         ClusterIP   None             &amp;lt;none&amp;gt;        3000/TCP            3h38m
aerocluster-dest-xdr-0-0     NodePort    10.100.226.179   &amp;lt;none&amp;gt;        3000:30168/TCP      3h38m
aerocluster-source-xdr       ClusterIP   None             &amp;lt;none&amp;gt;        3000/TCP            33m
aerocluster-source-xdr-0-0   NodePort    10.100.116.173   &amp;lt;none&amp;gt;        3000:31999/TCP      33m
xdr-proxy                    ClusterIP   10.100.44.96     &amp;lt;none&amp;gt;        8901/TCP,8902/TCP   41m
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;





&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight shell"&gt;&lt;code&gt;kubectl run &lt;span class="nt"&gt;-it&lt;/span&gt; &lt;span class="nt"&gt;--rm&lt;/span&gt; &lt;span class="nt"&gt;--restart&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;Never aerospike-tool &lt;span class="nt"&gt;-n&lt;/span&gt; aerospike &lt;span class="nt"&gt;--image&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;aerospike/aerospike-tools:latest &lt;span class="nt"&gt;--&lt;/span&gt; aql &lt;span class="nt"&gt;-U&lt;/span&gt; admin &lt;span class="nt"&gt;-P&lt;/span&gt; admin123 &lt;span class="nt"&gt;-h&lt;/span&gt; aerocluster-dest-xdr-0-0
Run the following &lt;span class="k"&gt;select &lt;/span&gt;query &lt;span class="k"&gt;in &lt;/span&gt;the destination cluster.
aql&amp;gt; &lt;span class="k"&gt;select&lt;/span&gt; &lt;span class="k"&gt;*&lt;/span&gt; from &lt;span class="nb"&gt;test&lt;/span&gt;
+----+-----+-----+-----+-----+
| PK | a   | b   | c   | d   |
+----+-----+-----+-----+-----+
| 1  | &lt;span class="s2"&gt;"A"&lt;/span&gt; | &lt;span class="s2"&gt;"B"&lt;/span&gt; | &lt;span class="s2"&gt;"C"&lt;/span&gt; | &lt;span class="s2"&gt;"D"&lt;/span&gt; |
+----+-----+-----+-----+-----+
1 row &lt;span class="k"&gt;in &lt;/span&gt;&lt;span class="nb"&gt;set&lt;/span&gt; &lt;span class="o"&gt;(&lt;/span&gt;0.031 secs&lt;span class="o"&gt;)&lt;/span&gt;

OK
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;p&gt;&lt;u&gt;Interim summary&lt;/u&gt;: At this point, it's confirmed that the xdr-proxy is doing exactly what we need it to do.&lt;/p&gt;

&lt;p&gt;If you now review the logs for the initial 2 xdr-proxies that were scheduled you should indeed see userKey=1.&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight shell"&gt;&lt;code&gt;kubectl logs xdr-proxy-7d9fccd6c8-5q7gn &lt;span class="nt"&gt;-n&lt;/span&gt; aerospike | &lt;span class="nb"&gt;grep &lt;/span&gt;record-parser
2022-12-08 14:53:50.607 GMT DEBUG record-parser - parsed message fields: &lt;span class="nv"&gt;key&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;Key&lt;span class="o"&gt;(&lt;/span&gt;&lt;span class="nv"&gt;namespace&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;&lt;span class="s1"&gt;'test'&lt;/span&gt;, &lt;span class="nb"&gt;set&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;a1, &lt;span class="nv"&gt;digest&lt;/span&gt;&lt;span class="o"&gt;=[&lt;/span&gt;120, 48, &lt;span class="nt"&gt;-23&lt;/span&gt;, &lt;span class="nt"&gt;-90&lt;/span&gt;, 110, 126, 84, &lt;span class="nt"&gt;-1&lt;/span&gt;, 114, &lt;span class="nt"&gt;-116&lt;/span&gt;, &lt;span class="nt"&gt;-9&lt;/span&gt;, &lt;span class="nt"&gt;-21&lt;/span&gt;, 28, 75, 126, &lt;span class="nt"&gt;-68&lt;/span&gt;, &lt;span class="nt"&gt;-51&lt;/span&gt;, 83, 31, &lt;span class="nt"&gt;-117&lt;/span&gt;&lt;span class="o"&gt;]&lt;/span&gt;, &lt;span class="nv"&gt;userKey&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;1, &lt;span class="nv"&gt;lastUpdateTimeMs&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;1670511230565, &lt;span class="nv"&gt;userKeyString&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;1, &lt;span class="nv"&gt;digestString&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;&lt;span class="s1"&gt;'eDDppm5+VP9yjPfrHEt+vM1TH4s='&lt;/span&gt;&lt;span class="o"&gt;)&lt;/span&gt;

kubectl logs xdr-proxy-7d9fccd6c8-f5zkt &lt;span class="nt"&gt;-n&lt;/span&gt; aerospike | &lt;span class="nb"&gt;grep &lt;/span&gt;record-parser
&lt;span class="o"&gt;(&lt;/span&gt;none&lt;span class="o"&gt;)&lt;/span&gt;
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;p&gt;Scale up the xdr-proxy to 6 pods.&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight yaml"&gt;&lt;code&gt;&lt;span class="na"&gt;apiVersion&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="s"&gt;apps/v1&lt;/span&gt;
&lt;span class="na"&gt;kind&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="s"&gt;Deployment&lt;/span&gt;
&lt;span class="na"&gt;metadata&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt;
  &lt;span class="na"&gt;name&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="s"&gt;xdr-proxy&lt;/span&gt;
  &lt;span class="na"&gt;namespace&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="s"&gt;aerospike&lt;/span&gt;
  &lt;span class="na"&gt;labels&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt;
    &lt;span class="na"&gt;app&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="s"&gt;xdr-proxy&lt;/span&gt;
&lt;span class="na"&gt;spec&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt;
  &lt;span class="na"&gt;replicas&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="m"&gt;6&lt;/span&gt;
  &lt;span class="na"&gt;selector&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt;
    &lt;span class="na"&gt;matchLabels&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt;
      &lt;span class="na"&gt;app&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="s"&gt;xdr-proxy&lt;/span&gt;
&lt;span class="nn"&gt;...&lt;/span&gt;
&lt;span class="s"&gt;kubectl apply -f xdr-proxy-deployment.yaml&lt;/span&gt;
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;p&gt;We should now have 6 instances of the xdr-proxies running.&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight shell"&gt;&lt;code&gt;kubectl get po &lt;span class="nt"&gt;-n&lt;/span&gt; aerospike &lt;span class="nt"&gt;-w&lt;/span&gt;
NAME                         READY   STATUS    RESTARTS   AGE
aerocluster-dest-xdr-0-0     1/1     Running   0          3h50m
aerocluster-source-xdr-0-0   1/1     Running   0          75m
xdr-proxy-7d9fccd6c8-49ttl   1/1     Running   0          7s
xdr-proxy-7d9fccd6c8-5q7gn   1/1     Running   0          83m
xdr-proxy-7d9fccd6c8-c4j7k   1/1     Running   0          7s
xdr-proxy-7d9fccd6c8-f5zkt   1/1     Running   0          83m
xdr-proxy-7d9fccd6c8-lscbg   1/1     Running   0          7s
xdr-proxy-7d9fccd6c8-r56vs   1/1     Running   0          7s
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;p&gt;Let's add some sample messages from our source XDR cluster with primary keys 5, 6, and 7.&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight shell"&gt;&lt;code&gt;aql&amp;gt; insert into &lt;span class="nb"&gt;test&lt;/span&gt; &lt;span class="o"&gt;(&lt;/span&gt;PK,a,b&lt;span class="o"&gt;)&lt;/span&gt; values &lt;span class="o"&gt;(&lt;/span&gt;5,&lt;span class="s2"&gt;"A"&lt;/span&gt;,&lt;span class="s2"&gt;"B"&lt;/span&gt;&lt;span class="o"&gt;)&lt;/span&gt;
OK, 1 record affected.
aql&amp;gt; insert into &lt;span class="nb"&gt;test&lt;/span&gt; &lt;span class="o"&gt;(&lt;/span&gt;PK,a,b&lt;span class="o"&gt;)&lt;/span&gt; values &lt;span class="o"&gt;(&lt;/span&gt;6,&lt;span class="s2"&gt;"A"&lt;/span&gt;,&lt;span class="s2"&gt;"B"&lt;/span&gt;&lt;span class="o"&gt;)&lt;/span&gt;
OK, 1 record affected.
aql&amp;gt; insert into &lt;span class="nb"&gt;test&lt;/span&gt; &lt;span class="o"&gt;(&lt;/span&gt;PK,a,b&lt;span class="o"&gt;)&lt;/span&gt; values &lt;span class="o"&gt;(&lt;/span&gt;7,&lt;span class="s2"&gt;"A"&lt;/span&gt;,&lt;span class="s2"&gt;"B"&lt;/span&gt;&lt;span class="o"&gt;)&lt;/span&gt;
OK, 1 record affected.
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;p&gt;Notice how we have userKey=5, userKey=6 and userKey=7 distributed across the newly scaled xdr-proxies.&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight shell"&gt;&lt;code&gt;kubectl logs xdr-proxy-7d9fccd6c8-5q7gn &lt;span class="nt"&gt;-n&lt;/span&gt; aerospike | &lt;span class="nb"&gt;grep &lt;/span&gt;record-parser
2022-12-08 14:53:50.607 GMT DEBUG record-parser - parsed message fields: &lt;span class="nv"&gt;key&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;Key&lt;span class="o"&gt;(&lt;/span&gt;&lt;span class="nv"&gt;namespace&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;&lt;span class="s1"&gt;'test'&lt;/span&gt;, &lt;span class="nb"&gt;set&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;a1, &lt;span class="nv"&gt;digest&lt;/span&gt;&lt;span class="o"&gt;=[&lt;/span&gt;120, 48, &lt;span class="nt"&gt;-23&lt;/span&gt;, &lt;span class="nt"&gt;-90&lt;/span&gt;, 110, 126, 84, &lt;span class="nt"&gt;-1&lt;/span&gt;, 114, &lt;span class="nt"&gt;-116&lt;/span&gt;, &lt;span class="nt"&gt;-9&lt;/span&gt;, &lt;span class="nt"&gt;-21&lt;/span&gt;, 28, 75, 126, &lt;span class="nt"&gt;-68&lt;/span&gt;, &lt;span class="nt"&gt;-51&lt;/span&gt;, 83, 31, &lt;span class="nt"&gt;-117&lt;/span&gt;&lt;span class="o"&gt;]&lt;/span&gt;, &lt;span class="nv"&gt;userKey&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;1, &lt;span class="nv"&gt;lastUpdateTimeMs&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;1670511230565, &lt;span class="nv"&gt;userKeyString&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;1, &lt;span class="nv"&gt;digestString&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;&lt;span class="s1"&gt;'eDDppm5+VP9yjPfrHEt+vM1TH4s='&lt;/span&gt;&lt;span class="o"&gt;)&lt;/span&gt;

kubectl logs xdr-proxy-7d9fccd6c8-f5zkt &lt;span class="nt"&gt;-n&lt;/span&gt; aerospike | &lt;span class="nb"&gt;grep &lt;/span&gt;record-parser
&lt;span class="o"&gt;(&lt;/span&gt;none&lt;span class="o"&gt;)&lt;/span&gt;

kubectl logs xdr-proxy-7d9fccd6c8-49ttl &lt;span class="nt"&gt;-n&lt;/span&gt; aerospike | &lt;span class="nb"&gt;grep &lt;/span&gt;record-parser
&lt;span class="o"&gt;(&lt;/span&gt;none&lt;span class="o"&gt;)&lt;/span&gt;

kubectl logs xdr-proxy-7d9fccd6c8-c4j7k &lt;span class="nt"&gt;-n&lt;/span&gt; aerospike | &lt;span class="nb"&gt;grep &lt;/span&gt;record-parser
2022-12-08 15:05:37.511 GMT DEBUG record-parser - parsed message fields: &lt;span class="nv"&gt;key&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;Key&lt;span class="o"&gt;(&lt;/span&gt;&lt;span class="nv"&gt;namespace&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;&lt;span class="s1"&gt;'test'&lt;/span&gt;, &lt;span class="nb"&gt;set&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;null, &lt;span class="nv"&gt;digest&lt;/span&gt;&lt;span class="o"&gt;=[&lt;/span&gt;&lt;span class="nt"&gt;-88&lt;/span&gt;, 104, 104, &lt;span class="nt"&gt;-114&lt;/span&gt;, 19, &lt;span class="nt"&gt;-44&lt;/span&gt;, &lt;span class="nt"&gt;-19&lt;/span&gt;, 29, &lt;span class="nt"&gt;-15&lt;/span&gt;, 18, 118, 72, &lt;span class="nt"&gt;-117&lt;/span&gt;, &lt;span class="nt"&gt;-106&lt;/span&gt;, &lt;span class="nt"&gt;-28&lt;/span&gt;, 21, &lt;span class="nt"&gt;-48&lt;/span&gt;, 50, 26, 113], &lt;span class="nv"&gt;userKey&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;7, &lt;span class="nv"&gt;lastUpdateTimeMs&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;1670511937250, &lt;span class="nv"&gt;userKeyString&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;7, &lt;span class="nv"&gt;digestString&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;&lt;span class="s1"&gt;'qGhojhPU7R3xEnZIi5bkFdAyGnE='&lt;/span&gt;&lt;span class="o"&gt;)&lt;/span&gt;

kubectl logs xdr-proxy-7d9fccd6c8-lscbg &lt;span class="nt"&gt;-n&lt;/span&gt; aerospike | &lt;span class="nb"&gt;grep &lt;/span&gt;record-parser
2022-12-08 15:05:27.548 GMT DEBUG record-parser - parsed message fields: &lt;span class="nv"&gt;key&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;Key&lt;span class="o"&gt;(&lt;/span&gt;&lt;span class="nv"&gt;namespace&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;&lt;span class="s1"&gt;'test'&lt;/span&gt;, &lt;span class="nb"&gt;set&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;null, &lt;span class="nv"&gt;digest&lt;/span&gt;&lt;span class="o"&gt;=[&lt;/span&gt;68, 4, &lt;span class="nt"&gt;-94&lt;/span&gt;, &lt;span class="nt"&gt;-44&lt;/span&gt;, &lt;span class="nt"&gt;-75&lt;/span&gt;, 112, &lt;span class="nt"&gt;-102&lt;/span&gt;, 73, &lt;span class="nt"&gt;-120&lt;/span&gt;, 41, &lt;span class="nt"&gt;-101&lt;/span&gt;, &lt;span class="nt"&gt;-120&lt;/span&gt;, 33, &lt;span class="nt"&gt;-111&lt;/span&gt;, 15, &lt;span class="nt"&gt;-114&lt;/span&gt;, &lt;span class="nt"&gt;-85&lt;/span&gt;, 46, &lt;span class="nt"&gt;-2&lt;/span&gt;, 80], &lt;span class="nv"&gt;userKey&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;5, &lt;span class="nv"&gt;lastUpdateTimeMs&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;1670511927465, &lt;span class="nv"&gt;userKeyString&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;5, &lt;span class="nv"&gt;digestString&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;&lt;span class="s1"&gt;'RASi1LVwmkmIKZuIIZEPjqsu/lA='&lt;/span&gt;&lt;span class="o"&gt;)&lt;/span&gt;
2022-12-08 15:05:32.300 GMT DEBUG record-parser - parsed message fields: &lt;span class="nv"&gt;key&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;Key&lt;span class="o"&gt;(&lt;/span&gt;&lt;span class="nv"&gt;namespace&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;&lt;span class="s1"&gt;'test'&lt;/span&gt;, &lt;span class="nb"&gt;set&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;null, &lt;span class="nv"&gt;digest&lt;/span&gt;&lt;span class="o"&gt;=[&lt;/span&gt;33, &lt;span class="nt"&gt;-100&lt;/span&gt;, 127, 120, 17, 45, &lt;span class="nt"&gt;-79&lt;/span&gt;, 115, &lt;span class="nt"&gt;-40&lt;/span&gt;, 53, &lt;span class="nt"&gt;-70&lt;/span&gt;, &lt;span class="nt"&gt;-57&lt;/span&gt;, 120, 73, 20, &lt;span class="nt"&gt;-50&lt;/span&gt;, &lt;span class="nt"&gt;-99&lt;/span&gt;, &lt;span class="nt"&gt;-98&lt;/span&gt;, &lt;span class="nt"&gt;-104&lt;/span&gt;, 85], &lt;span class="nv"&gt;userKey&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;6, &lt;span class="nv"&gt;lastUpdateTimeMs&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;1670511932288, &lt;span class="nv"&gt;userKeyString&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;6, &lt;span class="nv"&gt;digestString&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;&lt;span class="s1"&gt;'IZx/eBEtsXPYNbrHeEkUzp2emFU='&lt;/span&gt;&lt;span class="o"&gt;)&lt;/span&gt;

kubectl logs xdr-proxy-7d9fccd6c8-r56vs &lt;span class="nt"&gt;-n&lt;/span&gt; aerospike | &lt;span class="nb"&gt;grep &lt;/span&gt;record-parser
&lt;span class="o"&gt;(&lt;/span&gt;none&lt;span class="o"&gt;)&lt;/span&gt;
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;p&gt;However, one should be aware that when data is actively flowing between source and destination clusters, there is no reset on the source connections, so the newly scaled xdr-proxies will not be utilised.&lt;/p&gt;

&lt;p&gt;We can go ahead and prove this out. Let's try adding some data using Aerospike's benchmark tool, which will ingest data into the source cluster. At the same time, we will scale the xdr-proxies on the destination side and observe the result. Before we start, however, it might be prudent to first scale down the xdr-proxy servers to 1 so there is no possibility of misconstrued results.&lt;/p&gt;

&lt;p&gt;I am going to use my own local machine, which has the benchmark tool already installed, to send data to the source EC2 instances. In order to do this we need to get the external IP address of the service.&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight shell"&gt;&lt;code&gt;kubectl get AerospikeCluster aerocluster-source-xdr &lt;span class="nt"&gt;-n&lt;/span&gt; aerospike  &lt;span class="nt"&gt;-o&lt;/span&gt; yaml

...
pods:
    aerocluster-source-xdr-0-0:
      aerospike:
        accessEndpoints:
        - 192.168.41.63:31999
        alternateAccessEndpoints:
        - 54.173.138.131:31999
        clusterName: aerocluster-source-xdr
        nodeID: 0a0
        tlsAccessEndpoints: &lt;span class="o"&gt;[]&lt;/span&gt;
        tlsAlternateAccessEndpoints: &lt;span class="o"&gt;[]&lt;/span&gt;
        tlsName: &lt;span class="s2"&gt;""&lt;/span&gt;
      aerospikeConfigHash: 4aacb9809beaa01d99a9f00293c9f7dc141845f8
      hostExternalIP: 54.173.138.131
      hostInternalIP: 192.168.41.63
      image: aerospike/aerospike-server-enterprise:6.1.0.2
      initializedVolumes:
      - workdir
      - ns
      networkPolicyHash: acbbfab3668e1fceeed201139d1173f00095667e
      podIP: 192.168.50.203
      podPort: 3000
      podSpecHash: 972dc2a779fe9ab407212b547d54d3a72ecef259
      servicePort: 31999
...
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;p&gt;Add the AWS firewall rule to allow traffic into the k8s service. Connect the asbenchmark tool to start writing traffic using the public IP address for the NodePort Service.&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight shell"&gt;&lt;code&gt;asbenchmark &lt;span class="nt"&gt;-h&lt;/span&gt; 54.173.138.131:31999 &lt;span class="nt"&gt;-Uadmin&lt;/span&gt; &lt;span class="nt"&gt;-Padmin123&lt;/span&gt; &lt;span class="nt"&gt;-z&lt;/span&gt; 10 &lt;span class="nt"&gt;-servicesAlternate&lt;/span&gt; &lt;span class="nt"&gt;-w&lt;/span&gt; RU,0 &lt;span class="nt"&gt;-o&lt;/span&gt; B256
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;p&gt;Scale up the xdr-proxy servers from 1 to 2 and check the logs of both proxies to see what messages are being received. In a production environment you should disable this debug logging.&lt;/p&gt;

&lt;p&gt;Notice how the new xdr-proxy instance &lt;code&gt;xdr-proxy-7d9fccd6c8-s2tzt&lt;/code&gt; has no data passed through it.&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight shell"&gt;&lt;code&gt;kubectl &lt;span class="nt"&gt;-n&lt;/span&gt; aerospike logs xdr-proxy-7d9fccd6c8-s2tzt | &lt;span class="nb"&gt;grep &lt;/span&gt;record-parser
&lt;span class="o"&gt;(&lt;/span&gt;none&lt;span class="o"&gt;)&lt;/span&gt;

kubectl &lt;span class="nt"&gt;-n&lt;/span&gt; aerospike logs xdr-proxy-7d9fccd6c8-5q7gn | &lt;span class="nb"&gt;grep &lt;/span&gt;record-parser
...
2022-12-08 18:21:36.158 GMT DEBUG record-parser - parsed message fields: &lt;span class="nv"&gt;key&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;Key&lt;span class="o"&gt;(&lt;/span&gt;&lt;span class="nv"&gt;namespace&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;&lt;span class="s1"&gt;'test'&lt;/span&gt;, &lt;span class="nb"&gt;set&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;testset, &lt;span class="nv"&gt;digest&lt;/span&gt;&lt;span class="o"&gt;=[&lt;/span&gt;51, &lt;span class="nt"&gt;-120&lt;/span&gt;, 114, &lt;span class="nt"&gt;-17&lt;/span&gt;, &lt;span class="nt"&gt;-44&lt;/span&gt;, 72, 123, 125, 50, 92, 3, 110, &lt;span class="nt"&gt;-21&lt;/span&gt;, &lt;span class="nt"&gt;-38&lt;/span&gt;, 74, 25, 42, 35, 117, 72], &lt;span class="nv"&gt;userKey&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;null, &lt;span class="nv"&gt;lastUpdateTimeMs&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;1670523696059, &lt;span class="nv"&gt;userKeyString&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;null, &lt;span class="nv"&gt;digestString&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;&lt;span class="s1"&gt;'M4hy79RIe30yXANu69pKGSojdUg='&lt;/span&gt;&lt;span class="o"&gt;)&lt;/span&gt;
2022-12-08 18:21:36.158 GMT DEBUG record-parser - parsed message fields: &lt;span class="nv"&gt;key&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;Key&lt;span class="o"&gt;(&lt;/span&gt;&lt;span class="nv"&gt;namespace&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;&lt;span class="s1"&gt;'test'&lt;/span&gt;, &lt;span class="nb"&gt;set&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;testset, &lt;span class="nv"&gt;digest&lt;/span&gt;&lt;span class="o"&gt;=[&lt;/span&gt;124, &lt;span class="nt"&gt;-6&lt;/span&gt;, &lt;span class="nt"&gt;-70&lt;/span&gt;, &lt;span class="nt"&gt;-23&lt;/span&gt;, 44, 41, &lt;span class="nt"&gt;-19&lt;/span&gt;, 40, &lt;span class="nt"&gt;-11&lt;/span&gt;, &lt;span class="nt"&gt;-16&lt;/span&gt;, 126, 120, 81, &lt;span class="nt"&gt;-113&lt;/span&gt;, &lt;span class="nt"&gt;-112&lt;/span&gt;, &lt;span class="nt"&gt;-79&lt;/span&gt;, 66, 77, &lt;span class="nt"&gt;-99&lt;/span&gt;, &lt;span class="nt"&gt;-6&lt;/span&gt;&lt;span class="o"&gt;]&lt;/span&gt;, &lt;span class="nv"&gt;userKey&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;null, &lt;span class="nv"&gt;lastUpdateTimeMs&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;1670523696059, &lt;span class="nv"&gt;userKeyString&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;null, &lt;span class="nv"&gt;digestString&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;&lt;span class="s1"&gt;'fPq66Swp7Sj18H54UY+QsUJNnfo='&lt;/span&gt;&lt;span class="o"&gt;)&lt;/span&gt;
2022-12-08 18:21:36.158 GMT DEBUG record-parser - parsed message fields: &lt;span class="nv"&gt;key&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;Key&lt;span class="o"&gt;(&lt;/span&gt;&lt;span class="nv"&gt;namespace&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;&lt;span class="s1"&gt;'test'&lt;/span&gt;, &lt;span class="nb"&gt;set&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;testset, &lt;span class="nv"&gt;digest&lt;/span&gt;&lt;span class="o"&gt;=[&lt;/span&gt;&lt;span class="nt"&gt;-127&lt;/span&gt;, &lt;span class="nt"&gt;-118&lt;/span&gt;, 63, &lt;span class="nt"&gt;-32&lt;/span&gt;, &lt;span class="nt"&gt;-74&lt;/span&gt;, 60, &lt;span class="nt"&gt;-60&lt;/span&gt;, 86, 31, &lt;span class="nt"&gt;-119&lt;/span&gt;, &lt;span class="nt"&gt;-1&lt;/span&gt;, &lt;span class="nt"&gt;-105&lt;/span&gt;, &lt;span class="nt"&gt;-108&lt;/span&gt;, &lt;span class="nt"&gt;-59&lt;/span&gt;, 111, 48, &lt;span class="nt"&gt;-34&lt;/span&gt;, &lt;span class="nt"&gt;-61&lt;/span&gt;, &lt;span class="nt"&gt;-108&lt;/span&gt;, &lt;span class="nt"&gt;-5&lt;/span&gt;&lt;span class="o"&gt;]&lt;/span&gt;, &lt;span class="nv"&gt;userKey&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;null, &lt;span class="nv"&gt;lastUpdateTimeMs&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;1670523696105, &lt;span class="nv"&gt;userKeyString&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;null, &lt;span class="nv"&gt;digestString&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;&lt;span class="s1"&gt;'gYo/4LY8xFYfif+XlMVvMN7DlPs='&lt;/span&gt;&lt;span class="o"&gt;)&lt;/span&gt;
2022-12-08 18:21:36.256 GMT DEBUG record-parser - parsed message fields: &lt;span class="nv"&gt;key&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;Key&lt;span class="o"&gt;(&lt;/span&gt;&lt;span class="nv"&gt;namespace&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;&lt;span class="s1"&gt;'test'&lt;/span&gt;, &lt;span class="nb"&gt;set&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;testset, &lt;span class="nv"&gt;digest&lt;/span&gt;&lt;span class="o"&gt;=[&lt;/span&gt;99, 1, 80, 2, 76, &lt;span class="nt"&gt;-43&lt;/span&gt;, 125, 77, 47, 8, 6, 35, 49, 117, &lt;span class="nt"&gt;-35&lt;/span&gt;, 54, 120, &lt;span class="nt"&gt;-29&lt;/span&gt;, 118, &lt;span class="nt"&gt;-72&lt;/span&gt;&lt;span class="o"&gt;]&lt;/span&gt;, &lt;span class="nv"&gt;userKey&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;null, &lt;span class="nv"&gt;lastUpdateTimeMs&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;1670523696178, &lt;span class="nv"&gt;userKeyString&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;null, &lt;span class="nv"&gt;digestString&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;&lt;span class="s1"&gt;'YwFQAkzVfU0vCAYjMXXdNnjjdrg='&lt;/span&gt;&lt;span class="o"&gt;)&lt;/span&gt;
2022-12-08 18:21:36.257 GMT DEBUG record-parser - parsed message fields: &lt;span class="nv"&gt;key&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;Key&lt;span class="o"&gt;(&lt;/span&gt;&lt;span class="nv"&gt;namespace&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;&lt;span class="s1"&gt;'test'&lt;/span&gt;, &lt;span class="nb"&gt;set&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;testset, &lt;span class="nv"&gt;digest&lt;/span&gt;&lt;span class="o"&gt;=[&lt;/span&gt;&lt;span class="nt"&gt;-88&lt;/span&gt;, &lt;span class="nt"&gt;-46&lt;/span&gt;, &lt;span class="nt"&gt;-48&lt;/span&gt;, &lt;span class="nt"&gt;-33&lt;/span&gt;, 77, &lt;span class="nt"&gt;-120&lt;/span&gt;, 123, &lt;span class="nt"&gt;-101&lt;/span&gt;, &lt;span class="nt"&gt;-70&lt;/span&gt;, &lt;span class="nt"&gt;-20&lt;/span&gt;, &lt;span class="nt"&gt;-96&lt;/span&gt;, &lt;span class="nt"&gt;-104&lt;/span&gt;, &lt;span class="nt"&gt;-51&lt;/span&gt;, &lt;span class="nt"&gt;-90&lt;/span&gt;, 28, &lt;span class="nt"&gt;-15&lt;/span&gt;, 70, 11, 118, 83], &lt;span class="nv"&gt;userKey&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;null, &lt;span class="nv"&gt;lastUpdateTimeMs&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;1670523696202, &lt;span class="nv"&gt;userKeyString&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;null, &lt;span class="nv"&gt;digestString&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;&lt;span class="s1"&gt;'qNLQ302Ie5u67KCYzaYc8UYLdlM='&lt;/span&gt;&lt;span class="o"&gt;)&lt;/span&gt;
2022-12-08 18:21:36.257 GMT DEBUG record-parser - parsed message fields: &lt;span class="nv"&gt;key&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;Key&lt;span class="o"&gt;(&lt;/span&gt;&lt;span class="nv"&gt;namespace&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;&lt;span class="s1"&gt;'test'&lt;/span&gt;, &lt;span class="nb"&gt;set&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;testset, &lt;span class="nv"&gt;digest&lt;/span&gt;&lt;span class="o"&gt;=[&lt;/span&gt;&lt;span class="nt"&gt;-97&lt;/span&gt;, 18, 28, &lt;span class="nt"&gt;-43&lt;/span&gt;, 75, 42, &lt;span class="nt"&gt;-22&lt;/span&gt;, &lt;span class="nt"&gt;-126&lt;/span&gt;, &lt;span class="nt"&gt;-61&lt;/span&gt;, &lt;span class="nt"&gt;-108&lt;/span&gt;, &lt;span class="nt"&gt;-36&lt;/span&gt;, 118, &lt;span class="nt"&gt;-86&lt;/span&gt;, &lt;span class="nt"&gt;-105&lt;/span&gt;, &lt;span class="nt"&gt;-52&lt;/span&gt;, 119, &lt;span class="nt"&gt;-39&lt;/span&gt;, &lt;span class="nt"&gt;-33&lt;/span&gt;, &lt;span class="nt"&gt;-127&lt;/span&gt;, &lt;span class="nt"&gt;-76&lt;/span&gt;&lt;span class="o"&gt;]&lt;/span&gt;, &lt;span class="nv"&gt;userKey&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;null, &lt;span class="nv"&gt;lastUpdateTimeMs&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;1670523696175, &lt;span class="nv"&gt;userKeyString&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;null, &lt;span class="nv"&gt;digestString&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;&lt;span class="s1"&gt;'nxIc1Usq6oLDlNx2qpfMd9nfgbQ='&lt;/span&gt;&lt;span class="o"&gt;)&lt;/span&gt;
2022-12-08 18:21:36.257 GMT DEBUG record-parser - parsed message fields: &lt;span class="nv"&gt;key&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;Key&lt;span class="o"&gt;(&lt;/span&gt;&lt;span class="nv"&gt;namespace&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;&lt;span class="s1"&gt;'test'&lt;/span&gt;, &lt;span class="nb"&gt;set&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;testset, &lt;span class="nv"&gt;digest&lt;/span&gt;&lt;span class="o"&gt;=[&lt;/span&gt;&lt;span class="nt"&gt;-120&lt;/span&gt;, 103, &lt;span class="nt"&gt;-51&lt;/span&gt;, 57, &lt;span class="nt"&gt;-71&lt;/span&gt;, &lt;span class="nt"&gt;-106&lt;/span&gt;, 13, &lt;span class="nt"&gt;-48&lt;/span&gt;, 100, 28, 59, &lt;span class="nt"&gt;-3&lt;/span&gt;, &lt;span class="nt"&gt;-39&lt;/span&gt;, &lt;span class="nt"&gt;-56&lt;/span&gt;, &lt;span class="nt"&gt;-67&lt;/span&gt;, &lt;span class="nt"&gt;-103&lt;/span&gt;, 29, 36, 75, 119], &lt;span class="nv"&gt;userKey&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;null, &lt;span class="nv"&gt;lastUpdateTimeMs&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;1670523696191, &lt;span class="nv"&gt;userKeyString&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;null, &lt;span class="nv"&gt;digestString&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;&lt;span class="s1"&gt;'iGfNObmWDdBkHDv92ci9mR0kS3c='&lt;/span&gt;&lt;span class="o"&gt;)&lt;/span&gt;
2022-12-08 18:21:36.257 GMT DEBUG record-parser - parsed message fields: &lt;span class="nv"&gt;key&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;Key&lt;span class="o"&gt;(&lt;/span&gt;&lt;span class="nv"&gt;namespace&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;&lt;span class="s1"&gt;'test'&lt;/span&gt;, &lt;span class="nb"&gt;set&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;testset, &lt;span class="nv"&gt;digest&lt;/span&gt;&lt;span class="o"&gt;=[&lt;/span&gt;&lt;span class="nt"&gt;-47&lt;/span&gt;, 88, &lt;span class="nt"&gt;-13&lt;/span&gt;, 13, &lt;span class="nt"&gt;-35&lt;/span&gt;, 77, 24, 22, &lt;span class="nt"&gt;-40&lt;/span&gt;, &lt;span class="nt"&gt;-61&lt;/span&gt;, &lt;span class="nt"&gt;-118&lt;/span&gt;, &lt;span class="nt"&gt;-115&lt;/span&gt;, 82, 13, 127, &lt;span class="nt"&gt;-125&lt;/span&gt;, 53, 66, &lt;span class="nt"&gt;-22&lt;/span&gt;, &lt;span class="nt"&gt;-8&lt;/span&gt;&lt;span class="o"&gt;]&lt;/span&gt;, &lt;span class="nv"&gt;userKey&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;null, &lt;span class="nv"&gt;lastUpdateTimeMs&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;1670523696233, &lt;span class="nv"&gt;userKeyString&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;null, &lt;span class="nv"&gt;digestString&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;&lt;span class="s1"&gt;'0VjzDd1NGBbYw4qNUg1/gzVC6vg='&lt;/span&gt;&lt;span class="o"&gt;)&lt;/span&gt;
2022-12-08 18:21:36.258 GMT DEBUG record-parser - parsed message fields: &lt;span class="nv"&gt;key&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;Key&lt;span class="o"&gt;(&lt;/span&gt;&lt;span class="nv"&gt;namespace&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;&lt;span class="s1"&gt;'test'&lt;/span&gt;, &lt;span class="nb"&gt;set&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;testset, &lt;span class="nv"&gt;digest&lt;/span&gt;&lt;span class="o"&gt;=[&lt;/span&gt;&lt;span class="nt"&gt;-63&lt;/span&gt;, &lt;span class="nt"&gt;-11&lt;/span&gt;, 93, &lt;span class="nt"&gt;-90&lt;/span&gt;, 47, 29, &lt;span class="nt"&gt;-63&lt;/span&gt;, 36, 12, 53, &lt;span class="nt"&gt;-86&lt;/span&gt;, 84, 57, &lt;span class="nt"&gt;-125&lt;/span&gt;, 16, 43, &lt;span class="nt"&gt;-18&lt;/span&gt;, 93, 56, 9], &lt;span class="nv"&gt;userKey&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;null, &lt;span class="nv"&gt;lastUpdateTimeMs&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;1670523696186, &lt;span class="nv"&gt;userKeyString&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;null, &lt;span class="nv"&gt;digestString&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;&lt;span class="s1"&gt;'wfVdpi8dwSQMNapUOYMQK+5dOAk='&lt;/span&gt;&lt;span class="o"&gt;)&lt;/span&gt;
2022-12-08 18:21:36.258 GMT DEBUG record-parser - parsed message fields: &lt;span class="nv"&gt;key&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;Key&lt;span class="o"&gt;(&lt;/span&gt;&lt;span class="nv"&gt;namespace&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;&lt;span class="s1"&gt;'test'&lt;/span&gt;, &lt;span class="nb"&gt;set&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;testset, &lt;span class="nv"&gt;digest&lt;/span&gt;&lt;span class="o"&gt;=[&lt;/span&gt;&lt;span class="nt"&gt;-23&lt;/span&gt;, 101, &lt;span class="nt"&gt;-114&lt;/span&gt;, &lt;span class="nt"&gt;-87&lt;/span&gt;, &lt;span class="nt"&gt;-52&lt;/span&gt;, 107, 36, 113, 101, 33, &lt;span class="nt"&gt;-16&lt;/span&gt;, 82, &lt;span class="nt"&gt;-95&lt;/span&gt;, 97, 34, &lt;span class="nt"&gt;-121&lt;/span&gt;, 82, &lt;span class="nt"&gt;-97&lt;/span&gt;, 40, 59], &lt;span class="nv"&gt;userKey&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;null, &lt;span class="nv"&gt;lastUpdateTimeMs&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;1670523696145, &lt;span class="nv"&gt;userKeyString&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;null, &lt;span class="nv"&gt;digestString&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;&lt;span class="s1"&gt;'6WWOqcxrJHFlIfBSoWEih1KfKDs='&lt;/span&gt;&lt;span class="o"&gt;)&lt;/span&gt;
2022-12-08 18:21:36.258 GMT DEBUG record-parser - parsed message fields: &lt;span class="nv"&gt;key&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;Key&lt;span class="o"&gt;(&lt;/span&gt;&lt;span class="nv"&gt;namespace&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;&lt;span class="s1"&gt;'test'&lt;/span&gt;, &lt;span class="nb"&gt;set&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;testset, &lt;span class="nv"&gt;digest&lt;/span&gt;&lt;span class="o"&gt;=[&lt;/span&gt;&lt;span class="nt"&gt;-77&lt;/span&gt;, &lt;span class="nt"&gt;-118&lt;/span&gt;, 49, 4, &lt;span class="nt"&gt;-75&lt;/span&gt;, 123, 81, 2, &lt;span class="nt"&gt;-103&lt;/span&gt;, &lt;span class="nt"&gt;-73&lt;/span&gt;, 42, &lt;span class="nt"&gt;-70&lt;/span&gt;, &lt;span class="nt"&gt;-54&lt;/span&gt;, 95, 98, 23, 73, 66, &lt;span class="nt"&gt;-86&lt;/span&gt;, 7], &lt;span class="nv"&gt;userKey&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;null, &lt;span class="nv"&gt;lastUpdateTimeMs&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;1670523696230, &lt;span class="nv"&gt;userKeyString&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;null, &lt;span class="nv"&gt;digestString&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;&lt;span class="s1"&gt;'s4oxBLV7UQKZtyq6yl9iF0lCqgc='&lt;/span&gt;&lt;span class="o"&gt;)&lt;/span&gt;
2022-12-08 18:21:36.258 GMT DEBUG record-parser - parsed message fields: &lt;span class="nv"&gt;key&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;Key&lt;span class="o"&gt;(&lt;/span&gt;&lt;span class="nv"&gt;namespace&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;&lt;span class="s1"&gt;'test'&lt;/span&gt;, &lt;span class="nb"&gt;set&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;testset, &lt;span class="nv"&gt;digest&lt;/span&gt;&lt;span class="o"&gt;=[&lt;/span&gt;50, &lt;span class="nt"&gt;-38&lt;/span&gt;, &lt;span class="nt"&gt;-31&lt;/span&gt;, &lt;span class="nt"&gt;-54&lt;/span&gt;, &lt;span class="nt"&gt;-122&lt;/span&gt;, &lt;span class="nt"&gt;-113&lt;/span&gt;, &lt;span class="nt"&gt;-38&lt;/span&gt;, 88, 15, 7, 96, 51, &lt;span class="nt"&gt;-92&lt;/span&gt;, &lt;span class="nt"&gt;-25&lt;/span&gt;, 60, &lt;span class="nt"&gt;-104&lt;/span&gt;, 26, 113, &lt;span class="nt"&gt;-117&lt;/span&gt;, &lt;span class="nt"&gt;-82&lt;/span&gt;&lt;span class="o"&gt;]&lt;/span&gt;, &lt;span class="nv"&gt;userKey&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;null, &lt;span class="nv"&gt;lastUpdateTimeMs&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;1670523696157, &lt;span class="nv"&gt;userKeyString&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;null, &lt;span class="nv"&gt;digestString&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;&lt;span class="s1"&gt;'MtrhyoaP2lgPB2AzpOc8mBpxi64='&lt;/span&gt;&lt;span class="o"&gt;)&lt;/span&gt;
2022-12-08 18:21:36.258 GMT DEBUG record-parser - parsed message fields: &lt;span class="nv"&gt;key&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;Key&lt;span class="o"&gt;(&lt;/span&gt;&lt;span class="nv"&gt;namespace&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;&lt;span class="s1"&gt;'test'&lt;/span&gt;, &lt;span class="nb"&gt;set&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;testset, &lt;span class="nv"&gt;digest&lt;/span&gt;&lt;span class="o"&gt;=[&lt;/span&gt;&lt;span class="nt"&gt;-77&lt;/span&gt;, 31, 67, &lt;span class="nt"&gt;-18&lt;/span&gt;, &lt;span class="nt"&gt;-52&lt;/span&gt;, &lt;span class="nt"&gt;-114&lt;/span&gt;, 42, &lt;span class="nt"&gt;-18&lt;/span&gt;, 36, &lt;span class="nt"&gt;-111&lt;/span&gt;, 89, 62, 109, 114, &lt;span class="nt"&gt;-54&lt;/span&gt;, 54, &lt;span class="nt"&gt;-121&lt;/span&gt;, &lt;span class="nt"&gt;-110&lt;/span&gt;, &lt;span class="nt"&gt;-88&lt;/span&gt;, &lt;span class="nt"&gt;-108&lt;/span&gt;&lt;span class="o"&gt;]&lt;/span&gt;, &lt;span class="nv"&gt;userKey&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;null, &lt;span class="nv"&gt;lastUpdateTimeMs&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;1670523696206, &lt;span class="nv"&gt;userKeyString&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;null, &lt;span class="nv"&gt;digestString&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;&lt;span class="s1"&gt;'sx9D7syOKu4kkVk+bXLKNoeSqJQ='&lt;/span&gt;&lt;span class="o"&gt;)&lt;/span&gt;
2022-12-08 18:21:36.258 GMT DEBUG record-parser - parsed message fields: &lt;span class="nv"&gt;key&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;Key&lt;span class="o"&gt;(&lt;/span&gt;&lt;span class="nv"&gt;namespace&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;&lt;span class="s1"&gt;'test'&lt;/span&gt;, &lt;span class="nb"&gt;set&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;testset, &lt;span class="nv"&gt;digest&lt;/span&gt;&lt;span class="o"&gt;=[&lt;/span&gt;73, 11, 98, &lt;span class="nt"&gt;-50&lt;/span&gt;, 32, 12, 0, &lt;span class="nt"&gt;-50&lt;/span&gt;, 22, &lt;span class="nt"&gt;-101&lt;/span&gt;, &lt;span class="nt"&gt;-108&lt;/span&gt;, 18, 38, 7, &lt;span class="nt"&gt;-65&lt;/span&gt;, 6, &lt;span class="nt"&gt;-58&lt;/span&gt;, 60, &lt;span class="nt"&gt;-6&lt;/span&gt;, &lt;span class="nt"&gt;-33&lt;/span&gt;&lt;span class="o"&gt;]&lt;/span&gt;, &lt;span class="nv"&gt;userKey&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;null, &lt;span class="nv"&gt;lastUpdateTimeMs&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;1670523696171, &lt;span class="nv"&gt;userKeyString&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;null, &lt;span class="nv"&gt;digestString&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;&lt;span class="s1"&gt;'SQtiziAMAM4Wm5QSJge/BsY8+t8='&lt;/span&gt;&lt;span class="o"&gt;)&lt;/span&gt;
2022-12-08 18:21:36.258 GMT DEBUG record-parser - parsed message fields: &lt;span class="nv"&gt;key&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;Key&lt;span class="o"&gt;(&lt;/span&gt;&lt;span class="nv"&gt;namespace&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;&lt;span class="s1"&gt;'test'&lt;/span&gt;, &lt;span class="nb"&gt;set&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;testset, &lt;span class="nv"&gt;digest&lt;/span&gt;&lt;span class="o"&gt;=[&lt;/span&gt;109, &lt;span class="nt"&gt;-82&lt;/span&gt;, 24, 53, 35, 89, &lt;span class="nt"&gt;-72&lt;/span&gt;, &lt;span class="nt"&gt;-117&lt;/span&gt;, &lt;span class="nt"&gt;-22&lt;/span&gt;, 79, 119, &lt;span class="nt"&gt;-89&lt;/span&gt;, 56, &lt;span class="nt"&gt;-5&lt;/span&gt;, 0, &lt;span class="nt"&gt;-103&lt;/span&gt;, &lt;span class="nt"&gt;-54&lt;/span&gt;, 51, 25, 126], &lt;span class="nv"&gt;userKey&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;null, &lt;span class="nv"&gt;lastUpdateTimeMs&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;1670523696146, &lt;span class="nv"&gt;userKeyString&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;null, &lt;span class="nv"&gt;digestString&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;&lt;span class="s1"&gt;'ba4YNSNZuIvqT3enOPsAmcozGX4='&lt;/span&gt;&lt;span class="o"&gt;)&lt;/span&gt;
...
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;h2&gt;
  
  
  Conclusion
&lt;/h2&gt;

&lt;p&gt;Aerospike, renowned for its core resiliency, has an industry-established uptime of 5 x 9s — that's 99.999% ( 0.99999 ) availability. So, adding to the data resiliency matrix, XDR now means it's possible to have an additional copy of the data distributed asynchronously from one data centre to another with minimal effort. Taking this one step further, we have shown how this can be achieved using the xdr-proxy to access k8s clusters in a private network. All this has been achieved with minimum effort thanks to the Aerospike Kubernetes Operator. &lt;/p&gt;

</description>
      <category>watercooler</category>
    </item>
    <item>
      <title>Block and Filesystem side-by-side with K8s and Aerospike</title>
      <dc:creator>Naresh Maharaj</dc:creator>
      <pubDate>Wed, 30 Nov 2022 18:34:25 +0000</pubDate>
      <link>https://dev.to/aerospike/block-and-filesystem-side-by-side-with-k8s-and-aerospike-m3j</link>
      <guid>https://dev.to/aerospike/block-and-filesystem-side-by-side-with-k8s-and-aerospike-m3j</guid>
      <description>&lt;p&gt;​&lt;a href="https://media2.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2F8eg8bpyzq7e1t0y710er.jpg" class="article-body-image-wrapper"&gt;&lt;img src="https://media2.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2F8eg8bpyzq7e1t0y710er.jpg" width="800" height="361"&gt;&lt;/a&gt;&lt;/p&gt;

&lt;p&gt;In this article we focus on side-by-side block and filesystem requests using Kubernetes.​ The driver for this is that it will allow us to deploy Aerospike using Aerospike's all flash mode.&lt;/p&gt;

&lt;p&gt;Storing database values on a raw block device with index information on file can bring significant cost reductions, especially when considering use cases for Aerospike’s &lt;a href="https://docs.aerospike.com/server/operations/plan/ssd" rel="noopener noreferrer"&gt;All-Flash&lt;/a&gt;. To support such a workload, you can configure Aerospike to use NVMe Flash drives as the primary index store.&lt;/p&gt;

&lt;p&gt;How does this operation work in practice? &lt;br&gt;
Suppose a devOps engineer has a host machine with one or more raw block devices and wants to use a filesystem formatted to Ext4 or XFS for storage. The engineer could log into each host, create partitions, format the filesystem, and mount the device in the traditional way, but this seems less than pragmatic. &lt;br&gt;
That's where side-by-side block and filesystem requests using Kubernetes come in. &lt;br&gt;
​&lt;br&gt;
This article describes the following process:&lt;br&gt;
​&lt;/p&gt;

&lt;ul&gt;
&lt;li&gt;Spin up an AWS EKS Cluster with eksctl&lt;/li&gt;
&lt;li&gt;Clarify our understanding of block storage devices&lt;/li&gt;
&lt;li&gt;Introduce the &lt;a href="https://github.com/kubernetes-sigs/sig-storage-local-static-provisioner" rel="noopener noreferrer"&gt;Local Static Provisioner&lt;/a&gt;
&lt;/li&gt;
&lt;li&gt;Create a Persistent Volume (PV) using a raw device

&lt;ul&gt;
&lt;li&gt;1. intended for block storage&lt;/li&gt;
&lt;li&gt;2. intended for storing files
&lt;/li&gt;
&lt;/ul&gt;
&lt;/li&gt;
&lt;li&gt;Install httpd and review

&lt;ul&gt;
&lt;li&gt;1. scheduling Pods to a host with a filesystem&lt;/li&gt;
&lt;li&gt;2. use of a PersistentVolumeClaim (PVC)&lt;/li&gt;
&lt;li&gt;3. node affinity with a PersistentVolume&lt;/li&gt;
&lt;/ul&gt;
&lt;/li&gt;
&lt;li&gt;Install Aerospike using the &lt;a href="https://docs.aerospike.com/cloud/kubernetes/operator" rel="noopener noreferrer"&gt;Kubernetes Operator&lt;/a&gt;
&lt;/li&gt;
&lt;li&gt;Insert some test data&lt;/li&gt;
&lt;li&gt;Use Kubernetes Taints and Tolerations 

&lt;ul&gt;
&lt;li&gt;1. to reserve Hosts with filesystems &lt;/li&gt;
&lt;li&gt;2. schedule Pods to Hosts with filesystems&lt;/li&gt;
&lt;/ul&gt;
&lt;/li&gt;
&lt;li&gt;Clean everything up.&lt;/li&gt;
&lt;/ul&gt;
&lt;h2&gt;
  
  
  Prerequisites
&lt;/h2&gt;

&lt;p&gt;Before we zoom into action, we need to have the following:&lt;/p&gt;

&lt;ul&gt;
&lt;li&gt;lots of energy and some basic knowledge of Kubernetes&lt;/li&gt;
&lt;li&gt;jq ( a cool json command line parser )&lt;/li&gt;
&lt;li&gt;eksctl ( cli tool for working with EKS clusters )&lt;/li&gt;
&lt;li&gt;kubectl ( cli tool to run commands against k8s clusters )&lt;/li&gt;
&lt;li&gt;parted ( cli tool for creating and deleting partitions)&lt;/li&gt;
&lt;/ul&gt;
&lt;h2&gt;
  
  
  Create an EKS Cluster
&lt;/h2&gt;
&lt;h3&gt;
  
  
  Setup
&lt;/h3&gt;

&lt;p&gt;​&lt;br&gt;
Create a file called &lt;em&gt;my-cluster.yaml&lt;/em&gt; with the following contents. This specifies the configuration for the Kubernetes cluster itself. The &lt;em&gt;ssh&lt;/em&gt; configuration is optional.&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight plaintext"&gt;&lt;code&gt;apiVersion: eksctl.io/v1alpha5
kind: ClusterConfig
metadata:
  name: my1-eks-cluster
  region: us-east-1
nodeGroups:
  - name: ng-1
    labels: { role: database }
    instanceType: m5d.large
    desiredCapacity: 3
    volumeSize: 13
    ssh:
      allow: true # will use ~/.ssh/id_rsa.pub as the default ssh key
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;p&gt;Use the AWS eksctl tool to create our Kubernetes cluster. You may need to install eksctl if this is not present on your machine.&lt;br&gt;
Using &lt;em&gt;eksctl&lt;/em&gt; ...&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight plaintext"&gt;&lt;code&gt;eksctl create cluster -f my-cluster.yaml
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;p&gt;The result on completion will be a 3 node Kubernetes cluster. Confirm successful creation as follows :&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight plaintext"&gt;&lt;code&gt;kubectl get nodes -L role
NAME                             STATUS   ROLES    AGE     VERSION                ROLE
ip-192-168-22-209.ec2.internal   Ready    &amp;lt;none&amp;gt;   2m36s   v1.22.12-eks-ba74326   database
ip-192-168-28-68.ec2.internal    Ready    &amp;lt;none&amp;gt;   2m38s   v1.22.12-eks-ba74326   database
ip-192-168-50-205.ec2.internal   Ready    &amp;lt;none&amp;gt;   2m33s   v1.22.12-eks-ba74326   database
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;h2&gt;
  
  
  Raw Block Storage
&lt;/h2&gt;

&lt;p&gt;Block storage stores a sequence of bytes in a fixed size block (page) on a storage device. Each block has a unique hash that references the address location of the specified block. Unlike a filesystem, block storage doesn't have the associated metadata such as format-type, owner, date, etc. Also, block storage doesn’t use the conventional storage paths to access data like a filesystem file. This reduction in overhead contributes to improved overall access speeds when using raw block devices. The ability to store bytes in blocks allows applications the flexibility to decide how these blocks are accessed and managed, making block storage an ideal choice for low latency databases such as &lt;a href="https://aerospike.com/" rel="noopener noreferrer"&gt;Aerospike&lt;/a&gt;.  From a developer's perspective, a block device is simply a large array of bytes, usually with some minimum granularity for reads and writes. In Aerospike this granularity is configured and referred to as the &lt;a href="https://docs.aerospike.com/server/architecture/resilience#block-size-and-cache-size" rel="noopener noreferrer"&gt;write-block-size&lt;/a&gt;. The Aerospike Kubernetes Operator uses the &lt;a href="https://kubernetes.io/blog/2019/03/07/raw-block-volume-support-to-beta/" rel="noopener noreferrer"&gt;storage infrastructure&lt;/a&gt; software inside of Kubernetes and the need for data platforms to use raw block storage becomes ever more important.&lt;br&gt;
&lt;a href="https://media2.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2Fdm8rmexx27ye4vkp5ykh.jpg" class="article-body-image-wrapper"&gt;&lt;img src="https://media2.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2Fdm8rmexx27ye4vkp5ykh.jpg" width="800" height="614"&gt;&lt;/a&gt;&lt;/p&gt;


&lt;h2&gt;
  
  
  Local Static Provisioner
&lt;/h2&gt;

&lt;p&gt;The local volume static provisioner manages the PersistentVolume (PV) lifecycle for pre-allocated disks by detecting and creating PVs for each local disk on the host, and cleaning up the disks when released. It doesn't support &lt;a href="https://kubernetes.io/docs/concepts/storage/dynamic-provisioning/" rel="noopener noreferrer"&gt;dynamic provisioning&lt;/a&gt; where storage volumes can be created on-demand. Without dynamic provisioning a manual step is required to create new storage volumes which are then mapped to PersistentVolume objects.&lt;/p&gt;
&lt;h3&gt;
  
  
  Why do I need it?
&lt;/h3&gt;

&lt;p&gt;As a captain of a ship, your time is best spent at the helm and not constantly going down below deck. Similarly, the LocalStaticProvisioner maintains the low level details like mappings between paths of local volumes and PV objects and keeps these stable across reboots and device changes, so you can focus on orchestration efforts. &lt;/p&gt;

&lt;p&gt;The PersistentVolume, often seen as a remote storage resource, decouples the volumes from the Pods. However, a distributed application with fast access patterns and replication logic can benefit from the disks being accessed locally. Local disk usage will strongly couple your application data to a specific node, so if the Pod fails and is successfully rescheduled, the data on that node will still be available to the container. If the node is no longer accessible, data could be inaccessible but applications with built in replication logic will ensure data is copied ( &lt;em&gt;we like to say replicated&lt;/em&gt; ) onto other nodes and this means applications can survive a node level outage. Without using the LocalStaticProvisioner the operator would be responsible for partitioning, formatting and mounting devices before they could be used as a Kubernetes PersistentVolume. The operator would also need to be responsible for cleaning up the volume and preparing it for reuse, something which is now the responsibility of the LocalStaticProvisioner when the PersistentVolumeClaim is released.&lt;/p&gt;
&lt;h3&gt;
  
  
  Persistent Volume Block Store
&lt;/h3&gt;

&lt;p&gt;To use a device as raw block storage, you can edit the following yaml file as required &lt;a href="https://github.com/kubernetes-sigs/sig-storage-local-static-provisioner/blob/master/deployment/kubernetes/example/default_example_provisioner_generated.yaml" rel="noopener noreferrer"&gt;default_example_provisioner_generated.yaml&lt;/a&gt;.&lt;/p&gt;

&lt;p&gt;The following section is the key part to this file. For the majority of the time you will not need to change it.&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight plaintext"&gt;&lt;code&gt;apiVersion: v1
kind: ConfigMap
metadata:
  name: local-provisioner-config 
  namespace: default 
data:
  storageClassMap: |     
    fast-disks:
       hostDir: /mnt/fast-disks
       mountDir:  /mnt/fast-disks 
       blockCleanerCommand:
         - "/scripts/shred.sh"
         - "2"
       volumeMode: Filesystem
       fsType: ext4
       namePattern: "*"
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;p&gt;To use a device as block storage for Aerospike, we need to replace the section above with the following. A pre-configured file is available &lt;a href="https://github.com/aerospike/aerospike-kubernetes-operator/blob/2.2.1/config/samples/storage/aerospike_local_volume_provisioner.yaml" rel="noopener noreferrer"&gt;here&lt;/a&gt; for your convenience.&lt;/p&gt;

&lt;p&gt;In a moment we will deploy this file so there is no need to do anything just right now.&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight plaintext"&gt;&lt;code&gt;apiVersion: v1
kind: ConfigMap
metadata:
  name: local-provisioner-config
  namespace: aerospike
data:
  useNodeNameOnly: "true"
  storageClassMap: |
    local-ssd:
       hostDir: /mnt/disks
       mountDir:  /mnt/disks
       blockCleanerCommand:
         - "/scripts/shred.sh"
         - "2"
       volumeMode: Block
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;p&gt;To learn more about how to configure and operate the LocalStaticProvisioner, see the &lt;a href="https://github.com/kubernetes-sigs/sig-storage-local-static-provisioner/blob/master/docs/operations.md" rel="noopener noreferrer"&gt;Operation&lt;/a&gt; and &lt;a href="https://github.com/kubernetes-sigs/sig-storage-local-static-provisioner/blob/master/docs/provisioner.md" rel="noopener noreferrer"&gt;Configuration&lt;/a&gt; guides.&lt;/p&gt;

&lt;p&gt;Once your Kubernetes cluster is up and running, create the soft links to your local storage device(s) on each node.&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight plaintext"&gt;&lt;code&gt;kubectl get nodes -o wide
NAME                             STATUS   ROLES    AGE   VERSION
ip-192-168-10-182.ec2.internal   Ready    &amp;lt;none&amp;gt;   20m   v1.22.12-eks-ba74326
ip-192-168-15-174.ec2.internal   Ready    &amp;lt;none&amp;gt;   20m   v1.22.12-eks-ba74326
ip-192-168-45-233.ec2.internal   Ready    &amp;lt;none&amp;gt;   21m   v1.22.12-eks-ba74326
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;p&gt;For each node, add a permanent soft link to the block device. In our case, the device name is nvme1n1 as below. You will need to ssh into each of the Kubernetes worker nodes to do this.&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight plaintext"&gt;&lt;code&gt;lsblk
NAME          MAJ:MIN RM  SIZE RO TYPE MOUNTPOINT
nvme1n1       259:0    0 69.9G  0 disk
nvme0n1       259:1    0   13G  0 disk
├─nvme0n1p1   259:2    0   13G  0 part /
└─nvme0n1p128 259:3    0    1M  0 part

sudo mkdir /mnt/disks -p
cd /mnt/disks/
sudo ln -sf /dev/nvme1n1 /mnt/disks/
ls -lrt /mnt/disks/
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;p&gt;Now apply the local static provisioner yaml file.&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight plaintext"&gt;&lt;code&gt;kubectl create ns aerospike
kubectl create -f https://raw.githubusercontent.com/aerospike/aerospike-kubernetes-operator/2.2.1/config/samples/storage/aerospike_local_volume_provisioner.yaml
kubectl create -f https://raw.githubusercontent.com/aerospike/aerospike-kubernetes-operator/2.2.1/config/samples/storage/local_storage_class.yaml
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;p&gt;You should now see the following resources created.&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight plaintext"&gt;&lt;code&gt;kubectl get sc,pv -n aerospike
NAME                                        PROVISIONER                    RECLAIMPOLICY   VOLUMEBINDINGMODE      ALLOWVOLUMEEXPANSION   AGE
storageclass.storage.k8s.io/gp2 (default)   kubernetes.io/aws-ebs          Delete          WaitForFirstConsumer   false                  39m
storageclass.storage.k8s.io/local-ssd       kubernetes.io/no-provisioner   Delete          WaitForFirstConsumer   false                  73s

NAME                                 CAPACITY   ACCESS MODES   RECLAIM POLICY   STATUS      CLAIM   STORAGECLASS   REASON   AGE
persistentvolume/local-pv-45df0f3d   69Gi       RWO            Delete           Available           local-ssd               6m42s
persistentvolume/local-pv-72d40f98   69Gi       RWO            Delete           Available           local-ssd               6m42s
persistentvolume/local-pv-7637ecb    69Gi       RWO            Delete           Available           local-ssd               6m42s
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;p&gt;Remove the resources for now.&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight plaintext"&gt;&lt;code&gt;kubectl delete -f https://raw.githubusercontent.com/aerospike/aerospike-kubernetes-operator/2.2.1/config/samples/storage/aerospike_local_volume_provisioner.yaml
kubectl delete -f https://raw.githubusercontent.com/aerospike/aerospike-kubernetes-operator/2.2.1/config/samples/storage/local_storage_class.yaml
kubectl get pv -o=json|jq .items[].metadata.name -r | grep ^local | while read -r line; do kubectl delete pv $line;done
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;h3&gt;
  
  
  Persistent Volume Filesystem
&lt;/h3&gt;

&lt;p&gt;This brings us to the core part of this article which is, how do we create an XFS filesystem from raw storage devices? In this section we partition the disk so we can have a block and a filesystem on a single device, but we do this only on 2 out of the 3 nodes. So, of the 3 nodes only 2 will have a filesystem.&lt;/p&gt;

&lt;h4&gt;
  
  
  Host Addresses
&lt;/h4&gt;

&lt;p&gt;We start by getting a list of the nodes and IP addresses:&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight plaintext"&gt;&lt;code&gt;kubectl get no -o wide
NAME                             STATUS   ROLES    AGE   VERSION                INTERNAL-IP      EXTERNAL-IP    OS-IMAGE         KERNEL-VERSION                 CONTAINER-RUNTIME
ip-192-168-23-238.ec2.internal   Ready    &amp;lt;none&amp;gt;   63m   v1.22.12-eks-ba74326   192.168.23.238   44.212.74.12   Amazon Linux 2   5.4.209-116.367.amzn2.x86_64   docker://20.10.17
ip-192-168-63-152.ec2.internal   Ready    &amp;lt;none&amp;gt;   63m   v1.22.12-eks-ba74326   192.168.63.152   3.234.239.82   Amazon Linux 2   5.4.209-116.367.amzn2.x86_64   docker://20.10.17
ip-192-168-7-181.ec2.internal    Ready    &amp;lt;none&amp;gt;   63m   v1.22.12-eks-ba74326   192.168.7.181    3.87.204.50    Amazon Linux 2   5.4.209-116.367.amzn2.x86_64   docker://20.10.17
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;h4&gt;
  
  
  Create Partitions
&lt;/h4&gt;

&lt;p&gt;We are going to use the 'parted' application to create our disk partitions. If this is not installed then use the following command to install it.&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight plaintext"&gt;&lt;code&gt;sudo yum install parted -y
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;p&gt;Next, we partition our &lt;strong&gt;nvme1n1&lt;/strong&gt; disk to give us partitions &lt;strong&gt;nvme1n1p1&lt;/strong&gt; and &lt;strong&gt;nvme1n1p2&lt;/strong&gt;:&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight plaintext"&gt;&lt;code&gt;sudo parted -a opt --script /dev/nvme1n1 mklabel gpt mkpart primary 0% 10% mkpart primary 10% 100%
lsblk

NAME          MAJ:MIN RM  SIZE RO TYPE MOUNTPOINT
nvme1n1       259:0    0 69.9G  0 disk
├─nvme1n1p1   259:4    0    7G  0 part
└─nvme1n1p2   259:5    0 62.9G  0 part
nvme0n1       259:1    0   13G  0 disk
├─nvme0n1p1   259:2    0   13G  0 part /
└─nvme0n1p128 259:3    0    1M  0 part
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;h4&gt;
  
  
  Discovery Directories
&lt;/h4&gt;

&lt;p&gt;Let's create the soft links in our discovery directory on 2 of our chosen hosts, which will have a 7GB filesystem.&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight plaintext"&gt;&lt;code&gt;sudo mkdir /mnt/fs-disks -p
sudo mkdir /mnt/fast-disks -p
sudo ln -sf /dev/nvme1n1p1 /mnt/fs-disks/
sudo ln -sf /dev/nvme1n1p2 /mnt/fast-disks/
ls -lrt /mnt/fs-disks/ /mnt/fast-disks/
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;p&gt;On the remaining host, which does not have a filesystem, do the following. In a real production system all disks would be identical.&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight plaintext"&gt;&lt;code&gt;sudo mkdir /mnt/fast-disks -p
sudo ln -sf /dev/nvme1n1 /mnt/fast-disks/
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;h4&gt;
  
  
  Storage Classes &amp;amp; Local Static Provisioner
&lt;/h4&gt;

&lt;p&gt;Download the local static provisioner &amp;amp; the storage class files, make a copy so we can edit them as required.&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight plaintext"&gt;&lt;code&gt;wget https://raw.githubusercontent.com/aerospike/aerospike-kubernetes-operator/2.2.1/config/samples/storage/local_storage_class.yaml
cp local_storage_class.yaml local_storage_class-fast-ssd.yaml
cp local_storage_class.yaml local_storage_class-fs-ssd.yaml
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;h4&gt;
  
  
  Storage Class
&lt;/h4&gt;

&lt;p&gt;To create the storage classes [fast-ssd] and [fs-ssd] we need to change the metadata.name in each of the edited storage_class files respectively and then deploy them as below. Make one [fast-ssd] and the other [fs-ssd].&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight plaintext"&gt;&lt;code&gt;kubectl create -f local_storage_class-fast-ssd.yaml
kubectl create -f local_storage_class-fs-ssd.yaml
kubectl get sc

NAME            PROVISIONER                    RECLAIMPOLICY   VOLUMEBINDINGMODE      ALLOWVOLUMEEXPANSION   AGE
fast-ssd        kubernetes.io/no-provisioner   Delete          WaitForFirstConsumer   false                  9s
fs-ssd          kubernetes.io/no-provisioner   Delete          WaitForFirstConsumer   false                  3m23s
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;h4&gt;
  
  
  Local Static Provisioner
&lt;/h4&gt;

&lt;p&gt;We now want to ensure that the block storage and the filesystem storage can work side by side on the same host. In order to do this we have edited both files&lt;/p&gt;

&lt;ul&gt;
&lt;li&gt;aerospike_local_volume_provisioner-fast-ssd.yaml&lt;/li&gt;
&lt;li&gt;aerospike_local_volume_provisioner-fs-ssd.yaml &lt;/li&gt;
&lt;/ul&gt;

&lt;p&gt;This ensures that the names of the following Kubernetes resources do not clash. &lt;/p&gt;

&lt;ul&gt;
&lt;li&gt;ServiceAccount&lt;/li&gt;
&lt;li&gt;ClusterRole&lt;/li&gt;
&lt;li&gt;ClusterRoleBinding&lt;/li&gt;
&lt;li&gt;ConfigMap&lt;/li&gt;
&lt;li&gt;DaemonSet &lt;/li&gt;
&lt;/ul&gt;

&lt;p&gt;Following are direct links to the edited files for your convenience.&lt;/p&gt;

&lt;ul&gt;
&lt;li&gt;&lt;a href="https://raw.githubusercontent.com/nareshmaharaj-consultant/localstaticprovisioner-k8s-config-files/main/aerospike_local_volume_provisioner-fs-ssd.yaml" rel="noopener noreferrer"&gt;aerospike_local_volume_provisioner-fs-ssd.yaml&lt;/a&gt;&lt;/li&gt;
&lt;li&gt;&lt;a href="https://raw.githubusercontent.com/nareshmaharaj-consultant/localstaticprovisioner-k8s-config-files/main/aerospike_local_volume_provisioner-fast-ssd.yaml" rel="noopener noreferrer"&gt;aerospike_local_volume_provisioner-fast-ssd.yaml&lt;/a&gt;&lt;/li&gt;
&lt;/ul&gt;

&lt;p&gt;Create an aerospike namespace and deploy the storage yaml files.&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight plaintext"&gt;&lt;code&gt;kubectl create ns aerospike
kubectl create -f https://raw.githubusercontent.com/nareshmaharaj-consultant/localstaticprovisioner-k8s-config-files/main/aerospike_local_volume_provisioner-fs-ssd.yaml
kubectl create -f https://raw.githubusercontent.com/nareshmaharaj-consultant/localstaticprovisioner-k8s-config-files/main/aerospike_local_volume_provisioner-fast-ssd.yaml
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;p&gt;Here is the result of deploying our changes. Notice the 7GB filesystem using storage class [fs-ssd].&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight plaintext"&gt;&lt;code&gt;kubectl get sc,pv
NAME                                        PROVISIONER                    RECLAIMPOLICY   VOLUMEBINDINGMODE      ALLOWVOLUMEEXPANSION   AGE
storageclass.storage.k8s.io/fast-ssd        kubernetes.io/no-provisioner   Delete          WaitForFirstConsumer   false                  17m
storageclass.storage.k8s.io/fs-ssd          kubernetes.io/no-provisioner   Delete          WaitForFirstConsumer   false                  17m
storageclass.storage.k8s.io/gp2 (default)   kubernetes.io/aws-ebs          Delete          WaitForFirstConsumer   false                  126m

NAME                                 CAPACITY   ACCESS MODES   RECLAIM POLICY   STATUS      CLAIM   STORAGECLASS   REASON   AGE
persistentvolume/local-pv-42dc4d3f   62Gi       RWO            Delete           Available           fast-ssd                22s
persistentvolume/local-pv-ba8bc9e    69Gi       RWO            Delete           Available           fast-ssd                22s
persistentvolume/local-pv-d7501418   69Gi       RWO            Delete           Available           fast-ssd                22s
persistentvolume/local-pv-f5282eeb   7152Mi     RWO            Delete           Available           fs-ssd                  2s
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;h3&gt;
  
  
  Using the Filesystem
&lt;/h3&gt;

&lt;p&gt;One misconception that might arise is how do we know which node has the XFS filesystem - so our Pod can be scheduled to the correct node? To answer this question - we start by testing our new filesystem and see where it takes us. First we create a simple PersistentVolumeClaim referencing our filesystem [fs-ssd] storage class. Go ahead and create a file called pvc-fs.yaml with the following content.&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight yaml"&gt;&lt;code&gt;&lt;span class="na"&gt;apiVersion&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="s"&gt;v1&lt;/span&gt;
&lt;span class="na"&gt;kind&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="s"&gt;PersistentVolumeClaim&lt;/span&gt;
&lt;span class="na"&gt;metadata&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt;
  &lt;span class="na"&gt;name&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="s"&gt;fs-1g-pvc&lt;/span&gt;
&lt;span class="na"&gt;spec&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt;
  &lt;span class="na"&gt;accessModes&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt;
    &lt;span class="pi"&gt;-&lt;/span&gt; &lt;span class="s"&gt;ReadWriteOnce&lt;/span&gt;
  &lt;span class="na"&gt;volumeMode&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="s"&gt;Filesystem&lt;/span&gt;
  &lt;span class="na"&gt;resources&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt;
    &lt;span class="na"&gt;requests&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt;
      &lt;span class="na"&gt;storage&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="s"&gt;1Gi&lt;/span&gt;
  &lt;span class="na"&gt;storageClassName&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="s"&gt;fs-ssd&lt;/span&gt;
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;p&gt;Create a Pod with the httpd server image and reference the PersistentVolumeClaim. Call your file pod-fs.yaml&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight yaml"&gt;&lt;code&gt;&lt;span class="na"&gt;apiVersion&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="s"&gt;v1&lt;/span&gt;
&lt;span class="na"&gt;kind&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="s"&gt;Pod&lt;/span&gt;
&lt;span class="na"&gt;metadata&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt;
  &lt;span class="na"&gt;creationTimestamp&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="kc"&gt;null&lt;/span&gt;
  &lt;span class="na"&gt;labels&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt;
    &lt;span class="na"&gt;app&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="s"&gt;fs-httpd&lt;/span&gt;
  &lt;span class="na"&gt;name&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="s"&gt;fs-httpd&lt;/span&gt;
&lt;span class="na"&gt;spec&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt;
  &lt;span class="na"&gt;containers&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt;
  &lt;span class="pi"&gt;-&lt;/span&gt; &lt;span class="na"&gt;image&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="s"&gt;httpd&lt;/span&gt;
    &lt;span class="na"&gt;name&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="s"&gt;fs-httpd&lt;/span&gt;
    &lt;span class="na"&gt;resources&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="pi"&gt;{}&lt;/span&gt;
    &lt;span class="na"&gt;volumeMounts&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt;
      &lt;span class="pi"&gt;-&lt;/span&gt; &lt;span class="na"&gt;mountPath&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="s2"&gt;"&lt;/span&gt;&lt;span class="s"&gt;/datafiles/"&lt;/span&gt;
        &lt;span class="na"&gt;name&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="s"&gt;my-fs&lt;/span&gt;
  &lt;span class="na"&gt;dnsPolicy&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="s"&gt;ClusterFirst&lt;/span&gt;
  &lt;span class="na"&gt;restartPolicy&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="s"&gt;Always&lt;/span&gt;
  &lt;span class="na"&gt;volumes&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt;
    &lt;span class="pi"&gt;-&lt;/span&gt; &lt;span class="na"&gt;name&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="s"&gt;my-fs&lt;/span&gt;
      &lt;span class="na"&gt;persistentVolumeClaim&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt;
        &lt;span class="na"&gt;claimName&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="s"&gt;fs-1g-pvc&lt;/span&gt;
&lt;span class="na"&gt;status&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="pi"&gt;{}&lt;/span&gt;
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;p&gt;Deploy both the PVC and the Pod&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight shell"&gt;&lt;code&gt;kubectl create &lt;span class="nt"&gt;-f&lt;/span&gt; pvc-fs.yaml
kubectl create &lt;span class="nt"&gt;-f&lt;/span&gt; pod-fs.yaml
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;p&gt;Run the following command to see where this pod was scheduled.&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight plaintext"&gt;&lt;code&gt;kubectl describe po fs-httpd
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;p&gt;Output shows that the Pod was scheduled to the correct Node with the XFS filesystem ip-192-168-28-160.ec2.internal/192.168.28.160. (Your Node IP address will be different)&lt;/p&gt;

&lt;p&gt;This happened because our StorageClass [fs-ssd] was used in the PersistentVolumeClaim. The PersistentVolumeClaim was used in our Pod and the scheduler knew exactly where the Pod needed to be scheduled.&lt;/p&gt;

&lt;p&gt;We can go a bit further and prove this by adding a Taint to all Nodes with a filesystem. Delete the pod and then add the following taint. Then recreate the Pod once more. ( &lt;em&gt;learn more about &lt;a href="https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/" rel="noopener noreferrer"&gt;Kubernetes taints &amp;amp; tolerations&lt;/a&gt;&lt;/em&gt; )&lt;/p&gt;

&lt;p&gt;(Note: If you are keeping the taint and later redeploy the LocalStaticProvisioner be sure to uncomment the tolerations section in each LocalStaticProvisioner yaml file.)&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight plaintext"&gt;&lt;code&gt;# chg node name here!! 
kubectl delete -f pod-fs.yaml
kubectl taint node ip-192-168-28-160.ec2.internal storageType=filesystem:NoSchedule
kubectl taint node ip-192-168-63-152.ec2.internal storageType=filesystem:NoSchedule
kubectl create -f pod-fs.yaml
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;p&gt;If we look at the events in the Pod, we see something very interesting. &lt;/p&gt;

&lt;p&gt;&lt;em&gt;"0/3 nodes are available: 2 node(s) had taint {storageType: filesystem}, that the pod didn't tolerate, 1 node(s) had volume node affinity conflict."&lt;/em&gt;&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight plaintext"&gt;&lt;code&gt;kubectl describe po fs-httpd
Name:             fs-httpd
Namespace:        default
Priority:         0
Service Account:  default
Node:             &amp;lt;none&amp;gt;
Labels:           app=fs-httpd
Annotations:      kubernetes.io/psp: eks.privileged
Status:           Pending
...
Events:
  Type     Reason            Age   From               Message
  ----     ------            ----  ----               -------
  Warning  FailedScheduling  35s   default-scheduler  0/3 nodes are available: 2 node(s) had taint {storageType: filesystem}, that the pod didn't tolerate, 1 node(s) had volume node affinity conflict.
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;p&gt;This means that 2 of our Nodes rejected the Pod as it did not tolerate the Taint, but it did match the PersistentVolume on the Node. Of the remaining 1 x Node, it failed to meet the Node Affinity of the PersistentVolume attached to the PersistentVolumeClaim. To be clear, each of our PersistentVolume(s) are coupled to Nodes using the Node Affinity rules. If you describe a volume you will see the following:&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight plaintext"&gt;&lt;code&gt;kubectl describe pv local-pv-9dbcb710
Name:              local-pv-9dbcb710
StorageClass:      fs-ssd
Node Affinity:
  Required Terms:
    Term 0:        kubernetes.io/hostname in [ip-192-168-28-160.ec2.internal]
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;p&gt;Now add a toleration to the Pod or delete the taint and we should get a Pod scheduled to the correct Node. Below is our new Pod configuration with the tolerations added&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight yaml"&gt;&lt;code&gt;&lt;span class="na"&gt;apiVersion&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="s"&gt;v1&lt;/span&gt;
&lt;span class="na"&gt;kind&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="s"&gt;Pod&lt;/span&gt;
&lt;span class="na"&gt;metadata&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt;
  &lt;span class="na"&gt;creationTimestamp&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="kc"&gt;null&lt;/span&gt;
  &lt;span class="na"&gt;labels&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt;
    &lt;span class="na"&gt;app&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="s"&gt;fs-httpd&lt;/span&gt;
  &lt;span class="na"&gt;name&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="s"&gt;fs-httpd&lt;/span&gt;
&lt;span class="na"&gt;spec&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt;
  &lt;span class="na"&gt;containers&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt;
  &lt;span class="pi"&gt;-&lt;/span&gt; &lt;span class="na"&gt;image&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="s"&gt;httpd&lt;/span&gt;
    &lt;span class="na"&gt;name&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="s"&gt;fs-httpd&lt;/span&gt;
    &lt;span class="na"&gt;resources&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="pi"&gt;{}&lt;/span&gt;
    &lt;span class="na"&gt;volumeMounts&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt;
      &lt;span class="pi"&gt;-&lt;/span&gt; &lt;span class="na"&gt;mountPath&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="s2"&gt;"&lt;/span&gt;&lt;span class="s"&gt;/datafiles/"&lt;/span&gt;
        &lt;span class="na"&gt;name&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="s"&gt;my-fs&lt;/span&gt;
  &lt;span class="na"&gt;dnsPolicy&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="s"&gt;ClusterFirst&lt;/span&gt;
  &lt;span class="na"&gt;restartPolicy&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="s"&gt;Always&lt;/span&gt;
  &lt;span class="na"&gt;volumes&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt;
    &lt;span class="pi"&gt;-&lt;/span&gt; &lt;span class="na"&gt;name&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="s"&gt;my-fs&lt;/span&gt;
      &lt;span class="na"&gt;persistentVolumeClaim&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt;
        &lt;span class="na"&gt;claimName&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="s"&gt;fs-1g-pcv&lt;/span&gt;
  &lt;span class="na"&gt;tolerations&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt;
    &lt;span class="pi"&gt;-&lt;/span&gt; &lt;span class="na"&gt;key&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="s"&gt;storageType&lt;/span&gt;
      &lt;span class="na"&gt;operator&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="s2"&gt;"&lt;/span&gt;&lt;span class="s"&gt;Equal"&lt;/span&gt;
      &lt;span class="na"&gt;value&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="s"&gt;filesystem&lt;/span&gt;
      &lt;span class="na"&gt;effect&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="s"&gt;NoSchedule&lt;/span&gt;
&lt;span class="na"&gt;status&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="pi"&gt;{}&lt;/span&gt;
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;p&gt;If we describe our Pod we can see what events occurred. I've only included the relevant information.&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight plaintext"&gt;&lt;code&gt;kubectl describe po fs-httpd
Name:             fs-httpd
Node:             ip-192-168-28-160.ec2.internal/192.168.28.160
Volumes:
    ClaimName:  fs-1g-pvc
Tolerations:                storageType=filesystem:NoSchedule
Events:
  Type    Reason     Age   From               Message
  ----    ------     ----  ----               -------
  Normal  Scheduled  91s   default-scheduler  Successfully assigned default/fs-httpd to ip-192-168-28-160.ec2.internal
  Normal  Created    90s   kubelet            Created container fs-httpd
  Normal  Started    90s   kubelet            Started container fs-httpd
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;p&gt;One last interesting fact is to see the mounted XFS filesystem &lt;strong&gt;/dev/nvme1n1p1&lt;/strong&gt; in the container.&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight plaintext"&gt;&lt;code&gt;kubectl exec -it fs-httpd -- bash
root@fs-httpd:/usr/local/apache2# df -Th
Filesystem     Type     Size  Used Avail Use% Mounted on
overlay        overlay   13G  2.9G   11G  22% /
tmpfs          tmpfs     64M     0   64M   0% /dev
tmpfs          tmpfs    3.8G     0  3.8G   0% /sys/fs/cgroup
/dev/nvme1n1p1 xfs      7.0G   40M  7.0G   1% /datafiles
/dev/nvme0n1p1 xfs       13G  2.9G   11G  22% /etc/hosts
shm            tmpfs     64M     0   64M   0% /dev/shm
tmpfs          tmpfs    6.9G   12K  6.9G   1% /run/secrets/kubernetes.io/serviceaccount
tmpfs          tmpfs    3.8G     0  3.8G   0% /proc/acpi
tmpfs          tmpfs    3.8G     0  3.8G   0% /sys/firmware
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;h2&gt;
  
  
  Installing Aerospike - Block &amp;amp; Filesystem Storage
&lt;/h2&gt;

&lt;h3&gt;
  
  
  Aerospike Kubernetes Operator
&lt;/h3&gt;

&lt;p&gt;​&lt;br&gt;
Install the AKO (Aerospike Kubernetes Operator) and set up the database cluster. Refer to the following documentation for details on how to do this.&lt;/p&gt;

&lt;p&gt;&lt;a href="https://docs.aerospike.com/cloud/kubernetes/operator/install-operator-operatorhub" rel="noopener noreferrer"&gt;&lt;img src="https://media2.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2Fm4tkxh10cmfnmucwtuku.png" alt="AKO Install Docs" width="800" height="194"&gt;&lt;/a&gt;&lt;/p&gt;

&lt;p&gt;You can confirm that your installation is successful by checking the pods in the &lt;em&gt;operators&lt;/em&gt; namespace.&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight plaintext"&gt;&lt;code&gt;kubectl get pods -n operators
NAME                                                     READY   STATUS    RESTARTS   AGE
aerospike-operator-controller-manager-7946df5dd9-lmqvt   2/2     Running   0          2m44s
aerospike-operator-controller-manager-7946df5dd9-tg4sh   2/2     Running   0          2m44s
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;h3&gt;
  
  
  Create the Aerospike Database Cluster
&lt;/h3&gt;

&lt;p&gt;​&lt;br&gt;
In this section we create an Aerospike database cluster. For full details, refer to the following documentation.&lt;br&gt;
​&lt;br&gt;
&lt;a href="https://docs.aerospike.com/cloud/kubernetes/operator/create-cluster-kubectl#prepare-the-aerospike-cluster-configuration" rel="noopener noreferrer"&gt;&lt;img src="https://media2.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2Fuaxirqtl84xl07x47eo4.png" alt="Aerospike Cluster" width="800" height="192"&gt;&lt;/a&gt;&lt;br&gt;
​&lt;br&gt;
The basic steps are as follows.&lt;br&gt;
​&lt;/p&gt;
&lt;h4&gt;
  
  
  Get the code
&lt;/h4&gt;

&lt;p&gt;Clone the Aerospike Kubernetes Git repo and be sure to copy your feature/licence key file, if you have one, to the following directory - &lt;em&gt;config/samples/secrets&lt;/em&gt;.&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight plaintext"&gt;&lt;code&gt;git clone https://github.com/aerospike/aerospike-kubernetes-operator.git
cd aerospike-kubernetes-operator/
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;h4&gt;
  
  
  Initialize Storage
&lt;/h4&gt;



&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight plaintext"&gt;&lt;code&gt;kubectl apply -f config/samples/storage/eks_ssd_storage_class.yaml
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;p&gt;As of version 6.0, for each block storage device, erase the disk to avoid critical errors:&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight plaintext"&gt;&lt;code&gt;Oct 26 2022 14:05:53 GMT: CRITICAL (drv_ssd): (drv_ssd.c:2216) /test/dev/xvdf: not an Aerospike device but not erased - check config or erase device
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;p&gt;Use the following command to erase the device on each host. Remember on 2 of the Nodes the device will be nvme1n1p2 (demo only)&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight plaintext"&gt;&lt;code&gt;export diskPart=/dev/nvme1n1
sudo blkdiscard ${diskPart} &amp;amp;&amp;amp; sudo blkdiscard -z --length 8MiB ${diskPart} &amp;amp;
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;h4&gt;
  
  
  Add Secrets
&lt;/h4&gt;

&lt;p&gt;​&lt;br&gt;
Secrets, for those not familiar with the terminology, allow data to be introduced into the Kubernetes environment, while ensuring that it cannot be read. Examples might include private PKI keys or passwords.&lt;br&gt;
​&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight plaintext"&gt;&lt;code&gt;kubectl  -n aerospike create secret generic aerospike-secret --from-file=config/samples/secrets
kubectl  -n aerospike create secret generic auth-secret --from-literal=password='admin123'
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;h4&gt;
  
  
  Create Aerospike Cluster
&lt;/h4&gt;

&lt;p&gt;​&lt;br&gt;
Before we create the cluster, delete the httpd Pod and PersistentVolumeClaim we created earlier:&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight plaintext"&gt;&lt;code&gt;kubectl delete -f pod-fs.yaml
kubectl delete pvc fs-1g-pvc
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;p&gt;Make sure all the PersistentVolume(s) have a status of 'Available':&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight plaintext"&gt;&lt;code&gt;kubectl get pv -n aerospike
NAME                CAPACITY   ACCESS MODES   RECLAIM POLICY   STATUS      CLAIM   STORAGECLASS   REASON   AGE
local-pv-684418fc   62Gi       RWO            Delete           Available           fast-ssd                4h13m
local-pv-9dbcb710   7152Mi     RWO            Delete           Available           fs-ssd                  5m42s
local-pv-aa49ff2c   69Gi       RWO            Delete           Available           fast-ssd                4h13m
local-pv-ad278bd7   69Gi       RWO            Delete           Available           fast-ssd                4h13m
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;p&gt;If some of the PVs are not in the 'Available' state run the following patch to release the PersistentVolumeClaim (PVC).&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight plaintext"&gt;&lt;code&gt;kubectl get pv -o=json|jq .items[].metadata.name -r | grep ^local | while read -r line; do kubectl patch pv $line -p '{"spec":{"claimRef": null}}';done
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;p&gt;Use the file &lt;em&gt;&lt;a href="https://raw.githubusercontent.com/nareshmaharaj-consultant/localstaticprovisioner-k8s-config-files/main/all_flash_cluster_cr.yaml" rel="noopener noreferrer"&gt;all_flash_cluster_cr.yaml&lt;/a&gt;&lt;/em&gt; to configure the Aerospike Database to use the All Flash recipe so we get to use the Block and Filesystem storage as a single unit of work. Note where we have used the different Storage classes and Tolerations.&lt;/p&gt;

&lt;ul&gt;
&lt;li&gt;storageClass: ssd ( AWS gp2 ext4 Filesystem Dynamic Storage )&lt;/li&gt;
&lt;li&gt;storageClass: fast-ssd ( LocalStaticProvisioner Block Storage )&lt;/li&gt;
&lt;li&gt;storageClass: fs-ssd ( LocalStaticProvisioner XFS filesystem )&lt;/li&gt;
&lt;/ul&gt;

&lt;h4&gt;
  
  
  Deploy the Aerospike Database
&lt;/h4&gt;



&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight plaintext"&gt;&lt;code&gt;kubectl create -f all_flash_cluster_cr.yaml
kubectl get po -n aerospike -w
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;h4&gt;
  
  
  Verify Aerospike Database Running
&lt;/h4&gt;

&lt;p&gt;The &lt;code&gt;get pods&lt;/code&gt; command should show you two active Aerospike pods.&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight plaintext"&gt;&lt;code&gt;kubectl get po -n aerospike
NAME                                        READY   STATUS    RESTARTS   AGE
aerocluster-0-0                             1/1     Running   0          8m32s
aerocluster-0-1                             1/1     Running   0          8m32s
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;h4&gt;
  
  
  Insert test data with the benchmark tool
&lt;/h4&gt;

&lt;p&gt;The Aerospike Benchmark tool measures the performance of an Aerospike cluster. It can mimic real-world workloads with configurable record structures, various access patterns, and UDF calls. Use the benchmark tool to insert some data easily.&lt;/p&gt;

&lt;p&gt;Login to a node (which should have a filesystem - just checking...)&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight plaintext"&gt;&lt;code&gt;kubectl exec -it aerocluster-0-0 -n aerospike -- bash
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;p&gt;Add some data using the simplest of commands&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight plaintext"&gt;&lt;code&gt;asbench -Uadmin -Padmin123
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;p&gt;To verify we successfully added some data use aql tool, login and run a query&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight plaintext"&gt;&lt;code&gt;aql -Uadmin -Padmin123
select * from test
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;h2&gt;
  
  
  Cleaning up
&lt;/h2&gt;

&lt;p&gt;​&lt;br&gt;
&lt;strong&gt;WARNING:&lt;/strong&gt; If this step is bypassed and the EKS cluster is deleted, all persistent volumes created will remain.&lt;/p&gt;
&lt;h3&gt;
  
  
  Delete Database Cluster
&lt;/h3&gt;

&lt;p&gt;&lt;strong&gt;&lt;em&gt;This&lt;/em&gt;&lt;/strong&gt; is the important step and should always be run when using cloud storage classes.&lt;br&gt;
​&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight plaintext"&gt;&lt;code&gt;kubectl delete -f all_flash_cluster_cr.yaml
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;h3&gt;
  
  
  Delete EKS Cluster
&lt;/h3&gt;

&lt;p&gt;We can now delete the EKS cluster:&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight plaintext"&gt;&lt;code&gt;eksctl delete cluster -f my-cluster.yaml
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;h2&gt;
  
  
  Conclusion
&lt;/h2&gt;

&lt;p&gt;We have seen that with little effort we can easily spin up a deployment with local disks for block storage, combined with filesystem storage. We also confirmed the Pods were being scheduled automatically to the correct nodes. We also learned that by using Kubernetes taints, we could effectively reserve nodes for specific usage. You had a sneak preview into Aerospike - All Flash use cases too. And one final point, we only looked at the Kubernetes Special Interest Group (SIG) version of the LocalStaticProvisioner. You may also wish to consider Rancher's version which is listed below.&lt;/p&gt;

&lt;p&gt;Review Rancher&lt;br&gt;
&lt;a href="https://github.com/rancher/local-path-provisioner" rel="noopener noreferrer"&gt;https://github.com/rancher/local-path-provisioner&lt;/a&gt;&lt;/p&gt;

</description>
      <category>aerospike</category>
      <category>filesystem</category>
      <category>block</category>
      <category>kubernetes</category>
    </item>
    <item>
      <title>Aerospike k8s Volume Cleanup</title>
      <dc:creator>Naresh Maharaj</dc:creator>
      <pubDate>Thu, 13 Oct 2022 21:33:16 +0000</pubDate>
      <link>https://dev.to/aerospike/aerospike-k8s-volume-cleanup-7fc</link>
      <guid>https://dev.to/aerospike/aerospike-k8s-volume-cleanup-7fc</guid>
      <description>&lt;p&gt;​&lt;/p&gt;

&lt;p&gt;&lt;a href="https://media2.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2F0royuaa9pmvxotvmh21i.png" class="article-body-image-wrapper"&gt;&lt;img src="https://media2.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2F0royuaa9pmvxotvmh21i.png" width="800" height="378"&gt;&lt;/a&gt;&lt;br&gt;
​&lt;/p&gt;

&lt;p&gt;When using the Aerospike Kubernetes Operator, the complexity of configuring a  high performance distributed database is abstracted away, making the task of instantiating an Aerospike database incredibly easy. However, even though Kubernetes leads us towards expecting equivalent results regardless of platform, we need to be mindful of the peculiarities of individual frameworks, particularly if we are repeatedly iterating processes. &lt;br&gt;
​&lt;br&gt;
This article focuses on AWS EKS provisioned storage which is dynamically created when using the Aerospike Kubernetes Operator. Ensuring that storage has been fully deleted and other redundant resources removed is a necessary housekeeping step if you are to avoid unwelcome AWS charges.&lt;br&gt;
​&lt;/p&gt;



&lt;p&gt;​&lt;br&gt;
In this article we review&lt;br&gt;
​&lt;/p&gt;

&lt;ul&gt;
&lt;li&gt;How to create a simple AWS EKS cluster&lt;/li&gt;
&lt;li&gt;Install the Aerospike Kubernetes Operator&lt;/li&gt;
&lt;li&gt;How AWS Provisioned EBS Storage volumes are created&lt;/li&gt;
&lt;li&gt;The results of repeated Aerospike database cluster creation - particularly the resulting storage utilization&lt;/li&gt;
&lt;li&gt;Database cluster termination, with a focus on how the storage assets are managed&lt;/li&gt;
&lt;li&gt;What to watch out for&lt;/li&gt;
&lt;/ul&gt;


&lt;h2&gt;
  
  
  Create an EKS Cluster
&lt;/h2&gt;

&lt;p&gt;​&lt;br&gt;
In this section we create an EKS cluster, on which we will install Aerospike.&lt;br&gt;
​&lt;/p&gt;
&lt;h3&gt;
  
  
  Setup
&lt;/h3&gt;

&lt;p&gt;​&lt;br&gt;
Create a file called &lt;em&gt;my-cluster.yaml&lt;/em&gt; with the following contents. This specifies the configuration for the Kubernetes cluster itself. The &lt;em&gt;ssh&lt;/em&gt; configuration is optional.&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight plaintext"&gt;&lt;code&gt;apiVersion: eksctl.io/v1alpha5
kind: ClusterConfig
​
metadata:
  name: my1-eks-cluster
  region: us-east-1
​
nodeGroups:
  - name: ng-1
    labels: { role: database }
    instanceType: t3.medium
    desiredCapacity: 3
    volumeSize: 13
    ssh:
      allow: true # will use ~/.ssh/id_rsa.pub as the default ssh key
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;p&gt;We use the AWS eksctl tool to create our Kubernetes cluster. For more information on using eksctl see the official documentation.&lt;br&gt;
​&lt;br&gt;
&lt;a href="https://eksctl.io/" rel="noopener noreferrer"&gt;&lt;img src="https://media2.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2Ffp4cuecsmz0ii8ul1l9y.png" alt="eksctl - The official CLI for Amazon EKS" width="800" height="190"&gt;&lt;/a&gt;&lt;br&gt;
​&lt;br&gt;
Using &lt;em&gt;eksctl&lt;/em&gt; ...&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight plaintext"&gt;&lt;code&gt;eksctl create cluster -f my-cluster.yaml
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;p&gt;The result on completion will be a 3 node Kubernetes cluster. Confirm successful creation as follows :&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight plaintext"&gt;&lt;code&gt;kubectl get nodes -L role
NAME                             STATUS   ROLES    AGE   VERSION                ROLE
ip-192-168-13-47.ec2.internal    Ready    &amp;lt;none&amp;gt;   10m   v1.22.12-eks-ba74326   database
ip-192-168-23-112.ec2.internal   Ready    &amp;lt;none&amp;gt;   10m   v1.22.12-eks-ba74326   database
ip-192-168-40-59.ec2.internal    Ready    &amp;lt;none&amp;gt;   10m   v1.22.12-eks-ba74326   database
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;p&gt;We can check what volumes are created. In &lt;em&gt;my-cluster.yaml&lt;/em&gt; the volume size is 13GB, allowing these to be easily located when using the AWS EC2 console.&lt;/p&gt;

&lt;p&gt;&lt;a href="https://media2.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2Ff0z33si3fpbuem11tapp.png" class="article-body-image-wrapper"&gt;&lt;img src="https://media2.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2Ff0z33si3fpbuem11tapp.png" width="800" height="341"&gt;&lt;/a&gt;&lt;/p&gt;




&lt;h2&gt;
  
  
  Install Aerospike
&lt;/h2&gt;

&lt;h3&gt;
  
  
  Aerospike Kubernetes Operator
&lt;/h3&gt;

&lt;p&gt;​&lt;br&gt;
In this section we install the AKO ( Aerospike Kubernetes Operator ) and set up our database cluster. &lt;br&gt;
​&lt;br&gt;
Please refer to the following documentation for details as to how to do this.&lt;br&gt;
​&lt;br&gt;
&lt;a href="https://docs.aerospike.com/cloud/kubernetes/operator/install-operator-operatorhub" rel="noopener noreferrer"&gt;&lt;img src="https://media2.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2Fm4tkxh10cmfnmucwtuku.png" alt="AKO Install Docs" width="800" height="194"&gt;&lt;/a&gt;&lt;/p&gt;

&lt;p&gt;AKO installation can be confirmed by checking the pods in the &lt;em&gt;operators&lt;/em&gt; namespace.&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight plaintext"&gt;&lt;code&gt;kubectl get pods -n operators
NAME                                                     READY   STATUS    RESTARTS   AGE
aerospike-operator-controller-manager-7946df5dd9-lmqvt   2/2     Running   0          2m44s
aerospike-operator-controller-manager-7946df5dd9-tg4sh   2/2     Running   0          2m44s
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;h3&gt;
  
  
  Create the Aerospike Database Cluster
&lt;/h3&gt;

&lt;p&gt;​&lt;br&gt;
In this section we create an Aerospike cluster. For full detail please refer to the documentation below.&lt;br&gt;
​&lt;br&gt;
&lt;a href="https://docs.aerospike.com/cloud/kubernetes/operator/create-cluster-kubectl#prepare-the-aerospike-cluster-configuration" rel="noopener noreferrer"&gt;&lt;img src="https://media2.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2Fuaxirqtl84xl07x47eo4.png" alt="Aerospike Cluster" width="800" height="192"&gt;&lt;/a&gt;&lt;br&gt;
​&lt;br&gt;
The basic steps are as follows.&lt;br&gt;
​&lt;/p&gt;
&lt;h4&gt;
  
  
  Get the code
&lt;/h4&gt;

&lt;p&gt;Clone the Aerospike Kubernetes Git repo and be sure to copy your feature/licence key file, if you have one, to the following directory - &lt;em&gt;config/samples/secrets&lt;/em&gt;.&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight plaintext"&gt;&lt;code&gt;git clone https://github.com/aerospike/aerospike-kubernetes-operator.git
cd aerospike-kubernetes-operator/
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;h4&gt;
  
  
  Initialise Storage
&lt;/h4&gt;



&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight plaintext"&gt;&lt;code&gt;kubectl apply -f config/samples/storage/eks_ssd_storage_class.yaml
kubectl apply -f config/samples/storage/local_storage_class.yaml
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;h4&gt;
  
  
  Add Secrets
&lt;/h4&gt;

&lt;p&gt;​&lt;br&gt;
Secrets, for those not familiar with the terminology, allow data to be introduced into the Kubernetes environment, while ensuring that it cannot be read. Examples might include private PKI keys or passwords.&lt;br&gt;
​&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight plaintext"&gt;&lt;code&gt;kubectl  -n aerospike create secret generic aerospike-secret --from-file=config/samples/secrets
kubectl  -n aerospike create secret generic auth-secret --from-literal=password='admin123'
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;h4&gt;
  
  
  Create Aerospike Cluster
&lt;/h4&gt;

&lt;p&gt;​&lt;br&gt;
We set up Aerospike using the &lt;a href="https://docs.aerospike.com/server/operations/configure/namespace/storage#recipe-for-an-ssd-storage-engine" rel="noopener noreferrer"&gt;ssd storage&lt;/a&gt; recipe.&lt;br&gt;
​&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight plaintext"&gt;&lt;code&gt;kubectl create -f config/samples/ssd_storage_cluster_cr.yaml 
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;h4&gt;
  
  
  Verify Aerospike Database Running
&lt;/h4&gt;

&lt;p&gt;The &lt;code&gt;get pods&lt;/code&gt; command should show you two active Aerospike pods.&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight plaintext"&gt;&lt;code&gt;kubectl get pods -n aerospike
NAME              READY   STATUS    RESTARTS   AGE
aerocluster-0-0   1/1     Running   0          36s
aerocluster-0-1   1/1     Running   0          36s
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;






&lt;h2&gt;
  
  
  Persistent Volumes and Claims (PV,PVC)
&lt;/h2&gt;

&lt;p&gt;If we run the following command, we can see what persistent volume storage has been claimed.&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight plaintext"&gt;&lt;code&gt;kubectl get pv,pvc -n aerospike -o json | jq .'items[].spec.capacity.storage' | egrep -v -e null
"1Gi"
"5Gi"
"5Gi"
"1Gi"
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;p&gt;​&lt;br&gt;
Note we see the same persistent volumes when using the AWS Console as expected -  included for completeness.&lt;/p&gt;

&lt;p&gt;&lt;a href="https://media2.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2Fiumonky60hhtbur1s48n.png" class="article-body-image-wrapper"&gt;&lt;img src="https://media2.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2Fiumonky60hhtbur1s48n.png" width="800" height="335"&gt;&lt;/a&gt;&lt;/p&gt;


&lt;h2&gt;
  
  
  Cleaning up
&lt;/h2&gt;

&lt;p&gt;​&lt;br&gt;
&lt;strong&gt;WARNING:&lt;/strong&gt; If this step is bypassed and the EKS cluster is deleted, all persistent volumes created will remain.&lt;/p&gt;
&lt;h3&gt;
  
  
  Delete Aerospike Database Cluster
&lt;/h3&gt;

&lt;p&gt;&lt;strong&gt;&lt;em&gt;This&lt;/em&gt;&lt;/strong&gt; is the important step and should always be run when using cloud storage classes.&lt;br&gt;
​&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight plaintext"&gt;&lt;code&gt;kubectl delete -f ssd_storage_cluster_cr.yaml 
aerospikecluster.asdb.aerospike.com "aerocluster" deleted
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;h3&gt;
  
  
  Check Persistent Volume Claims
&lt;/h3&gt;

&lt;p&gt;If we now check our persistent volumes we see there are no persistent volumes remaining.&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight plaintext"&gt;&lt;code&gt;kubectl get pv,pvc -n aerospike -o json | jq .'items[].spec.capacity.storage' | egrep -v -e null
(nothing returned...)
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;p&gt;​&lt;br&gt;
Similarly, if visiting the AWS Console, we can see only the volumes for the k8s nodes we initially created using the &lt;em&gt;eksctl&lt;/em&gt; command.&lt;br&gt;
​&lt;/p&gt;

&lt;p&gt;&lt;a href="https://media2.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2F8bb85rj22x7oe2ja5q69.png" class="article-body-image-wrapper"&gt;&lt;img src="https://media2.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2F8bb85rj22x7oe2ja5q69.png" width="800" height="204"&gt;&lt;/a&gt;&lt;br&gt;
​&lt;/p&gt;
&lt;h3&gt;
  
  
  Delete EKS Cluster
&lt;/h3&gt;

&lt;p&gt;We can now go ahead and delete the EKS cluster&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight plaintext"&gt;&lt;code&gt;eksctl delete cluster -f my-cluster.yaml
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;h3&gt;
  
  
  Verify Resources Deleted
&lt;/h3&gt;

&lt;p&gt;Final check of our EBS volumes and we should find everything has been removed.&lt;br&gt;
​&lt;/p&gt;

&lt;p&gt;&lt;a href="https://media2.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2Fmg167vt2d57kz39jsxcs.png" class="article-body-image-wrapper"&gt;&lt;img src="https://media2.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2Fmg167vt2d57kz39jsxcs.png" width="800" height="246"&gt;&lt;/a&gt;&lt;/p&gt;


&lt;h2&gt;
  
  
  How &lt;em&gt;not&lt;/em&gt; to clean up
&lt;/h2&gt;

&lt;p&gt;​&lt;br&gt;
Assume we have a new EKS cluster, having installed the Aerospike Operator and a new Aerospike database. We then see the resources below.&lt;/p&gt;

&lt;p&gt;&lt;a href="https://media2.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2Fiumonky60hhtbur1s48n.png" class="article-body-image-wrapper"&gt;&lt;img src="https://media2.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2Fiumonky60hhtbur1s48n.png" width="800" height="335"&gt;&lt;/a&gt;&lt;/p&gt;

&lt;p&gt;​&lt;br&gt;
Suppose we &lt;em&gt;only&lt;/em&gt; execute the &lt;code&gt;eksctl delete cluster&lt;/code&gt; command.&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight plaintext"&gt;&lt;code&gt;eksctl delete cluster -f my-cluster.yaml
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;p&gt;&lt;a href="https://media2.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2Fqlf9071wcj2esrha0n5g.png" class="article-body-image-wrapper"&gt;&lt;img src="https://media2.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2Fqlf9071wcj2esrha0n5g.png" width="800" height="240"&gt;&lt;/a&gt;&lt;/p&gt;

&lt;p&gt;In that case, the persistent volumes associated with Aerospike will &lt;strong&gt;&lt;em&gt;NOT&lt;/em&gt;&lt;/strong&gt; be deleted. In this example the volumes are small, but at $0.10/GB/month these costs can accumulate (100TB = $10k / month!). Numerous &lt;em&gt;test&lt;/em&gt; iterations can result in capacity accumulating, generating large, unwelcome AWS bills.&lt;br&gt;
​&lt;br&gt;
Note that we can go ahead and manually delete these volumes via the AWS Console - but only because we know they are there.&lt;/p&gt;

&lt;h2&gt;
  
  
  Conclusion
&lt;/h2&gt;

&lt;p&gt;Automation tools are fantastic and this level of deployment agility is an astonishing convenience. It is always wise however to check on final results and not assume the tools take care of everything. Another moral is that following instructions in full may be the wise initial path - the author probably detailed those steps for good reason. &lt;/p&gt;

</description>
      <category>aerospike</category>
      <category>developers</category>
      <category>devops</category>
      <category>kubernetes</category>
    </item>
    <item>
      <title>Authorize Atlas to Access your AWS Account</title>
      <dc:creator>Naresh Maharaj</dc:creator>
      <pubDate>Tue, 15 Jun 2021 10:55:41 +0000</pubDate>
      <link>https://dev.to/naresh_maharaj_c4b8fbd4aa/mongodb-atlas-cloud-provider-aws-cloud-integration-using-terraform-45md</link>
      <guid>https://dev.to/naresh_maharaj_c4b8fbd4aa/mongodb-atlas-cloud-provider-aws-cloud-integration-using-terraform-45md</guid>
      <description>&lt;h1&gt;
  
  
  AWS IAM Role Access
&lt;/h1&gt;

&lt;p&gt;In MongoDB Atlas you may at some stage wish to use a cloud provider to either store files to S3, encrypt at rest and so on.&lt;/p&gt;

&lt;p&gt;MongoDB Atlas allows you to configure the Cloud Provider Role and authenticate that role to use later as a service.&lt;/p&gt;

&lt;p&gt;In this article we use terraform to &lt;/p&gt;

&lt;p&gt;1) Create the Cloud Service in MongoDB Atlas&lt;br&gt;
2) Get the external ref from Atlas and provide this as a trusted source to a Role on your own AWS Account.&lt;br&gt;
3) Authenticate the role as an Assumed Role to Atlas.&lt;/p&gt;

&lt;p&gt;&lt;a href="https://github.com/nareshmaharaj-consultant/atlas_cloud_provider_terraform" rel="noopener noreferrer"&gt;https://github.com/nareshmaharaj-consultant/atlas_cloud_provider_terraform&lt;/a&gt;&lt;/p&gt;

&lt;p&gt;Use the following &lt;/p&gt;
&lt;h3&gt;
  
  
  variables.tf
&lt;/h3&gt;


&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight plaintext"&gt;&lt;code&gt;variable "mongodb_atlas_api_pub_key" {
 default = "qvesfrig"
}

variable "mongodb_atlas_api_pri_key" {
 default = "enter your private key here"
}

variable "mongodb_atlas_project_id" {
 default = "60ab6ed5fb4a1f43c4950e71"
}

variable "atlas_project_name" {
  type        = string
  description = "Name of the Atlas project the role is associated with"
  default     = "my-atlas"
}

variable "account_name" {
  type        = string
  description = "Name of the AWS account.  Used as a name prefix"
  default     =  "naresh.maharaj"
}

variable "tags" {
  type        = map(string)
  description = "Key/value pairs of additional information attached to resources"
  default     = {}
}

variable "atlas_aws_root_account_id" {
  type        = number
  description = "Atlas AWS root account ARN IAM account id"
  default     = "536727724300"
}

variable "aws_root_account_id" {
  type        = number
  description = "Atlas AWS root account ARN IAM account id"
  default     = "521195893806"
}

variable "atlas_external_ids" {
  type        = list(any)
  description = "List of unique external IDs (per-Atlas project)"
  default     = []
}
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;

&lt;h3&gt;
  
  
  main.tf
&lt;/h3&gt;


&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight plaintext"&gt;&lt;code&gt;terraform {
  required_providers {
    mongodbatlas = {
      source = "mongodb/mongodbatlas"
      version = "0.9.1"
    }
  }
}

provider "mongodbatlas" {
  # Configuration options
  public_key  = var.mongodb_atlas_api_pub_key
  private_key = var.mongodb_atlas_api_pri_key
}

resource "mongodbatlas_cloud_provider_access" "test_role" {
  project_id    = "${var.mongodb_atlas_project_id}"
  provider_name = "AWS"
}

data "aws_iam_policy_document" "atlas-assume-role-policy" {
  statement {
    sid     = "rolepolicy"
    actions = ["sts:AssumeRole"]

    condition {
      test     = "StringEquals"
      variable = "sts:ExternalId"
      values   = ["${mongodbatlas_cloud_provider_access.test_role.atlas_assumed_role_external_id}"]
    }

    principals {
      type        = "AWS"
      identifiers = ["arn:aws:iam::${var.atlas_aws_root_account_id}:root"]
    }

    principals {
      type        = "Service"
      identifiers = ["ec2.amazonaws.com"]
    }
  }
}

resource "aws_iam_role" "atlas-cmk-access-role" {
  name               = "${var.account_name}-atlas-cmk-${var.atlas_project_name}-role"
  tags               = merge({ "Name" = "${var.account_name}-atlas-cmk-role" }, var.tags)
  assume_role_policy = data.aws_iam_policy_document.atlas-assume-role-policy.json
}

resource "mongodbatlas_cloud_provider_access_authorization" "auth_role" {
   project_id =  mongodbatlas_cloud_provider_access.test_role.project_id
   role_id    =  mongodbatlas_cloud_provider_access.test_role.role_id

   aws = {
      iam_assumed_role_arn = "arn:aws:iam::${var.aws_root_account_id}:role/${var.account_name}-atlas-cmk-${var.atlas_project_name}-role"
   }
}


output "atlas_assumed_role_external_id" {
  value = mongodbatlas_cloud_provider_access.test_role.atlas_assumed_role_external_id
}
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;

&lt;h3&gt;
  
  
  Run
&lt;/h3&gt;



&lt;p&gt;&lt;code&gt;terraform init&lt;br&gt;
terraform plan ( check everything meets your expectations )&lt;br&gt;
terraform apply&lt;/code&gt;&lt;br&gt;
&lt;/p&gt;

</description>
      <category>terraform</category>
      <category>aws</category>
      <category>mongodb</category>
      <category>role</category>
    </item>
  </channel>
</rss>
