<?xml version="1.0" encoding="UTF-8"?>
<rss version="2.0" xmlns:atom="http://www.w3.org/2005/Atom" xmlns:dc="http://purl.org/dc/elements/1.1/">
  <channel>
    <title>DEV Community: ByteLedger</title>
    <description>The latest articles on DEV Community by ByteLedger (@vb_nair).</description>
    <link>https://dev.to/vb_nair</link>
    <image>
      <url>https://media2.dev.to/dynamic/image/width=90,height=90,fit=cover,gravity=auto,format=auto/https:%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Fuser%2Fprofile_image%2F1459259%2F988471b4-6e98-4e70-8c50-1c9b3fcab1f8.png</url>
      <title>DEV Community: ByteLedger</title>
      <link>https://dev.to/vb_nair</link>
    </image>
    <atom:link rel="self" type="application/rss+xml" href="https://dev.to/feed/vb_nair"/>
    <language>en</language>
    <item>
      <title>Monitor k8 pods</title>
      <dc:creator>ByteLedger</dc:creator>
      <pubDate>Mon, 13 Oct 2025 11:20:09 +0000</pubDate>
      <link>https://dev.to/vb_nair/monitor-k8-pods-3110</link>
      <guid>https://dev.to/vb_nair/monitor-k8-pods-3110</guid>
      <description>&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight plaintext"&gt;&lt;code&gt;
# 3a. Prepare resource usage CSV file for the triggered pod
OUTPUT_DIR="$SCRIPTPATH/output"
mkdir -p "$OUTPUT_DIR"
RESOURCE_CSV="$OUTPUT_DIR/${JOB_NAME}_pod_resources.csv"
echo "podname,cpu_usage,memory_usage,timestamp" &amp;gt; "$RESOURCE_CSV"

# &amp;gt;&amp;gt;&amp;gt; BEGIN add ShieldCache df CSV (minimal)
SC_DF_CSV="$OUTPUT_DIR/${JOB_NAME}_shieldcache_df.csv"
echo "timestamp,pod,filesystem,size,used,avail,use_pct,mountpoint" &amp;gt; "$SC_DF_CSV"
# &amp;lt;&amp;lt;&amp;lt; END add ShieldCache df CSV (minimal)

&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;p&gt;Next&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight plaintext"&gt;&lt;code&gt;
while true; do
  USAGE_LINE=$(kubectl top pod "$POD_NAME" -n "$NAMESPACE" --no-headers 2&amp;gt;/dev/null | \
               awk -v ts="$(date +'%Y-%m-%d %H:%M:%S')" '{print $1","$2","$3","ts}')
  if [ -n "$USAGE_LINE" ]; then
    echo "$USAGE_LINE" &amp;gt;&amp;gt; "$RESOURCE_CSV"
  fi

  # &amp;gt;&amp;gt;&amp;gt; BEGIN add ShieldCache df sample (minimal)
  DF_LINE=$(kubectl -n "$NAMESPACE" exec "$POD_NAME" -- sh -c 'df -hP /app/ShieldCache | tail -n +2' 2&amp;gt;/dev/null | \
            awk -v ts="$(date +"%Y-%m-%d %H:%M:%S")" -v pod="$POD_NAME" '{printf "%s,%s,%s,%s,%s,%s,%s,%s\n", ts,pod,$1,$2,$3,$4,$5,$6}')
  if [ -n "$DF_LINE" ]; then
    echo "$DF_LINE" &amp;gt;&amp;gt; "$SC_DF_CSV"
  fi
  # &amp;lt;&amp;lt;&amp;lt; END add ShieldCache df sample (minimal)

&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;p&gt;Updated&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight plaintext"&gt;&lt;code&gt;
#!/bin/bash

# Define variables
SCRIPT=$(realpath -s "$0")
SCRIPTPATH=$(dirname "$SCRIPT")
NAMESPACE="onprem"                        # Namespace where the pods are located
CRONJOB_NAME="teams-chat"                 # Name of the CronJob you want to trigger the job from
JOB_NAME="teams-chat-$(date +'%Y%m%d-%H%M%S')"  # Fixed job name
TIMESTAMP=$(date +'%Y-%m-%d_%H%M%S')      # Timestamp for log file
LOG_PATH="$SCRIPTPATH/Logs/${TIMESTAMP}-${JOB_NAME}.log" # Log file with timestamp
# List of keywords to check in the logs (customize as needed)
ERROR_KEYWORDS=("java.io.EOFException" "NoSuchMethodError")
SUCCESS_KEYWORDS=("TGM Policy Ended")
DAY_OF_WEEK=$(date +%A)

if [[ "$DAY_OF_WEEK" != "Sunday" &amp;amp;&amp;amp; "$DAY_OF_WEEK" != "Monday" ]]; then
  echo "Today is $DAY_OF_WEEK. Starting import job..."
else
  echo "Today is $DAY_OF_WEEK. Import job will be skipped as there is no delivery."
  exit 0
fi

# 1. Validate the namespace exists
echo "[$(date +'%Y-%m-%d %H:%M:%S')] [INFO] More detailed logs can be found in $LOG_PATH" | tee -a "$LOG_PATH"
if ! kubectl get namespace "$NAMESPACE" &amp;amp;&amp;gt;/dev/null; then
  echo "[$(date +'%Y-%m-%d %H:%M:%S')] [ERROR] Error: Namespace $NAMESPACE does not exist." | tee -a "$LOG_PATH"
  cp "$LOG_PATH" "$SCRIPTPATH/Lastlog/lastpodlog.txt"
  exit 1
fi

# 2. Create the job manually from the CronJob
echo "[$(date +'%Y-%m-%d %H:%M:%S')] [INFO] Creating job $JOB_NAME from CronJob $CRONJOB_NAME in namespace $NAMESPACE..." | tee -a "$LOG_PATH"
kubectl create job --from=cronjob/$CRONJOB_NAME -n "$NAMESPACE" "$JOB_NAME"
if [[ $? -ne 0 ]]; then
  echo "[$(date +'%Y-%m-%d %H:%M:%S')] [ERROR] Error: Failed to create job from CronJob." | tee -a "$LOG_PATH"
  cp "$LOG_PATH" "$SCRIPTPATH/Lastlog/lastpodlog.txt"
  exit 1
fi

# 3. Wait for the pod associated with the job to be created
echo "[$(date +'%Y-%m-%d %H:%M:%S')] [INFO] Waiting for pod associated with job $JOB_NAME to be created..." | tee -a "$LOG_PATH"
while true; do
  POD_NAME=$(kubectl get pods -n "$NAMESPACE" -l job-name="$JOB_NAME" --no-headers | awk '{print $1}' | head -n 1)
  if [ -n "$POD_NAME" ]; then
    NODE_NAME=$(kubectl get pod "$POD_NAME" -n "$NAMESPACE" -o jsonpath='{.spec.nodeName}')
    echo "[$(date +'%Y-%m-%d %H:%M:%S')] [INFO] Pod $POD_NAME associated with job $JOB_NAME has been created on node $NODE_NAME" | tee -a "$LOG_PATH"
    break
  fi
  echo "[$(date +'%Y-%m-%d %H:%M:%S')] [INFO] Pod for job $JOB_NAME not yet created... Retrying in 10 seconds." | tee -a "$LOG_PATH"
  sleep 10
done

# 3a. Prepare resource usage CSV file for the triggered pod
OUTPUT_DIR="$SCRIPTPATH/output"
mkdir -p "$OUTPUT_DIR"
RESOURCE_CSV="$OUTPUT_DIR/${JOB_NAME}_pod_resources.csv"
echo "podname,cpu_usage,memory_usage,timestamp" &amp;gt; "$RESOURCE_CSV"

# &amp;gt;&amp;gt;&amp;gt; NEW (ShieldCache df): add a second CSV for /app/ShieldCache filesystem snapshots
SC_DF_CSV="$OUTPUT_DIR/${JOB_NAME}_shieldcache_df.csv"
echo "timestamp,pod,filesystem,size,used,avail,use_pct,mountpoint" &amp;gt; "$SC_DF_CSV"
# &amp;lt;&amp;lt;&amp;lt; NEW (ShieldCache df)

# 4. Wait for the pod to complete (success or failure) and check for "Error" state
echo "[$(date +'%Y-%m-%d %H:%M:%S')] [INFO] Waiting for pod $POD_NAME to complete..." | tee -a "$LOG_PATH"
while true; do
  # Collect resource usage for the pod every 30 seconds
  USAGE_LINE=$(kubectl top pod "$POD_NAME" -n "$NAMESPACE" --no-headers 2&amp;gt;/dev/null | awk -v ts="$(date +'%Y-%m-%d %H:%M:%S')" '{print $1","$2","$3","ts}')
  if [ -n "$USAGE_LINE" ]; then
    echo "$USAGE_LINE" &amp;gt;&amp;gt; "$RESOURCE_CSV"
  fi

  # &amp;gt;&amp;gt;&amp;gt; NEW (ShieldCache df): sample df -hP /app/ShieldCache into the second CSV
  DF_LINE=$(kubectl -n "$NAMESPACE" exec "$POD_NAME" -- sh -c "df -hP /app/ShieldCache | tail -n +2" 2&amp;gt;/dev/null | \
            awk -v ts="$(date +'%Y-%m-%d %H:%M:%S')" -v pod="$POD_NAME" '{printf "%s,%s,%s,%s,%s,%s,%s,%s\n", ts,pod,$1,$2,$3,$4,$5,$6}')
  if [ -n "$DF_LINE" ]; then
    echo "$DF_LINE" &amp;gt;&amp;gt; "$SC_DF_CSV"
  fi
  # &amp;lt;&amp;lt;&amp;lt; NEW (ShieldCache df)

  POD_STATUS=$(kubectl get pods -n "$NAMESPACE" --no-headers | awk -v pod="$POD_NAME" '$1==pod {print $3}')
  if [[ "$POD_STATUS" == "Completed" ]]; then
    echo "[$(date +'%Y-%m-%d %H:%M:%S')] [INFO] Pod $POD_NAME is in $POD_STATUS status." | tee -a "$LOG_PATH"
    break
  elif [[ "$POD_STATUS" == "Error" || "$POD_STATUS" == "CrashLoopBackOff" ]]; then
    echo "[$(date +'%Y-%m-%d %H:%M:%S')] [ERROR] Pod $POD_NAME is in status $POD_STATUS." | tee -a "$LOG_PATH"
    echo "[$(date +'%Y-%m-%d %H:%M:%S')] [INFO] Calling external script to send a notification." | tee -a "$LOG_PATH"
    /bin/bash $SCRIPTPATH/send_SMS_alert.sh | tee -a "$LOG_PATH"
    echo "[$(date +'%Y-%m-%d %H:%M:%S')] [INFO] Fetching logs for pod $POD_NAME..." | tee -a "$LOG_PATH"
    kubectl logs "$POD_NAME" --timestamps=true -n "$NAMESPACE" &amp;gt;&amp;gt; "$LOG_PATH"
    cp "$LOG_PATH" "$SCRIPTPATH/Lastlog/lastpodlog.txt"
    exit 2
  elif [[ "$POD_STATUS" == "Running" || "$POD_STATUS" == "NotReady" || "$POD_STATUS" == "Init" || "$POD_STATUS" == "PodInitializing" ]]; then
    echo "[$(date +'%Y-%m-%d %H:%M:%S')] [INFO] Pod $POD_NAME is still running ($POD_STATUS)... Waiting 30 seconds." | tee -a "$LOG_PATH"
    sleep 30
  else
    echo "[$(date +'%Y-%m-%d %H:%M:%S')] [INFO] Fetching logs for pod $POD_NAME..." | tee -a "$LOG_PATH"
    kubectl logs "$POD_NAME" --timestamps=true -n "$NAMESPACE" &amp;gt;&amp;gt; "$LOG_PATH"
    cp "$LOG_PATH" "$SCRIPTPATH/Lastlog/lastpodlog.txt"
    exit 3
  fi
done

# 5. Fetch logs from the pod
echo "[$(date +'%Y-%m-%d %H:%M:%S')] [INFO] Fetching logs for pod $POD_NAME..." | tee -a "$LOG_PATH"
kubectl logs "$POD_NAME" --timestamps=true -n "$NAMESPACE" &amp;gt;&amp;gt; "$LOG_PATH"
if [ $? -ne 0 ]; then
  echo "[$(date +'%Y-%m-%d %H:%M:%S')] [ERROR] Failed to fetch logs for pod $POD_NAME." | tee -a "$LOG_PATH"
  cp "$LOG_PATH" "$SCRIPTPATH/Lastlog/lastpodlog.txt"
  exit 3
fi

# 6. Grep the logs for any error-related keywords
echo "[$(date +'%Y-%m-%d %H:%M:%S')] [INFO] Checking logs for errors or exceptions..." | tee -a "$LOG_PATH"
for keyword in "${ERROR_KEYWORDS[@]}"; do
  if grep -iq "$keyword" "$LOG_PATH"; then
    echo "[$(date +'%Y-%m-%d %H:%M:%S')] [ERROR] Error found in logs: Keyword '$keyword' found. Exiting job as failed." | tee -a "$LOG_PATH"
    /bin/bash $SCRIPTPATH/send_SMS_alert.sh | tee -a "$LOG_PATH"
    cp "$LOG_PATH" "$SCRIPTPATH/Lastlog/lastpodlog.txt"
    tail -n 500 "$LOG_PATH"
    exit 4
  fi
done

# 7. Grep the logs for success-related keywords and ensure none are missing
echo "[$(date +'%Y-%m-%d %H:%M:%S')] [INFO] Checking logs for success-related keywords..." | tee -a "$LOG_PATH"
for keyword in "${SUCCESS_KEYWORDS[@]}"; do
  if ! grep -iq "$keyword" "$LOG_PATH"; then
    echo "[$(date +'%Y-%m-%d %H:%M:%S')] [INFO] Failure: Keyword '$keyword' not found in logs. Exiting job as failed." | tee -a "$LOG_PATH"
    /bin/bash $SCRIPTPATH/send_SMS_alert.sh | tee -a "$LOG_PATH"
    cp "$LOG_PATH" "$SCRIPTPATH/Lastlog/lastpodlog.txt"
    tail -n 500 "$LOG_PATH"
    exit 5
  fi
done

# 8. Output the last 500 lines of the log file to the terminal (execution agent)
echo "[$(date +'%Y-%m-%d %H:%M:%S')] [INFO] Displaying the last 500 lines of the log file:" | tee -a "$LOG_PATH"
tail -n 500 "$LOG_PATH"
cp "$LOG_PATH" "$SCRIPTPATH/Lastlog/lastpodlog.txt"

# 9. Job completed successfully
echo "[$(date +'%Y-%m-%d %H:%M:%S')] [INFO] Job $JOB_NAME completed successfully." | tee -a "$LOG_PATH"
exit 0

&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



</description>
      <category>monitoring</category>
      <category>automation</category>
      <category>kubernetes</category>
      <category>devops</category>
    </item>
    <item>
      <title>K8 Monitoring</title>
      <dc:creator>ByteLedger</dc:creator>
      <pubDate>Sun, 12 Oct 2025 15:52:27 +0000</pubDate>
      <link>https://dev.to/vb_nair/k8-monitoring-16m7</link>
      <guid>https://dev.to/vb_nair/k8-monitoring-16m7</guid>
      <description>&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight plaintext"&gt;&lt;code&gt;#!/bin/bash
# =============================================================
# Cronjob_monitorpod.sh
# Manually trigger a CronJob, track the newest pod for its Job,
# collect pod logs + periodic CPU/MEM. Additionally, whenever a
# log line contains "/app/ShieldCache", we take an immediate
# CPU/MEM snapshot and append it to the SAME RESOURCE_CSV.
# =============================================================

set -euo pipefail

# --------- EDIT THESE ----------
NAMESPACE="onprem"          # namespace where the CronJob lives
CRONJOB_NAME="teams-chat"   # CronJob to trigger
JOB_NAME_BASE="teams-chat"  # base Job name; script adds timestamp
# -------------------------------

# Paths
SCRIPT="$(realpath -s "$0")"
SCRIPTPATH="$(dirname "$SCRIPT")"

# Timestamps: one safe for K8s names (hyphens), one for files (underscores OK)
TIMESTAMP_K8S="$(date +'%Y%m%d-%H%M%S')"    # safe for k8s object names
TIMESTAMP_FILE="$(date +'%Y%m%d_%H%M%S')"   # file-friendly

# RFC1123-safe Job name
JOB_NAME="$(echo "${JOB_NAME_BASE}-${TIMESTAMP_K8S}" | tr '[:upper:]_' '[:lower:]-')"

# Ensure both typical casings exist; choose one canonical
mkdir -p "$SCRIPTPATH/Logs" "$SCRIPTPATH/Lastlog" &amp;gt;/dev/null 2&amp;gt;&amp;amp;1 || true
LOG_DIR="$SCRIPTPATH/Logs"
LASTLOG_DIR="$SCRIPTPATH/Lastlog"

# Files (only these two will be written)
LOG_PATH="$LOG_DIR/${TIMESTAMP_FILE}-${JOB_NAME_BASE}.log"
RESOURCE_CSV="$LOG_DIR/${TIMESTAMP_FILE}-${JOB_NAME_BASE}_pod_resources.csv"

# Keyword checks (customize)
ERROR_KEYWORDS=("java.io.EOFException" "NoSuchMethodError" "CrashLoopBackOff" "Error")
SUCCESS_KEYWORDS=("TGM Policy Ended" "Job completed" "Completed successfully")

ts(){ date +'%Y-%m-%d %H:%M:%S'; }

echo "$(ts) [INFO] Logs: $LOG_PATH"
echo "$(ts) [INFO] Namespace: $NAMESPACE | CronJob: $CRONJOB_NAME | Job: $JOB_NAME" | tee -a "$LOG_PATH"

# --- Validate namespace ---
if ! kubectl get namespace "$NAMESPACE" &amp;gt;/dev/null 2&amp;gt;&amp;amp;1; then
  echo "$(ts) [ERROR] Namespace '$NAMESPACE' not found." | tee -a "$LOG_PATH"
  cp "$LOG_PATH" "$LASTLOG_DIR/lastpodlog.txt" 2&amp;gt;/dev/null || true
  exit 1
fi

# --- Create Job from CronJob ---
echo "$(ts) [INFO] Creating Job '$JOB_NAME' from CronJob '$CRONJOB_NAME'..." | tee -a "$LOG_PATH"
if ! kubectl -n "$NAMESPACE" create job --from=cronjob/"$CRONJOB_NAME" "$JOB_NAME" &amp;gt;/dev/null 2&amp;gt;&amp;amp;1; then
  echo "$(ts) [ERROR] Failed to create Job from CronJob (name collision? RBAC?)." | tee -a "$LOG_PATH"
  cp "$LOG_PATH" "$LASTLOG_DIR/lastpodlog.txt" 2&amp;gt;/dev/null || true
  exit 1
fi

# --- Find newest pod for this Job ---
get_newest_pod() {
  kubectl -n "$NAMESPACE" get pods -l "job-name=${JOB_NAME}" \
    --sort-by=.metadata.creationTimestamp \
    -o jsonpath='{range .items[*]}{.metadata.name}{"\n"}{end}' 2&amp;gt;/dev/null | tail -n 1
}

echo "$(ts) [INFO] Waiting for pod for Job '$JOB_NAME'..." | tee -a "$LOG_PATH"
POD_NAME=""
while [[ -z "${POD_NAME}" ]]; do
  POD_NAME="$(get_newest_pod)"
  [[ -z "$POD_NAME" ]] &amp;amp;&amp;amp; { echo "$(ts) [INFO] Not yet created… retry in 10s"; sleep 10; }
done
NODE_NAME="$(kubectl -n "$NAMESPACE" get pod "$POD_NAME" -o jsonpath='{.spec.nodeName}' 2&amp;gt;/dev/null || true)"
echo "$(ts) [INFO] Pod: $POD_NAME | Node: ${NODE_NAME:-unknown}" | tee -a "$LOG_PATH"

# --- Prepare CSV (same file for periodic samples AND ShieldCache hits) ---
echo "timestamp,pod,cpu,mem" &amp;gt; "$RESOURCE_CSV"

# --- Background watcher: on /app/ShieldCache lines, append a usage row to RESOURCE_CSV ---
kubectl -n "$NAMESPACE" logs -f "pod/${POD_NAME}" --timestamps=true 2&amp;gt;/dev/null | \
while IFS= read -r line; do
  if echo "$line" | grep -q "/app/ShieldCache"; then
    now="$(ts)"
    usage="$(kubectl -n "$NAMESPACE" top pod "$POD_NAME" --no-headers 2&amp;gt;/dev/null | awk '{print $2","$3}')"
    echo "$now,$POD_NAME,${usage:-,}" &amp;gt;&amp;gt; "$RESOURCE_CSV"
    echo "$(ts) [INFO] ShieldCache activity detected; sampled usage: ${usage:-N/A}" &amp;gt;&amp;gt; "$LOG_PATH"
  fi
done &amp;amp;
CACHE_MON_PID=$!

cleanup(){ kill "$CACHE_MON_PID" &amp;gt;/dev/null 2&amp;gt;&amp;amp;1 || true; cp "$LOG_PATH" "$LASTLOG_DIR/lastpodlog.txt" 2&amp;gt;/dev/null || true; }
trap cleanup EXIT

# --- Main monitoring loop: periodic CPU/MEM and phase ---
echo "$(ts) [INFO] Monitoring pod resource usage… (to $RESOURCE_CSV)" | tee -a "$LOG_PATH"

while true; do
  # Periodic sample (every loop)
  line="$(kubectl -n "$NAMESPACE" top pod "$POD_NAME" --no-headers 2&amp;gt;/dev/null | awk -v t="$(ts)" '{print t","$1","$2","$3}')"
  [[ -n "$line" ]] &amp;amp;&amp;amp; echo "$line" &amp;gt;&amp;gt; "$RESOURCE_CSV"

  phase="$(kubectl -n "$NAMESPACE" get pod "$POD_NAME" -o jsonpath='{.status.phase}' 2&amp;gt;/dev/null || echo "Unknown")"
  case "$phase" in
    Running|Pending)
      echo "$(ts) [INFO] Pod $POD_NAME is $phase. Sleeping 30s…" | tee -a "$LOG_PATH"
      sleep 30
      ;;
    Succeeded|Completed)
      echo "$(ts) [INFO] Pod $POD_NAME finished with phase: $phase." | tee -a "$LOG_PATH"
      break
      ;;
    Failed|Error)
      echo "$(ts) [ERROR] Pod $POD_NAME ended with phase: $phase." | tee -a "$LOG_PATH"
      exit 2
      ;;
    *)
      echo "$(ts) [WARN] Pod $POD_NAME phase '$phase' (waiting 20s)..." | tee -a "$LOG_PATH"
      sleep 20
      ;;
  esac
done

# --- Fetch final logs once ---
echo "$(ts) [INFO] Fetching final logs for pod $POD_NAME…" | tee -a "$LOG_PATH"
kubectl -n "$NAMESPACE" logs "pod/${POD_NAME}" --timestamps=true &amp;gt;&amp;gt; "$LOG_PATH" 2&amp;gt;&amp;amp;1 || true

# --- Keyword scans ---
for kw in "${ERROR_KEYWORDS[@]}"; do
  if grep -qi -- "$kw" "$LOG_PATH"; then
    echo "$(ts) [ERROR] Error keyword found in logs: '$kw'." | tee -a "$LOG_PATH"
    exit 3
  fi
done
for kw in "${SUCCESS_KEYWORDS[@]}"; do
  if ! grep -qi -- "$kw" "$LOG_PATH"; then
    echo "$(ts) [WARN] Success keyword '$kw' not found in logs." | tee -a "$LOG_PATH"
  fi
done

echo "$(ts) [INFO] Done. Files:"
echo "  - $LOG_PATH"
echo "  - $RESOURCE_CSV"
exit 0

&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;h2&gt;
  
  
  updated
&lt;/h2&gt;



&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight plaintext"&gt;&lt;code&gt;#!/bin/bash

# Define variables
SCRIPT="$(realpath -s "$0")"
SCRIPTPATH="$(dirname "$SCRIPT")"

NAMESPACE="onprem"                      # Namespace where the pods are located
CRONJOB_NAME="teams-chat"               # Name of the CronJob you want to trigger the job from
JOB_NAME="teams-chat"                   # Fixed job name
TIMESTAMP="$(date +'%Y%m%d_%H%M%S')"    # Timestamp for log file
LOG_PATH="$SCRIPTPATH/Logs/${TIMESTAMP}-${JOB_NAME}.log"   # Log file with timestamp
RESOURCE_CSV="$SCRIPTPATH/Logs/${TIMESTAMP}-${JOB_NAME}_pod_resources.csv"

# List of keywords to check in the logs (customize as needed)
ERROR_KEYWORDS=("java.io.EOFException" "NoSuchMethodError")
SUCCESS_KEYWORDS=("TGM Policy Ended")

DAY_OF_WEEK="$(date +%A)"

# Script help
print_help() {
  clear
  echo "---------------------------------------------------------------"
  echo "Script is used to monitor given POD resources (Memory and CPU)."
  echo " "
  echo "It must be run with ONE mandatory parameter - name of POD to monitor."
  echo "Usage: $0 &amp;lt;pod-name&amp;gt;"
  echo "Example:"
  echo "$0 smtp-email-1-29002137-fwtqx"
  echo "Use \"kubectl get pods\" command to list available PODs."
  echo "---------------------------------------------------------------"
  exit 0
}

# Day gating as in your screenshots
if [[ "$DAY_OF_WEEK" != "Sunday" &amp;amp;&amp;amp; "$DAY_OF_WEEK" != "Monday" ]]; then
  echo "Today is $DAY_OF_WEEK. Starting import job..."
else
  echo "Today is $DAY_OF_WEEK. Import job will be skipped as there is no delivery."
  exit 0
fi

# 1. Validate the namespace exists
echo "$(date +'%Y-%m-%d %H:%M:%S') [INFO] More detailed logs can be found in $LOG_PATH"
if ! kubectl get namespace "$NAMESPACE" &amp;amp;&amp;gt;/dev/null; then
  echo "$(date +'%Y-%m-%d %H:%M:%S') [ERROR] Error: Namespace $NAMESPACE does not exist." | tee -a "$LOG_PATH"
  cp "$LOG_PATH" "$SCRIPTPATH/Lastlog/lastpodlog.txt" 2&amp;gt;/dev/null || true
  exit 1
fi

# 2. Create the job manually from the CronJob
echo "$(date +'%Y-%m-%d %H:%M:%S') [INFO] Creating job $JOB_NAME from CronJob $CRONJOB_NAME in namespace $NAMESPACE..." | tee -a "$LOG_PATH"
kubectl create job --from=cronjob/"$CRONJOB_NAME" -n "$NAMESPACE" "$JOB_NAME"
if [[ $? -ne 0 ]]; then
  echo "$(date +'%Y-%m-%d %H:%M:%S') [ERROR] Error: Failed to create job from CronJob." | tee -a "$LOG_PATH"
  cp "$LOG_PATH" "$SCRIPTPATH/Lastlog/lastpodlog.txt" 2&amp;gt;/dev/null || true
  exit 1
fi

# 3. Wait for the pod associated with the job to be created
echo "$(date +'%Y-%m-%d %H:%M:%S') [INFO] Waiting for pod associated with job $JOB_NAME to be created..." | tee -a "$LOG_PATH"
while true
do
  POD_NAME="$(kubectl get pods -n "$NAMESPACE" -l job-name="$JOB_NAME" --no-headers | awk '{print $1}' | head -n 1)"
  if [[ -n "$POD_NAME" ]]; then
    NODE_NAME="$(kubectl get pod "$POD_NAME" -n "$NAMESPACE" -o jsonpath='{.spec.nodeName}')"
    echo "$(date +'%Y-%m-%d %H:%M:%S') [INFO] Pod $POD_NAME associated with job $JOB_NAME has been created on node $NODE_NAME." | tee -a "$LOG_PATH"
    break
  else
    echo "$(date +'%Y-%m-%d %H:%M:%S') [INFO] Pod for job $JOB_NAME not yet created... Retrying in 10 seconds." | tee -a "$LOG_PATH"
    sleep 10
  fi
done

# 4. Prepare resource usage CSV file for the triggered pod
echo "ts,pod,cpu,mem" &amp;gt; "$RESOURCE_CSV"

# &amp;gt;&amp;gt;&amp;gt;&amp;gt;&amp;gt;&amp;gt;&amp;gt;&amp;gt;&amp;gt;&amp;gt;&amp;gt;&amp;gt;&amp;gt;&amp;gt;&amp;gt;&amp;gt;&amp;gt;&amp;gt;&amp;gt;&amp;gt;&amp;gt;&amp;gt;&amp;gt;&amp;gt;&amp;gt;&amp;gt;&amp;gt;&amp;gt;&amp;gt;&amp;gt;&amp;gt;&amp;gt;&amp;gt;&amp;gt;&amp;gt;&amp;gt;&amp;gt;&amp;gt;&amp;gt;&amp;gt;&amp;gt;&amp;gt;&amp;gt;&amp;gt;&amp;gt;&amp;gt;&amp;gt;&amp;gt;&amp;gt;&amp;gt;&amp;gt;&amp;gt;&amp;gt;&amp;gt;&amp;gt;&amp;gt;&amp;gt;&amp;gt;&amp;gt;&amp;gt;&amp;gt;&amp;gt;&amp;gt;&amp;gt;&amp;gt;&amp;gt;&amp;gt;&amp;gt;&amp;gt;&amp;gt;&amp;gt;&amp;gt;
# (ADDED) ShieldCache correlator:
# Start a minimal background watcher that tails the pod logs and whenever
# a line contains "/app/ShieldCache", it takes a one-off CPU/MEM sample
# and appends it to the SAME RESOURCE_CSV in the same 4-column format.
kubectl logs -f "$POD_NAME" -n "$NAMESPACE" --timestamps=true 2&amp;gt;/dev/null | \
while IFS= read -r line; do
  if echo "$line" | grep -q "/app/ShieldCache"; then
    ts_now="$(date +'%Y-%m-%d %H:%M:%S')"
    usage_line="$(kubectl top pod "$POD_NAME" -n "$NAMESPACE" --no-headers 2&amp;gt;/dev/null | awk '{print $2","$3}')"
    echo "$ts_now,$POD_NAME,${usage_line:-,}" &amp;gt;&amp;gt; "$RESOURCE_CSV"
    echo "$(date +'%Y-%m-%d %H:%M:%S') [INFO] ShieldCache activity detected; sampled usage appended to CSV." &amp;gt;&amp;gt; "$LOG_PATH"
  fi
done &amp;amp;
# &amp;lt;&amp;lt;&amp;lt;&amp;lt;&amp;lt;&amp;lt;&amp;lt;&amp;lt;&amp;lt;&amp;lt;&amp;lt;&amp;lt;&amp;lt;&amp;lt;&amp;lt;&amp;lt;&amp;lt;&amp;lt;&amp;lt;&amp;lt;&amp;lt;&amp;lt;&amp;lt;&amp;lt;&amp;lt;&amp;lt;&amp;lt;&amp;lt;&amp;lt;&amp;lt;&amp;lt;&amp;lt;&amp;lt;&amp;lt;&amp;lt;&amp;lt;&amp;lt;&amp;lt;&amp;lt;&amp;lt;&amp;lt;&amp;lt;&amp;lt;&amp;lt;&amp;lt;&amp;lt;&amp;lt;&amp;lt;&amp;lt;&amp;lt;&amp;lt;&amp;lt;&amp;lt;&amp;lt;&amp;lt;&amp;lt;&amp;lt;&amp;lt;&amp;lt;&amp;lt;&amp;lt;&amp;lt;&amp;lt;&amp;lt;&amp;lt;&amp;lt;&amp;lt;&amp;lt;&amp;lt;&amp;lt;&amp;lt;&amp;lt;

# 5. Wait for the pod to complete (success or failure) and check for "Error" state
echo "$(date +'%Y-%m-%d %H:%M:%S') [INFO] Waiting for pod $POD_NAME to complete..." | tee -a "$LOG_PATH"
while true; do
  # Collect resource usage for the pod every 30 seconds
  USAGE_LINE="$(kubectl top pod "$POD_NAME" -n "$NAMESPACE" --no-headers 2&amp;gt;/dev/null | awk -v ts="$(date +'%Y-%m-%d %H:%M:%S')" '{print ts","$1","$2","$3}')"
  if [[ -n "$USAGE_LINE" ]]; then
    echo "$USAGE_LINE" &amp;gt;&amp;gt; "$RESOURCE_CSV"
  fi

  POD_STATUS="$(kubectl get pods -n "$NAMESPACE" --no-headers | awk -v pod="$POD_NAME" '$1==pod {print $3}')"

  if [[ "$POD_STATUS" == "Completed" ]]; then
    echo "$(date +'%Y-%m-%d %H:%M:%S') [INFO] Pod $POD_NAME status: $POD_STATUS." | tee -a "$LOG_PATH"
    break
  elif [[ "$POD_STATUS" == "Error" || "$POD_STATUS" == "CrashLoopBackOff" ]]; then
    echo "$(date +'%Y-%m-%d %H:%M:%S') [ERROR] Pod $POD_NAME status: $POD_STATUS." | tee -a "$LOG_PATH"
    # /bin/bash "$SCRIPTPATH/send_SMS_alert.sh" | tee -a "$LOG_PATH"
    echo "$(date +'%Y-%m-%d %H:%M:%S') [INFO] Creating timestamps-true.txt for pod $POD_NAME." | tee -a "$LOG_PATH"
    echo "$(date +'%Y-%m-%d %H:%M:%S') namespace=$NAMESPACE" &amp;gt;&amp;gt; "$SCRIPTPATH/Lastlog/timestamps-true.txt"
    cp "$LOG_PATH" "$SCRIPTPATH/Lastlog/lastpodlog.txt" 2&amp;gt;/dev/null || true
    exit 2
  elif [[ "$POD_STATUS" == "Running" || "$POD_STATUS" == "NotReady" || "$POD_STATUS" == "Init" || "$POD_STATUS" == "PodInitializing" ]]; then
    echo "$(date +'%Y-%m-%d %H:%M:%S') [INFO] Something went wrong? pod status=$POD_STATUS..." | tee -a "$LOG_PATH"
    # /bin/bash "$SCRIPTPATH/send_SMS_alert.sh" | tee -a "$LOG_PATH"
    echo "$(date +'%Y-%m-%d %H:%M:%S') [INFO] Fetching logs for pod $POD_NAME..." | tee -a "$LOG_PATH"
    kubectl logs "$POD_NAME" -n "$NAMESPACE" --timestamps=true | tee -a "$LOG_PATH"
    tail -n 100 "$LOG_PATH"
    echo "$(date +'%Y-%m-%d %H:%M:%S') [INFO] Pod $POD_NAME is still running ($POD_STATUS). Waiting 30 seconds." | tee -a "$LOG_PATH"
    sleep 30
  else
    echo "$(date +'%Y-%m-%d %H:%M:%S') [INFO] Fetching logs for pod $POD_NAME..." | tee -a "$LOG_PATH"
    if ! kubectl logs "$POD_NAME" -n "$NAMESPACE" --timestamps=true &amp;gt;&amp;gt; "$LOG_PATH" 2&amp;gt;&amp;amp;1; then
      echo "$(date +'%Y-%m-%d %H:%M:%S') [ERROR] Failed to fetch logs for pod $POD_NAME." | tee -a "$LOG_PATH"
      cp "$LOG_PATH" "$SCRIPTPATH/Lastlog/lastpodlog.txt" 2&amp;gt;/dev/null || true
      exit 3
    fi
    break
  fi
done

# 6. Fetch logs from the pod (final)
echo "$(date +'%Y-%m-%d %H:%M:%S') [INFO] Fetching logs for pod $POD_NAME..." | tee -a "$LOG_PATH"
kubectl logs "$POD_NAME" -n "$NAMESPACE" --timestamps=true | tee -a "$LOG_PATH"
if [[ $? -ne 0 ]]; then
  echo "$(date +'%Y-%m-%d %H:%M:%S') [ERROR] Failed to fetch logs for pod $POD_NAME." | tee -a "$LOG_PATH"
  cp "$LOG_PATH" "$SCRIPTPATH/Lastlog/lastpodlog.txt" 2&amp;gt;/dev/null || true
  exit 3
fi

# 7. Grep the logs for any error-related keywords
echo "$(date +'%Y-%m-%d %H:%M:%S') [INFO] Checking logs for errors or exceptions..." | tee -a "$LOG_PATH"
for keyword in "${ERROR_KEYWORDS[@]}"; do
  if grep -iq "$keyword" "$LOG_PATH"; then
    echo "$(date +'%Y-%m-%d %H:%M:%S') [ERROR] Error found in logs: Keyword '$keyword' found. Exiting job as failed." | tee -a "$LOG_PATH"
    # /bin/bash "$SCRIPTPATH/send_SMS_alert.sh" | tee -a "$LOG_PATH"
    cp "$LOG_PATH" "$SCRIPTPATH/Lastlog/lastpodlog.txt" 2&amp;gt;/dev/null || true
    tail -n 500 "$LOG_PATH"
    exit 4
  fi
done

# 8. Grep the logs for success-related keywords and ensure none are missing
echo "$(date +'%Y-%m-%d %H:%M:%S') [INFO] Checking logs for success-related keywords..." | tee -a "$LOG_PATH"
for keyword in "${SUCCESS_KEYWORDS[@]}"; do
  if ! grep -iq "$keyword" "$LOG_PATH"; then
    echo "$(date +'%Y-%m-%d %H:%M:%S') [INFO] Failure: Keyword '$keyword' not found in logs. Exiting job as failed." | tee -a "$LOG_PATH"
    # /bin/bash "$SCRIPTPATH/send_SMS_alert.sh" | tee -a "$LOG_PATH"
    cp "$LOG_PATH" "$SCRIPTPATH/Lastlog/lastpodlog.txt" 2&amp;gt;/dev/null || true
    tail -n 500 "$LOG_PATH"
    exit 5
  fi
done

# 9. Output the last 500 lines of the log file to the terminal (execution agent)
echo "$(date +'%Y-%m-%d %H:%M:%S') [INFO] Displaying the last 500 lines of the log file:" | tee -a "$LOG_PATH"
tail -n 500 "$LOG_PATH"
cp "$LOG_PATH" "$SCRIPTPATH/Lastlog/lastpodlog.txt" 2&amp;gt;/dev/null || true

# 10. Job completed successfully
echo "$(date +'%Y-%m-%d %H:%M:%S') [INFO] Job $JOB_NAME completed successfully." | tee -a "$LOG_PATH"
exit 0

&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



</description>
      <category>monitoring</category>
      <category>automation</category>
      <category>kubernetes</category>
      <category>devops</category>
    </item>
    <item>
      <title>Monitor pods in K8</title>
      <dc:creator>ByteLedger</dc:creator>
      <pubDate>Thu, 09 Oct 2025 08:30:22 +0000</pubDate>
      <link>https://dev.to/vb_nair/monitor-pods-in-k8-jgf</link>
      <guid>https://dev.to/vb_nair/monitor-pods-in-k8-jgf</guid>
      <description>&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight plaintext"&gt;&lt;code&gt;#!/usr/bin/env bash
# monitor_pod_or_cronjob.sh
# Two modes:
#  A) CronJob mode (give -c/--cronjob): create a Job from a CronJob and follow newest pod.
#  B) Pod-pattern mode (give one of --pod-exact/--pod-prefix/--pod-regex): wait for a pod that matches and follow THAT pod precisely.
#
# Output: &amp;lt;script_dir&amp;gt;/output/&amp;lt;YYYYmmdd-HHMMSS&amp;gt;_&amp;lt;name&amp;gt;/{pod.log,resources.csv}
# Requirements: kubectl (metrics-server optional for CPU/mem numbers).

set -euo pipefail

# --------------------------
# Defaults (override by flags)
# --------------------------
NAMESPACE=""
CRONJOB_NAME=""         # if set =&amp;gt; CronJob mode
JOB_PREFIX=""           # default =&amp;gt; CRONJOB_NAME

POD_EXACT=""            # if set =&amp;gt; Pod-pattern mode (exact match)
POD_PREFIX=""           # if set =&amp;gt; Pod-pattern mode (prefix match)
POD_REGEX=""            # if set =&amp;gt; Pod-pattern mode (regex via grep -E)
LOCK_ONCE="true"        # if true, never switch to another pod after locking; set --follow-newer=false to change

CONTAINER_NAME=""       # specific container to stream logs from
SAMPLE_INTERVAL=5       # seconds between resource samples
WAIT_TIMEOUT=900        # seconds to wait for pod to appear
KUBECTL="${KUBECTL:-kubectl}"
TUE_SAT_ONLY="false"

# --------------------------
# Internals
# --------------------------
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" &amp;amp;&amp;amp; pwd)"
OUTPUT_ROOT="${SCRIPT_DIR}/output"
TIMESTAMP="$(date +'%Y%m%d-%H%M%S')"

ts()  { date +'%Y-%m-%d %H:%M:%S'; }
tsz() { date -u +'%Y-%m-%dT%H:%M:%SZ'; }

usage() {
  cat &amp;lt;&amp;lt;EOF
Usage:

  # CronJob mode (create Job and monitor)
  ${0##*/} -n &amp;lt;ns&amp;gt; -c &amp;lt;cronjob&amp;gt; [--job-prefix PFX] [--container NAME] [--interval SECS] [--wait SECS] [--follow-newer=true|false]

  # Pod-pattern mode (do NOT create anything; monitor a pod by name pattern)
  ${0##*/} -n &amp;lt;ns&amp;gt; [--pod-exact NAME | --pod-prefix PREFIX | --pod-regex REGEX] [--container NAME] [--interval SECS] [--wait SECS] [--follow-newer=false]

Options:
  -n, --namespace NS         Namespace (required in both modes)
  -c, --cronjob NAME         CronJob name (CronJob mode)
  -j, --job-prefix PFX       Prefix for created Job (default: cronjob name)
      --pod-exact NAME       Exact pod name (Pod-pattern mode)
      --pod-prefix PREFIX    Pod name starts with PREFIX (Pod-pattern mode)
      --pod-regex REGEX      Pod name matches REGEX (Pod-pattern mode; grep -E)
      --container NAME       Container to stream logs (default: first / all)
      --interval SECS        Metrics sample interval (default: 5)
      --wait SECS            Timeout waiting for pod (default: 900)
      --follow-newer=BOOL    If true (default in CronJob mode), switch to newer pods when they appear.
                             In Pod-pattern mode default is true for prefix/regex, false for exact.
      --tue-sat-only         Skip on Sun/Mon
  -h, --help

Examples:
  # Monitor a specific pod by exact name:
  ${0##*/} -n data --pod-exact importer-20251012-81kdb --interval 2

  # Monitor the newest pod starting with a prefix:
  ${0##*/} -n data --pod-prefix importer- --interval 5 --follow-newer=false

  # Monitor by regex:
  ${0##*/} -n data --pod-regex '^importer-[0-9]{8}-[0-9]{6}-[a-z0-9]{5}$'

  # Old behavior: spawn from CronJob and monitor newest pod:
  ${0##*/} -n data -c daily-import --interval 5
EOF
  exit 1
}

# --------------------------
# Parse args
# --------------------------
FOLLOW_NEWER="__unset__"

while [[ $# -gt 0 ]]; do
  case "$1" in
    -n|--namespace)   NAMESPACE="$2"; shift 2;;
    -c|--cronjob)     CRONJOB_NAME="$2"; shift 2;;
    -j|--job-prefix)  JOB_PREFIX="$2"; shift 2;;
    --pod-exact)      POD_EXACT="$2"; shift 2;;
    --pod-prefix)     POD_PREFIX="$2"; shift 2;;
    --pod-regex)      POD_REGEX="$2"; shift 2;;
    --container)      CONTAINER_NAME="$2"; shift 2;;
    --interval)       SAMPLE_INTERVAL="$2"; shift 2;;
    --wait)           WAIT_TIMEOUT="$2"; shift 2;;
    --follow-newer=*) FOLLOW_NEWER="${1#*=}"; shift 1;;
    --tue-sat-only)   TUE_SAT_ONLY="true"; shift;;
    -h|--help)        usage;;
    *) echo "Unknown arg: $1" &amp;gt;&amp;amp;2; usage;;
  esac
done

# --------------------------
# Validations &amp;amp; mode detect
# --------------------------
[[ -z "$NAMESPACE" ]] &amp;amp;&amp;amp; { echo "[ERROR $(ts)] --namespace is required."; exit 2; }

MODE="unknown"
if [[ -n "$CRONJOB_NAME" ]]; then
  MODE="cronjob"
elif [[ -n "$POD_EXACT" || -n "$POD_PREFIX" || -n "$POD_REGEX" ]]; then
  MODE="podpattern"
else
  echo "[ERROR $(ts)] Specify either -c/--cronjob OR one of --pod-exact/--pod-prefix/--pod-regex."; exit 2
fi

# FOLLOW_NEWER default per mode
if [[ "$FOLLOW_NEWER" == "__unset__" ]]; then
  if [[ "$MODE" == "cronjob" ]]; then
    FOLLOW_NEWER="true"
  else
    # in pod-pattern mode: exact =&amp;gt; false; prefix/regex =&amp;gt; true
    if [[ -n "$POD_EXACT" ]]; then FOLLOW_NEWER="false"; else FOLLOW_NEWER="true"; fi
  fi
fi

# day gating optional
if [[ "$TUE_SAT_ONLY" == "true" ]]; then
  dow=$(date +%u); if [[ "$dow" -eq 7 || "$dow" -eq 1 ]]; then
    echo "[INFO  $(ts)] Skipping (Sun/Mon) due to --tue-sat-only."; exit 0; fi
fi

# cluster checks
"$KUBECTL" version --request-timeout=5s &amp;gt;/dev/null 2&amp;gt;&amp;amp;1 || { echo "[ERROR $(ts)] kubectl cannot reach cluster."; exit 3; }
"$KUBECTL" get ns "$NAMESPACE" &amp;gt;/dev/null 2&amp;gt;&amp;amp;1 || { echo "[ERROR $(ts)] Namespace '$NAMESPACE' not found or no access."; exit 4; }

# --------------------------
# Utility functions
# --------------------------
mem_to_bytes() {
  local v="$1"
  case "$v" in
    *Ki) echo $(( ${v%Ki} * 1024 ));;
    *Mi) echo $(( ${v%Mi} * 1024 * 1024 ));;
    *Gi) echo $(( ${v%Gi} * 1024 * 1024 * 1024 ));;
    *Ti) echo $(( ${v%Ti} * 1024 * 1024 * 1024 * 1024 ));;
    *K)  echo $(( ${v%K}  * 1000 ));;
    *M)  echo $(( ${v%M}  * 1000 * 1000 ));;
    *G)  echo $(( ${v%G}  * 1000 * 1000 * 1000 ));;
    *T)  echo $(( ${v%T}  * 1000 * 1000 * 1000 * 1000 ));;
    *)   echo "$v";;
  esac
}

get_newest_pod_by_job() {
  "$KUBECTL" -n "$NAMESPACE" get pods -l "job-name=${JOB_NAME}" \
    --sort-by=.metadata.creationTimestamp \
    -o jsonpath='{range .items[*]}{.metadata.name}{"\n"}{end}' 2&amp;gt;/dev/null | tail -n 1
}

get_pod_match() {
  # returns newest matching pod name for the chosen pattern mode
  local list
  list="$("$KUBECTL" -n "$NAMESPACE" get pods \
          --sort-by=.metadata.creationTimestamp \
          -o jsonpath='{range .items[*]}{.metadata.name}{"\n"}{end}' 2&amp;gt;/dev/null || true)"
  [[ -z "$list" ]] &amp;amp;&amp;amp; return 1
  if [[ -n "$POD_EXACT" ]]; then
    echo "$list" | awk -v n="$POD_EXACT" '$0==n' | tail -n 1
  elif [[ -n "$POD_PREFIX" ]]; then
    echo "$list" | awk -v p="$POD_PREFIX" 'index($0,p)==1' | tail -n 1
  else
    echo "$list" | grep -E -- "$POD_REGEX" | tail -n 1
  fi
}

pod_waiting_reason() {
  "$KUBECTL" -n "$NAMESPACE" get pod "$1" -o jsonpath='{.status.containerStatuses[0].state.waiting.reason}' 2&amp;gt;/dev/null || true
}

# --------------------------
# Output folder &amp;amp; filenames
# --------------------------
NAME_FOR_FOLDER=""
if [[ "$MODE" == "cronjob" ]]; then
  NAME_FOR_FOLDER="${CRONJOB_NAME}"
else
  NAME_FOR_FOLDER="${POD_EXACT:-${POD_PREFIX:-${POD_REGEX//[^A-Za-z0-9_-]/_}}}"
fi

RUN_DIR="${OUTPUT_ROOT}/${TIMESTAMP}_${NAME_FOR_FOLDER}"
mkdir -p "$RUN_DIR"
POD_LOG="${RUN_DIR}/pod.log"
RES_CSV="${RUN_DIR}/resources.csv"
echo "[INFO  $(ts)] Output folder: $RUN_DIR"
echo "ts_utc,container,cpu_m,mem_bytes,node_cpu_pct,node_mem_bytes" &amp;gt; "$RES_CSV"

# --------------------------
# CronJob mode: create Job
# --------------------------
POD_NAME=""
NODE_NAME=""
JOB_NAME=""

if [[ "$MODE" == "cronjob" ]]; then
  "$KUBECTL" -n "$NAMESPACE" get cronjob "$CRONJOB_NAME" &amp;gt;/dev/null 2&amp;gt;&amp;amp;1 || { echo "[ERROR $(ts)] CronJob '$CRONJOB_NAME' not found."; exit 5; }
  JOB_PREFIX="${JOB_PREFIX:-$CRONJOB_NAME}"
  JOB_NAME="${JOB_PREFIX}-${TIMESTAMP}"
  echo "[INFO  $(ts)] Creating Job '$JOB_NAME' from CronJob '$CRONJOB_NAME' in ns '$NAMESPACE'..." | tee -a "$POD_LOG"
  "$KUBECTL" -n "$NAMESPACE" create job --from=cronjob/"$CRONJOB_NAME" "$JOB_NAME" &amp;gt;/dev/null \
    || { echo "[ERROR $(ts)] Failed to create Job."; exit 6; }
fi

# --------------------------
# Wait for the target pod
# --------------------------
echo "[INFO  $(ts)] Waiting for target pod..." | tee -a "$POD_LOG"
DEADLINE=$(( $(date +%s) + WAIT_TIMEOUT ))
while [[ -z "$POD_NAME" &amp;amp;&amp;amp; $(date +%s) -lt $DEADLINE ]]; do
  if [[ "$MODE" == "cronjob" ]]; then
    POD_NAME="$(get_newest_pod_by_job)"
  else
    POD_NAME="$(get_pod_match)"
  fi
  [[ -z "$POD_NAME" ]] &amp;amp;&amp;amp; sleep 1
done
[[ -z "$POD_NAME" ]] &amp;amp;&amp;amp; { echo "[ERROR $(ts)] Timed out waiting for pod." | tee -a "$POD_LOG"; exit 7; }

NODE_NAME="$("$KUBECTL" -n "$NAMESPACE" get pod "$POD_NAME" -o jsonpath='{.spec.nodeName}' 2&amp;gt;/dev/null || true)"
echo "[INFO  $(ts)] Monitoring pod: $POD_NAME | Node: ${NODE_NAME:-unknown}" | tee -a "$POD_LOG"

# pick container hint
if [[ -z "$CONTAINER_NAME" ]]; then
  CONTAINER_NAME="$("$KUBECTL" -n "$NAMESPACE" get pod "$POD_NAME" -o jsonpath='{.spec.containers[0].name}' 2&amp;gt;/dev/null || true)"
fi
echo "[INFO  $(ts)] Container (hint): ${CONTAINER_NAME:-&amp;lt;all&amp;gt;}" | tee -a "$POD_LOG"

# Metrics probe
METRICS_AVAILABLE=true
if ! "$KUBECTL" -n "$NAMESPACE" top pod "$POD_NAME" &amp;gt;/dev/null 2&amp;gt;&amp;amp;1; then
  echo "[WARN  $(ts)] Metrics API not available; resources.csv will have blanks." | tee -a "$POD_LOG"
  METRICS_AVAILABLE=false
fi

# --------------------------
# Start log streaming
# --------------------------
{
  echo "[INFO  $(ts)] --- BEGIN POD LOG STREAM ---"
  if [[ -n "$CONTAINER_NAME" ]]; then
    "$KUBECTL" -n "$NAMESPACE" logs -f "pod/${POD_NAME}" -c "$CONTAINER_NAME" --timestamps
  else
    "$KUBECTL" -n "$NAMESPACE" logs -f "pod/${POD_NAME}" --all-containers --timestamps
  fi
  echo "[INFO  $(ts)] --- END POD LOG STREAM ---"
} &amp;gt;&amp;gt; "$POD_LOG" 2&amp;gt;&amp;amp;1 &amp;amp;
LOG_PID=$!

# --------------------------
# Sampling loop
# --------------------------
stop_sampling="false"
sample_loop() {
  while [[ "$stop_sampling" != "true" ]]; do
    # optionally follow newer pod if requested (for cronjob / prefix / regex cases)
    if [[ "$FOLLOW_NEWER" == "true" ]]; then
      if [[ "$MODE" == "cronjob" ]]; then
        latest="$(get_newest_pod_by_job)"
      else
        # exact =&amp;gt; stay; prefix/regex =&amp;gt; can move
        if [[ -n "$POD_EXACT" ]]; then latest="$POD_NAME"; else latest="$(get_pod_match)"; fi
      fi
      if [[ -n "$latest" &amp;amp;&amp;amp; "$latest" != "$POD_NAME" ]]; then
        POD_NAME="$latest"
        NODE_NAME="$("$KUBECTL" -n "$NAMESPACE" get pod "$POD_NAME" -o jsonpath='{.spec.nodeName}' 2&amp;gt;/dev/null || true)"
        echo "[INFO  $(ts)] Switched to pod: $POD_NAME | Node: ${NODE_NAME:-unknown}" &amp;gt;&amp;gt; "$POD_LOG"
      fi
    fi

    TS="$(tsz)"
    NODE_CPU="" NODE_MEM_B=""

    if [[ "$METRICS_AVAILABLE" == true &amp;amp;&amp;amp; -n "$NODE_NAME" ]]; then
      if read -r _ _ cpu_pct mem_raw _ &amp;lt; &amp;lt;("$KUBECTL" top node "$NODE_NAME" 2&amp;gt;/dev/null | awk 'NR==2{print $1,$2,$3,$4,$5}'); then
        NODE_CPU="${cpu_pct%%%}"
        NODE_MEM_B="$(mem_to_bytes "$mem_raw")"
      fi
    fi

    if [[ "$METRICS_AVAILABLE" == true &amp;amp;&amp;amp; -n "$POD_NAME" ]]; then
      while read -r pod ctnr cpu_raw mem_raw _; do
        [[ -z "$ctnr" ]] &amp;amp;&amp;amp; continue
        if [[ "$cpu_raw" == *m ]]; then CPU_M="${cpu_raw%m}"; else CPU_M=$(awk -v v="$cpu_raw" 'BEGIN{printf "%.0f", v*1000}'); fi
        MEM_B="$(mem_to_bytes "$mem_raw")"
        echo "$TS,$ctnr,$CPU_M,$MEM_B,${NODE_CPU},${NODE_MEM_B}" &amp;gt;&amp;gt; "$RES_CSV"
      done &amp;lt; &amp;lt;("$KUBECTL" -n "$NAMESPACE" top pod "$POD_NAME" --containers 2&amp;gt;/dev/null | awk 'NR&amp;gt;1{print $1,$2,$3,$4,$5}')
    else
      echo "$TS,,,,," &amp;gt;&amp;gt; "$RES_CSV"
    fi

    sleep "$SAMPLE_INTERVAL"
  done
}
sample_loop &amp;amp; SAMP_PID=$!

# --------------------------
# Watch for end
# --------------------------
PHASE=""
while true; do
  # If following newer, re-evaluate current pod for phase; otherwise lock on POD_NAME
  TARGET="$POD_NAME"
  PHASE="$("$KUBECTL" -n "$NAMESPACE" get pod "$TARGET" -o jsonpath='{.status.phase}' 2&amp;gt;/dev/null || echo "NotFound")"
  if [[ "$PHASE" == "Succeeded" || "$PHASE" == "Failed" || "$PHASE" == "NotFound" ]]; then
    break
  fi

  # Early exit on hard waiting reasons
  REASON="$(pod_waiting_reason "$TARGET" || true)"
  case "$REASON" in
    ImagePullBackOff|ErrImagePull|CreateContainerConfigError|CrashLoopBackOff|CreateContainerError)
      echo "[ERROR $(ts)] Pod waiting reason: $REASON" | tee -a "$POD_LOG"
      break;;
  esac

  sleep 3
done

# --------------------------
# Stop background tasks
# --------------------------
stop_sampling="true"
kill "$SAMP_PID" &amp;gt;/dev/null 2&amp;gt;&amp;amp;1 || true
kill "$LOG_PID"  &amp;gt;/dev/null 2&amp;gt;&amp;amp;1 || true
wait "$SAMP_PID" "$LOG_PID" 2&amp;gt;/dev/null || true

# --------------------------
# Final messages
# --------------------------
echo "[INFO  $(ts)] Pod final phase: ${PHASE:-unknown}" | tee -a "$POD_LOG"
echo "[INFO  $(ts)] Output saved to: $RUN_DIR" | tee -a "$POD_LOG"

&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



</description>
      <category>monitoring</category>
      <category>automation</category>
      <category>kubernetes</category>
      <category>devops</category>
    </item>
    <item>
      <title>python mail format parser</title>
      <dc:creator>ByteLedger</dc:creator>
      <pubDate>Wed, 24 Sep 2025 12:37:46 +0000</pubDate>
      <link>https://dev.to/vb_nair/python-mail-format-parser-44g3</link>
      <guid>https://dev.to/vb_nair/python-mail-format-parser-44g3</guid>
      <description>&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight plaintext"&gt;&lt;code&gt;@echo off
setlocal

REM --- Get yesterday in YYYY-MM-DD for -FromDate ---
for /f %%i in ('powershell -NoProfile -Command "(Get-Date).AddDays(-1).ToString('yyyy-MM-dd')"') do set FROMDATE=%%i

REM --- Build YYYY\MM\DD and prepend Y:\vfc-voice for -DirectoryPath ---
for /f %%i in ('powershell -NoProfile -Command "$d=(Get-Date).AddDays(-1); '{0}\{1}\{2}' -f $d.ToString('yyyy'),$d.ToString('MM'),$d.ToString('dd')"') do set SUBPATH=%%i
set "DIRPATH=Y:\vfc-voice\%SUBPATH%"

echo FromDate     = %FROMDATE%
echo DirectoryPath= %DIRPATH%

REM --- Run PowerShell script (same caret structure that worked before) ---
powershell.exe -NoProfile -ExecutionPolicy Bypass -Command ^
"Add-Type -AssemblyName System.Net.Http; " ^
"&amp;amp; 'C:\script\tools\data-completeness\data-completeness-ms-teams-voice.ps1' " ^
"-FromDate '%FROMDATE%' " ^
"-DirectoryPath '%DIRPATH%' " 

echo Exit code: %errorlevel%
endlocal
pause

&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;p&gt;bat file&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight plaintext"&gt;&lt;code&gt;@echo off
powershell.exe -ExecutionPolicy Bypass ^
    -File "C:\Users\data-completeness.ps1" ^
    -FromDate "2024-01-01" ^
    -DirectoryPath "test-data\ds-testdata" ^
    -ManifestFile "20251003235850 - ExportDetails.zip!20251003235850 - FileListToShield.log" ^
    -FilenameColumn "Name" ^
    -SteelEyeSourceName "voice_chat"

&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;p&gt;kubectl cmd&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight plaintext"&gt;&lt;code&gt;
Get-ChildItem -Path $env:USERPROFILE -Recurse -Filter kubectl.exe -ErrorAction SilentlyContinue | Select-Object -First 10 FullName

&amp;amp; "C:\Users\Administrator\AppData\Roaming\Lens\binaries\kubectl\1.29.3\kubectl.exe" version --client

$kubectl = "C:\Users\Administrator\AppData\Roaming\Lens\binaries\kubectl\1.29.3\kubectl.exe"
$kubeconfig = Get-ChildItem "$env:APPDATA\Lens\kubeconfigs" -Recurse -Include *.config -ErrorAction SilentlyContinue | Select-Object -First 1 -ExpandProperty FullName
if ($kubeconfig) { $env:KUBECONFIG = $kubeconfig }
&amp;amp; $kubectl version --client
&amp;amp; $kubectl get ns

&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;p&gt;"$env:APPDATA\Lens\kubeconfigs" -Recurse -Include *.config -ErrorAction SilentlyContinue | Select-Object -First 1 -ExpandProperty FullName; if ($lkc) { $env:KUBECONFIG = $lkc } }; if ($kubectl) { &amp;amp; $kubectl version --client; &amp;amp; $kubectl get ns } else { Write-Error "kubectl.exe not found (PATH or Lens cache)." }&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight plaintext"&gt;&lt;code&gt;$kubectl = (Get-Command kubectl -ErrorAction SilentlyContinue).Source; if (-not $kubectl) { $kubectl = Get-ChildItem "$env:APPDATA\Lens" -Recurse -Filter kubectl.exe -ErrorAction SilentlyContinue | Select-Object -First 1 -ExpandProperty FullName }; if (-not $env:KUBECONFIG) { $lkc = Get-ChildItem "$env:APPDATA\Lens\kubeconfigs" -Recurse -Include *.config -ErrorAction SilentlyContinue | Select-Object -First 1 -ExpandProperty FullName; if ($lkc) { $env:KUBECONFIG = $lkc } }; if ($kubectl) { &amp;amp; $kubectl version --client; &amp;amp; $kubectl get ns } else { Write-Error "kubectl.exe not found (PATH or Lens cache)." }


https://knowledge.broadcom.com/external/article/297346/how-to-limit-the-per-message-max-size-in.html?


emlgen/
  __init__.py
  cli.py              # argparse CLI
  io_utils.py         # read/write EML, CRLF-safe
  ids.py              # new GUID/Message-ID/boundary/timestamp
  transform.py        # token map, replacements, text injection

emlgen/io_utils.py
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;p&gt;from email import policy&lt;br&gt;
from email.parser import BytesParser&lt;br&gt;
from email.generator import BytesGenerator&lt;br&gt;
from io import BytesIO&lt;br&gt;
import os&lt;/p&gt;

&lt;p&gt;def read_eml(path: str):&lt;br&gt;
    with open(path, "rb") as f:&lt;br&gt;
        return BytesParser(policy=policy.default).parse(f)&lt;/p&gt;

&lt;p&gt;def write_eml(msg, path: str):&lt;br&gt;
    os.makedirs(os.path.dirname(path), exist_ok=True)&lt;br&gt;
    bio = BytesIO()&lt;br&gt;
    BytesGenerator(bio, policy=policy.SMTP).flatten(msg)  # CRLF&lt;br&gt;
    with open(path, "wb") as f:&lt;br&gt;
        f.write(bio.getvalue())&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight plaintext"&gt;&lt;code&gt;emlgen/ids.py

&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;p&gt;import uuid, datetime&lt;br&gt;
from email.utils import make_msgid&lt;/p&gt;

&lt;p&gt;def new_guid(): return str(uuid.uuid4())&lt;/p&gt;

&lt;p&gt;def new_message_id(domain="example.test"):&lt;br&gt;
    return make_msgid(domain=domain)          # includes &amp;lt;...&amp;gt;&lt;/p&gt;

&lt;p&gt;def boundary(tag="MESSAGE_ID"):               # e.g. &lt;strong&gt;MESSAGE_ID&lt;/strong&gt;abc123&lt;br&gt;
    return f"&lt;strong&gt;{tag}&lt;/strong&gt;{uuid.uuid4().hex[:10]}"&lt;/p&gt;

&lt;p&gt;def ts_compact():&lt;br&gt;
    dt = datetime.datetime.utcnow()&lt;br&gt;
    return dt.strftime("%Y%m%d%H%M%S") + f"{int(dt.microsecond/1000):03d}"&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight plaintext"&gt;&lt;code&gt;emlgen/transform.py

&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;p&gt;import re&lt;br&gt;
from email import encoders&lt;/p&gt;

&lt;p&gt;def get_boundary(msg):&lt;br&gt;
    b = msg.get_boundary()&lt;br&gt;
    if b: return b&lt;br&gt;
    m = re.search(r'boundary="?(.*?)"(;|$)', msg.get('Content-Type',''), re.I)&lt;br&gt;
    return m.group(1) if m else None&lt;/p&gt;

&lt;p&gt;def set_boundary(mp, val):&lt;br&gt;
    if mp.is_multipart():&lt;br&gt;
        mp.set_boundary(val)&lt;/p&gt;

&lt;p&gt;def walk_leaf_parts(msg):&lt;br&gt;
    for p in msg.walk():&lt;br&gt;
        if not p.is_multipart():&lt;br&gt;
            return_part = p&lt;br&gt;
            yield return_part&lt;/p&gt;

&lt;p&gt;def collect_tokens(msg):&lt;br&gt;
    toks = {}&lt;br&gt;
    mid = (msg.get("Message-ID") or "").strip()&lt;br&gt;
    toks["Message-ID"] = mid&lt;br&gt;
    toks["Message-ID-unbr"] = mid[1:-1] if mid.startswith("&amp;lt;") and mid.endswith("&amp;gt;") else ""&lt;br&gt;
    toks["ConversationID"] = (msg.get("X-Header-ConversationID") or "").strip()&lt;br&gt;
    toks["Boundary"] = get_boundary(msg) or ""&lt;br&gt;
    toks["Filenames"] = [p.get_filename() for p in walk_leaf_parts(msg) if p.get_filename()]&lt;br&gt;
    return toks&lt;/p&gt;

&lt;p&gt;def rename_attachment_headers(part, new_filename):&lt;br&gt;
    if part.get("Content-Disposition"):&lt;br&gt;
        part.set_param("filename", new_filename, header="Content-Disposition")&lt;br&gt;
    part.set_param("name", new_filename, header="Content-Type")&lt;/p&gt;

&lt;p&gt;def replace_text_payload(part, replace_map, keep_b64=True):&lt;br&gt;
    orig_cte = (part.get("Content-Transfer-Encoding") or "").lower()&lt;br&gt;
    charset = part.get_content_charset() or "utf-8"&lt;br&gt;
    raw = part.get_payload(decode=True) or b""&lt;br&gt;
    try:   text = raw.decode(charset, errors="replace")&lt;br&gt;
    except: text = raw.decode("utf-8", errors="replace")&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight plaintext"&gt;&lt;code&gt;for old, new in replace_map.items():
    if old: text = text.replace(old, new)

part.set_payload(text, charset=charset)
if part["Content-Transfer-Encoding"]:
    del part["Content-Transfer-Encoding"]
if keep_b64 and orig_cte == "base64":
    encoders.encode_base64(part)
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;

&lt;p&gt;def inject_text(msg, text, where="html-end", once_marker="&amp;lt;!--emlgen-injected--&amp;gt;"):&lt;br&gt;
    """&lt;br&gt;
    Adds &lt;code&gt;text&lt;/code&gt; into text/html or text/plain part.&lt;br&gt;
    where: 'html-top' | 'html-end' | 'text-top' | 'text-end'&lt;br&gt;
    Adds a marker to avoid duplicate injections on re-runs.&lt;br&gt;
    """&lt;br&gt;
    for p in walk_leaf_parts(msg):&lt;br&gt;
        ctype = (p.get_content_type() or "").lower()&lt;br&gt;
        if ctype == "text/html":&lt;br&gt;
            raw = p.get_payload(decode=True) or b""&lt;br&gt;
            charset = p.get_content_charset() or "utf-8"&lt;br&gt;
            try: html = raw.decode(charset, errors="replace")&lt;br&gt;
            except: html = raw.decode("utf-8", errors="replace")&lt;br&gt;
            if once_marker in html: continue&lt;br&gt;
            if where == "html-top":&lt;br&gt;
                html = once_marker + text + html&lt;br&gt;
            elif where == "html-end":&lt;br&gt;
                # insert before &lt;/p&gt;

</description>
      <category>cli</category>
      <category>python</category>
      <category>tooling</category>
    </item>
    <item>
      <title>Minimal Python scanner</title>
      <dc:creator>ByteLedger</dc:creator>
      <pubDate>Wed, 24 Sep 2025 10:20:26 +0000</pubDate>
      <link>https://dev.to/vb_nair/minimal-python-scanner-1nhf</link>
      <guid>https://dev.to/vb_nair/minimal-python-scanner-1nhf</guid>
      <description>&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight plaintext"&gt;&lt;code&gt;#!/usr/bin/env python3
import argparse, os, re, uuid, datetime
from email import policy
from email.parser import BytesParser
from email.generator import BytesGenerator
from email.message import EmailMessage
from email.headerregistry import Address
from io import BytesIO

# ---------- helpers ----------

def new_guid():
    return str(uuid.uuid4())

def new_message_id(domain="example.test"):
    # You can use your org domain if desired
    from email.utils import make_msgid
    return make_msgid(domain=domain)  # returns like &amp;lt;...@domain&amp;gt;

def new_boundary(tag="MESSAGE_ID"):
    # Boundary visible pattern similar to your sample (customize as needed)
    token = uuid.uuid4().hex[:10]
    return f"__{tag}__{token}"

def now_compact_ts():
    # e.g., 20250911104117810 (yyyyMMddHHmmssfff)
    dt = datetime.datetime.utcnow()
    return dt.strftime("%Y%m%d%H%M%S") + f"{int(dt.microsecond/1000):03d}"

def get_boundary_from_header(msg):
    # Works on multipart messages
    # email.message has get_boundary(); fallback to regex on header if needed
    b = msg.get_boundary()
    if b:
        return b
    cth = msg.get("Content-Type", "")
    m = re.search(r'boundary="?(.*?)"?(\s*;|$)', cth, re.I)
    return m.group(1) if m else None

def set_boundary(msg: EmailMessage, boundary: str):
    # Only relevant for multipart containers
    if msg.is_multipart():
        msg.set_boundary(boundary)

def replace_text_payload(part, replace_map, keep_encoding=True):
    """
    Decode text-ish payload, apply replacements, and re-encode according to original part headers.
    keep_encoding=True will retain original Content-Transfer-Encoding (e.g., base64/quoted-printable).
    """
    orig_cte = (part.get("Content-Transfer-Encoding") or "").lower()
    charset = part.get_content_charset() or "utf-8"
    raw = part.get_payload(decode=True) or b""
    try:
        text = raw.decode(charset, errors="replace")
    except Exception:
        text = raw.decode("utf-8", errors="replace")
    for old, new in replace_map.items():
        if old:
            text = text.replace(old, new)
    # Re-set payload
    if keep_encoding and orig_cte in ("base64", "quoted-printable"):
        # set_payload with string + set_charset lets library re-encode
        part.set_payload(text, charset=charset)
        # The library will choose q-p by default for 8-bit; force base64 if originally base64:
        if orig_cte == "base64":
            part["Content-Transfer-Encoding"] = "base64"
    else:
        part.set_payload(text, charset=charset)

def rename_attachment_headers(part, new_filename):
    # Update filename in both headers (Content-Disposition and Content-Type's "name=")
    disp = part.get("Content-Disposition")
    if disp:
        part.set_param("filename", new_filename, header="Content-Disposition")
    ctype = part.get_content_type()
    # We need to re-apply name parameter on Content-Type header
    part.set_param("name", new_filename, header="Content-Type")

def walk_leaf_parts(msg):
    for p in msg.walk():
        if not p.is_multipart():
            yield p

def collect_tokens(msg):
    """
    Collects tokens we intend to replace everywhere:
    - message-id (without &amp;lt; &amp;gt; also useful)
    - conversation id (custom header)
    - boundary string
    - attachment filenames
    Returns dict with keys for mapping.
    """
    tokens = {}
    mid = (msg.get("Message-ID") or "").strip()
    tokens["Message-ID"] = mid
    # Unbracketed variant
    if mid.startswith("&amp;lt;") and mid.endswith("&amp;gt;"):
        tokens["Message-ID-unbracketed"] = mid[1:-1]
    conv = (msg.get("X-Header-ConversationID") or "").strip()
    tokens["ConversationID"] = conv
    bnd = get_boundary_from_header(msg) or ""
    tokens["Boundary"] = bnd

    # any filenames present on parts
    filenames = []
    for p in walk_leaf_parts(msg):
        fn = p.get_filename()
        if fn:
            filenames.append(fn)
    tokens["Filenames"] = filenames
    return tokens

def write_eml(msg, path):
    os.makedirs(os.path.dirname(path), exist_ok=True)
    bio = BytesIO()
    # Ensure CRLF line endings
    BytesGenerator(bio, policy=policy.SMTP).flatten(msg)
    with open(path, "wb") as f:
        f.write(bio.getvalue())

# ---------- core generation ----------

def generate_variant_from_template(template_bytes: bytes, domain_for_msgid="example.test"):
    # Parse template
    msg = BytesParser(policy=policy.default).parsebytes(template_bytes)

    # Collect old tokens
    toks = collect_tokens(msg)

    # Build new tokens
    new_conv = new_guid()
    new_mid = new_message_id(domain_for_msgid)     # includes &amp;lt;...&amp;gt;
    new_mid_unbr = new_mid[1:-1]                   # without angle brackets
    new_bnd = new_boundary("MESSAGE_ID")
    new_ts = now_compact_ts()

    # Build replacement map (textual content + XML/HTML body)
    replace_map = {}
    if toks.get("ConversationID"):
        replace_map[toks["ConversationID"]] = new_conv
    if toks.get("Message-ID"):
        replace_map[toks["Message-ID"]] = new_mid  # in case body contains bracketed mid
    if toks.get("Message-ID-unbracketed"):
        replace_map[toks["Message-ID-unbracketed"]] = new_mid_unbr
    if toks.get("Boundary"):
        replace_map[toks["Boundary"]] = new_bnd

    # Also map filenames (if they embed conv id or timestamp string). We’ll replace exact old names later when we rename.
    old_filenames = toks.get("Filenames", [])

    # --- Update top-level headers ---
    if toks.get("ConversationID"):
        msg.replace_header("X-Header-ConversationID", new_conv)
    if msg.get("Message-ID"):
        msg.replace_header("Message-ID", new_mid)
    # Boundary (top-level multipart)
    set_boundary(msg, new_bnd)

    # --- Update parts ---
    for part in walk_leaf_parts(msg):
        ctype = (part.get_content_type() or "").lower()
        orig_cte = (part.get("Content-Transfer-Encoding") or "").lower()

        # If an attachment has a filename that embeds the old conv id / timestamp, build a new one
        fn = part.get_filename()
        if fn:
            new_fn = fn
            # Replace conv id if present
            if toks.get("ConversationID") and toks["ConversationID"] in new_fn:
                new_fn = new_fn.replace(toks["ConversationID"], new_conv)
            # Replace timestamp-like token inside filename (simple heuristic for (YYYYMMDD...))
            m = re.search(r"\(\d{14,20}\)", new_fn)
            if m:
                new_fn = new_fn.replace(m.group(0), f"({new_ts})")
            # Apply rename
            if new_fn != fn:
                rename_attachment_headers(part, new_fn)

        # For text/html, text/plain, application/xml, text/xml → decode → replace → re-encode
        if ctype.startswith("text/") or ctype in ("application/xml", "text/xml", "application/json"):
            replace_text_payload(part, replace_map, keep_encoding=True)
        else:
            # Binary: do nothing to payload, but boundary replacements in headers are already handled.
            pass

    # If there are nested multiparts, update their boundaries too
    for p in msg.walk():
        if p.is_multipart() and p is not msg:
            # Give child multiparts their own unique boundary so no collisions
            set_boundary(p, new_boundary("PART"))

    return msg, {
        "new_message_id": new_mid,
        "new_conversation_id": new_conv,
        "new_boundary": new_bnd
    }

# ---------- CLI ----------

def main():
    ap = argparse.ArgumentParser(description="Duplicate a sample EML into many unique EMLs.")
    ap.add_argument("--input", required=True, help="Path to the source sample .eml (raw).")
    ap.add_argument("--count", type=int, required=True, help="How many output files to generate.")
    ap.add_argument("--out", required=True, help="Output folder.")
    ap.add_argument("--domain", default="example.test", help="Domain to use for new Message-IDs.")
    args = ap.parse_args()

    with open(args.input, "rb") as f:
        template_bytes = f.read()

    os.makedirs(args.out, exist_ok=True)
    manifest_path = os.path.join(args.out, "manifest.csv")
    with open(manifest_path, "w", encoding="utf-8", newline="") as mf:
        mf.write("filename,new_message_id,new_conversation_id,new_boundary\n")

        for i in range(args.count):
            msg, meta = generate_variant_from_template(template_bytes, domain_for_msgid=args.domain)

            # Decide output filename: use conversation id + timestamp for uniqueness
            ts = now_compact_ts()
            out_name = f"{meta['new_conversation_id']}_{ts}.eml"
            out_path = os.path.join(args.out, out_name)

            write_eml(msg, out_path)
            mf.write(f"{out_name},{meta['new_message_id']},{meta['new_conversation_id']},{meta['new_boundary']}\n")

    print(f"✅ Generated {args.count} EML(s) into: {args.out}")
    print(f"   Manifest: {manifest_path}")

if __name__ == "__main__":
    main()

&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;p&gt;python gen_eml_batch.py --input sample.eml --count 100 --out out/batch_0100&lt;br&gt;
Parses the original (non-decoded) .eml.&lt;/p&gt;

&lt;p&gt;Generates per-file new ConversationID (GUID), new Message-ID, and new MIME boundary.&lt;/p&gt;

&lt;p&gt;Renames attachment filenames if they embed the old ConversationID and/or a timestamp in parentheses.&lt;/p&gt;

&lt;p&gt;Replaces tokens inside HTML/XML/text parts (so duplicates won’t be flagged as identical).&lt;/p&gt;

&lt;p&gt;Keeps binary attachments unchanged, preserving base64.&lt;/p&gt;

&lt;p&gt;Writes output with CRLF line endings &amp;amp; a manifest.csv.&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight plaintext"&gt;&lt;code&gt;# add 
from email import encoders

def replace_text_payload(part, replace_map, keep_encoding=True):
    """
    Decode text payload, apply replacements, and re-encode.
    If the original CTE was base64, re-apply base64 WITHOUT creating duplicate
    'Content-Transfer-Encoding' headers.
    """
    orig_cte = (part.get("Content-Transfer-Encoding") or "").lower()
    charset = part.get_content_charset() or "utf-8"

    raw = part.get_payload(decode=True) or b""
    try:
        text = raw.decode(charset, errors="replace")
    except Exception:
        text = raw.decode("utf-8", errors="replace")

    # do replacements
    for old, new in replace_map.items():
        if old:
            text = text.replace(old, new)

    # 1) set_payload first (this may add a CTE like 'quoted-printable' or '8bit')
    part.set_payload(text, charset=charset)

    # 2) now remove any CTE header that set_payload just created
    if part["Content-Transfer-Encoding"]:
        del part["Content-Transfer-Encoding"]

    # 3) re-apply original encoding if it was base64
    if keep_encoding and orig_cte == "base64":
        encoders.encode_base64(part)
    # else leave it with no CTE; the library will choose a suitable one on output

&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



</description>
      <category>python</category>
      <category>tooling</category>
    </item>
    <item>
      <title>Validate Records in OpenSearch from a CSV (with Logging and Health Checks)</title>
      <dc:creator>ByteLedger</dc:creator>
      <pubDate>Mon, 11 Aug 2025 15:16:53 +0000</pubDate>
      <link>https://dev.to/vb_nair/validate-records-in-opensearch-from-a-csv-with-logging-and-health-checks-3345</link>
      <guid>https://dev.to/vb_nair/validate-records-in-opensearch-from-a-csv-with-logging-and-health-checks-3345</guid>
      <description>&lt;h2&gt;
  
  
  📌 Overview
&lt;/h2&gt;

&lt;p&gt;When working with data pipelines or audit tasks, we often need to &lt;strong&gt;verify&lt;/strong&gt; that certain records exist in our OpenSearch cluster.&lt;/p&gt;

&lt;p&gt;This post walks through a Python script that:&lt;/p&gt;

&lt;ul&gt;
&lt;li&gt;Reads &lt;code&gt;(recordId, recordDate)&lt;/code&gt; pairs from a CSV file&lt;/li&gt;
&lt;li&gt;Checks if each record exists in OpenSearch&lt;/li&gt;
&lt;li&gt;Logs results with a timestamped log file&lt;/li&gt;
&lt;li&gt;Outputs a results CSV for reporting&lt;/li&gt;
&lt;li&gt;Performs a &lt;strong&gt;fast &lt;code&gt;_cluster/health&lt;/code&gt;&lt;/strong&gt; check before running queries&lt;/li&gt;
&lt;/ul&gt;




&lt;h2&gt;
  
  
  🗂 Folder Structure
&lt;/h2&gt;

&lt;p&gt;opensearch-checks/&lt;br&gt;
├─ .env&lt;br&gt;
├─ inputs/&lt;br&gt;
│ └─ records.csv&lt;br&gt;
├─ logs/ # auto-created&lt;br&gt;
├─ outputs/ # auto-created&lt;br&gt;
└─ check_records.py&lt;/p&gt;

&lt;h3&gt;
  
  
  Key features:
&lt;/h3&gt;

&lt;p&gt;Strict config from .env — no defaults in code&lt;br&gt;
Pre-check: /_cluster/health endpoint&lt;br&gt;
Exact match on recordId.keyword&lt;br&gt;
Date range match for the full day&lt;br&gt;
Logs + CSV output for easy tracking&lt;/p&gt;

&lt;p&gt;🚀 How to Run&lt;br&gt;
pip install python-dotenv&lt;br&gt;
python check_records.py&lt;/p&gt;

&lt;h3&gt;
  
  
  inputs/records.csv (example)
&lt;/h3&gt;



&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight plaintext"&gt;&lt;code&gt;recordId,recordDate
00000000-0000-0000-0000-000000000001,2025-07-31
00000000-0000-0000-0000-000000000002,2025-07-31

&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;h3&gt;
  
  
  .env (required)
&lt;/h3&gt;



&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight plaintext"&gt;&lt;code&gt;OPENSEARCH_URL=https://your-opensearch-host:9200
OPENSEARCH_USER=your-username
OPENSEARCH_PASS=your-password
INDEX_PATTERN=my_index_pattern_2025*/_search
INPUT_CSV=./inputs/records.csv
LOG_DIR=./logs
OUTPUT_DIR=./outputs

&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;h3&gt;
  
  
  The script: check_records.py
&lt;/h3&gt;



&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight plaintext"&gt;&lt;code&gt;"""
OpenSearch existence checks for (recordId, recordDate) pairs with a fast _cluster/health pre-check.

What it does
------------
1) Loads configuration strictly from a .env file (no hard-coded defaults).
2) Pre-checks OpenSearch availability via GET /_cluster/health (fails fast if unreachable or red).
3) Reads an input CSV with headers: recordId, recordDate (YYYY-MM-DD).
4) For each row, runs an existence query:
      - exact term match on recordId.keyword
      - range match on recordDate for the full day (00:00:00.000 -&amp;gt; 23:59:59.999)
5) Produces:
      - timestamped log file (PASS/FAIL per row)
      - timestamped results CSV (recordId, recordDate, status)

Security note
-------------
SSL verification is disabled here for convenience in internal/test setups.
For production, enable certificate verification and use a trusted CA/cert chain.
"""

from __future__ import annotations

import csv
import json
import logging
import os
import ssl
from base64 import b64encode
from dataclasses import dataclass
from datetime import datetime
from typing import Dict, Any
from urllib.request import Request, urlopen, URLError, HTTPError

from dotenv import load_dotenv


# ---------------- Configuration ----------------

@dataclass(frozen=True)
class Config:
    """Strongly-typed configuration loaded from environment."""
    url: str
    user: str
    password: str
    index_pattern: str           # e.g., "my_index_pattern_2025*/_search"
    input_csv: str               # e.g., "./inputs/records.csv"
    log_dir: str                 # e.g., "./logs"
    output_dir: str              # e.g., "./outputs"

    @staticmethod
    def from_env() -&amp;gt; "Config":
        load_dotenv()  # only reads .env / process env; no defaults
        url        = os.getenv("OPENSEARCH_URL")
        user       = os.getenv("OPENSEARCH_USER")
        password   = os.getenv("OPENSEARCH_PASS")
        index      = os.getenv("INDEX_PATTERN")
        input_csv  = os.getenv("INPUT_CSV")
        log_dir    = os.getenv("LOG_DIR")
        output_dir = os.getenv("OUTPUT_DIR")

        missing = [k for k, v in {
            "OPENSEARCH_URL": url,
            "OPENSEARCH_USER": user,
            "OPENSEARCH_PASS": password,
            "INDEX_PATTERN": index,
            "INPUT_CSV": input_csv,
            "LOG_DIR": log_dir,
            "OUTPUT_DIR": output_dir,
        }.items() if not v]
        if missing:
            raise ValueError(f"Missing required variables in .env: {', '.join(missing)}")

        return Config(url, user, password, index, input_csv, log_dir, output_dir)


# ---------------- Minimal OpenSearch client ----------------

class OpenSearchClient:
    """Tiny OpenSearch client using urllib (no external HTTP deps)."""
    def __init__(self, base_url: str, user: str, password: str, verify_ssl: bool = False) -&amp;gt; None:
        self.base_url = base_url.rstrip("/")
        self.auth_header = {
            "Authorization": "Basic " + b64encode(f"{user}:{password}".encode()).decode(),
            "Content-Type": "application/json",
        }
        self.ctx = ssl.create_default_context()
        if not verify_ssl:
            self.ctx.check_hostname = False
            self.ctx.verify_mode = ssl.CERT_NONE

    def get_json(self, endpoint: str) -&amp;gt; Dict[str, Any]:
        url = f"{self.base_url}/{endpoint.lstrip('/')}"
        req = Request(url, method="GET")
        # Only Authorization header on GET (no need for Content-Type)
        req.add_header("Authorization", self.auth_header["Authorization"])
        with urlopen(req, context=self.ctx) as resp:
            payload = json.loads(resp.read().decode("utf-8"))
            logging.info("GET %s -&amp;gt; %s", endpoint, resp.status)
            return payload

    def post_json(self, endpoint: str, body: Dict[str, Any]) -&amp;gt; Dict[str, Any]:
        url = f"{self.base_url}/{endpoint.lstrip('/')}"
        req = Request(url, data=json.dumps(body).encode("utf-8"), method="POST")
        for k, v in self.auth_header.items():
            req.add_header(k, v)
        with urlopen(req, context=self.ctx) as resp:
            payload = json.loads(resp.read().decode("utf-8"))
            logging.info("POST %s -&amp;gt; %s", endpoint, resp.status)
            return payload


# ---------------- Health pre-check ----------------

def assert_cluster_healthy(client: OpenSearchClient) -&amp;gt; None:
    """
    Fail fast if OpenSearch is not reachable or reports 'red' status.
    Accepts 'green' and 'yellow' as pass for read-only checks.
    """
    try:
        health = client.get_json("_cluster/health")
        status = (health.get("status") or "").lower()
        if status not in {"green", "yellow"}:
            raise RuntimeError(f"Cluster health is '{status}', expected green/yellow.")
        logging.info("Cluster health OK: %s", status)
    except (HTTPError, URLError) as e:
        raise RuntimeError(f"Cluster health check failed: {e}") from e


# ---------------- Existence query ----------------

def record_exists(client: OpenSearchClient, index_pattern: str, record_id: str, record_date: str) -&amp;gt; bool:
    """
    Return True if any document exists for:
      - recordId.keyword == record_id
      - recordDate between 'record_date 00:00:00.000' and 'record_date 23:59:59.999'
    """
    start = f"{record_date} 00:00:00.000"
    end   = f"{record_date} 23:59:59.999"
    query = {
        "query": {
            "bool": {
                "must": [
                    {"term": {"recordId.keyword": record_id}},
                    {"range": {"recordDate": {"gte": start, "lte": end}}},
                ]
            }
        },
        "size": 0
    }
    try:
        res = client.post_json(index_pattern, query)
        total = res.get("hits", {}).get("total", {})
        value = total.get("value", total if isinstance(total, int) else 0)
        return value &amp;gt; 0
    except Exception as e:
        logging.error("Query failed for recordId=%s date=%s: %s", record_id, record_date, e)
        return False


# ---------------- Main ----------------

def main() -&amp;gt; None:
    cfg = Config.from_env()

    # Prepare paths + logging
    os.makedirs(cfg.log_dir, exist_ok=True)
    os.makedirs(cfg.output_dir, exist_ok=True)
    ts = datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
    log_file = os.path.join(cfg.log_dir,    f"record_check_{ts}.log")
    out_file = os.path.join(cfg.output_dir, f"record_results_{ts}.csv")

    logging.basicConfig(filename=log_file, level=logging.INFO,
                        format="%(asctime)s - %(levelname)s - %(message)s")

    # Initialize client
    client = OpenSearchClient(cfg.url, cfg.user, cfg.password, verify_ssl=False)

    # Pre-flight: cluster health
    assert_cluster_healthy(client)

    # Process CSV -&amp;gt; Results CSV
    with open(cfg.input_csv, newline="", encoding="utf-8") as fin, \
         open(out_file, "w", newline="", encoding="utf-8") as fout:

        reader = csv.DictReader(fin)
        required_headers = {"recordId", "recordDate"}
        if not reader.fieldnames or required_headers - set(reader.fieldnames):
            raise ValueError(f"CSV must have headers: recordId,recordDate (got {reader.fieldnames})")

        writer = csv.writer(fout)
        writer.writerow(["recordId", "recordDate", "status"])

        for row in reader:
            rec_id = (row.get("recordId") or "").strip()
            date   = (row.get("recordDate") or "").strip()
            if not rec_id or not date:
                logging.warning("Skipping row with missing values: %s", row)
                continue

            ok = record_exists(client, cfg.index_pattern, rec_id, date)
            status = "PASS" if ok else "FAIL"
            writer.writerow([rec_id, date, status])
            (logging.info if ok else logging.warning)("%s: recordId=%s date=%s", status, rec_id, date)

    print(f"Done.\nLog: {log_file}\nResults: {out_file}")


if __name__ == "__main__":
    main()

&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



</description>
      <category>opensearch</category>
      <category>python</category>
      <category>logging</category>
      <category>automation</category>
    </item>
    <item>
      <title>Building a simple Full-Stack Restaurant Finder App with React, Redux, Node.js, and Google Places API (Part 2)</title>
      <dc:creator>ByteLedger</dc:creator>
      <pubDate>Wed, 19 Jun 2024 18:27:01 +0000</pubDate>
      <link>https://dev.to/vb_nair/building-a-simple-full-stack-restaurant-finder-app-with-react-redux-nodejs-and-google-places-api-part-2-557a</link>
      <guid>https://dev.to/vb_nair/building-a-simple-full-stack-restaurant-finder-app-with-react-redux-nodejs-and-google-places-api-part-2-557a</guid>
      <description>&lt;p&gt;&lt;strong&gt;Part 2: Building a Dynamic Frontend with React, Redux Toolkit, and Google Maps API&lt;/strong&gt;&lt;/p&gt;

&lt;p&gt;&lt;strong&gt;Introduction:&lt;/strong&gt;&lt;br&gt;
Welcome to the second part of our tutorial on building a full-stack "Restaurant Finder" application. &lt;br&gt;
In this blog post, we will focus on developing the frontend components using React and Redux Toolkit. Our frontend will provide users with an intuitive interface to explore nearby restaurants, view detailed information such as ratings and photos, and seamlessly navigate to locations using Google Maps integration.&lt;/p&gt;

&lt;p&gt;Throughout this tutorial, we'll cover essential frontend development concepts including &lt;/p&gt;

&lt;ul&gt;
&lt;li&gt;state management with Redux Toolkit, &lt;/li&gt;
&lt;li&gt;integrating Google Maps for interactive map displays, and &lt;/li&gt;
&lt;li&gt;creating reusable components for a consistent user experience. &lt;/li&gt;
&lt;/ul&gt;

&lt;p&gt;By the end of this guide, you'll have a complete understanding of how to implement a simple and responsive frontend for our "Restaurant Finder" application.&lt;/p&gt;

&lt;p&gt;&lt;strong&gt;Step 1: Setting Up React App&lt;/strong&gt;&lt;/p&gt;

&lt;ul&gt;
&lt;li&gt;Initialize Project:&lt;/li&gt;
&lt;/ul&gt;

&lt;p&gt;-&amp;gt; Create a new directory for your client and navigate into it.&lt;br&gt;
Run &lt;code&gt;npx create-react-app client --template typescript&lt;/code&gt; to create a new React app with TypeScript.&lt;/p&gt;

&lt;ul&gt;
&lt;li&gt;Install Dependencies:&lt;/li&gt;
&lt;/ul&gt;

&lt;p&gt;Run &lt;code&gt;npm install redux react-redux @reduxjs/toolkit axios tailwindcss daisyui @vis.gl/react-google-maps&lt;/code&gt;&lt;/p&gt;

&lt;ul&gt;
&lt;li&gt;Configure Tailwind CSS:&lt;/li&gt;
&lt;/ul&gt;

&lt;p&gt;-&amp;gt; Create a tailwind.config.js file and configure Tailwind CSS:&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight plaintext"&gt;&lt;code&gt;// tailwind.config.js
module.exports = {
  purge: ['./src/**/*.{js,jsx,ts,tsx}', './public/index.html'],
  darkMode: false, // or 'media' or 'class'
  theme: {
    extend: {},
  },
  variants: {
    extend: {},
  },
  plugins: [require('daisyui')],
};
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;ul&gt;
&lt;li&gt;Setup Tailwind in CSS:
-&amp;gt; Add the following to src/index.css:
&lt;/li&gt;
&lt;/ul&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight plaintext"&gt;&lt;code&gt;@tailwind base;
@tailwind components;
@tailwind utilities;
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;p&gt;&lt;strong&gt;Step 2: Creating Components&lt;/strong&gt;&lt;/p&gt;

&lt;ul&gt;
&lt;li&gt;Header Component:
-&amp;gt; Create a Header.tsx file in src/components and add the following:
&lt;/li&gt;
&lt;/ul&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight plaintext"&gt;&lt;code&gt;import React from "react";

const Header: React.FC = () =&amp;gt; {
  return (
    &amp;lt;header className=" mx-1 my-5 p-4 flex justify-between items-center"&amp;gt;
      &amp;lt;div className="text-sm font-semibold bg-neutral-700 rounded-md p-1"&amp;gt;
        &amp;lt;span className="text-white mr-1"&amp;gt;Restaurant&amp;lt;/span&amp;gt;
        &amp;lt;span className=" w-12 h-8 rounded bg-neutral-100 px-1 text-neutral-700 font-bold"&amp;gt;
          Finder
        &amp;lt;/span&amp;gt;
      &amp;lt;/div&amp;gt;
      &amp;lt;p className=" text-sm font-semibold mx-2 px-1"&amp;gt;
        "Good food is the foundation of genuine happiness !"
      &amp;lt;/p&amp;gt;
    &amp;lt;/header&amp;gt;
  );
};

export default Header;
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;ul&gt;
&lt;li&gt;Footer Component:
-&amp;gt; Create a Footer.tsx file in src/components and add the following:
&lt;/li&gt;
&lt;/ul&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight plaintext"&gt;&lt;code&gt;import React from "react";

const Footer: React.FC = () =&amp;gt; {
  const year = new Date().getFullYear();
  return (
    &amp;lt;footer className=" text-neutral-300 p-1 m-0 flex justify-center items-center font-thin text-xs"&amp;gt;
      &amp;lt;p className=""&amp;gt;
        &amp;lt;span&amp;gt;@ {year} © Your Name.&amp;lt;/span&amp;gt;
      &amp;lt;/p&amp;gt;
    &amp;lt;/footer&amp;gt;
  );
};

export default Footer;
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;ul&gt;
&lt;li&gt;PlaceAutocomplete Component:
-&amp;gt; Create a PlaceAutocomplete.tsx file in src/components and add the following:
&lt;/li&gt;
&lt;/ul&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight plaintext"&gt;&lt;code&gt;import React from "react";
import { useGoogleAutocomplete } from "@vis.gl/react-google-maps";
import { useDispatch } from "react-redux";
import { fetchRestaurants } from "../redux/restaurantSlice";

const PlaceAutocomplete: React.FC = () =&amp;gt; {
  const dispatch = useDispatch();

  const {
    value,
    suggestions: { status, data },
    setValue,
    clearSuggestions,
  } = useGoogleAutocomplete({
    apiKey: process.env.REACT_APP_GOOGLE_PLACES_API_KEY,
    debounce: 300,
    minLength: 3,
  });

  const handleSelect = ({ description }) =&amp;gt; {
    setValue(description, false);
    clearSuggestions();

    const geocoder = new window.google.maps.Geocoder();
    geocoder.geocode({ address: description }, (results, status) =&amp;gt; {
      if (status === "OK") {
        const { lat, lng } = results[0].geometry.location;
        dispatch(fetchRestaurants({ lat: lat(), lng: lng() }));
      }
    });
  };

  return (
    &amp;lt;div className="relative"&amp;gt;
      &amp;lt;input
        value={value}
        onChange={(e) =&amp;gt; setValue(e.target.value)}
        placeholder="Enter a place"
        className="w-full px-4 py-2 border rounded"
      /&amp;gt;
      {status === "OK" &amp;amp;&amp;amp; (
        &amp;lt;ul className="absolute z-10 w-full bg-white border rounded shadow-md mt-1"&amp;gt;
          {data.map((suggestion) =&amp;gt; (
            &amp;lt;li
              key={suggestion.place_id}
              onClick={() =&amp;gt; handleSelect(suggestion)}
              className="px-4 py-2 cursor-pointer hover:bg-gray-200"
            &amp;gt;
              {suggestion.description}
            &amp;lt;/li&amp;gt;
          ))}
        &amp;lt;/ul&amp;gt;
      )}
    &amp;lt;/div&amp;gt;
  );
};

export default PlaceAutocomplete;

&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;ul&gt;
&lt;li&gt;RestaurantList Component:
-&amp;gt; Create a RestaurantList.tsx file in src/components and add the following:
&lt;/li&gt;
&lt;/ul&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight plaintext"&gt;&lt;code&gt;import React from "react";
import { useSelector } from "react-redux";
import { RootState } from "../redux/store";
import RestaurantCard from "./RestaurantCard";

const RestaurantList: React.FC = () =&amp;gt; {
  const restaurants = useSelector(
    (state: RootState) =&amp;gt; state.restaurants.restaurants
  );

  return (
    &amp;lt;div className="grid grid-cols-1 md:grid-cols-2 lg:grid-cols-3 gap-4 p-4"&amp;gt;
      {restaurants.map((restaurant) =&amp;gt; (
        &amp;lt;RestaurantCard key={restaurant.place_id} restaurant={restaurant} /&amp;gt;
      ))}
    &amp;lt;/div&amp;gt;
  );
};

export default RestaurantList;
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;ul&gt;
&lt;li&gt;RestaurantCard Component:
-&amp;gt; Create a RestaurantCard.tsx file in src/components and add the following:
&lt;/li&gt;
&lt;/ul&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight plaintext"&gt;&lt;code&gt;import React from "react";
import { Restaurant } from "../redux/restaurantSlice";

interface RestaurantCardProps {
  restaurant: Restaurant;
}

const RestaurantCard: React.FC&amp;lt;RestaurantCardProps&amp;gt; = ({ restaurant }) =&amp;gt; {
  return (
    &amp;lt;div className="bg-white p-4 rounded shadow"&amp;gt;
      {restaurant.photoUrl &amp;amp;&amp;amp; (
        &amp;lt;img
          src={restaurant.photoUrl}
          alt={restaurant.name}
          className="w-full h-48 object-cover rounded mb-4"
        /&amp;gt;
      )}
      &amp;lt;h3 className="text-lg font-semibold mb-2"&amp;gt;{restaurant.name}&amp;lt;/h3&amp;gt;
      &amp;lt;p className="text-sm text-gray-600 mb-2"&amp;gt;{restaurant.vicinity}&amp;lt;/p&amp;gt;
      &amp;lt;p className="text-sm text-gray-600 mb-2"&amp;gt;
        Rating: {restaurant.rating} ({restaurant.user_ratings_total} reviews)
      &amp;lt;/p&amp;gt;
      &amp;lt;p className="text-sm text-gray-600 mb-2"&amp;gt;
        Distance: {restaurant.distance.toFixed(2)} km
      &amp;lt;/p&amp;gt;
      &amp;lt;a
        href={`https://www.google.com/maps/place/?q=place_id:${restaurant.place_id}`}
        target="_blank"
        rel="noopener noreferrer"
        className="text-blue-500 hover:underline"
      &amp;gt;
        View on Google Maps
      &amp;lt;/a&amp;gt;
    &amp;lt;/div&amp;gt;
  );
};

export default RestaurantCard;
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;p&gt;&lt;strong&gt;Step 3: Setting Up Redux&lt;/strong&gt;&lt;/p&gt;

&lt;ul&gt;
&lt;li&gt;Create Redux Store:
-&amp;gt; Create a redux/store.ts file and add the following:
&lt;/li&gt;
&lt;/ul&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight plaintext"&gt;&lt;code&gt;import { configureStore } from "@reduxjs/toolkit";
import restaurantReducer from "./restaurantSlice";

const store = configureStore({
  reducer: {
    restaurants: restaurantReducer,
  },
});

export type RootState = ReturnType&amp;lt;typeof store.getState&amp;gt;;
export type AppDispatch = typeof store.dispatch;

export default store;
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;ul&gt;
&lt;li&gt;src/redux/hooks.ts
&lt;/li&gt;
&lt;/ul&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight plaintext"&gt;&lt;code&gt;import { TypedUseSelectorHook, useDispatch, useSelector } from 'react-redux';
import type { RootState, AppDispatch } from './store';

export const useAppDispatch: () =&amp;gt; AppDispatch = useDispatch;
export const useAppSelector: TypedUseSelectorHook&amp;lt;RootState&amp;gt; = useSelector;
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;ul&gt;
&lt;li&gt;Create Restaurant Slice:
-&amp;gt; Create a redux/restaurantSlice.ts file and add the following:
&lt;/li&gt;
&lt;/ul&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight plaintext"&gt;&lt;code&gt;import { createSlice, createAsyncThunk } from "@reduxjs/toolkit";
import axios from "axios";

export interface Restaurant {
  name: string;
  vicinity: string;
  rating: number;
  user_ratings_total: number;
  distance: number;
  photoUrl: string | null;
  place_id: string;
}

interface RestaurantState {
  restaurants: Restaurant[];
  status: "idle" | "loading" | "succeeded" | "failed";
  error: string | null;
}

const initialState: RestaurantState = {
  restaurants: [],
  status: "idle",
  error: null,
};

export const fetchRestaurants = createAsyncThunk(
  "restaurants/fetchRestaurants",
  async ({ lat, lng }: { lat: number; lng: number }) =&amp;gt; {
    const response = await axios.get("http://localhost:3001/api/places", {
      params: { lat, lng },
    });
    return response.data;
  }
);

const restaurantSlice = createSlice({
  name: "restaurants",
  initialState,
  reducers: {},
  extraReducers: (builder) =&amp;gt; {
    builder
      .addCase(fetchRestaurants.pending, (state) =&amp;gt; {
        state.status = "loading";
      })
      .addCase(fetchRestaurants.fulfilled, (state, action) =&amp;gt; {
        state.status = "succeeded";
        state.restaurants = action.payload;
      })
      .addCase(fetchRestaurants.rejected, (state, action) =&amp;gt; {
        state.status = "failed";
        state.error = action.error.message || null;
      });
  },
});

export default restaurantSlice.reducer;
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;ul&gt;
&lt;li&gt;Configure Store Provider:
-&amp;gt; Wrap your app with the Redux provider in src/index.tsx:
&lt;/li&gt;
&lt;/ul&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight plaintext"&gt;&lt;code&gt;import React from "react";
import ReactDOM from "react-dom";
import "./index.css";
import App from "./App";
import reportWebVitals from "./reportWebVitals";
import { Provider } from "react-redux";
import store from "./redux/store";

ReactDOM.render(
  &amp;lt;React.StrictMode&amp;gt;
    &amp;lt;Provider store={store}&amp;gt;
      &amp;lt;App /&amp;gt;
    &amp;lt;/Provider&amp;gt;
  &amp;lt;/React.StrictMode&amp;gt;,
  document.getElementById("root")
);

reportWebVitals();
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;p&gt;&lt;strong&gt;Step 4: Assembling the App&lt;/strong&gt;&lt;/p&gt;

&lt;ul&gt;
&lt;li&gt;Create App Component:
-&amp;gt; Update src/App.tsx to include all components:
&lt;/li&gt;
&lt;/ul&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight plaintext"&gt;&lt;code&gt;import React from "react";
import Header from "./components/Header";
import Footer from "./components/Footer";
import PlaceAutocomplete from "./components/PlaceAutocomplete";
import RestaurantList from "./components/RestaurantList";

const App: React.FC = () =&amp;gt; {
  return (
    &amp;lt;div className="flex flex-col min-h-screen"&amp;gt;
      &amp;lt;Header /&amp;gt;
      &amp;lt;div className="flex-grow flex flex-col items-center p-4"&amp;gt;
        &amp;lt;PlaceAutocomplete /&amp;gt;
        &amp;lt;RestaurantList /&amp;gt;
      &amp;lt;/div&amp;gt;
      &amp;lt;Footer /&amp;gt;
    &amp;lt;/div&amp;gt;
  );
};

export default App;
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;p&gt;&lt;strong&gt;Run the Frontend:&lt;/strong&gt;&lt;br&gt;
-&amp;gt; Navigate to the client directory and run npm start to start the React app.&lt;br&gt;
-&amp;gt; Open a browser and navigate to &lt;a href="http://localhost:3000" rel="noopener noreferrer"&gt;http://localhost:3000&lt;/a&gt; to see the application in action.&lt;/p&gt;

&lt;p&gt;&lt;strong&gt;Step 5: Testing the App&lt;/strong&gt;&lt;/p&gt;

&lt;ul&gt;
&lt;li&gt;Functionality Testing:&lt;/li&gt;
&lt;/ul&gt;

&lt;p&gt;-&amp;gt; Enter a location in the search bar and verify the list of restaurants updates accordingly.&lt;br&gt;
-&amp;gt; Check that the restaurant cards display all relevant information and links to Google Maps.&lt;/p&gt;

&lt;p&gt;Code Quality:&lt;br&gt;
Ensure your code follows best practices and is well-structured.&lt;/p&gt;

&lt;p&gt;&lt;strong&gt;Project Structure&lt;/strong&gt;&lt;/p&gt;

&lt;p&gt;client&lt;br&gt;
├── src/&lt;br&gt;
│   ├── components/&lt;br&gt;
│   │   ├── Footer.tsx&lt;br&gt;
│   │   ├── Header.tsx&lt;br&gt;
│   │   ├── PlaceAutocomplete.tsx&lt;br&gt;
│   │   ├── RestaurantCard.tsx&lt;br&gt;
│   │   ├── RestaurantList.tsx&lt;br&gt;
│   ├── images/&lt;br&gt;
│   │   ├── def-restaurant.jpg&lt;br&gt;
│   │   ├── menuplate.jpg&lt;br&gt;
│   ├── redux/&lt;br&gt;
│   │   ├── hooks.ts&lt;br&gt;
│   │   ├── store.ts&lt;br&gt;
│   │   ├── restaurantSlice.ts&lt;br&gt;
│   ├── App.tsx&lt;br&gt;
│   ├── index.tsx&lt;br&gt;
│   ├── .env&lt;br&gt;
│   ├── package.json&lt;br&gt;
server&lt;br&gt;
├── server.js&lt;br&gt;
├── .env&lt;/p&gt;

&lt;p&gt;&lt;strong&gt;Great job!&lt;/strong&gt;&lt;br&gt;
You have successfully built a user-friendly frontend for the "Restaurant Finder" app. Your React application is now equipped with features like location-based restaurant search, and it integrates seamlessly with the backend you built earlier.&lt;/p&gt;

&lt;p&gt;With both the backend and frontend completed, you have a full-stack application ready for deployment. Feel free to explore further enhancements, such as adding more filters or improving the UI. &lt;br&gt;
&lt;em&gt;Happy coding!&lt;/em&gt;&lt;/p&gt;

&lt;p&gt;&lt;a href="https://dev.to/vb_nair/building-a-simple-full-stack-restaurant-finder-app-with-react-redux-nodejs-and-google-places-api-1acd" class="ltag_cta ltag_cta--branded"&gt;Part 1: Building the Backend&lt;/a&gt;
&lt;/p&gt;

&lt;p&gt;&lt;a href="https://github.com/vbnair/restaurantFinder.git" class="ltag_cta ltag_cta--branded" rel="noopener noreferrer"&gt;GitHub Repo&lt;/a&gt;
&lt;/p&gt;

&lt;p&gt;React components and hooks for the Google Maps JavaScript API.&lt;br&gt;
&lt;a href="https://visgl.github.io/react-google-maps/" class="ltag_cta ltag_cta--branded" rel="noopener noreferrer"&gt;React components and hooks for the Google Maps JavaScript API.&lt;/a&gt;
&lt;/p&gt;

&lt;p&gt;&lt;a href="https://media2.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2Fnrn2grk587asi16kcx2s.png" class="article-body-image-wrapper"&gt;&lt;img src="https://media2.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2Fnrn2grk587asi16kcx2s.png" alt=" " width="800" height="673"&gt;&lt;/a&gt;&lt;/p&gt;

&lt;p&gt;📚 Explore and Learn!&lt;br&gt;
This project is a gateway to exploring and learning, and I plan to add further iterations to enhance and expand it. I created it for exploration and to showcase the integration of various technologies. Dive in, experiment, and enjoy the journey! 🌟&lt;/p&gt;

</description>
      <category>react</category>
      <category>typescript</category>
      <category>redux</category>
      <category>googleplaces</category>
    </item>
    <item>
      <title>Building a simple Full-Stack Restaurant Finder App with React, Redux, Node.js, and Google Places API (Part 1)</title>
      <dc:creator>ByteLedger</dc:creator>
      <pubDate>Wed, 19 Jun 2024 14:47:00 +0000</pubDate>
      <link>https://dev.to/vb_nair/building-a-simple-full-stack-restaurant-finder-app-with-react-redux-nodejs-and-google-places-api-1acd</link>
      <guid>https://dev.to/vb_nair/building-a-simple-full-stack-restaurant-finder-app-with-react-redux-nodejs-and-google-places-api-1acd</guid>
      <description>&lt;p&gt;&lt;strong&gt;Introduction&lt;/strong&gt;&lt;br&gt;
Welcome to the first part of our tutorial series on building a full-stack "Restaurant Finder" application. In this blog post, we will guide you through the process of creating a robust backend using Node.js and Express. The backend will serve as the foundation for our application, allowing us to integrate seamlessly with the Google Places API to retrieve restaurant data based on user queries.&lt;/p&gt;

&lt;p&gt;Our goal is to develop a RESTful API that can handle requests for nearby restaurants, provide detailed information such as ratings and distances, and interact efficiently with the frontend built in React. By the end of this tutorial, you will have a functional backend server ready to power the restaurant discovery capabilities of our application.&lt;/p&gt;

&lt;p&gt;Let's dive into the world of backend development and set the stage for a powerful and responsive "Restaurant Finder" experience!&lt;/p&gt;

&lt;p&gt;&lt;strong&gt;&lt;u&gt;Part 1: Building the Backend&lt;/u&gt;&lt;/strong&gt;&lt;/p&gt;

&lt;p&gt;&lt;strong&gt;-&amp;gt; Creating a RESTful Backend with Node.js and Express&lt;/strong&gt;&lt;/p&gt;

&lt;p&gt;In this comprehensive tutorial, we'll walk you through creating a RESTful backend for a "Restaurant Finder" app using Node.js and Express. &lt;br&gt;
You'll learn&lt;br&gt;
-&amp;gt; how to set up a basic server,&lt;br&gt;
-&amp;gt; integrate the Google Places API to fetch nearby restaurants,&lt;br&gt;
-&amp;gt; build a full-featured server that processes and returns sorted restaurant data based on user location.&lt;/p&gt;

&lt;p&gt;Perfect for beginners, this guide will provide all the steps you need to get your backend up and running.&lt;/p&gt;

&lt;ul&gt;
&lt;li&gt;Initialize a new Node.js project&lt;/li&gt;
&lt;/ul&gt;

&lt;p&gt;&lt;code&gt;mkdir restaurant-finder&lt;br&gt;
cd restaurant-finder&lt;br&gt;
mkdir server&lt;br&gt;
npm init -y&lt;/code&gt;&lt;/p&gt;

&lt;p&gt;-&amp;gt; Install Express, Axios, CORS, and Dotenv:&lt;/p&gt;

&lt;p&gt;&lt;code&gt;npm install express axios cors dotenv&lt;/code&gt;&lt;/p&gt;

&lt;p&gt;&lt;strong&gt;Step 1: Setting Up a Basic Express Server&lt;/strong&gt;&lt;/p&gt;

&lt;ul&gt;
&lt;li&gt;Create Basic Server:&lt;/li&gt;
&lt;/ul&gt;

&lt;p&gt;-&amp;gt; Create a file named 'server.js' in the server folder.&lt;br&gt;
Add the following code to set up a basic Express server:&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight plaintext"&gt;&lt;code&gt;// server.js
const express = require("express");
const cors = require("cors");
require("dotenv").config();

const app = express();
const port = 3001;

app.use(cors());

app.get("/", (req, res) =&amp;gt; {
  res.send("Hello, World!");
});

app.listen(port, () =&amp;gt; {
  console.log(`Server running on http://localhost:${port}`);
});

&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;ul&gt;
&lt;li&gt;Run the Server:
-&amp;gt; Run node server.js in terminal to start the server.
-&amp;gt; Open a browser and navigate to &lt;a href="http://localhost:3001" rel="noopener noreferrer"&gt;http://localhost:3001&lt;/a&gt; to see the "Hello, World!" message.&lt;/li&gt;
&lt;/ul&gt;

&lt;p&gt;&lt;strong&gt;Step 2: Adding Google Places API&lt;/strong&gt;&lt;/p&gt;

&lt;ul&gt;
&lt;li&gt;Obtain API Key:&lt;/li&gt;
&lt;/ul&gt;

&lt;p&gt;-&amp;gt; Go to the Google Cloud Console.&lt;br&gt;
Create a new project or select an existing one.&lt;br&gt;
Navigate to "APIs &amp;amp; Services" &amp;gt; "Library" and enable the "Places API".&lt;br&gt;
Navigate to "APIs &amp;amp; Services" &amp;gt; "Credentials" and create an API key.&lt;/p&gt;

&lt;ul&gt;
&lt;li&gt;Add API Key to .env:&lt;/li&gt;
&lt;/ul&gt;

&lt;p&gt;-&amp;gt; Create a .env file in the server folder and add your API key:&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight plaintext"&gt;&lt;code&gt;REACT_APP_GOOGLE_PLACES_API_KEY=your_api_key_here

&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;ul&gt;
&lt;li&gt;Update Server to Use Google Places API:
Update server.js to include a new endpoint that interacts with the Google Places API:
&lt;/li&gt;
&lt;/ul&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight plaintext"&gt;&lt;code&gt;// server.js
const express = require("express");
const axios = require("axios");
const cors = require("cors");
require("dotenv").config();

const app = express();
const port = 3001;

app.use(cors());

app.get("/api/places", async (req, res) =&amp;gt; {
  try {
    const { lat, lng } = req.query;
    const response = await axios.get(
      "https://maps.googleapis.com/maps/api/place/nearbysearch/json",
      {
        params: {
          location: `${lat},${lng}`,
          radius: 1500,
          type: "restaurant",
          key: process.env.REACT_APP_GOOGLE_PLACES_API_KEY,
        },
      }
    );
    res.json(response.data.results);
  } catch (error) {
    res.status(500).json({ error: error.message });
  }
});

app.listen(port, () =&amp;gt; {
  console.log(`Server running on http://localhost:${port}`);
});

&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;ul&gt;
&lt;li&gt;Test the Endpoint:&lt;/li&gt;
&lt;/ul&gt;

&lt;p&gt;-&amp;gt; Use a tool like Postman to send a GET request to &lt;a href="http://localhost:3001/api/places?lat=37.7749&amp;amp;lng=-122.4194" rel="noopener noreferrer"&gt;http://localhost:3001/api/places?lat=37.7749&amp;amp;lng=-122.4194&lt;/a&gt; (replace with appropriate coordinates).&lt;br&gt;
-&amp;gt; Verify that the response contains restaurant data from the Google Places API.&lt;/p&gt;

&lt;p&gt;&lt;strong&gt;Step 3: Building the Full Functional Server&lt;/strong&gt;&lt;/p&gt;

&lt;ul&gt;
&lt;li&gt;Calculate Distance:
-&amp;gt; Add a function to calculate the distance between two coordinates using the Haversine formula:
(The Haversine formula calculates the shortest distance between two points on a sphere using their latitudes and longitudes measured along the surface. It is important for use in navigation.)
&lt;/li&gt;
&lt;/ul&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight plaintext"&gt;&lt;code&gt;// server.js
// Great-circle distance in kilometers between two { lat, lng } points,
// computed with the Haversine formula.
const haversineDistance = (pointA, pointB) => {
  const toRadians = (degrees) => (degrees * Math.PI) / 180;
  const EARTH_RADIUS_KM = 6371;

  const dLat = toRadians(pointB.lat - pointA.lat);
  const dLon = toRadians(pointB.lng - pointA.lng);

  // Square of half the chord length between the two points.
  const halfChordSq =
    Math.sin(dLat / 2) ** 2 +
    Math.cos(toRadians(pointA.lat)) *
      Math.cos(toRadians(pointB.lat)) *
      Math.sin(dLon / 2) ** 2;

  // Angular separation in radians.
  const angle =
    2 * Math.atan2(Math.sqrt(halfChordSq), Math.sqrt(1 - halfChordSq));

  return EARTH_RADIUS_KM * angle;
};

&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;ul&gt;
&lt;li&gt;Update Endpoint to Include Distance and Sort:
-&amp;gt; Modify the /api/places endpoint to include distance calculation and sorting:
&lt;/li&gt;
&lt;/ul&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight plaintext"&gt;&lt;code&gt;// server.js
// Nearby-restaurants endpoint: proxies the Google Places Nearby Search
// API, attaches a photo URL and the Haversine distance from the caller's
// coordinates to each result, and responds with the 10 closest,
// sorted nearest-first.
app.get("/api/places", async (req, res) =&gt; {
  try {
    // lat/lng come straight from the query string.
    const { lat, lng } = req.query;
    // Ask Google Places for restaurants within 1500 m of the given point.
    const response = await axios.get(
      "https://maps.googleapis.com/maps/api/place/nearbysearch/json",
      {
        params: {
          location: `${lat},${lng}`,
          radius: 1500,
          type: "restaurant",
          key: process.env.REACT_APP_GOOGLE_PLACES_API_KEY,
        },
      }
    );

    // Project each raw Places result down to the fields the client needs.
    const restaurants = response.data.results.map((restaurant) =&gt; {
      // Build a Place Photo URL from the first photo reference, if any;
      // null signals "no photo available" to the frontend.
      const photoUrl = restaurant.photos
        ? `https://maps.googleapis.com/maps/api/place/photo?maxwidth=400&amp;amp;photoreference=${restaurant.photos[0].photo_reference}&amp;amp;key=${process.env.REACT_APP_GOOGLE_PLACES_API_KEY}`
        : null;
      return {
        name: restaurant.name,
        vicinity: restaurant.vicinity,
        rating: restaurant.rating,
        user_ratings_total: restaurant.user_ratings_total,
        // Great-circle distance in km from the requester to the
        // restaurant; haversineDistance is defined earlier in server.js.
        distance: haversineDistance(
          { lat: parseFloat(lat), lng: parseFloat(lng) },
          {
            lat: restaurant.geometry.location.lat,
            lng: restaurant.geometry.location.lng,
          }
        ),
        photoUrl,
        place_id: restaurant.place_id
      };
    });

    // Closest first, capped at 10 results.
    const sortedRestaurants = restaurants
      .sort((a, b) =&gt; a.distance - b.distance)
      .slice(0, 10);

    res.json(sortedRestaurants);
  } catch (error) {
    // Surface any upstream/axios failure as a 500 with the message only.
    res.status(500).json({ error: error.message });
  }
});

&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;ul&gt;
&lt;li&gt;Test Full Functionality:&lt;/li&gt;
&lt;/ul&gt;

&lt;p&gt;-&amp;gt; Test the endpoint again to verify it returns a sorted list of nearby restaurants with all required details.&lt;/p&gt;

&lt;p&gt;&lt;em&gt;&lt;strong&gt;Congratulations!&lt;/strong&gt;&lt;/em&gt;&lt;/p&gt;

&lt;p&gt;You have successfully built a robust backend for the "Restaurant Finder" app. Your server can now handle requests, interact with the Google Places API, and return sorted restaurant data based on the user's location.&lt;/p&gt;

&lt;p&gt;In the next part of this series, we'll focus on creating the frontend using React and TypeScript, ensuring that users have a seamless experience when searching for restaurants. &lt;br&gt;
Read the next part &lt;a href="https://dev.to/vb_nair/building-a-simple-full-stack-restaurant-finder-app-with-react-redux-nodejs-and-google-places-api-part-2-557a" class="ltag_cta ltag_cta--branded"&gt;here&lt;/a&gt;
. &lt;/p&gt;

&lt;p&gt;&lt;em&gt;Stay tuned!&lt;/em&gt;&lt;/p&gt;

&lt;p&gt;&lt;em&gt;&lt;strong&gt;Fun Fact😉:&lt;/strong&gt;&lt;br&gt;
This project started as a take-home assignment for a job interview 🎓. While working on it, I decided to create a tutorial to help others looking for references to build similar solutions 💪. Happy coding! 🧑‍💻&lt;/em&gt;&lt;/p&gt;

</description>
      <category>node</category>
      <category>googlemaps</category>
      <category>googleplaces</category>
      <category>backend</category>
    </item>
  </channel>
</rss>
