Introduction
Welcome to Part 3! In the previous parts, we set up our environment and created a basic API server. Today, we'll integrate Kubernetes functionality into our Node.js application and create our first Helm charts for database deployment.
By the end of this post, you'll have:
- Kubernetes client integration in Node.js
- A custom Helm chart for PostgreSQL, plus a Helm service ready for MySQL and MariaDB (those charts follow in Part 4)
- Basic database instance deployment functionality
- Error handling for Kubernetes operations
What We'll Build Today
We'll create a system that can:
- Deploy database instances using Helm charts
- Report deployment and pod status through the API
- Handle Kubernetes resource management
- Provide proper error handling and logging
Step 1: Adding Kubernetes Dependencies
First, let's add the necessary dependencies to backend/package.json:
{
"name": "mini-dbaas-backend",
"version": "1.0.0",
"description": "Mini DBaaS API Server",
"main": "index.js",
"scripts": {
"start": "node index.js",
"dev": "nodemon index.js",
"test": "jest"
},
"dependencies": {
"express": "^4.18.2",
"cors": "^2.8.5",
"helmet": "^7.1.0",
"dotenv": "^16.3.1",
"winston": "^3.11.0",
"joi": "^17.11.0",
"@kubernetes/client-node": "^0.20.0",
"yaml": "^2.3.4",
"uuid": "^9.0.1"
},
"devDependencies": {
"nodemon": "^3.0.2",
"jest": "^29.7.0"
},
"keywords": ["kubernetes", "database", "dbaas", "nodejs"],
"author": "Your Name",
"license": "MIT"
}
Install the new dependencies:
cd backend
npm install
Step 2: Creating Kubernetes Service
Let's create services/k8s.js, a service that handles all Kubernetes operations:
const k8s = require('@kubernetes/client-node');
const { exec } = require('child_process');
const { promisify } = require('util');
const logger = require('../utils/logger');
const ResponseUtil = require('../utils/response');
const execAsync = promisify(exec);
class KubernetesService {
constructor() {
this.kc = new k8s.KubeConfig();
this.kc.loadFromDefault();
this.k8sApi = this.kc.makeApiClient(k8s.CoreV1Api);
this.appsV1Api = this.kc.makeApiClient(k8s.AppsV1Api);
this.storageV1Api = this.kc.makeApiClient(k8s.StorageV1Api);
}
// Check cluster connectivity
async checkCluster() {
try {
const response = await this.k8sApi.listNamespace();
logger.info(`Connected to Kubernetes cluster. Found ${response.body.items.length} namespaces`);
return ResponseUtil.success({
connected: true,
namespaces: response.body.items.length
});
} catch (error) {
logger.error('Failed to connect to Kubernetes cluster', error);
return ResponseUtil.error('Failed to connect to Kubernetes cluster', 500, error.message);
}
}
// Create namespace
async createNamespace(name) {
try {
const namespace = {
apiVersion: 'v1',
kind: 'Namespace',
metadata: {
name: name,
labels: {
'app': 'mini-dbaas',
'managed-by': 'mini-dbaas-api'
}
}
};
await this.k8sApi.createNamespace(namespace);
logger.info(`Created namespace: ${name}`);
return ResponseUtil.success({ namespace: name });
} catch (error) {
if (error.statusCode === 409) {
logger.info(`Namespace ${name} already exists`);
return ResponseUtil.success({ namespace: name, exists: true });
}
logger.error(`Failed to create namespace ${name}`, error);
return ResponseUtil.error(`Failed to create namespace ${name}`, 500, error.message);
}
}
// Delete namespace
async deleteNamespace(name) {
try {
await this.k8sApi.deleteNamespace(name);
logger.info(`Deleted namespace: ${name}`);
return ResponseUtil.success({ namespace: name, deleted: true });
} catch (error) {
logger.error(`Failed to delete namespace ${name}`, error);
return ResponseUtil.error(`Failed to delete namespace ${name}`, 500, error.message);
}
}
// Get pod status
async getPodStatus(namespace, podName) {
try {
const response = await this.k8sApi.readNamespacedPod(podName, namespace);
const pod = response.body;
return ResponseUtil.success({
name: pod.metadata.name,
namespace: pod.metadata.namespace,
status: pod.status.phase,
ready: pod.status.containerStatuses?.[0]?.ready || false,
restartCount: pod.status.containerStatuses?.[0]?.restartCount || 0,
image: pod.status.containerStatuses?.[0]?.image,
createdAt: pod.metadata.creationTimestamp
});
} catch (error) {
logger.error(`Failed to get pod status for ${podName} in ${namespace}`, error);
return ResponseUtil.error(`Failed to get pod status`, 500, error.message);
}
}
// Get all pods in namespace
async getPodsInNamespace(namespace) {
try {
const response = await this.k8sApi.listNamespacedPod(namespace);
const pods = response.body.items.map(pod => ({
name: pod.metadata.name,
status: pod.status.phase,
ready: pod.status.containerStatuses?.[0]?.ready || false,
restartCount: pod.status.containerStatuses?.[0]?.restartCount || 0,
image: pod.status.containerStatuses?.[0]?.image,
createdAt: pod.metadata.creationTimestamp
}));
return ResponseUtil.success({ pods, count: pods.length });
} catch (error) {
logger.error(`Failed to get pods in namespace ${namespace}`, error);
return ResponseUtil.error(`Failed to get pods`, 500, error.message);
}
}
// Get PVC status
async getPVCStatus(namespace, pvcName) {
try {
const response = await this.k8sApi.readNamespacedPersistentVolumeClaim(pvcName, namespace);
const pvc = response.body;
return ResponseUtil.success({
name: pvc.metadata.name,
namespace: pvc.metadata.namespace,
status: pvc.status.phase,
capacity: pvc.status.capacity?.storage,
accessModes: pvc.status.accessModes,
createdAt: pvc.metadata.creationTimestamp
});
} catch (error) {
logger.error(`Failed to get PVC status for ${pvcName} in ${namespace}`, error);
return ResponseUtil.error(`Failed to get PVC status`, 500, error.message);
}
}
// Execute kubectl command
async executeKubectl(command) {
try {
const { stdout, stderr } = await execAsync(`kubectl ${command}`);
if (stderr) {
logger.warn(`kubectl stderr: ${stderr}`);
}
return ResponseUtil.success({ output: stdout, command });
} catch (error) {
logger.error(`kubectl command failed: ${command}`, error);
return ResponseUtil.error(`kubectl command failed`, 500, error.message);
}
}
// Execute helm command
async executeHelm(command) {
try {
const { stdout, stderr } = await execAsync(`helm ${command}`);
if (stderr) {
logger.warn(`helm stderr: ${stderr}`);
}
return ResponseUtil.success({ output: stdout, command });
} catch (error) {
logger.error(`helm command failed: ${command}`, error);
return ResponseUtil.error(`helm command failed`, 500, error.message);
}
}
}
module.exports = new KubernetesService();
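The service requires logger and ResponseUtil from ../utils — helpers that presumably came out of the earlier parts of the series. If you don't have them handy, here are minimal sketches that match how they are used in this post (the exact implementations are my assumptions, not the series' canonical versions):

// utils/logger.js -- minimal winston logger (sketch)
const winston = require('winston');

module.exports = winston.createLogger({
  level: 'info',
  format: winston.format.combine(
    winston.format.timestamp(),
    winston.format.simple()
  ),
  transports: [new winston.transports.Console()]
});

// utils/response.js -- result wrapper (sketch); the shape matches how
// result.success, result.data and result.message are read in this post
class ResponseUtil {
  static success(data) {
    return { success: true, data };
  }

  static error(message, statusCode = 500, details = null) {
    return { success: false, message, statusCode, details };
  }
}

module.exports = ResponseUtil;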
Step 3: Creating Helm Chart Service
Now let's create services/helm.js, a service that manages Helm chart operations:
const { exec } = require('child_process');
const { promisify } = require('util');
const fs = require('fs').promises;
const path = require('path');
const logger = require('../utils/logger');
const ResponseUtil = require('../utils/response');
const k8sService = require('./k8s');
const execAsync = promisify(exec);
class HelmService {
constructor() {
this.chartsPath = path.join(__dirname, '../../helm-charts');
}
// Deploy database using Helm
async deployDatabase(namespace, instanceName, dbType, config) {
try {
// Create namespace if it doesn't exist
await k8sService.createNamespace(namespace);
// Prepare Helm values
const values = this.prepareHelmValues(dbType, config);
const valuesFile = path.join(this.chartsPath, `${dbType}-local`, `${instanceName}-values.yaml`);
// Write values to a per-instance override file (JSON is valid YAML),
// so repeated deployments don't overwrite the chart's committed values.yaml
await fs.writeFile(valuesFile, JSON.stringify(values, null, 2));
// Deploy using Helm
const helmCommand = `install ${instanceName} ${this.chartsPath}/${dbType}-local --namespace ${namespace} --values ${valuesFile}`;
const result = await k8sService.executeHelm(helmCommand);
if (result.success) {
logger.info(`Successfully deployed ${dbType} instance: ${instanceName} in namespace: ${namespace}`);
return ResponseUtil.success({
instanceName,
namespace,
dbType,
status: 'deploying',
helmOutput: result.data.output
});
} else {
throw new Error(result.message);
}
} catch (error) {
logger.error(`Failed to deploy ${dbType} instance: ${instanceName}`, error);
return ResponseUtil.error(`Failed to deploy database instance`, 500, error.message);
}
}
// Delete database using Helm
async deleteDatabase(namespace, instanceName) {
try {
const helmCommand = `uninstall ${instanceName} --namespace ${namespace}`;
const result = await k8sService.executeHelm(helmCommand);
if (result.success) {
logger.info(`Successfully deleted instance: ${instanceName} from namespace: ${namespace}`);
return ResponseUtil.success({
instanceName,
namespace,
status: 'deleted',
helmOutput: result.data.output
});
} else {
throw new Error(result.message);
}
} catch (error) {
logger.error(`Failed to delete instance: ${instanceName}`, error);
return ResponseUtil.error(`Failed to delete database instance`, 500, error.message);
}
}
// Get Helm release status
async getReleaseStatus(namespace, releaseName) {
try {
const helmCommand = `status ${releaseName} --namespace ${namespace} --output json`;
const result = await k8sService.executeHelm(helmCommand);
if (result.success) {
const status = JSON.parse(result.data.output);
return ResponseUtil.success({
name: releaseName,
namespace,
status: status.info?.status,
revision: status.version,
lastDeployed: status.info?.last_deployed,
description: status.info?.description
});
} else {
throw new Error(result.message);
}
} catch (error) {
logger.error(`Failed to get release status for: ${releaseName}`, error);
return ResponseUtil.error(`Failed to get release status`, 500, error.message);
}
}
// List all Helm releases
async listReleases(namespace = null) {
try {
const helmCommand = namespace
? `list --namespace ${namespace} --output json`
: `list --all-namespaces --output json`;
const result = await k8sService.executeHelm(helmCommand);
if (result.success) {
const releases = JSON.parse(result.data.output);
return ResponseUtil.success({
releases: releases.map(release => ({
name: release.name,
namespace: release.namespace,
status: release.status,
revision: release.revision,
lastDeployed: release.updated
})),
count: releases.length
});
} else {
throw new Error(result.message);
}
} catch (error) {
logger.error('Failed to list Helm releases', error);
return ResponseUtil.error('Failed to list Helm releases', 500, error.message);
}
}
// Prepare Helm values based on database type and config
prepareHelmValues(dbType, config) {
const baseValues = {
global: {
storageClass: "standard"
},
persistence: {
enabled: true,
size: config.storage || "1Gi"
},
resources: {
requests: {
memory: config.memory || "256Mi",
cpu: config.cpu || "250m"
},
limits: {
memory: config.memoryLimit || "512Mi",
cpu: config.cpuLimit || "500m"
}
}
};
switch (dbType) {
case 'postgresql':
return {
...baseValues,
auth: {
postgresPassword: config.password,
database: config.database || "postgres"
},
primary: {
persistence: {
enabled: true,
size: config.storage || "1Gi"
}
}
};
case 'mysql':
return {
...baseValues,
auth: {
rootPassword: config.password,
database: config.database || "mysql"
},
primary: {
persistence: {
enabled: true,
size: config.storage || "1Gi"
}
}
};
case 'mariadb':
return {
...baseValues,
auth: {
rootPassword: config.password,
database: config.database || "mariadb"
},
primary: {
persistence: {
enabled: true,
size: config.storage || "1Gi"
}
}
};
default:
throw new Error(`Unsupported database type: ${dbType}`);
}
}
}
module.exports = new HelmService();
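To make the Helm side less abstract, this is roughly what prepareHelmValues('postgresql', config) produces for the sample config used later in this post ({ password, storage: "2Gi", database: "myapp" }). Because JSON is valid YAML, the file written by deployDatabase can be fed straight to helm --values. Note that the chart template in Step 7 reads resource requests from primary.resources, so the top-level resources block here only takes effect for charts that look it up at the top level:

{
  "global": { "storageClass": "standard" },
  "persistence": { "enabled": true, "size": "2Gi" },
  "resources": {
    "requests": { "memory": "256Mi", "cpu": "250m" },
    "limits": { "memory": "512Mi", "cpu": "500m" }
  },
  "auth": { "postgresPassword": "securepass123", "database": "myapp" },
  "primary": { "persistence": { "enabled": true, "size": "2Gi" } }
}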
Step 4: Creating Database Instance Controller
Let's create controllers/InstanceController.js, a controller that handles database instance operations:
const helmService = require('../services/helm');
const k8sService = require('../services/k8s');
const logger = require('../utils/logger');
const ResponseUtil = require('../utils/response');
class InstanceController {
// Create a new database instance
async createInstance(req, res) {
try {
const { type, name, config } = req.body;
// Validate required fields before deriving anything from them
if (!type || !name || !config) {
return res.status(400).json(
ResponseUtil.error('Missing required fields: type, name, config', 400)
);
}
const namespace = `dbaas-${name}`;
logger.info(`Creating ${type} instance: ${name} in namespace: ${namespace}`);
// Deploy database using Helm
const result = await helmService.deployDatabase(namespace, name, type, config);
if (result.success) {
res.status(201).json(result);
} else {
res.status(500).json(result);
}
} catch (error) {
logger.error('Error creating instance', error);
res.status(500).json(
ResponseUtil.error('Failed to create instance', 500, error.message)
);
}
}
// Get all instances
async getInstances(req, res) {
try {
logger.info('Fetching all instances');
const result = await helmService.listReleases();
if (result.success) {
// Filter only our DBaaS instances
const dbaasInstances = result.data.releases.filter(release =>
release.namespace.startsWith('dbaas-')
);
res.json(ResponseUtil.success({
instances: dbaasInstances,
count: dbaasInstances.length
}));
} else {
res.status(500).json(result);
}
} catch (error) {
logger.error('Error fetching instances', error);
res.status(500).json(
ResponseUtil.error('Failed to fetch instances', 500, error.message)
);
}
}
// Get specific instance
async getInstance(req, res) {
try {
const { name } = req.params;
const namespace = `dbaas-${name}`;
logger.info(`Fetching instance: ${name}`);
// Get Helm release status
const releaseResult = await helmService.getReleaseStatus(namespace, name);
if (!releaseResult.success) {
return res.status(404).json(
ResponseUtil.error(`Instance ${name} not found`, 404)
);
}
// Get pod status
const podsResult = await k8sService.getPodsInNamespace(namespace);
res.json(ResponseUtil.success({
...releaseResult.data,
pods: podsResult.success ? podsResult.data.pods : []
}));
} catch (error) {
logger.error(`Error fetching instance: ${req.params.name}`, error);
res.status(500).json(
ResponseUtil.error('Failed to fetch instance', 500, error.message)
);
}
}
// Delete instance
async deleteInstance(req, res) {
try {
const { name } = req.params;
const namespace = `dbaas-${name}`;
logger.info(`Deleting instance: ${name}`);
// Delete Helm release
const result = await helmService.deleteDatabase(namespace, name);
if (result.success) {
// Delete namespace after a delay to ensure cleanup
setTimeout(async () => {
await k8sService.deleteNamespace(namespace);
}, 5000);
res.json(result);
} else {
res.status(500).json(result);
}
} catch (error) {
logger.error(`Error deleting instance: ${req.params.name}`, error);
res.status(500).json(
ResponseUtil.error('Failed to delete instance', 500, error.message)
);
}
}
// Get instance connection info
async getConnectionInfo(req, res) {
try {
const { name } = req.params;
const namespace = `dbaas-${name}`;
logger.info(`Fetching connection info for instance: ${name}`);
// Get service info
const serviceResult = await k8sService.executeKubectl(
`get svc -n ${namespace} -o json`
);
if (!serviceResult.success) {
return res.status(404).json(
ResponseUtil.error(`Instance ${name} not found`, 404)
);
}
const services = JSON.parse(serviceResult.data.output);
const dbService = services.items.find(svc =>
svc.metadata.name.includes(name) && svc.spec.ports
);
if (!dbService) {
return res.status(404).json(
ResponseUtil.error(`Service not found for instance ${name}`, 404)
);
}
const connectionInfo = {
host: `${dbService.metadata.name}.${namespace}.svc.cluster.local`,
port: dbService.spec.ports[0].port,
service: dbService.metadata.name,
namespace: namespace
};
res.json(ResponseUtil.success(connectionInfo));
} catch (error) {
logger.error(`Error fetching connection info for: ${req.params.name}`, error);
res.status(500).json(
ResponseUtil.error('Failed to fetch connection info', 500, error.message)
);
}
}
}
module.exports = new InstanceController();
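For reference, assuming the ResponseUtil shape sketched earlier, a successful POST /instances comes back as a 201 with a body roughly like this (helmOutput simply carries whatever helm install printed):

{
  "success": true,
  "data": {
    "instanceName": "my-postgres",
    "namespace": "dbaas-my-postgres",
    "dbType": "postgresql",
    "status": "deploying",
    "helmOutput": "NAME: my-postgres\nLAST DEPLOYED: ..."
  }
}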
Step 5: Creating Routes
Now let's create routes/instances.js with the routes for our API:
const express = require('express');
const router = express.Router();
const InstanceController = require('../controllers/InstanceController');
const { validate, schemas } = require('../middleware/validation');
// Create new instance
router.post('/', validate(schemas.instance), InstanceController.createInstance);
// Get all instances
router.get('/', InstanceController.getInstances);
// Get specific instance
router.get('/:name', InstanceController.getInstance);
// Get instance connection info
router.get('/:name/connection', InstanceController.getConnectionInfo);
// Delete instance
router.delete('/:name', InstanceController.deleteInstance);
module.exports = router;
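The routes import a validation middleware and an instance schema that we haven't written out in this post. A minimal sketch using the joi dependency we added in Step 1 could look like the following; the exact fields and constraints are my assumptions, so adapt them to your needs:

// middleware/validation.js -- illustrative sketch; the real schema may differ.
const Joi = require('joi');
const ResponseUtil = require('../utils/response');

const schemas = {
  // Assumed request shape for POST /instances (matches the fields
  // read by prepareHelmValues and the instance controller)
  instance: Joi.object({
    type: Joi.string().valid('postgresql', 'mysql', 'mariadb').required(),
    name: Joi.string().pattern(/^[a-z0-9-]+$/).max(40).required(),
    config: Joi.object({
      password: Joi.string().min(8).required(),
      database: Joi.string(),
      storage: Joi.string(),
      memory: Joi.string(),
      cpu: Joi.string(),
      memoryLimit: Joi.string(),
      cpuLimit: Joi.string()
    }).required()
  })
};

// Generic middleware factory: validate req.body against a Joi schema
const validate = (schema) => (req, res, next) => {
  const { error } = schema.validate(req.body);
  if (error) {
    return res.status(400).json(ResponseUtil.error(error.details[0].message, 400));
  }
  next();
};

module.exports = { validate, schemas };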
Step 6: Updating Main Server File
Let's update our main server file, index.js, to include the new routes:
const express = require('express');
const cors = require('cors');
const helmet = require('helmet');
require('dotenv').config();
const app = express();
const PORT = process.env.PORT || 3000;
// Import routes
const instancesRouter = require('./routes/instances');
// Middleware
app.use(helmet());
app.use(cors());
app.use(express.json());
app.use(express.urlencoded({ extended: true }));
// Basic logging middleware
app.use((req, res, next) => {
console.log(`${new Date().toISOString()} - ${req.method} ${req.path}`);
next();
});
// Health check endpoint
app.get('/health', (req, res) => {
res.json({
status: 'healthy',
timestamp: new Date().toISOString(),
uptime: process.uptime(),
environment: process.env.NODE_ENV
});
});
// API information endpoint
app.get('/', (req, res) => {
res.json({
name: 'Mini DBaaS API',
version: '1.0.0',
description: 'Database as a Service API built with Node.js and Kubernetes',
endpoints: {
health: '/health',
instances: '/instances',
'ha-clusters': '/ha-clusters'
}
});
});
// Routes
app.use('/instances', instancesRouter);
// Error handling middleware
app.use((err, req, res, next) => {
console.error(err.stack);
res.status(500).json({
error: 'Something went wrong!',
message: process.env.NODE_ENV === 'development' ? err.message : 'Internal server error'
});
});
// 404 handler
app.use('*', (req, res) => {
res.status(404).json({
error: 'Endpoint not found',
path: req.originalUrl
});
});
// Start server
app.listen(PORT, () => {
console.log(`🚀 Mini DBaaS API server running on port ${PORT}`);
console.log(`📊 Health check: http://localhost:${PORT}/health`);
console.log(`📚 API docs: http://localhost:${PORT}/`);
console.log(`🗄️ Instances API: http://localhost:${PORT}/instances`);
});
module.exports = app;
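One thing index.js doesn't use yet is the checkCluster() method on the Kubernetes service. If you want to expose it over HTTP (handy for debugging), a small optional addition could look like this — the /health/cluster path is just an illustration, nothing later in the series depends on it:

// Optional: expose cluster connectivity as its own endpoint.
// Register this next to the other routes, above the 404 handler.
const k8sService = require('./services/k8s');

app.get('/health/cluster', async (req, res) => {
  const result = await k8sService.checkCluster();
  res.status(result.success ? 200 : 500).json(result);
});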
Step 7: Creating Basic Helm Charts
Let's create a basic Helm chart for PostgreSQL under helm-charts/postgresql-local/ (the MySQL and MariaDB charts follow in Part 4). Start with Chart.yaml:
apiVersion: v2
name: postgresql-local
description: A Helm chart for PostgreSQL database instances
type: application
version: 0.1.0
appVersion: "15.0"
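Next, the chart's default values in values.yaml: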
# Default values for postgresql-local
global:
storageClass: "standard"
auth:
postgresPassword: "postgres"
database: "postgres"
primary:
persistence:
enabled: true
size: "1Gi"
resources:
requests:
memory: "256Mi"
cpu: "250m"
limits:
memory: "512Mi"
cpu: "500m"
service:
type: ClusterIP
port: 5432
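Now the StatefulSet template; inside the chart it goes under templates/, for example templates/statefulset.yaml (the file name is up to you):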
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: {{ include "postgresql-local.fullname" . }}
labels:
{{- include "postgresql-local.labels" . | nindent 4 }}
spec:
serviceName: {{ include "postgresql-local.fullname" . }}
replicas: 1
selector:
matchLabels:
{{- include "postgresql-local.selectorLabels" . | nindent 6 }}
template:
metadata:
labels:
{{- include "postgresql-local.selectorLabels" . | nindent 8 }}
spec:
containers:
- name: postgresql
image: postgres:15
ports:
- containerPort: 5432
name: postgresql
env:
- name: POSTGRES_PASSWORD
value: {{ .Values.auth.postgresPassword | quote }}
- name: POSTGRES_DB
value: {{ .Values.auth.database | quote }}
- name: PGDATA
value: /var/lib/postgresql/data/pgdata
volumeMounts:
- name: data
mountPath: /var/lib/postgresql/data
resources:
{{- toYaml .Values.primary.resources | nindent 10 }}
volumeClaimTemplates:
- metadata:
name: data
spec:
accessModes: [ "ReadWriteOnce" ]
storageClassName: {{ .Values.global.storageClass }}
resources:
requests:
storage: {{ .Values.primary.persistence.size }}
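Then the Service template (for example templates/service.yaml):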
apiVersion: v1
kind: Service
metadata:
name: {{ include "postgresql-local.fullname" . }}
labels:
{{- include "postgresql-local.labels" . | nindent 4 }}
spec:
type: {{ .Values.service.type }}
ports:
- port: {{ .Values.service.port }}
targetPort: postgresql
protocol: TCP
name: postgresql
selector:
{{- include "postgresql-local.selectorLabels" . | nindent 4 }}
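And finally the named-template helpers used above, conventionally kept in templates/_helpers.tpl: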
{{/*
Expand the name of the chart.
*/}}
{{- define "postgresql-local.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }}
{{- end }}
{{/*
Create a default fully qualified app name.
*/}}
{{- define "postgresql-local.fullname" -}}
{{- if .Values.fullnameOverride }}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- $name := default .Chart.Name .Values.nameOverride }}
{{- if contains $name .Release.Name }}
{{- .Release.Name | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }}
{{- end }}
{{- end }}
{{- end }}
{{/*
Common labels
*/}}
{{- define "postgresql-local.labels" -}}
helm.sh/chart: {{ include "postgresql-local.chart" . }}
{{ include "postgresql-local.selectorLabels" . }}
{{- if .Chart.AppVersion }}
app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
{{- end }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- end }}
{{/*
Selector labels
*/}}
{{- define "postgresql-local.selectorLabels" -}}
app.kubernetes.io/name: {{ include "postgresql-local.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
{{- end }}
{{/*
Create chart name and version as used by the chart label.
*/}}
{{- define "postgresql-local.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }}
{{- end }}
Step 8: Testing Our Kubernetes Integration
Let's create a test script at scripts/test-k8s-integration.sh to verify our setup:
#!/bin/bash
echo "🧪 Testing Kubernetes Integration"
# Test 1: Check that the API server is up
echo "🔗 Testing API health endpoint..."
curl -s http://localhost:3000/health | grep -q "healthy" && echo "✅ Health check passed" || echo "❌ Health check failed"
# Test 2: Create PostgreSQL instance
echo "🗄️ Creating PostgreSQL instance..."
CREATE_RESPONSE=$(curl -s -X POST http://localhost:3000/instances \
-H "Content-Type: application/json" \
-d '{
"type": "postgresql",
"name": "test-postgres",
"config": {
"password": "testpass123",
"storage": "1Gi",
"database": "testdb"
}
}')
echo "Create response: $CREATE_RESPONSE"
# Test 3: List instances
echo "📋 Listing instances..."
LIST_RESPONSE=$(curl -s http://localhost:3000/instances)
echo "List response: $LIST_RESPONSE"
# Test 4: Get instance status
echo "📊 Getting instance status..."
sleep 10
STATUS_RESPONSE=$(curl -s http://localhost:3000/instances/test-postgres)
echo "Status response: $STATUS_RESPONSE"
# Test 5: Get connection info
echo "🔌 Getting connection info..."
CONNECTION_RESPONSE=$(curl -s http://localhost:3000/instances/test-postgres/connection)
echo "Connection response: $CONNECTION_RESPONSE"
echo "🎉 Kubernetes integration test completed!"
Make it executable and run:
chmod +x scripts/test-k8s-integration.sh
./scripts/test-k8s-integration.sh
What We've Accomplished
Today we've successfully:
✅ Integrated Kubernetes client into our Node.js application
✅ Created Helm service for database deployment management
✅ Built instance controller with full CRUD operations
✅ Implemented proper error handling for Kubernetes operations
✅ Created a basic Helm chart for PostgreSQL deployment
✅ Added comprehensive logging for debugging
✅ Built RESTful API endpoints for instance management
✅ Wired request validation middleware into the instance routes
Testing the API
Now you can test your API with these commands:
# Create a PostgreSQL instance
curl -X POST http://localhost:3000/instances \
-H "Content-Type: application/json" \
-d '{
"type": "postgresql",
"name": "my-postgres",
"config": {
"password": "securepass123",
"storage": "2Gi",
"database": "myapp"
}
}'
# List all instances
curl http://localhost:3000/instances
# Get specific instance status
curl http://localhost:3000/instances/my-postgres
# Get connection information
curl http://localhost:3000/instances/my-postgres/connection
# Delete instance
curl -X DELETE http://localhost:3000/instances/my-postgres
Next Steps
In Part 4, we'll enhance our database instance management with:
- MySQL and MariaDB Helm charts
- Advanced monitoring and status tracking
- Connection pooling and optimization
- Backup and recovery preparation
Troubleshooting
Common Issues
1. Kubernetes client connection issues
# Check if kubectl is working
kubectl get nodes
# Verify kubeconfig
kubectl config view
2. Helm chart deployment failures
# Check Helm chart syntax
helm lint helm-charts/postgresql-local
# Test chart installation
helm install test-postgres helm-charts/postgresql-local --dry-run
3. Pod startup issues
# Check pod logs
kubectl logs -f <pod-name> -n <namespace>
# Check pod events
kubectl describe pod <pod-name> -n <namespace>
Summary
We now have a fully functional Kubernetes-integrated API server that can deploy and manage database instances! Our system can:
- Deploy PostgreSQL instances using custom Helm charts
- Check deployment and pod status through the API
- Provide connection information for applications
- Handle proper cleanup and resource management
In the next part, we'll add MySQL and MariaDB support, and implement more advanced features. Get ready to see your first database running in Kubernetes! 🚀
Series Navigation:
- Part 1: Architecture Overview
- Part 2: Environment Setup & Basic API Server
- Part 3: Kubernetes Integration & Helm Charts (this post)
- Part 4: Database Instance Management
- Part 5: Backup & Recovery with CSI VolumeSnapshots
- Part 6: High Availability with PostgreSQL Operator
- Part 7: Multi-Tenant Features & Final Testing