Docker Labs: From Beginner to Advanced on Azure
Note: Thanks for reading. In this segment I'll walk through a series of Docker labs on Azure. Some of this reflects my own notes — I'm revising what I've learned, and you're welcome to follow along.
Lab 1: Initial Azure Setup and Docker Installation
Step 1: Create Azure VM with Docker
# Create resource group
az group create --name DockerLabs --location eastus
# Create Ubuntu VM with Docker
# Standard_B2s: burstable 2 vCPU / 4 GiB — sufficient for these labs
# --custom-data passes cloud-init-docker.txt (created below) so Docker
# is installed automatically on first boot
az vm create \
--resource-group DockerLabs \
--name DockerVM \
--image Ubuntu2204 \
--admin-username azureuser \
--generate-ssh-keys \
--size Standard_B2s \
--custom-data cloud-init-docker.txt
Create cloud-init-docker.txt:
#cloud-config
package_upgrade: true

# Prerequisites for adding Docker's apt repository over HTTPS
packages:
  - apt-transport-https
  - ca-certificates
  - curl
  - gnupg
  - lsb-release

# First-boot commands: add Docker's official apt repo, install Docker Engine
# plus the Compose plugin, let azureuser run docker without sudo, and make
# the Docker daemon start on boot.
runcmd:
  - curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo gpg --dearmor -o /usr/share/keyrings/docker-archive-keyring.gpg
  - echo "deb [arch=amd64 signed-by=/usr/share/keyrings/docker-archive-keyring.gpg] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable" | sudo tee /etc/apt/sources.list.d/docker.list > /dev/null
  - apt-get update
  - apt-get install -y docker-ce docker-ce-cli containerd.io docker-compose-plugin
  - usermod -aG docker azureuser
  - systemctl enable docker
Step 2: Connect and Verify
# SSH to your VM
ssh azureuser@<your-vm-ip>
# Verify Docker installation
docker --version
# Compose was installed as the CLI plugin (docker-compose-plugin), so it is
# invoked as a docker subcommand — the standalone `docker-compose` binary
# is not present on this VM
docker compose version
docker system info
# Test with hello-world
docker run hello-world
Lab 2: Basic Docker Operations
Step 1: Container Lifecycle Management
# Pull an image (the alpine tag is a minimal nginx build)
docker pull nginx:alpine
# Run a container: -d detached, --name for easy reference,
# -p 80:80 publishes host port 80 -> container port 80
docker run -d --name web-server -p 80:80 nginx:alpine
# Check running containers
docker ps
# View container logs
docker logs web-server
# Execute commands in running container (-it = interactive TTY; type `exit` to leave)
docker exec -it web-server sh
# Stop and remove container
docker stop web-server
docker rm web-server
Step 2: Container Networking Basics
# Create a custom (user-defined bridge) network — unlike the default bridge,
# it provides DNS resolution by container name
docker network create my-network
# Run containers on the same network
docker run -d --name web1 --network my-network nginx:alpine
docker run -d --name web2 --network my-network nginx:alpine
# Test connectivity between containers ("web2" resolves via Docker's embedded DNS)
docker exec web1 ping web2
# Inspect network
docker network inspect my-network
Lab 3: Building Custom Images
Step 1: Create a Simple Web Application
Create project structure:
mkdir my-webapp && cd my-webapp
Create app.py:
from flask import Flask
import os

app = Flask(__name__)


@app.route('/')
def hello():
    """Render a small HTML page showing the container's hostname.

    HOSTNAME is set by Docker to the container ID, which makes it easy to
    see which container served the request once the app is scaled.
    """
    return f"""
    <html>
        <body>
            <h1>Hello from Docker!</h1>
            <p>Hostname: {os.environ.get('HOSTNAME', 'Unknown')}</p>
            <p>Visit count: <!-- will be implemented later --></p>
        </body>
    </html>
    """


if __name__ == '__main__':
    # Bind to 0.0.0.0 so the app is reachable from outside the container
    app.run(host='0.0.0.0', port=5000)
Create requirements.txt:
# Pinned versions for reproducible builds
Flask==2.3.3
Redis==4.6.0
Step 2: Create Dockerfile
# Use official Python runtime
FROM python:3.9-slim
# Set working directory
WORKDIR /app
# Copy requirements and install dependencies first, so this layer is cached
# and only rebuilt when requirements.txt changes
COPY requirements.txt .
RUN pip install --no-cache-dir -r requirements.txt
# Copy application code (add a .dockerignore to keep .git, caches, etc. out)
COPY . .
# Expose port (documentation only — publishing still requires -p at run time)
EXPOSE 5000
# Set environment variables
# NOTE(review): FLASK_ENV was removed in Flask 2.3 and is ignored by the
# pinned Flask==2.3.3 — harmless here, but don't rely on it
ENV FLASK_ENV=production
# Run the application
CMD ["python", "app.py"]
Step 3: Build and Run
# Build the image (tag my-webapp; "." is the build context holding the Dockerfile)
docker build -t my-webapp .
# Run the container
docker run -d --name my-app -p 5000:5000 my-webapp
# Test the application
curl http://localhost:5000
Lab 4: Docker Compose - Multi-Container Application
Step 1: Create Redis Counter Application
Update app.py:
from flask import Flask
import redis
import os

app = Flask(__name__)

# "redis" resolves via Docker's embedded DNS to the redis service on the
# shared compose network; decode_responses=True returns str instead of bytes
redis_client = redis.Redis(host='redis', port=6379, decode_responses=True)


@app.route('/')
def hello():
    """Increment a Redis-backed visit counter and render it as HTML."""
    # INCR is atomic, so concurrent replicas can share the counter safely
    count = redis_client.incr('visit_count')
    return f"""
    <html>
        <body>
            <h1>Hello from Docker Compose!</h1>
            <p>Hostname: {os.environ.get('HOSTNAME', 'Unknown')}</p>
            <p>Visit count: {count}</p>
        </body>
    </html>
    """


if __name__ == '__main__':
    # Bind to 0.0.0.0 so the app is reachable from outside the container
    app.run(host='0.0.0.0', port=5000)
Step 2: Create docker-compose.yml
version: '3.8'

services:
  web:
    build: .
    ports:
      - "5000:5000"
    environment:
      - FLASK_ENV=production
    depends_on:
      - redis
    networks:
      - app-network

  redis:
    image: redis:7-alpine
    networks:
      - app-network
    volumes:
      # Named volume so counter data survives container recreation
      - redis-data:/data

  nginx:
    image: nginx:alpine
    ports:
      - "80:80"
    volumes:
      # Mounted read-only: nginx only needs to read its config
      - ./nginx.conf:/etc/nginx/nginx.conf:ro
    depends_on:
      - web
    networks:
      - app-network

networks:
  app-network:
    driver: bridge

volumes:
  redis-data:
Step 3: Create Nginx Configuration
Create nginx.conf:
events {
    worker_connections 1024;
}

http {
    # Upstream pool: "web" resolves to the compose service on the shared network
    upstream webapp {
        server web:5000;
    }

    server {
        listen 80;

        location / {
            proxy_pass http://webapp;
            # Forward the original host and client IP to the app
            proxy_set_header Host $host;
            proxy_set_header X-Real-IP $remote_addr;
        }
    }
}
Step 4: Deploy with Compose
# Start all services
docker compose up -d
# Check status
docker compose ps
# View logs
docker compose logs -f
# Scale web service
# NOTE(review): scaling past 1 replica fails while the web service publishes
# a fixed host port ("5000:5000") — every replica would need that same port.
# Drop the host port (let nginx proxy to the service) before scaling.
docker compose up -d --scale web=3
Lab 5: Data Management and Volumes
Step 1: Persistent Data with Volumes
# Create named volume
docker volume create app-data
# Run container with volume
docker run -d \
--name db \
-v app-data:/var/lib/mysql \
-e MYSQL_ROOT_PASSWORD=secret \
mysql:8.0
# Inspect volume
docker volume inspect app-data
# Backup volume: a throwaway alpine container tars the volume contents into
# the current directory. The source is mounted read-only so the backup cannot
# modify it, and "$(pwd)" is quoted so paths containing spaces work.
docker run --rm \
-v app-data:/source:ro \
-v "$(pwd)":/backup \
alpine tar czf /backup/backup.tar.gz -C /source .
Step 2: Bind Mounts for Development
# Development with bind mount: local edits appear inside the container
# immediately. "$(pwd)" is quoted so paths containing spaces work.
docker run -d \
--name dev-app \
-p 5001:5000 \
-v "$(pwd)":/app \
-w /app \
python:3.9-slim \
sh -c "pip install -r requirements.txt && python app.py"
Lab 6: Advanced Dockerfile Techniques
Step 1: Multi-Stage Build
Create advanced Dockerfile.advanced:
# Build stage: install dependencies into /root/.local (--user) so the
# runtime stage can copy just the installed packages, without pip caches
FROM python:3.9-slim AS builder
WORKDIR /build
COPY requirements.txt .
RUN pip install --user -r requirements.txt

# Runtime stage
FROM python:3.9-slim
# Install security updates
RUN apt-get update && \
    apt-get upgrade -y && \
    rm -rf /var/lib/apt/lists/*
# Create non-root user
RUN groupadd -r appuser && useradd -r -g appuser appuser
WORKDIR /app
# Copy installed packages from builder
COPY --from=builder /root/.local /home/appuser/.local
COPY --chown=appuser:appuser . .
# Switch to non-root user
USER appuser
# Add user's local bin to PATH
ENV PATH=/home/appuser/.local/bin:$PATH
EXPOSE 5000
# Health probe via Python's stdlib: python:3.9-slim does not ship curl, so a
# curl-based check would always report unhealthy. urlopen raises (non-zero
# exit) on connection failure or HTTP error status.
HEALTHCHECK --interval=30s --timeout=3s \
    CMD python -c "import urllib.request; urllib.request.urlopen('http://localhost:5000/')" || exit 1
CMD ["python", "app.py"]
Step 2: Build and Test Advanced Image
# Build with advanced Dockerfile (-f selects a non-default Dockerfile name)
docker build -t my-webapp:secure -f Dockerfile.advanced .
# Run with security context:
# --read-only makes the container's root filesystem immutable;
# --tmpfs /tmp still gives the app a writable scratch area
docker run -d \
--name secure-app \
-p 5002:5000 \
--read-only \
--tmpfs /tmp \
my-webapp:secure
# Check health status (populated once the HEALTHCHECK has run at least once)
docker inspect --format='{{.State.Health.Status}}' secure-app
Lab 7: Container Orchestration with Docker Swarm
Step 1: Initialize Swarm Mode
# Initialize swarm (on manager node) — prints a join token for worker nodes
docker swarm init
# Create overlay network (spans all nodes in the swarm)
docker network create -d overlay app-overlay
# Deploy stack (docker-compose.swarm.yml is created in Step 2)
docker stack deploy -c docker-compose.swarm.yml myapp
Step 2: Create Swarm Compose File
Create docker-compose.swarm.yml:
version: '3.8'

services:
  web:
    image: my-webapp:secure
    ports:
      # Published via swarm's ingress routing mesh, so all 3 replicas
      # can share port 5000
      - "5000:5000"
    deploy:
      replicas: 3
      update_config:
        parallelism: 2
        delay: 10s
      restart_policy:
        condition: on-failure
      resources:
        limits:
          memory: 256M
        reservations:
          memory: 128M
    networks:
      - app-overlay

  redis:
    image: redis:7-alpine
    deploy:
      replicas: 1
      # Pin to the manager so the redis-data volume stays on one node
      placement:
        constraints:
          - node.role == manager
    volumes:
      - redis-data:/data
    networks:
      - app-overlay

  visualizer:
    image: dockersamples/visualizer:stable
    ports:
      - "8080:8080"
    volumes:
      # Needs the Docker socket to read swarm state
      - /var/run/docker.sock:/var/run/docker.sock
    deploy:
      placement:
        constraints:
          - node.role == manager
    networks:
      - app-overlay

networks:
  app-overlay:
    # Created beforehand with: docker network create -d overlay app-overlay
    external: true

volumes:
  redis-data:
Step 3: Manage Swarm Services
# Check service status
docker service ls
# Scale services (the ingress routing mesh load-balances the published port
# across replicas, so this works even with a fixed host port)
docker service scale myapp_web=5
# View service logs
docker service logs myapp_web
# Update service (rolling update follows the stack's update_config)
docker service update --image my-webapp:new-version myapp_web
Lab 8: Monitoring and Logging
Step 1: Set up Monitoring Stack
Create monitoring/docker-compose.monitoring.yml:
version: '3.8'

services:
  prometheus:
    image: prom/prometheus:latest
    ports:
      - "9090:9090"
    volumes:
      - ./prometheus.yml:/etc/prometheus/prometheus.yml
      - prom_data:/prometheus
    command:
      - '--config.file=/etc/prometheus/prometheus.yml'
      - '--storage.tsdb.path=/prometheus'
      - '--web.console.libraries=/etc/prometheus/console_libraries'
      - '--web.console.templates=/etc/prometheus/consoles'
      - '--storage.tsdb.retention.time=200h'
      - '--web.enable-lifecycle'
    networks:
      - monitoring

  grafana:
    image: grafana/grafana:latest
    ports:
      - "3000:3000"
    environment:
      # Demo-only default password — change it before exposing Grafana
      - GF_SECURITY_ADMIN_PASSWORD=admin
    volumes:
      - grafana_data:/var/lib/grafana
    networks:
      - monitoring

  node-exporter:
    image: prom/node-exporter:latest
    volumes:
      # Read-only host mounts so the exporter can report host-level metrics
      - /proc:/host/proc:ro
      - /sys:/host/sys:ro
      - /:/rootfs:ro
    command:
      - '--path.procfs=/host/proc'
      - '--path.rootfs=/rootfs'
      - '--path.sysfs=/host/sys'
      # $$ escapes $ so Compose does not try to interpolate the regex
      - '--collector.filesystem.mount-points-exclude=^/(sys|proc|dev|host|etc)($$|/)'
    networks:
      - monitoring

networks:
  monitoring:
    driver: bridge

volumes:
  prom_data:
  grafana_data:
Step 2: Prometheus Configuration
Create monitoring/prometheus.yml:
global:
  scrape_interval: 15s

scrape_configs:
  - job_name: 'prometheus'
    static_configs:
      - targets: ['localhost:9090']

  - job_name: 'node-exporter'
    static_configs:
      - targets: ['node-exporter:9100']

  # NOTE(review): no cadvisor service is defined in the monitoring stack —
  # add one (gcr.io/cadvisor/cadvisor) or this target will stay down
  - job_name: 'cadvisor'
    static_configs:
      - targets: ['cadvisor:8080']

  # Docker daemon metrics — requires "metrics-addr" in /etc/docker/daemon.json
  - job_name: 'docker-containers'
    static_configs:
      - targets: ['localhost:9323']
Lab 9: Security Best Practices
Step 1: Security Scanning
# Install Grype (Anchore's vulnerability scanner; Trivy or Docker Scout are alternatives)
curl -sSfL https://raw.githubusercontent.com/anchore/grype/main/install.sh | sh -s -- -b /usr/local/bin
# Scan image for vulnerabilities
grype my-webapp:secure
# Use Docker Bench Security — audits the host and daemon against the
# CIS Docker Benchmark; it needs broad host access, hence the mounts below
docker run -it --net host --pid host --userns host --cap-add audit_control \
-e DOCKER_CONTENT_TRUST=1 \
-v /var/lib:/var/lib \
-v /var/run/docker.sock:/var/run/docker.sock \
-v /etc:/etc \
--label docker_bench_security \
docker/docker-bench-security
Step 2: Content Trust and Signing
# Enable Docker Content Trust (applies to this shell session only)
export DOCKER_CONTENT_TRUST=1
# Build and push with signing — the push is signed because DCT is enabled
docker build -t myregistry.azurecr.io/my-webapp:1.0 .
docker push myregistry.azurecr.io/my-webapp:1.0
Lab 10: Azure Container Registry Integration
Step 1: Create and Configure ACR
# Create Azure Container Registry (the registry name must be globally unique)
az acr create --resource-group DockerLabs --name myDockerRegistry --sku Basic
# Login to ACR
az acr login --name myDockerRegistry
# Tag and push images (the registry hostname is always lowercase)
docker tag my-webapp:secure mydockerregistry.azurecr.io/my-webapp:1.0
docker push mydockerregistry.azurecr.io/my-webapp:1.0
# Pull from ACR
docker pull mydockerregistry.azurecr.io/my-webapp:1.0
Step 2: Automated Builds with ACR Tasks
# Create ACR build task (builds in Azure — no local Docker daemon needed)
az acr build --registry myDockerRegistry --image my-webapp:latest .
# Set up automated build on Git commit
# {{.Run.ID}} tags each image with the unique ACR run ID
# NOTE(review): avoid pasting the PAT on the command line (it lands in shell
# history) — read it from an environment variable or a secret store instead
az acr task create \
--registry myDockerRegistry \
--name buildwebapp \
--image my-webapp:{{.Run.ID}} \
--context https://github.com/yourusername/my-webapp.git \
--file Dockerfile \
--branch main \
--git-access-token <your-token>
Final Project: Complete Microservices Application
Create final-project/docker-compose.prod.yml:
version: '3.8'

services:
  frontend:
    image: mydockerregistry.azurecr.io/my-webapp:1.0
    deploy:
      replicas: 3
    environment:
      - REDIS_HOST=redis
    networks:
      - frontend
      - backend

  api:
    image: mydockerregistry.azurecr.io/api-service:1.0
    deploy:
      replicas: 2
    environment:
      # NOTE(review): credentials inline for lab simplicity — use Docker
      # secrets or Azure Key Vault in a real deployment
      - DATABASE_URL=postgresql://user:pass@db:5432/app
    networks:
      - backend

  redis:
    image: redis:7-alpine
    deploy:
      # Pin to the manager so the redis-data volume stays on one node
      placement:
        constraints:
          - node.role == manager
    volumes:
      - redis-data:/data
    networks:
      - backend

  db:
    image: postgres:13
    environment:
      - POSTGRES_DB=app
      - POSTGRES_USER=user
      - POSTGRES_PASSWORD=pass
    volumes:
      - postgres-data:/var/lib/postgresql/data
    networks:
      - backend

  traefik:
    image: traefik:v2.9
    ports:
      - "80:80"
      - "443:443"
    volumes:
      # Traefik watches the Docker socket to discover services
      - /var/run/docker.sock:/var/run/docker.sock
      - ./traefik.yml:/etc/traefik/traefik.yml
    networks:
      - frontend

networks:
  frontend:
    driver: overlay
  backend:
    driver: overlay

volumes:
  redis-data:
  postgres-data:
Top comments (0)