Deploy a Laravel project with Docker Swarm

We will cover three major steps in this guide:

  • Set up the Laravel project with Docker Compose
  • Deploy the stack to the swarm
  • Create the GitLab CI pipeline

Set up the Laravel project with Docker Compose

In this guide we will walk through deploying a Laravel project using Docker Swarm and setting up a CI/CD pipeline to automate the deployment process.
Let's start by containerizing the Laravel project with Docker Compose.
We need three separate service containers:

  • An app service running PHP 7.4-FPM;
  • A db service running MySQL 8.0;
  • An nginx service that uses the app service to parse PHP code

Step 1. Set the environment variables in the project

In the root directory of the project we have a .env file; we need to update a few variables:

DB_CONNECTION=mysql
DB_HOST=db
DB_PORT=3306
DB_DATABASE=experience
DB_USERNAME=experience_user
DB_PASSWORD=your-password
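If you are starting from a fresh clone of the project, the usual Laravel bootstrap commands come first (a quick sketch, assuming the default .env.example that ships with Laravel):

$ cp .env.example .env
$ php artisan key:generate

The second command fills in APP_KEY, which Laravel needs before it will serve requests.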

Step 2. Setting up the application's Dockerfile

We need to build a custom image for the application container, so we'll create a new Dockerfile for it.

Dockerfile

FROM php:7.4-fpm
# Install system dependencies
RUN apt-get update && apt-get install -y \
    git \
    curl \
    libpng-dev \
    libonig-dev \
    libxml2-dev \
    zip \
    unzip
# Clear cache
RUN apt-get clean && rm -rf /var/lib/apt/lists/*
# Install PHP extensions
RUN docker-php-ext-install pdo_mysql mbstring exif pcntl bcmath gd
# Get latest Composer
COPY --from=composer:latest /usr/bin/composer /usr/bin/composer
# Set working directory
WORKDIR /var/www
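As an optional sanity check (not part of the original steps), you can build the image on its own and confirm the PHP extensions were installed:

$ docker build -t travellist .
$ docker run --rm travellist php -m | grep -E 'pdo_mysql|mbstring|gd'

php -m lists the compiled-in modules, so those extension names should appear in the output.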

Step 3. Setting up the Nginx config and the database dump file

In the root directory of the project, create a new directory called docker-compose.
Inside it we need two more directories, nginx and mysql,
so we end up with these two paths in the project (the command after the list creates them):

  1. laravel-project/docker-compose/nginx/
  2. laravel-project/docker-compose/mysql/
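A single command creates both directories from the project root:

$ mkdir -p docker-compose/nginx docker-compose/mysql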

In the nginx directory create a file called experience.conf and put the Nginx configuration in it:

server {
    listen 80;
    index index.php index.html;
    error_log  /var/log/nginx/error.log;
    access_log /var/log/nginx/access.log;
    root /var/www/public;
    location ~ \.php$ {
        try_files $uri =404;
        fastcgi_split_path_info ^(.+\.php)(/.+)$;
        fastcgi_pass app:9000;
        fastcgi_index index.php;
        include fastcgi_params;
        fastcgi_param SCRIPT_FILENAME $document_root$fastcgi_script_name;
        fastcgi_param PATH_INFO $fastcgi_path_info;
    }
    location / {
        try_files $uri $uri/ /index.php?$query_string;
        gzip_static on;
    }
}

In the mysql directory create a file called init_db.sql (the MySQL image only executes .sh, .sql and .sql.gz files from /docker-entrypoint-initdb.d, so the extension matters) and put the database initialization in it:

DROP TABLE IF EXISTS `places`;

CREATE TABLE `places` (
  `id` bigint(20) unsigned NOT NULL AUTO_INCREMENT,
  `name` varchar(255) COLLATE utf8mb4_unicode_ci NOT NULL,
  `visited` tinyint(1) NOT NULL DEFAULT '0',
  PRIMARY KEY (`id`)
) ENGINE=InnoDB AUTO_INCREMENT=12 DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_unicode_ci;

INSERT INTO `places` (name, visited) VALUES ('Berlin',0),('Budapest',0),('Cincinnati',1),('Denver',0),('Helsinki',0),('Lisbon',0),('Moscow',1);
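The MySQL entrypoint imports this file only when the data directory is initialized for the first time. Once the stack is up (Step 5), you can check that the dump was loaded, using the credentials from the .env file above:

$ docker-compose exec db mysql -u experience_user -p experience -e "SELECT name, visited FROM places;"

Enter the DB_PASSWORD value when prompted; you should see the seven seeded rows.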

Step 4. Creating a multi-container setup with docker-compose

We need to build three containers that share networks and data volumes.
So create a docker-compose.yml file in the root directory of the project.
To create a network that connects the services, we define it in the docker-compose file like this:

networks:
  experience:
    driver: bridge

App service:

app:
 build:
   context: ./
   dockerfile: Dockerfile
 image: travellist
 container_name: experience-app
 restart: unless-stopped
 working_dir: /var/www/
 volumes:
   - ./:/var/www
 networks:
   - experience

DB service:

db:
 image: mysql:8.0
 container_name: experience-db
 restart: unless-stopped
 environment:
    MYSQL_DATABASE: ${DB_DATABASE}
    MYSQL_ROOT_PASSWORD: ${DB_PASSWORD}
    MYSQL_PASSWORD: ${DB_PASSWORD}
    MYSQL_USER: ${DB_USERNAME}
    SERVICE_TAGS: dev
    SERVICE_NAME: mysql
 volumes:
  - ./docker-compose/mysql:/docker-entrypoint-initdb.d
 networks:
  - experience

Nginx service:

nginx:
 image: nginx:1.17-alpine
 container_name: experience-nginx
 restart: unless-stopped
 ports:
  - 8000:80
 volumes:
  - ./:/var/www
  - ./docker-compose/nginx:/etc/nginx/conf.d
 networks:
  - experience

So our complete docker-compose.yml file looks like this:

version: "3.7"
services:
 app:
   build:
     context: ./
     dockerfile: Dockerfile
   image: travellist
   container_name: experience-app
   restart: unless-stopped
   working_dir: /var/www/
   volumes:
     - ./:/var/www
   networks:
     - experience

 db:
   image: mysql:8.0
   container_name: experience-db
   restart: unless-stopped
   environment:
     MYSQL_DATABASE: ${DB_DATABASE}
     MYSQL_ROOT_PASSWORD: ${DB_PASSWORD}
     MYSQL_PASSWORD: ${DB_PASSWORD}
     MYSQL_USER: ${DB_USERNAME}
     SERVICE_TAGS: dev
     SERVICE_NAME: mysql
   volumes:
     - ./docker-compose/mysql:/docker-entrypoint-initdb.d
   networks:
     - experience

 nginx:
   image: nginx:1.17-alpine
   container_name: experience-nginx
   restart: unless-stopped
   ports:
     - 8000:80
   volumes:
     - ./:/var/www
     - ./docker-compose/nginx:/etc/nginx/conf.d/
   networks:
     - experience

networks:
 experience:
    driver: bridge

Step 5. Running the application with Docker Compose

Now we can build the app image with this command:
$ docker-compose build app
When the build is finished, we can run the environment in background mode with:
$ docker-compose up -d

Output:
Creating experience-db    ... done
Creating experience-app   ... done
Creating experience-nginx ... done

To show information about the state of the active services, run:
$ docker-compose ps
Well, in these 5 simple steps we have successfully run our application.
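A quick smoke test from the host confirms Nginx is forwarding requests to PHP-FPM (assuming the 8000:80 port mapping used for the nginx service above):

$ curl -I http://localhost:8000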


Now we have a docker-compose file for our application, which is what we need for Docker Swarm.

Let's initialize Docker Swarm.

After installing Docker on your server
*Attention: to install Docker, be sure to use the official documentation: install docker
check the Docker information with this command:
$ docker info
You should see "Swarm: inactive" in the output.
To activate swarm mode in Docker, use this command:
$ docker swarm init
The Docker engine targeted by this command becomes a manager in the newly created single-node swarm.
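On a server with more than one network interface, Docker may ask you to choose the address to advertise; you can pass it explicitly and then confirm the node is a manager (the IP is a placeholder):

$ docker swarm init --advertise-addr <your-server-ip>
$ docker node ls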
What we want to use are the services of this swarm.
We want to update a service such as app through Docker Swarm. The advantage of updating a service in Docker Swarm is that there is no need to take the app service down first, update it, and then bring it back up.
Instead, with a single command we hand Docker the new image for the service and tell it to update. Docker starts the new tasks without stopping the old ones and gradually shifts the load from the old tasks to the new ones.
When running Docker Engine in swarm mode, we can use docker stack deploy to deploy a complete application stack to the swarm. The deploy command accepts a stack description in the form of a Compose file.
So we take our Compose environment down with this command:
$ docker-compose down
And create our stack.

OK, if everything is fine so far, take a rest.


Deploy the stack to the swarm

$ docker stack deploy --compose-file docker-compose.yml <your-stack-name>
For example:
$ docker stack deploy --compose-file docker-compose.yml staging
You will probably see something like this in the output:

Creating network staging_experience
Creating service staging_nginx
failed to create service staging_nginx: Error response from daemon: The network staging_experience cannot be used with services. Only networks scoped to the swarm can be used, such as those created with the overlay driver.

This is because of "driver: bridge". To deploy your services in swarm mode you must use the overlay driver for the network. If you simply remove that line from your docker-compose file, the network will be created with the overlay driver automatically when the stack is deployed. So the networks section of our docker-compose file becomes:

networks:
 experience:

And run the command above again:
$ docker stack deploy --compose-file docker-compose.yml staging
You will probably still see this error:

failed to create service staging_nginx: Error response from daemon: The network staging_experience cannot be used with services. Only networks scoped to the swarm can be used, such as those created with the overlay driver.

Get the list of networks in your Docker:
$ docker network ls

Output:
NETWORK ID      NAME                   DRIVER     SCOPE
30f94ae1c94d    staging_experience     bridge     local

So your network still has local scope: during the first stack deploy it was created with local scope, and we must remove it:
$ docker network rm staging_experience
After that, run the command again:
$ docker stack deploy --compose-file docker-compose.yml staging

Output:
Creating network staging_experience
Creating service staging_app
Creating service staging_db
Creating service staging_nginx
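You can also confirm that the network was recreated with the right scope this time; it should now show the overlay driver and swarm scope:

$ docker network ls --filter name=staging_experience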

Now check the stack with:
$ docker stack ls

Output:
NAME           SERVICES
staging         3

And get the service list with:
$ docker service ls
Output:

(screenshot of the docker service ls output)

If REPLICAS shows 0/1, something is wrong with that service.
To check a service's status, run, for example:
$ docker service ps staging_app
And to see the details of a service, check its logs, for example:
$ docker service logs staging_app
The output of this command shows you what the problem with the service is.
And to update a service with a new image, the command you need is:

$ docker service update --image "<your-image>" "<name-of-your-service>" --force

That's it, your Docker Swarm is ready for zero-downtime deployment :)))
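The rolling-update behaviour can also be tuned per service. As a small sketch with illustrative values, start-first tells Swarm to bring the new task up before stopping the old one:

$ docker service update --update-parallelism 1 --update-delay 10s --update-order start-first staging_app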

The last step for a complete zero-downtime deployment process is to create a pipeline in GitLab.

Create the GitLab CI pipeline

In this step we want to create a pipeline in GitLab that builds, tests and deploys the project.
So we have three stages:

stages:
 - Build
 - Test
 - Deploy

OK, let's clarify what we need and what happens in this step.
We update the Laravel project and push the change to GitLab; the pipeline builds a new image from the change and tests it, then logs in to the host server, pulls the updated image there, and updates the project's service.
To log in to the server we need to define some variables in GitLab: in your repository go to Settings -> CI/CD -> Variables -> Add variable.
Add these variables:

CI_REGISTRY: https://registry.gitlab.com
DOCKER_AUTH_CONFIG:

{
  "auths": {
    "registry.gitlab.com": {
        "auth": "<auth-key>"
    }
  }
}

auth-key is the base64 encoding of "gitlab-username:gitlab-password"
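You can generate that value from a shell; it is plain base64, not a hash:

$ echo -n 'gitlab-username:gitlab-password' | base64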

SSH_KNOWN_HOSTS:
Like 192.168.1.1 ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQCGUCqCK3hNl+4TIbh3+Af3np+v91AyW4+BxXRtHBC2Y/uPJXF2jdR6IHlSS/0RFR3hOY+8+5a/r8O1O9qTPgxG8BSIm9omb8YxF2c4Sz/USPDK3ld2oQxbBg5qdhRN28EvRbtN66W3vgYIRlYlpNyJA+b3HQ/uJ+t3UxP1VjAsKbrBRFBth845RskSr1V7IirMiOh7oKGdEfXwlOENxOI7cDytxVR7h3/bVdJdxmjFqagrJqBuYm30
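One way to obtain this value is to scan the server from any machine that can reach it (the IP is a placeholder):

$ ssh-keyscan -t rsa <your-server-ip>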
You can see how to generate an SSH key in this post: generate sshkey
SSH_PRIVATE_KEY:
SSH_REMOTE_HOST: root@

These are your variables in GitLab.
So let's get back to the GitLab CI file.
In the root directory of the project create a new file called .gitlab-ci.yml, then
set up the build stage,
set up the test stage,
and finally set up the deploy stage, like this:

stages:
 - Build
 - Test
 - Deploy

variables:
    IMAGE_TAG: $CI_REGISTRY_IMAGE:$CI_COMMIT_REF_SLUG-$CI_COMMIT_SHORT_SHA

build:
  stage: Build
  image: docker:20.10.16
  services:
    - docker:dind
  script:
    - docker login -u $CI_REGISTRY_USER -p $CI_REGISTRY_PASSWORD $CI_REGISTRY
    - docker build --pull -f Dockerfile -t $IMAGE_TAG .
    - docker push $IMAGE_TAG

preparation:
 stage: Test
 image: $IMAGE_TAG
 needs:
   - build
 script:
   - composer install
 artifacts:
   expire_in: 1 day
   paths:
     - ./vendor
 cache:
   key: ${CI_COMMIT_REF_SLUG}-composer
   paths:
     - ./vendor

unit-test:
 stage: Test
 image: $IMAGE_TAG
 services:
   - name: mysql:8
     alias: mysql-test
 needs:
   - preparation
 variables:
   APP_KEY: ${APP_KEY}
   MYSQL_ROOT_PASSWORD: ${MYSQL_ROOT_PASSWORD}
   MYSQL_DATABASE: ${MYSQL_DATABASE}
   DB_HOST: ${DB_HOST}
   DB_USERNAME: ${DB_USERNAME}
   DB_PASSWORD: ${DB_PASSWORD}
 script:
   - php vendor/bin/phpunit

staging-deploy:
 stage: Deploy
 extends:
   - .deploy-script
 variables:
   APP: "stackdemo_app"
   STACK: "travellist-staging"
 only:
   - develop
 needs:
   - unit-test
 environment:
   name: stage

.remote-docker:
 variables:
   DOCKER_HOST: ssh://${SSH_REMOTE_HOST}
 image: docker:20.10.16
 before_script:
   - eval $(ssh-agent -s)
   - echo $IMAGE_TAG
   - echo "$SSH_PRIVATE_KEY" | tr -d '\r' | ssh-add -
   - mkdir -p ~/.ssh
   - chmod 700 ~/.ssh
   - echo "HOST *" > ~/.ssh/config
   - echo "StrictHostKeyChecking no" >> ~/.ssh/config
   - echo -n $CI_REGISTRY_PASSWORD | docker login -u $CI_REGISTRY_USER --password-stdin $CI_REGISTRY

.deploy-script:
 extends:
   - .remote-docker
 script:
   - cp $develop_config /root/project/core
   - docker pull $IMAGE_TAG
   - docker service update --image "$IMAGE_TAG" "$APP" --force
 dependencies: []
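The deploy jobs talk to the server's Docker engine over SSH by setting DOCKER_HOST=ssh://... . You can test that mechanism by hand from any machine with SSH access to the server (the host is a placeholder):

$ DOCKER_HOST=ssh://root@<your-server-ip> docker service ls

If this lists the staging services, the same connection will work from the GitLab runner.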

Change something in your project, push it to GitLab, and wait for the pipeline.
You should see all stages pass like this:

(screenshot of the passing pipeline)

And this is beautiful.
