In the test environment, the pipeline deploys the build to an AWS S3 bucket; in the production environment, it deploys a Docker image to our own server.
The GitLab Container Registry is used to build and store the Docker image, which is then pulled onto the server.
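The docker build step in the pipeline expects a Dockerfile in the repository root, which is not shown here. A minimal sketch, assuming the Angular Universal server listens on its default port 4000 (which is what the -p 80:4000 mapping in the deploy job implies) and that the image only needs the SSR build output, could look like this:

Dockerfile
# Hypothetical Dockerfile -- the real one is not part of the pipeline below
FROM node:14-alpine
WORKDIR /app
# copy the build output produced by "npm run build:ssr" (handed over as a build artifact)
COPY dist/ ./dist/
# the Angular Universal express server listens on 4000 by default
EXPOSE 4000
# replace PROJECT_NAME with your actual project name
CMD ["node", "dist/PROJECT_NAME/server/main.js"]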
.gitlab-ci.yml
image: node:14
variables:
  GIT_DEPTH: '0'
  DOCKER_HOST: tcp://docker:2375/
before_script:
  - apt-get update
  - apt-get install -y zip
stages:
  - install_dependencies
  - build
  - docker-image-creation
  - deploy
install_dependencies:
  stage: install_dependencies
  cache:
    key: ${CI_COMMIT_REF_SLUG}
    paths:
      - node_modules/
      - dist/
  script:
    - npm ci
  only:
    changes:
      - package-lock.json
build:
  stage: build
  script:
    - npm run build:ssr
    - cd dist/${PROJECT_NAME}
    - ls -al -F
    - echo "BUILD SUCCESSFUL"
  dependencies:
    - install_dependencies
  cache:
    key: ${CI_COMMIT_REF_SLUG}
    paths:
      - node_modules/
    policy: pull
  artifacts:
    when: on_success
    paths:
      - dist/${PROJECT_NAME}
  only:
    refs:
      - develop
      - main
    changes:
      - src/*
      - angular.json
test-deploy_to_s3_bucket:
  image: python:latest
  stage: deploy
  cache:
    policy: pull
  dependencies:
    - build
    - install_dependencies
  before_script:
    - pip install awscli
  script:
    - ls -lh
    - find dist/${PROJECT_NAME}/browser \( -name '*.*' \) -exec gzip --verbose --best --force {} \; -exec mv "{}.gz" "{}" \; # gzip every file in the directory recursively
    - aws s3 rm s3://${TEST_S3_BUCKET_NAME} --recursive
    - aws s3 cp ./dist/${PROJECT_NAME}/browser s3://${TEST_S3_BUCKET_NAME}/ --recursive --acl public-read --content-encoding gzip
    - echo "Deployed Successfully"
  only:
    - develop
  environment:
    name: test
deploy_production_to_server:
  stage: deploy
  cache:
    policy: pull
  before_script:
    - 'which ssh-agent || ( apt-get update -y && apt-get install openssh-client -y )'
    - eval $(ssh-agent -s)
    - mkdir -p ~/.ssh
    - chmod 700 ~/.ssh
    - chmod 400 $PRIVATE_KEY # PRIVATE_KEY is a file-type CI/CD variable
    - echo -e "Host *\n\tStrictHostKeyChecking no\n\n" > ~/.ssh/config
    - apt-get update -y
    - apt-get -y install rsync
  script:
    - ssh -i $PRIVATE_KEY ubuntu@$SERVER_IP_ADDRESS "echo Connection OK" # verify the SSH connection before syncing
    - rsync -zvhr -auv -e "ssh -i $PRIVATE_KEY" dist/${PROJECT_NAME}/browser ubuntu@$SERVER_IP_ADDRESS:/var/www/html/angular/
  only: ['main']
  environment:
    name: production
docker_image-creation:
  image: docker:git # image with Docker installed, so docker commands can be executed
  stage: docker-image-creation # new stage dedicated to building the image
  cache:
    policy: pull
  services:
    - docker:dind # Docker-in-Docker service, needed to run docker commands inside a Docker container
  before_script:
    - docker ps # overrides the global before_script
  script:
    - docker login -u $CI_REGISTRY_USER -p $DOCKER_CI_TOKEN registry.gitlab.com # log in to the GitLab container registry; make sure these variables are defined
    - docker build -t registry.gitlab.com/****/*** . # build the Docker image
    - docker push registry.gitlab.com/*****/*** # push the built image to the registry
  dependencies:
    - build
deploy_docker_image_to_server:
  image: ubuntu
  stage: deploy # runs in the final deploy stage, after the image has been pushed
  cache:
    policy: pull
  before_script: # check whether ssh-agent is installed and, if not, install it
    - "which ssh-agent || ( apt-get update -y && apt-get install openssh-client git -y )"
    - eval $(ssh-agent -s)
    # Inject the remote's private key
    - echo "$PRIVATE_KEY" | tr -d '\r' | ssh-add - > /dev/null # add the SSH private key from the CI/CD variables; its public counterpart is registered on the server
    - mkdir -p ~/.ssh
    - chmod 700 ~/.ssh
    # Append the keyscan output to known hosts
    - ssh-keyscan $SERVER_IP_ADDRESS >> ~/.ssh/known_hosts
    - chmod 644 ~/.ssh/known_hosts
  script:
    - ssh $SERVER_USERNAME@$SERVER_IP_ADDRESS ls
    - ssh $SERVER_USERNAME@$SERVER_IP_ADDRESS "docker login -u ${CI_REGISTRY_USER} -p ${DOCKER_CI_TOKEN} registry.gitlab.com;
      docker stop app_name;
      docker rm app_name;
      docker rmi \$(docker images -aq);
      docker pull registry.gitlab.com/${PROJECT_NAME};
      docker run --name app_name -d -p 80:4000 registry.gitlab.com/${PROJECT_NAME}"
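Apart from GitLab's predefined CI_REGISTRY_USER, the custom variables used above (PROJECT_NAME, TEST_S3_BUCKET_NAME, PRIVATE_KEY, SERVER_IP_ADDRESS, SERVER_USERNAME and DOCKER_CI_TOKEN, plus the AWS credentials the aws CLI reads, typically AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY and AWS_DEFAULT_REGION) are expected to be defined as CI/CD variables in the GitLab project settings (Settings > CI/CD > Variables).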