From e863c90d172aa2a57add0a214d52d31aa9b18d75 Mon Sep 17 00:00:00 2001
From: Mahdi Khanzadi
Date: Sun, 5 Jan 2025 11:18:24 +0100
Subject: [PATCH] first

---
 .github/actions/docker-build/action.yaml |  70 ++++
 .github/actions/terraform/action.yaml    |  68 ++++
 .github/workflows/infrastructure.yaml    | 133 +++++++
 .gitignore                               |  13 +
 Makefile                                 |  90 +++++
 compose.docker.yaml                      |  21 ++
 compose.docker_dashboard.yaml            |  23 ++
 compose.mongodb.yaml                     |  23 ++
 compose.mongodb_dashboard.yaml           |  25 ++
 compose.nats.yaml                        |  20 +
 compose.proxy.yaml                       |  33 ++
 proxy/Dockerfile                         |   5 +
 proxy/nginx.conf                         |  56 +++
 resources.tf                             | 456 +++++++++++++++++++++++
 14 files changed, 1036 insertions(+)
 create mode 100644 .github/actions/docker-build/action.yaml
 create mode 100644 .github/actions/terraform/action.yaml
 create mode 100644 .github/workflows/infrastructure.yaml
 create mode 100644 .gitignore
 create mode 100644 Makefile
 create mode 100644 compose.docker.yaml
 create mode 100644 compose.docker_dashboard.yaml
 create mode 100644 compose.mongodb.yaml
 create mode 100644 compose.mongodb_dashboard.yaml
 create mode 100644 compose.nats.yaml
 create mode 100644 compose.proxy.yaml
 create mode 100644 proxy/Dockerfile
 create mode 100644 proxy/nginx.conf
 create mode 100644 resources.tf

diff --git a/.github/actions/docker-build/action.yaml b/.github/actions/docker-build/action.yaml
new file mode 100644
index 0000000..9cd5754
--- /dev/null
+++ b/.github/actions/docker-build/action.yaml
@@ -0,0 +1,70 @@
+name: 'Docker Build Action'
+description: 'Builds a Docker image using a specified Dockerfile and context'
+
+inputs:
+  context:
+    description: 'The Docker build context (path to the directory containing the Dockerfile)'
+    required: true
+    default: '.'
+  dockerfile:
+    description: 'The path to the Dockerfile (relative to the context)'
+    required: true
+    default: 'Dockerfile'
+  image-name:
+    description: 'The name of the image to build'
+    required: true
+  push:
+    description: 'Determines if the built image should be pushed'
+    required: true
+    default: 'false' # consumers compare against the string 'true'; 'false' is the consistent negative
+  container-registry:
+    description: 'container registry address (example: ghcr.io)'
+    required: false
+  container-registry-username:
+    description: 'container registry username'
+    required: false
+  container-registry-password:
+    description: 'container registry password'
+    required: false
+
+runs:
+  using: 'composite'
+
+  steps:
+    - name: Checkout
+      uses: actions/checkout@v4
+
+    - name: Login to GitHub Container Registry
+      uses: docker/login-action@v3
+      if: ${{ inputs.push == 'true' }}
+      with:
+        logout: false
+        registry: ${{ inputs.container-registry }}
+        username: ${{ inputs.container-registry-username }}
+        password: ${{ inputs.container-registry-password }}
+
+    - name: Set up QEMU
+      uses: docker/setup-qemu-action@v3
+
+    - name: Set up Docker Buildx
+      uses: docker/setup-buildx-action@v3
+
+    - name: Extract metadata (tags, labels) for Docker image
+      id: meta
+      uses: docker/metadata-action@v5
+      with:
+        images: |
+          ${{ inputs.container-registry }}/${{ github.repository_owner }}/${{ inputs.image-name }}
+        tags: |
+          type=raw,value=latest,priority=200,enable={{is_default_branch}}
+          type=sha,enable=true,priority=100,prefix=,suffix=,format=short
+
+    - name: Build image and push (optional)
+      uses: docker/build-push-action@v6
+      with:
+        platforms: linux/amd64,linux/arm64
+        push: ${{ inputs.push == 'true' }}
+        context: ${{ inputs.context }}
+        file: ${{ inputs.dockerfile }}
+        tags: ${{ steps.meta.outputs.tags }}
+        labels: ${{ steps.meta.outputs.labels }}
diff --git a/.github/actions/terraform/action.yaml b/.github/actions/terraform/action.yaml
new file mode 100644
index 0000000..80003ab
--- /dev/null
+++ b/.github/actions/terraform/action.yaml
@@ -0,0 +1,68 @@
+name: 
'Terraform Action'
+description: 'Configures AWS credentials and runs terraform fmt/init/validate/plan/apply'
+
+inputs:
+  aws-access-key-id:
+    description: 'AWS access key id'
+    required: true
+  aws-secret-access-key:
+    description: 'AWS secret access key'
+    required: true
+  aws-region:
+    description: 'AWS region'
+    required: true
+  validate:
+    description: 'Determines if a round of validation should be done'
+    required: true
+    default: 'false' # steps compare against the string 'true'
+  plan:
+    description: 'Determines if terraform plan should be called'
+    required: true
+    default: 'false'
+  apply:
+    description: 'Determines if terraform apply should be called'
+    required: true
+    default: 'false'
+
+runs:
+  using: 'composite'
+  steps:
+    - name: Checkout
+      uses: actions/checkout@v4
+
+    - name: Set up AWS credentials
+      uses: aws-actions/configure-aws-credentials@v4
+      with:
+        aws-access-key-id: ${{ inputs.aws-access-key-id }}
+        aws-secret-access-key: ${{ inputs.aws-secret-access-key }}
+        aws-region: ${{ inputs.aws-region }}
+
+    - name: Setup Terraform
+      uses: hashicorp/setup-terraform@v3
+
+    - name: Terraform Format
+      if: ${{ inputs.validate == 'true' }}
+      run: terraform fmt -check
+      shell: bash
+      continue-on-error: false
+
+    - name: Terraform Init
+      run: terraform init
+      shell: bash
+
+    - name: Terraform Validate
+      if: ${{ inputs.validate == 'true' }}
+      run: terraform validate -no-color
+      shell: bash
+      continue-on-error: false
+
+    - name: Terraform Plan
+      if: ${{ inputs.plan == 'true' }}
+      run: terraform plan -no-color -input=false
+      shell: bash
+      continue-on-error: false
+
+    - name: Terraform Apply
+      if: ${{ inputs.apply == 'true' }}
+      shell: bash
+      run: terraform apply -auto-approve -input=false
diff --git a/.github/workflows/infrastructure.yaml b/.github/workflows/infrastructure.yaml
new file mode 100644
index 0000000..ddd39b3
--- /dev/null
+++ b/.github/workflows/infrastructure.yaml
@@ -0,0 +1,133 @@
+name: CI and CD
+
+on:
+  push:
+  pull_request:
+
+defaults:
+  run:
+    working-directory: ./infrastructure
+
+env:
+  
TF_VAR_project_name: tarhche
+  TF_VAR_instance_name: backend
+
+  PROXY_IMAGE_NAME: proxy
+
+  EC2_SSH_ADDRESS: ${{ secrets.EC2_SSH_ADDRESS }}
+  EC2_SSH_ENDPOINT: ${{ secrets.EC2_SSH_USER }}@${{ secrets.EC2_SSH_ADDRESS }}
+
+jobs:
+  # ci:
+  #   runs-on: ubuntu-latest
+
+  #   steps:
+  #     - name: Checkout
+  #       uses: actions/checkout@v4
+
+  #     - name: Terraform validate and apply
+  #       uses: ./.github/actions/terraform
+  #       with:
+  #         aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
+  #         aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
+  #         aws-region: ${{ secrets.AWS_REGION }}
+  #         validate: true
+  #         apply: true
+
+  #     - name: Build image
+  #       uses: ./.github/actions/docker-build
+  #       with:
+  #         context: ./proxy
+  #         dockerfile: ./proxy/Dockerfile
+  #         image-name: ${{ env.PROXY_IMAGE_NAME }}
+  #         push: false
+  #         container-registry: ghcr.io
+
+  cd:
+    runs-on: ubuntu-latest
+
+    if: ${{ format('refs/heads/{0}', github.event.repository.default_branch) == github.ref }}
+
+    # needs:
+    #   - ci
+
+    permissions:
+      contents: read
+      packages: write
+
+    steps:
+      - name: Checkout
+        uses: actions/checkout@v4
+
+      - name: Terraform validate and apply
+        uses: ./.github/actions/terraform
+        with:
+          aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
+          aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
+          aws-region: ${{ secrets.AWS_REGION }}
+          validate: true
+          apply: true
+
+      - name: Build and push proxy image
+        uses: ./.github/actions/docker-build
+        with:
+          context: ./proxy
+          dockerfile: ./proxy/Dockerfile
+          image-name: ${{ env.PROXY_IMAGE_NAME }}
+          push: true
+          container-registry: ghcr.io
+          container-registry-username: ${{ github.actor }}
+          container-registry-password: ${{ secrets.GITHUB_TOKEN }}
+
+      - name: Deploy services
+        run: |
+          # Setup ssh key
+          echo '${{ secrets.EC2_SSH_PRIVATE_KEY }}' > ~/ec2-key.pem
+          chmod 400 ~/ec2-key.pem
+
+          mkdir -p ~/.ssh
+          ssh-keyscan -H $EC2_SSH_ADDRESS >> ~/.ssh/known_hosts
+
+          # Ensure remote directory exists
+          ssh -q -i ~/ec2-key.pem -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null $EC2_SSH_ENDPOINT > /dev/null 2>&1 << 'EOF'
+          export VOLUME_PATH='${{ secrets.VOLUME_PATH }}'
+
+          sudo mkdir -p /opt/deployment
+          sudo chown ${{ secrets.EC2_SSH_USER }}:${{ secrets.EC2_SSH_USER }} /opt/deployment
+
+          # create volumes directories
+          sudo mkdir -p $VOLUME_PATH/mongodb/db
+          sudo mkdir -p $VOLUME_PATH/mongodb/configdb
+          sudo mkdir -p $VOLUME_PATH/nats
+          EOF
+
+          # Copy files
+          scp -q -i ~/ec2-key.pem -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -r ./* $EC2_SSH_ENDPOINT:/opt/deployment/ > /dev/null 2>&1
+
+          # Connect and deploy services
+          ssh -q -i ~/ec2-key.pem -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null $EC2_SSH_ENDPOINT > /dev/null 2>&1 << 'EOF'
+          export VOLUME_PATH='${{ secrets.VOLUME_PATH }}'
+
+          export MONGO_USERNAME='${{ secrets.MONGO_USERNAME }}'
+          export MONGO_PASSWORD='${{ secrets.MONGO_PASSWORD }}'
+
+          export DASHBOARD_MONGO_USERNAME='${{ secrets.DASHBOARD_MONGO_USERNAME }}'
+          export DASHBOARD_MONGO_PASSWORD='${{ secrets.DASHBOARD_MONGO_PASSWORD }}'
+          export DASHBOARD_MONGO_MONGODB_URL='mongodb://${{ secrets.MONGO_USERNAME }}:${{ secrets.MONGO_PASSWORD }}@mongodb:27017'
+
+          export PROXY_IMAGE='${{ secrets.PROXY_IMAGE }}'
+
+          export PORTAINER_ADMIN_PASSWORD='${{ secrets.PORTAINER_ADMIN_PASSWORD }}'
+
+          # Run Docker Compose
+          cd /opt/deployment/
+
+          docker compose -f compose.mongodb.yaml --project-name mongodb up --pull always --detach
+          docker compose -f compose.mongodb_dashboard.yaml --project-name mongodb_dashboard up --pull always --detach
+          docker compose -f compose.nats.yaml --project-name nats up --pull always --detach
+          docker compose -f compose.docker.yaml --project-name docker up --pull always --detach
+          docker compose -f compose.docker_dashboard.yaml --project-name docker_dashboard up --pull always --detach
+          docker compose -f compose.app.yaml --project-name app up --pull always --detach
+          docker compose -f compose.frontend.yaml --project-name frontend up --pull always --detach
+          docker compose -f compose.proxy.yaml --project-name proxy up --pull always --detach
+          EOF
diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..1504445
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,13 @@
+.DS_Store
+.vscode
+.idea
+/tmp
+
+# SSH keys
+/*.pem
+/*.pub
+
+# Terraform files
+*.tfstate
+*.tfstate.backup
+.terraform/
diff --git a/Makefile b/Makefile
new file mode 100644
index 0000000..373cff8
--- /dev/null
+++ b/Makefile
@@ -0,0 +1,90 @@
+export TF_VAR_project_name = tarhche
+export TF_VAR_instance_name = backend
+
+export EC2_SSH_ADDRESS = ec2-3-124-72-48.eu-central-1.compute.amazonaws.com
+export EC2_SSH_USER = ubuntu
+export EC2_SSH_ENDPOINT = ${EC2_SSH_USER}@${EC2_SSH_ADDRESS}
+export VOLUME_PATH = ./tmp/volume_01
+
+export MONGO_USERNAME = test
+export MONGO_PASSWORD = test
+
+export DASHBOARD_MONGO_USERNAME = username
+export DASHBOARD_MONGO_PASSWORD = password
+export DASHBOARD_MONGO_MONGODB_URL = mongodb://${MONGO_USERNAME}:${MONGO_PASSWORD}@mongodb:27017
+
+export BACKEND_NATS_URL =
+export BACKEND_PRIVATE_KEY =
+
+export BACKEND_MONGO_HOST = mongodb
+export BACKEND_MONGO_PORT = 27017
+export BACKEND_MONGO_SCHEME = mongodb
+export BACKEND_MONGO_DATABASE_NAME = test
+export BACKEND_MONGO_USERNAME = ${MONGO_USERNAME}
+export BACKEND_MONGO_PASSWORD = ${MONGO_PASSWORD}
+
+export BACKEND_MAIL_SMTP_PASSWORD =
+export BACKEND_MAIL_SMTP_HOST =
+export BACKEND_MAIL_SMTP_FROM =
+export BACKEND_MAIL_SMTP_USERNAME =
+export BACKEND_MAIL_SMTP_PORT =
+
+export BACKEND_S3_ENDPOINT =
+export BACKEND_S3_SECRET_KEY =
+export BACKEND_S3_ACCESS_KEY =
+export BACKEND_S3_USE_SSL = false
+export BACKEND_S3_BUCKET_NAME =
+
+export PROXY_IMAGE = ghcr.io/tarhche/proxy:latest
+
+export APP_IMAGE = ghcr.io/tarhche/backend:latest
+
+# username: admin
+# password: admin-password (in bcrypt, a dollar-sign should be escaped by an arbitrary dollar-sign ($ --> $$))
+export PORTAINER_ADMIN_PASSWORD = 
$$2a$$12$$4xcOa82Ni5rjgQF.v.JWi.i71OyUm3fwmfWiumgJHIAPGU.uOw3qu
+
+export FRONTEND_IMAGE = ghcr.io/tarhche/frontend:latest
+export NEXT_PUBLIC_EXTERNAL_BACKEND_BASE_URL =
+export INTERNAL_BACKEND_BASE_URL = http://app
+export NEXT_PUBLIC_FILES_BASE_URL =
+
+validate:
+	terraform validate
+
+fmt:
+	terraform fmt
+
+init:
+	terraform init
+
+state:
+	terraform state list
+
+plan:
+	terraform plan
+
+apply:
+	terraform apply
+	rm -f terraform.tfstate *.tfstate.*
+
+public_key:
+	ssh-keygen -y -f ssh-private-key.pem > ssh-public-key.pub
+
+ssh:
+	ssh -i "ssh-private-key.pem" ${EC2_SSH_ENDPOINT}
+
+up:
+	docker compose -f compose.mongodb.yaml --project-name mongodb up --pull always --detach
+	docker compose -f compose.mongodb_dashboard.yaml --project-name mongodb_dashboard up --pull always --detach
+	docker compose -f compose.nats.yaml --project-name nats up --pull always --detach
+	docker compose -f compose.docker.yaml --project-name docker up --pull always --detach
+	docker compose -f compose.docker_dashboard.yaml --project-name docker_dashboard up --pull always --detach
+	docker compose -f compose.proxy.yaml --project-name proxy up --pull always --detach
+
+down:
+	docker compose -f compose.proxy.yaml --project-name proxy down --volumes --remove-orphans
+	docker compose -f compose.nats.yaml --project-name nats down --volumes --remove-orphans
+	docker compose -f compose.docker_dashboard.yaml --project-name docker_dashboard down --volumes --remove-orphans
+	docker compose -f compose.docker.yaml --project-name docker down --volumes --remove-orphans
+	docker compose -f compose.mongodb_dashboard.yaml --project-name mongodb_dashboard down --volumes --remove-orphans
+	docker compose -f compose.mongodb.yaml --project-name mongodb down --volumes --remove-orphans
diff --git a/compose.docker.yaml b/compose.docker.yaml
new file mode 100644
index 0000000..0b09267
--- /dev/null
+++ b/compose.docker.yaml
@@ -0,0 +1,21 @@
+services:
+  docker:
+    image: docker:27-dind
+    networks:
+      - docker
+    deploy:
+      mode: replicated
+      replicas: 1
+      endpoint_mode: vip
+      restart_policy:
+        condition: on-failure
+        delay: 5s
+        max_attempts: 3
+    runtime: sysbox-runc
+    # privileged: true # To test locally: enable this line and disable runtime
+    environment:
+      DOCKER_TLS_CERTDIR: "" # disable certs
+
+networks:
+  docker:
+    name: docker
diff --git a/compose.docker_dashboard.yaml b/compose.docker_dashboard.yaml
new file mode 100644
index 0000000..ec87593
--- /dev/null
+++ b/compose.docker_dashboard.yaml
@@ -0,0 +1,23 @@
+services:
+  docker_dashboard:
+    image: portainer/portainer-ce
+    networks:
+      - docker
+      - docker_dashboard
+    deploy:
+      mode: replicated
+      replicas: 1
+      endpoint_mode: vip
+      restart_policy:
+        condition: on-failure
+        delay: 5s
+        max_attempts: 3
+    command: >
+      --admin-password="${PORTAINER_ADMIN_PASSWORD}"
+
+networks:
+  docker_dashboard:
+    name: docker_dashboard
+  docker:
+    name: docker
+    external: true
diff --git a/compose.mongodb.yaml b/compose.mongodb.yaml
new file mode 100644
index 0000000..55389b2
--- /dev/null
+++ b/compose.mongodb.yaml
@@ -0,0 +1,23 @@
+services:
+  mongodb:
+    image: mongo:8.0
+    networks:
+      - mongodb
+    deploy:
+      mode: replicated
+      replicas: 1
+      endpoint_mode: vip
+      restart_policy:
+        condition: on-failure
+        delay: 5s
+        max_attempts: 3
+    environment:
+      MONGO_INITDB_ROOT_USERNAME: ${MONGO_USERNAME}
+      MONGO_INITDB_ROOT_PASSWORD: ${MONGO_PASSWORD}
+    volumes:
+      - ${VOLUME_PATH}/mongodb/db:/data/db
+      - ${VOLUME_PATH}/mongodb/configdb:/data/configdb
+
+networks:
+  mongodb:
+    name: mongodb
diff --git a/compose.mongodb_dashboard.yaml b/compose.mongodb_dashboard.yaml
new file mode 100644
index 0000000..3076e8d
--- /dev/null
+++ b/compose.mongodb_dashboard.yaml
@@ -0,0 +1,25 @@
+services:
+  mongodb_dashboard:
+    image: mongo-express
+    networks:
+      - mongodb
+      - mongodb_dashboard
+    deploy:
+      mode: replicated
+      replicas: 1
+      endpoint_mode: vip
+      restart_policy:
+        condition: on-failure
+        delay: 5s
+        max_attempts: 3
+    environment:
+      
ME_CONFIG_BASICAUTH_USERNAME: ${DASHBOARD_MONGO_USERNAME}
+      ME_CONFIG_BASICAUTH_PASSWORD: ${DASHBOARD_MONGO_PASSWORD}
+      ME_CONFIG_MONGODB_URL: ${DASHBOARD_MONGO_MONGODB_URL}
+
+networks:
+  mongodb_dashboard:
+    name: mongodb_dashboard
+  mongodb:
+    name: mongodb
+    external: true
diff --git a/compose.nats.yaml b/compose.nats.yaml
new file mode 100644
index 0000000..a9bb63e
--- /dev/null
+++ b/compose.nats.yaml
@@ -0,0 +1,20 @@
+services:
+  nats:
+    image: nats:2.10
+    networks:
+      - nats
+    deploy:
+      mode: replicated
+      replicas: 1
+      endpoint_mode: vip
+      restart_policy:
+        condition: on-failure
+        delay: 5s
+        max_attempts: 3
+    volumes:
+      - ${VOLUME_PATH}/nats:/data
+    command: ["--jetstream", "--http_port", "8222", "--port", "4222", "--store_dir", "/data"]
+
+networks:
+  nats:
+    name: nats
diff --git a/compose.proxy.yaml b/compose.proxy.yaml
new file mode 100644
index 0000000..1d31b61
--- /dev/null
+++ b/compose.proxy.yaml
@@ -0,0 +1,33 @@
+services:
+  proxy:
+    image: ${PROXY_IMAGE}
+    networks:
+      - proxy
+      - frontend
+      - app
+      - mongodb_dashboard
+      - docker_dashboard
+    deploy:
+      mode: replicated
+      replicas: 1
+      endpoint_mode: vip
+      restart_policy:
+        condition: on-failure
+        delay: 5s
+        max_attempts: 3
+    ports:
+      - "80:80" # quoted: Compose requires port mappings as strings to avoid YAML number parsing
+
+networks:
+  proxy:
+    name: proxy
+  frontend:
+    name: frontend
+  app:
+    name: app
+  mongodb_dashboard:
+    name: mongodb_dashboard
+    external: true
+  docker_dashboard:
+    name: docker_dashboard
+    external: true
diff --git a/proxy/Dockerfile b/proxy/Dockerfile
new file mode 100644
index 0000000..070ba3e
--- /dev/null
+++ b/proxy/Dockerfile
@@ -0,0 +1,5 @@
+FROM nginx:1.26-alpine
+
+COPY ./nginx.conf /etc/nginx/conf.d/default.conf
+
+EXPOSE 80
diff --git a/proxy/nginx.conf b/proxy/nginx.conf
new file mode 100644
index 0000000..749055f
--- /dev/null
+++ b/proxy/nginx.conf
@@ -0,0 +1,56 @@
+# Server block to catch-all unmatched subdomains
+server {
+    listen 80 default_server;
+
+    server_name "_";
+
+    location / {
+        proxy_pass http://frontend:3000;
+        proxy_set_header 
Host $host;
+        proxy_set_header X-Real-IP $remote_addr;
+        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
+        proxy_set_header X-Forwarded-Proto $scheme;
+    }
+}
+
+# Server block for backend subdomain
+server {
+    listen 80;
+    server_name "backend.*";
+
+    location / {
+        proxy_pass http://app:80;
+        proxy_set_header Host $host;
+        proxy_set_header X-Real-IP $remote_addr;
+        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
+        proxy_set_header X-Forwarded-Proto $scheme;
+    }
+}
+
+# Server block for dockerdashboard subdomain
+server {
+    listen 80;
+    server_name "dockerdashboard.*";
+
+    location / {
+        proxy_pass http://docker_dashboard:9000;
+        proxy_set_header Host $host;
+        proxy_set_header X-Real-IP $remote_addr;
+        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
+        proxy_set_header X-Forwarded-Proto $scheme;
+    }
+}
+
+# Server block for mongodashboard subdomain
+server {
+    listen 80;
+    server_name "mongodashboard.*";
+
+    location / {
+        proxy_pass http://mongodb_dashboard:8081;
+        proxy_set_header Host $host;
+        proxy_set_header X-Real-IP $remote_addr;
+        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
+        proxy_set_header X-Forwarded-Proto $scheme;
+    }
+}
diff --git a/resources.tf b/resources.tf
new file mode 100644
index 0000000..992c0b3
--- /dev/null
+++ b/resources.tf
@@ -0,0 +1,456 @@
+provider "aws" {
+  region = "eu-central-1"
+}
+
+variable "project_name" {
+  description = "Project tag given to each deployed Instance"
+  type        = string
+}
+
+variable "instance_name" {
+  description = "instance_name"
+  type        = string
+}
+
+import {
+  to = aws_security_group.backend
+  id = "sg-0c4446cdf14777251"
+}
+
+resource "aws_security_group" "backend" {
+  name        = var.instance_name
+  description = "Allow HTTP, HTTPS, and SSH inbound traffic"
+
+  tags = {
+    project_name = var.project_name
+  }
+
+  # Allow SSH (port 22) from any IP address
+  ingress {
+    from_port   = 22
+    to_port     = 22
+    protocol    = "tcp"
+    cidr_blocks = ["0.0.0.0/0"]
+  }
+
+  # Allow HTTP (port 80) from any IP address
+  ingress {
+    from_port   = 80
+    to_port     = 80
+    protocol    = "tcp"
+    cidr_blocks = ["0.0.0.0/0"] # Allow HTTP from anywhere
+  }
+
+  # Allow HTTPS (port 443) from any IP address
+  ingress {
+    from_port   = 443
+    to_port     = 443
+    protocol    = "tcp"
+    cidr_blocks = ["0.0.0.0/0"]
+  }
+
+  # Allow all outbound traffic
+  egress {
+    from_port   = 0
+    to_port     = 0
+    protocol    = "-1" # all protocols
+    cidr_blocks = ["0.0.0.0/0"]
+  }
+}
+
+import {
+  to = aws_ebs_volume.backend
+  id = "vol-0d2bab5e75ac580e9"
+}
+
+resource "aws_ebs_volume" "backend" {
+  availability_zone = aws_instance.backend.availability_zone
+  encrypted         = false
+  size              = 10
+
+  tags = {
+    project_name = var.project_name
+  }
+}
+
+import {
+  to = aws_volume_attachment.backend
+  id = "/dev/xvdf:vol-0d2bab5e75ac580e9:${aws_instance.backend.id}"
+}
+
+resource "aws_volume_attachment" "backend" {
+  device_name = "/dev/xvdf"
+  instance_id = aws_instance.backend.id
+  volume_id   = aws_ebs_volume.backend.id
+}
+
+import {
+  to = aws_instance.backend
+  id = "i-026c60a5a3cdec06e"
+}
+
+resource "aws_instance" "backend" {
+  ami               = "ami-0a628e1e89aaedf80" # Canonical, Ubuntu, 24.04, amd64 noble image
+  instance_type     = "t2.micro"
+  key_name          = "backend"
+  availability_zone = "eu-central-1b"
+
+  user_data = <<-EOT
+    #!/bin/bash
+
+    # volumes
+    sudo mkfs.ext4 /dev/xvdf
+    sudo mkdir /volume_01
+    sudo mount /dev/xvdf /volume_01
+    echo "/dev/xvdf /volume_01 ext4 defaults,nofail 0 0" | sudo tee -a /etc/fstab
+
+    # tools (refresh package lists first: fresh AMIs may have stale/empty apt indexes)
+    sudo apt update && sudo apt install -y wget python3 ca-certificates curl htop jq vim make
+
+    # Add Docker's official GPG key:
+    sudo install -m 0755 -d /etc/apt/keyrings
+    sudo curl -fsSL https://download.docker.com/linux/ubuntu/gpg -o /etc/apt/keyrings/docker.asc
+    sudo chmod a+r /etc/apt/keyrings/docker.asc
+
+    # Add the repository to Apt sources:
+    echo \
+      "deb [arch=$(dpkg --print-architecture) signed-by=/etc/apt/keyrings/docker.asc] https://download.docker.com/linux/ubuntu \
+      $(. /etc/os-release && echo "$VERSION_CODENAME") stable" | \
+      sudo tee /etc/apt/sources.list.d/docker.list > /dev/null
+    sudo apt-get update
+
+    # install docker and sysbox
+    sudo apt-get install -y docker-ce docker-ce-cli containerd.io docker-buildx-plugin docker-compose-plugin
+    wget https://downloads.nestybox.com/sysbox/releases/v0.6.5/sysbox-ce_0.6.5-0.linux_amd64.deb
+    sudo apt install -y ./sysbox-ce_0.6.5-0.linux_amd64.deb
+    rm ./sysbox-ce_0.6.5-0.linux_amd64.deb
+
+    # setup
+    sudo systemctl enable docker.service
+    sudo systemctl start docker.service
+    sudo usermod -a -G docker ubuntu
+    id ubuntu
+    newgrp docker
+    docker swarm init --advertise-addr 192.168.99.100 # NOTE(review): docker-machine default IP; confirm this is the instance's private IP
+  EOT
+
+  root_block_device {
+    delete_on_termination = true
+    encrypted             = false
+    volume_size           = 20
+    volume_type           = "gp3"
+
+    tags = {
+      project_name = var.project_name
+    }
+  }
+
+  security_groups = [
+    aws_security_group.backend.name
+  ]
+
+  tags = {
+    project_name = var.project_name
+  }
+}
+
+import {
+  to = aws_eip.backend
+  id = "eipalloc-02bceef376bc05f89"
+}
+
+resource "aws_eip" "backend" {
+  instance = aws_instance.backend.id
+  domain   = "vpc"
+
+  tags = {
+    project_name = var.project_name
+  }
+}
+
+import {
+  to = aws_lb.tarhche
+  id = "arn:aws:elasticloadbalancing:eu-central-1:381491955644:loadbalancer/app/tarhche/6953bf38e49158d7"
+}
+
+resource "aws_lb" "tarhche" {
+  name                       = "tarhche"
+  internal                   = false
+  load_balancer_type         = "application"
+  idle_timeout               = 60
+  ip_address_type            = "ipv4"
+  enable_deletion_protection = true
+
+  security_groups = [
+    aws_security_group.backend.id,
+  ]
+
+  subnets = [
+    "subnet-0d68a01f5a4861c65",
+    "subnet-0fca4d198b88d68d6",
+    "subnet-0c8f8df628e715018",
+  ]
+
+  tags = {
+    project_name = var.project_name
+  }
+}
+
+import {
+  to = aws_lb_target_group.http
+  id = "arn:aws:elasticloadbalancing:eu-central-1:381491955644:targetgroup/HTTP/374d0a16b08c8d4a"
+}
+
+resource "aws_lb_target_group" "http" {
+  name              = "HTTP"
+  port              = 80
+  protocol          = "HTTP"
+  vpc_id            = 
"vpc-04db3e4490d90be8e"
+  ip_address_type   = "ipv4"
+  proxy_protocol_v2 = false
+
+  lambda_multi_value_headers_enabled = false
+
+  health_check {
+    path                = "/"
+    interval            = 30
+    timeout             = 5
+    healthy_threshold   = 5
+    unhealthy_threshold = 2
+  }
+
+  tags = {
+    project_name = var.project_name
+  }
+}
+
+# resource "aws_lb_target_group_attachment" "backend_http" {
+#   target_group_arn = aws_lb_target_group.http.arn
+#   target_id        = aws_instance.backend.id
+#   port             = 80
+# }
+
+import {
+  to = aws_lb_listener.http
+  id = "arn:aws:elasticloadbalancing:eu-central-1:381491955644:listener/app/tarhche/6953bf38e49158d7/637c8770b5e4d6ed"
+}
+
+resource "aws_lb_listener" "http" {
+  load_balancer_arn = aws_lb.tarhche.arn
+  port              = 80
+  protocol          = "HTTP"
+
+  default_action {
+    order            = 1
+    type             = "redirect"
+    target_group_arn = aws_lb_target_group.http.arn
+
+    redirect {
+      host        = "#{host}"
+      path        = "/#{path}"
+      port        = "443"
+      protocol    = "HTTPS"
+      query       = "#{query}"
+      status_code = "HTTP_301"
+    }
+  }
+
+  tags = {
+    project_name = var.project_name
+  }
+}
+
+import {
+  to = aws_lb_listener.https
+  id = "arn:aws:elasticloadbalancing:eu-central-1:381491955644:listener/app/tarhche/6953bf38e49158d7/ab1c7847cbb6f739"
+}
+
+resource "aws_lb_listener" "https" {
+  load_balancer_arn = aws_lb.tarhche.arn
+  port              = 443
+  protocol          = "HTTPS"
+  ssl_policy        = "ELBSecurityPolicy-TLS13-1-2-2021-06"
+  certificate_arn   = aws_acm_certificate.tarhche_com.arn
+
+  default_action {
+    order            = 1
+    type             = "forward"
+    target_group_arn = aws_lb_target_group.http.arn
+
+    forward {
+      stickiness {
+        duration = 3600
+        enabled  = false
+      }
+
+      target_group {
+        arn    = aws_lb_target_group.http.arn
+        weight = 1
+      }
+    }
+  }
+
+  tags = {
+    project_name = var.project_name
+  }
+}
+
+import {
+  to = aws_route53domains_registered_domain.tarhche-com
+  id = "tarhche.com"
+}
+
+resource "aws_route53domains_registered_domain" "tarhche-com" {
+  domain_name = "tarhche.com"
+
+  name_server {
+    name = "ns-1611.awsdns-09.co.uk"
+  }
+
+  name_server {
+    name = "ns-1254.awsdns-28.org"
+  }
+
+  name_server {
+    name = "ns-143.awsdns-17.com"
+  }
+
+  name_server {
+    name = "ns-769.awsdns-32.net"
+  }
+
+  tags = {
+    project_name = var.project_name
+  }
+}
+
+import {
+  to = aws_route53_zone.tarhche_com
+  id = "Z0951095A7CDVGITDCUP"
+}
+
+resource "aws_route53_zone" "tarhche_com" {
+  name          = "tarhche.com"
+  force_destroy = false
+  comment       = ""
+
+  tags = {
+    project_name = var.project_name
+  }
+}
+
+import {
+  to = aws_route53_record.a_record_tarhche_com
+  id = "${aws_route53_zone.tarhche_com.id}_tarhche.com_A"
+}
+
+resource "aws_route53_record" "a_record_tarhche_com" {
+  zone_id = aws_route53_zone.tarhche_com.id
+  name    = "tarhche.com"
+  type    = "A"
+
+  alias {
+    name                   = aws_lb.tarhche.dns_name
+    zone_id                = aws_lb.tarhche.zone_id
+    evaluate_target_health = true
+  }
+}
+
+import {
+  to = aws_route53_record.a_record_all_tarhche_com
+  id = "${aws_route53_zone.tarhche_com.id}_*.tarhche.com_A"
+}
+
+resource "aws_route53_record" "a_record_all_tarhche_com" {
+  zone_id = aws_route53_zone.tarhche_com.id
+  name    = "*.tarhche.com"
+  type    = "A"
+
+  alias {
+    name                   = aws_lb.tarhche.dns_name
+    zone_id                = aws_lb.tarhche.zone_id
+    evaluate_target_health = true
+  }
+}
+
+import {
+  to = aws_route53_zone.tarhche_ir
+  id = "Z07817351L3HY3TPTD5IU"
+}
+
+resource "aws_route53_zone" "tarhche_ir" {
+  name          = "tarhche.ir"
+  force_destroy = false
+  comment       = ""
+
+  tags = {
+    project_name = var.project_name
+  }
+}
+
+import {
+  to = aws_route53_record.a_record_tarhche_ir
+  id = "${aws_route53_zone.tarhche_ir.id}_tarhche.ir_A"
+}
+
+resource "aws_route53_record" "a_record_tarhche_ir" {
+  zone_id = aws_route53_zone.tarhche_ir.id
+  name    = "tarhche.ir"
+  type    = "A"
+
+  alias {
+    evaluate_target_health = true
+    name                   = aws_lb.tarhche.dns_name
+    zone_id                = aws_lb.tarhche.zone_id
+  }
+}
+
+import {
+  to = aws_s3_bucket.tarhche-backend
+  id = "tarhche-backend"
+}
+
+resource "aws_s3_bucket" "tarhche-backend" {
+  bucket        = "tarhche-backend"
+  force_destroy = false
+
+  tags = {
+    project_name = var.project_name
+  }
+}
+
+import {
+  to = aws_acm_certificate.tarhche_com
+  id = "arn:aws:acm:eu-central-1:381491955644:certificate/a446a0ad-9cac-479f-a1d6-59b983d633d6"
+}
+
+resource "aws_acm_certificate" "tarhche_com" {
+  domain_name       = "tarhche.com"
+  validation_method = "DNS"
+
+  subject_alternative_names = [
+    "tarhche.com",
+    "*.tarhche.com",
+  ]
+
+  lifecycle {
+    create_before_destroy = true
+  }
+
+  tags = {
+    project_name = var.project_name
+  }
+}
+
+import {
+  to = aws_route53_record.tarhche_com_ssl_validation
+  id = "${aws_route53_zone.tarhche_com.id}__e7a6f01cbe22cb6d1db5c70fb80299a8.tarhche.com_CNAME"
+}
+
+resource "aws_route53_record" "tarhche_com_ssl_validation" {
+  zone_id = aws_route53_zone.tarhche_com.id
+  name    = "_e7a6f01cbe22cb6d1db5c70fb80299a8.tarhche.com"
+  type    = "CNAME"
+  records = ["_0fdeb4d57a8f62c9a90a8f77b0146a14.zfyfvmchrl.acm-validations.aws."]
+  ttl     = 60
+}