Skip to content

Commit

Permalink
BFD-2586: Add server terraservice to standard deployments (#1738)
Browse files Browse the repository at this point in the history
  • Loading branch information
cbrunefearless authored May 18, 2023
1 parent 61c73cc commit 2ef37d7
Show file tree
Hide file tree
Showing 5 changed files with 46 additions and 33 deletions.
24 changes: 21 additions & 3 deletions Jenkinsfile
Original file line number Diff line number Diff line change
Expand Up @@ -286,7 +286,13 @@ try {

container('bfd-cbc-build') {
awsAuth.assumeRole()
scriptForDeploys.deploy('test', gitBranchName, gitCommitId, amiIds)
terraform.deployTerraservice(
env: bfdEnv,
directory: "ops/terraform/services/server",
tfVars: [
ami_id_override: amiIds.bfdServerAmiId
]
)

awsAuth.assumeRole()
terraform.deployTerraservice(
Expand Down Expand Up @@ -443,7 +449,13 @@ try {
milestone(label: 'stage_deploy_prod_sbx_start')
container('bfd-cbc-build') {
awsAuth.assumeRole()
scriptForDeploys.deploy('prod-sbx', gitBranchName, gitCommitId, amiIds)
terraform.deployTerraservice(
env: bfdEnv,
directory: "ops/terraform/services/server",
tfVars: [
ami_id_override: amiIds.bfdServerAmiId
]
)

awsAuth.assumeRole()
terraform.deployTerraservice(
Expand Down Expand Up @@ -581,7 +593,13 @@ try {

container('bfd-cbc-build') {
awsAuth.assumeRole()
scriptForDeploys.deploy('prod', gitBranchName, gitCommitId, amiIds)
terraform.deployTerraservice(
env: bfdEnv,
directory: "ops/terraform/services/server",
tfVars: [
ami_id_override: amiIds.bfdServerAmiId
]
)

awsAuth.assumeRole()
terraform.deployTerraservice(
Expand Down
34 changes: 10 additions & 24 deletions ops/jenkins/bfd-deploy-apps/Jenkinsfile
Original file line number Diff line number Diff line change
Expand Up @@ -114,7 +114,7 @@ spec:
)

string(
name: 'server_ami',
name: 'server_ami_override',
description: 'The AMI ID to deploy server from',
defaultValue: null
)
Expand Down Expand Up @@ -291,31 +291,17 @@ spec:
script {
currentStage = env.STAGE_NAME
try {
def serverAmi = params.server_ami?.trim()
def terraformVars = (params.server_ami_override?.trim())
? [ami_id_override: params.server_ami_override?.trim()]
: []
lock(resource: lockResource) {
// Deploy the Server
awsAuth.assumeRole()
dir("${workspace}/ops/terraform/env/${trimmedEnv}/stateless") {
// Debug output terraform version
sh "terraform --version"

// Initialize terraform
sh "terraform init -no-color"

// Gathering terraform plan
echo "Timestamp: ${java.time.LocalDateTime.now().toString()}"
sh "terraform plan \
-var='fhir_ami=${serverAmi}' \
-var='ssh_key_name=bfd-${trimmedEnv}' \
-var='git_branch_name=${gitBranchName}' \
-var='git_commit_id=${gitCommitId}' \
-no-color -out=tfplan"

// Apply Terraform plan
echo "Timestamp: ${java.time.LocalDateTime.now().toString()}"
sh "terraform apply \
-no-color -input=false tfplan"
echo "Timestamp: ${java.time.LocalDateTime.now().toString()}"
}
terraform.deployTerraservice(
env: trimmedEnv,
directory: 'ops/terraform/services/server',
tfVars: terraformVars
)

awsAuth.assumeRole()
terraform.deployTerraservice(
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -21,6 +21,7 @@ data "aws_rds_cluster" "rds" {
# Invokes the helper script to gather RDS cluster details (writer node/AZ and
# the optional custom reader endpoint) that the AWS provider does not expose.
data "external" "rds" {
  program = [
    "${path.module}/scripts/rds-cluster-config.sh", # helper script
    data.aws_rds_cluster.rds.cluster_identifier,    # verified, positional argument to script
    local.env                                       # environment name, almost exclusively here to provide beta reader functionality for production
  ]
}
6 changes: 3 additions & 3 deletions ops/terraform/services/server/modules/bfd_server_asg/main.tf
Original file line number Diff line number Diff line change
@@ -1,8 +1,8 @@
locals {
  # Environment name is derived from the active Terraform workspace.
  env = terraform.workspace

  # Availability zone of the RDS writer node, as reported by the helper script.
  rds_availability_zone = data.external.rds.result["WriterAZ"]
  # Cluster writer endpoint. NOTE(review): the only visible consumer switched to
  # rds_reader_endpoint in this change — confirm this local is still referenced.
  rds_writer_endpoint = data.external.rds.result["Endpoint"]
  # When the CustomEndpoint is empty, fall back to the ReaderEndpoint
  rds_reader_endpoint = data.external.rds.result["CustomEndpoint"] == "" ? data.external.rds.result["ReaderEndpoint"] : data.external.rds.result["CustomEndpoint"]

  # Tags applied in addition to the defaults on created resources.
  additional_tags = { Layer = var.layer, role = var.role }
}
Expand Down Expand Up @@ -101,7 +101,7 @@ resource "aws_launch_template" "main" {
env = local.env
port = var.lb_config.port
accountId = var.launch_config.account_id
data_server_db_url = "jdbc:postgresql://${local.rds_writer_endpoint}:5432/fhirdb${var.jdbc_suffix}"
data_server_db_url = "jdbc:postgresql://${local.rds_reader_endpoint}:5432/fhirdb${var.jdbc_suffix}"
}))

tag_specifications {
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -2,7 +2,7 @@
#######################################
# Results in two calls to the AWS RDS APIs for describe-db-clusters and describe-db-instances.
# Returns a well-formatted json object including the following keys:
# "DBClusterIdentifier", "Endpoint", "ReaderEndpoint", "WriterAZ", and "WriterNode".
# "DBClusterIdentifier", "Endpoint", "ReaderEndpoint", "WriterAZ", "WriterNode", and "CustomEndpoint".
#
# This exists to accommodate the desire for placing write-intensive workloads in the same AZ as the
# writer node. As of May 2022, the data source for DB instances does not expose the `IsClusterWriter`
Expand All @@ -12,6 +12,7 @@
#
# Globals:
# CLUSTER_IDENTIFIER mapped to the "$1" positional argument
# BFD_ENV mapped to the "$2" positional argument
# CLUSTER a modified json object from the aws rds describe-db-clusters command
# WRITER_NODE a string representing the writer node's db-instance-identifier
# WRITER_AZ a string representing the writer node's availability zone
Expand All @@ -23,15 +24,22 @@
set -euo pipefail

# Positional arguments (see header docs): cluster identifier and BFD environment.
CLUSTER_IDENTIFIER="$1"
BFD_ENV="$2"

# Naming convention for the environment's custom read-only cluster endpoint.
ENDPOINT_IDENTIFIER="bfd-${BFD_ENV}-ro"

# Cluster-level details, trimmed to the fields we emit plus the member list
# (Members is only needed to locate the writer node and is deleted below).
CLUSTER="$(aws rds describe-db-clusters \
  --query 'DBClusters[].{DBClusterIdentifier:DBClusterIdentifier,Endpoint:Endpoint,ReaderEndpoint:ReaderEndpoint,Members:DBClusterMembers}[0]' \
  --db-cluster-identifier "$CLUSTER_IDENTIFIER")"

# Custom endpoint address; empty string when no endpoint matches the convention.
CUSTOM_ENDPOINT="$(aws rds describe-db-cluster-endpoints --db-cluster-identifier "$CLUSTER_IDENTIFIER" | jq -r --arg endpoint "$ENDPOINT_IDENTIFIER" '.[][] | select(.DBClusterEndpointIdentifier == $endpoint).Endpoint')"

# Writer node's db-instance-identifier, found via the IsClusterWriter flag.
WRITER_NODE="$(jq -r '.Members[] | select(.IsClusterWriter == true) | .DBInstanceIdentifier' <<<"$CLUSTER")"

# Availability zone of the writer node.
WRITER_AZ="$(aws rds describe-db-instances --db-instance-identifier "$WRITER_NODE" --query 'DBInstances[0].AvailabilityZone' --output text)"

WRITER_CONFIG="$(jq --null-input --arg writer_node "$WRITER_NODE" --arg writer_az "$WRITER_AZ" '{ WriterNode: $writer_node, WriterAZ: $writer_az }')"

# Construct the final object keeping just "DBClusterIdentifier", "Endpoint", "ReaderEndpoint", "WriterAZ" from the $CLUSTER
# Add the objects representing "WriterNode" and "CustomEndpoint"
# NOTE: exactly one JSON object must be printed — Terraform's external data
# source treats anything else as a parse error.
jq --argjson obj1 "$WRITER_CONFIG" --arg str1 "$CUSTOM_ENDPOINT" '. += $obj1 | . += { "CustomEndpoint": $str1 } | del(.Members)' <<<"$CLUSTER"

0 comments on commit 2ef37d7

Please sign in to comment.