Final version for AWS Deployment only
marcuss committed Oct 21, 2024
1 parent d3828c0 commit 70bf28c
Showing 7 changed files with 54 additions and 30 deletions.
8 changes: 4 additions & 4 deletions .github/workflows/main.yml
@@ -54,7 +54,7 @@ jobs:
outputs:
image: ${{ steps.image-details.outputs.image }}

-deploy:
+aws_deploy:
needs: build
permissions:
id-token: write # allows creation of an OIDC token
@@ -102,13 +102,13 @@ jobs:
export AWS_REGION=${{ secrets.AWS_REGION }}
export ECR_REPOSITORY=${{ secrets.ECR_REPOSITORY }}
export IMAGE_TAG=latest
-envsubst < infrastructure/k8s/deployment.yaml | tee infrastructure/k8s/deployment-resolved.yaml
+envsubst < k8s/deployment.yaml | tee k8s/deployment-resolved.yaml
- name: Deploy Deployment
-run: kubectl apply --validate=false -f infrastructure/k8s/deployment-resolved.yaml
+run: kubectl apply --validate=false -f k8s/deployment-resolved.yaml

- name: Deploy Service
-run: kubectl apply --validate=false -f infrastructure/k8s/service.yaml
+run: kubectl apply --validate=false -f k8s/service.yaml

- name: Check Installation
run: |
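The envsubst step above renders the Kubernetes manifest from the relocated k8s/ directory before applying it. A minimal local sketch of the same substitution, using placeholder values in place of the repository secrets:

# Placeholder values; the workflow takes these from repository secrets.
export AWS_ACCOUNT_ID=123456789012
export AWS_REGION=us-east-1
export ECR_REPOSITORY=my-app   # assumed repository name
export IMAGE_TAG=latest

# Render the template, inspect the resolved manifest, then apply it.
envsubst < k8s/deployment.yaml | tee k8s/deployment-resolved.yaml
kubectl apply --validate=false -f k8s/deployment-resolved.yaml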
1 change: 1 addition & 0 deletions infrastructure/providers/aws/README.md
@@ -0,0 +1 @@
Before destroying the Terraform resources, first delete all deployments created in Kubernetes. Otherwise the destroy can get stuck in a loop trying to delete the VPC, subnets, etc., which cannot be removed while resources such as the load balancer created by the Kubernetes deployment still exist.
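A possible teardown order, sketched under the assumption that the manifests applied by the workflow above are the only Kubernetes resources in play:

# Delete the Kubernetes resources first so AWS releases the load balancer.
kubectl delete -f k8s/service.yaml
kubectl delete -f k8s/deployment-resolved.yaml

# Once the load balancer is gone, the VPC and subnets can be destroyed.
cd infrastructure/providers/aws
terraform destroy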
26 changes: 0 additions & 26 deletions infrastructure/providers/aws/federated_deployer_role.tf
@@ -87,32 +87,6 @@ resource "aws_eks_access_policy_association" "federated_deployer_eks_admin_polic
depends_on = [aws_eks_access_entry.eks_federated_deployer_access_entry]
}

# # Associate AmazonEKSAdminViewPolicy TODO: remove if deployment succeeds without this
# resource "aws_eks_access_policy_association" "cluster_admin_policy" {
# cluster_name = module.eks.cluster_name
# policy_arn = "arn:aws:eks::aws:cluster-access-policy/AmazonEKSClusterAdminPolicy"
# principal_arn = aws_iam_role.eks_federated_deployer.arn
#
# access_scope {
# type = "cluster"
# }
#
# depends_on = [aws_eks_access_entry.eks_federated_deployer_access_entry]
# }

# Associate AmazonEKSClusterPolicy
# resource "aws_eks_access_policy_association" "cluster_policy" {
# cluster_name = module.eks.cluster_name
# policy_arn = "arn:aws:eks::aws:cluster-access-policy/AmazonEKSEditPolicy"
# principal_arn = aws_iam_role.eks_federated_deployer.arn
#
# access_scope {
# type = "cluster"
# }
#
# depends_on = [aws_eks_access_entry.eks_federated_deployer_access_entry]
# }

resource "aws_eks_access_entry" "local_user_access_entry" {
cluster_name = module.eks.cluster_name
principal_arn = local.local_aws_user_arn
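With the commented-out associations removed, only the admin policy association and the access entries remain. A hedged way to confirm what ends up associated after terraform apply (the cluster and role names are taken from the script below, an AWS CLI version with EKS access-entry support is assumed, and <ACCOUNT_ID> is a placeholder):

aws eks list-access-entries --cluster-name dev-cluster
aws eks list-associated-access-policies \
  --cluster-name dev-cluster \
  --principal-arn "arn:aws:iam::<ACCOUNT_ID>:role/eks-federated-deployer"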
49 changes: 49 additions & 0 deletions infrastructure/providers/aws/get_the_deployed_service_url.sh
@@ -0,0 +1,49 @@
#!/bin/bash

# Variables
ROLE_NAME="eks-federated-deployer"
ROLE_SESSION_NAME="eks-federated-deployer-session"
KUBE_CLUSTER_NAME="dev-cluster"
SERVICE_NAME="marco-nico-service"
NAMESPACE="default"

# Get the AWS Account ID
ACCOUNT_ID=$(aws sts get-caller-identity --query "Account" --output text)

if [ -z "$ACCOUNT_ID" ]; then
echo "Error: Unable to retrieve AWS Account ID."
exit 1
fi

# Assume the role
ROLE_ARN="arn:aws:iam::$ACCOUNT_ID:role/$ROLE_NAME"
sts_response=$(aws sts assume-role --role-arn "$ROLE_ARN" --role-session-name "$ROLE_SESSION_NAME")

# Parse the assumed role credentials
AWS_ACCESS_KEY_ID=$(echo "$sts_response" | jq -r '.Credentials.AccessKeyId')
AWS_SECRET_ACCESS_KEY=$(echo "$sts_response" | jq -r '.Credentials.SecretAccessKey')
AWS_SESSION_TOKEN=$(echo "$sts_response" | jq -r '.Credentials.SessionToken')

if [ -z "$AWS_ACCESS_KEY_ID" ] || [ -z "$AWS_SECRET_ACCESS_KEY" ] || [ -z "$AWS_SESSION_TOKEN" ]; then
echo "Error: Unable to assume the role. Please check the role ARN and your AWS CLI configuration."
exit 1
fi

# Update kubeconfig using the assumed role credentials to switch context
aws eks update-kubeconfig --name "$KUBE_CLUSTER_NAME" --role-arn "$ROLE_ARN"

# Use the assumed role credentials with kubectl directly to get the service details
SERVICE_DETAILS=$(AWS_ACCESS_KEY_ID=$AWS_ACCESS_KEY_ID AWS_SECRET_ACCESS_KEY=$AWS_SECRET_ACCESS_KEY AWS_SESSION_TOKEN=$AWS_SESSION_TOKEN kubectl get svc "${SERVICE_NAME}" --namespace "${NAMESPACE}" -o json)

# Extract the service URL by parsing the JSON response
SERVICE_URL=$(echo "$SERVICE_DETAILS" | jq -r '.status.loadBalancer.ingress[0].ip // .status.loadBalancer.ingress[0].hostname')

# Check that a LoadBalancer address was actually resolved (jq prints "null" when both fields are missing)
if [ -z "$SERVICE_URL" ] || [ "$SERVICE_URL" = "null" ]; then
echo "Error: Unable to retrieve the LoadBalancer URL for the service ${SERVICE_NAME}"
exit 1
fi

# Print the final service URL
FULL_URL="http://${SERVICE_URL}/play/marco"
echo "The URL is: ${FULL_URL}"
File renamed without changes.
File renamed without changes.
File renamed without changes.
