diff --git a/.dockleconfig b/.dockleconfig index 051fd7789..a8c76f2ea 100644 --- a/.dockleconfig +++ b/.dockleconfig @@ -2,3 +2,5 @@ # To allow multiple files, use a list of names, example below. Make sure to remove the leading # # DOCKLE_ACCEPT_FILES="file1,path/to/file2,file3/path,etc" # https://github.com/goodwithtech/dockle#accept-suspicious-environment-variables--files--file-extensions +# The apiflask/settings file is a stub file that apiflask creates, and has no sensitive data in. We are ignoring it since it is unused +DOCKLE_ACCEPT_FILES=api/.venv/lib/python3.11/site-packages/apiflask/settings.py diff --git a/.github/actions/configure-aws-credentials/action.yml b/.github/actions/configure-aws-credentials/action.yml index 309601c75..814e42391 100644 --- a/.github/actions/configure-aws-credentials/action.yml +++ b/.github/actions/configure-aws-credentials/action.yml @@ -1,12 +1,12 @@ -name: "Configure AWS Credentials" -description: "Configure AWS Credentials for a given application and | +name: 'Configure AWS Credentials' +description: 'Configure AWS Credentials for a given application and | environment so that the GitHub Actions workflow can access AWS resources. | This is a wrapper around https://github.com/aws-actions/configure-aws-credentials | that first determines the account, role, and region based on the | - account_names_by_environment configuration in app-config" + account_names_by_environment configuration in app-config' inputs: app_name: - description: "Name of application folder under /infra" + description: 'Name of application folder under /infra' required: true environment: description: 'Name of environment (dev, staging, prod) that AWS resources live in, or "shared" for resources that are shared across environments' @@ -52,7 +52,7 @@ runs: echo "AWS_REGION=$AWS_REGION" >> "$GITHUB_ENV" shell: bash - name: Configure AWS credentials - uses: aws-actions/configure-aws-credentials@v1 + uses: aws-actions/configure-aws-credentials@v3 with: role-to-assume: ${{ env.AWS_ROLE_TO_ASSUME }} aws-region: ${{ env.AWS_REGION }} diff --git a/.github/workflows/cd-api.yml b/.github/workflows/cd-api.yml new file mode 100644 index 000000000..edd96b363 --- /dev/null +++ b/.github/workflows/cd-api.yml @@ -0,0 +1,56 @@ +name: Deploy API +# Need to set a default value for when the workflow is triggered from a git push +# which bypasses the default configuration for inputs +run-name: Deploy ${{ github.ref_name }} to API ${{ inputs.environment || 'dev' }} + +on: + push: + branches: + - "main" + paths: + - "api/**" + - "bin/**" + - "infra/**" + release: + types: [published] + workflow_dispatch: + inputs: + app_name: + description: "name of application folder under infra directory" + required: true + type: string + environment: + description: "target environment" + required: true + default: "dev" + type: choice + options: + - dev + - prod + +jobs: + wait-for-checks: + runs-on: ubuntu-latest + + steps: + - name: Checkout code + uses: actions/checkout@v3 + + - name: Authenticate GitHub CLI + run: echo "${{ github.token }}" | gh auth login --with-token + + - name: Run verify-checks script + env: + CURRENT_GITHUB_RUN_ID: ${{ github.run_id }} + SHA: ${{ github.sha }} + REPO: "hhs/grants-equity" + run: | + ${{ github.workspace }}/bin/verify-checks.sh + + deploy: + name: Deploy + needs: wait-for-checks + uses: ./.github/workflows/deploy.yml + with: + app_name: ${{ inputs.app_name || 'api' }} + environment: ${{ inputs.environment || (github.event_name == 'release' && 'prod') || 'dev' }} diff --git 
a/.github/workflows/check-infra-auth.yml b/.github/workflows/check-infra-auth.yml index 120e24006..3e908148e 100644 --- a/.github/workflows/check-infra-auth.yml +++ b/.github/workflows/check-infra-auth.yml @@ -23,7 +23,7 @@ jobs: - name: Checkout code uses: actions/checkout@v3 - name: Configure AWS credentials - uses: aws-actions/configure-aws-credentials@v1 + uses: aws-actions/configure-aws-credentials@v3 with: aws-region: ${{ inputs.aws_region }} role-to-assume: ${{ inputs.role_to_assume }} diff --git a/.github/workflows/ci-infra-service.yml b/.github/workflows/ci-infra-service.yml new file mode 100644 index 000000000..aaa39e4ad --- /dev/null +++ b/.github/workflows/ci-infra-service.yml @@ -0,0 +1,50 @@ +name: CI Infra Service Checks + +on: + push: + branches: + - main + paths: + - infra/*/service/** + - infra/test/** + - .github/workflows/ci-infra-service.yml + pull_request: + paths: + - infra/*/service/** + - infra/test/** + - .github/workflows/ci-infra-service.yml + workflow_dispatch: + +env: + APP_NAME: frontend + +jobs: + infra-test-e2e: + name: Test service + runs-on: ubuntu-latest + + permissions: + contents: read + id-token: write + + steps: + - uses: actions/checkout@v3 + + - uses: hashicorp/setup-terraform@v2 + with: + terraform_version: 1.2.1 + terraform_wrapper: false + + - uses: actions/setup-go@v3 + with: + go-version: ">=1.19.0" + + - name: Configure AWS credentials + uses: ./.github/actions/configure-aws-credentials + with: + app_name: ${{ env.APP_NAME }} + # Run infra CI on dev environment + environment: dev + + - name: Run Terratest + run: make APP_NAME=${{ env.APP_NAME }} infra-test-service diff --git a/.github/workflows/ci-infra.yml b/.github/workflows/ci-infra.yml index b4bcecffb..763783c32 100644 --- a/.github/workflows/ci-infra.yml +++ b/.github/workflows/ci-infra.yml @@ -5,18 +5,37 @@ on: branches: - main paths: + - bin/** - infra/** - - .github/workflows/ci-infra.yml + - .github/workflows/** pull_request: paths: + - bin/** - infra/** - - test/** - - .github/workflows/ci-infra.yml - -env: - APP_NAME: frontend + - .github/workflows/** jobs: + lint-github-actions: + # Lint github actions files using https://github.com/rhysd/actionlint + # This job configuration is largely copied from https://github.com/rhysd/actionlint/blob/main/docs/usage.md#use-actionlint-on-github-actions + name: Lint GitHub Actions workflows + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v3 + - name: Download actionlint + id: get_actionlint + run: bash <(curl https://raw.githubusercontent.com/rhysd/actionlint/main/scripts/download-actionlint.bash) + shell: bash + - name: Check workflow files + run: ${{ steps.get_actionlint.outputs.executable }} -color + shell: bash + lint-scripts: + name: Lint scripts + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v3 + - name: Shellcheck + run: make infra-lint-scripts check-terraform-format: name: Check Terraform format runs-on: ubuntu-latest @@ -26,10 +45,10 @@ jobs: with: terraform_version: 1.4.6 terraform_wrapper: false - - name: Run infra-lint + - name: Run infra-lint-terraform run: | echo "If this fails, run 'make infra-format'" - make infra-lint + make infra-lint-terraform validate-terraform: name: Validate Terraform modules runs-on: ubuntu-latest @@ -39,8 +58,8 @@ jobs: with: terraform_version: 1.4.6 terraform_wrapper: false - - name: Run infra-validate - run: make infra-validate + - name: Validate + run: make infra-validate-modules check-compliance-with-checkov: name: Check compliance with checkov runs-on: ubuntu-latest @@ 
-74,33 +93,3 @@ jobs: uses: aquasecurity/tfsec-pr-commenter-action@v1.2.0 with: github_token: ${{ github.token }} - - infra-test-e2e: - name: End-to-end tests - runs-on: ubuntu-latest - - permissions: - contents: read - id-token: write - - steps: - - uses: actions/checkout@v3 - - - uses: hashicorp/setup-terraform@v2 - with: - terraform_version: 1.2.1 - terraform_wrapper: false - - - uses: actions/setup-go@v3 - with: - go-version: ">=1.19.0" - - - name: Configure AWS credentials - uses: ./.github/actions/configure-aws-credentials - with: - app_name: frontend - # Run infra CI on dev environment - environment: dev - - - name: Run Terratest - run: make APP_NAME=${{ env.APP_NAME }} infra-test diff --git a/.github/workflows/ci-vulnerability-scans.yml b/.github/workflows/ci-vulnerability-scans.yml index a6dace9a8..58a7e8ae4 100644 --- a/.github/workflows/ci-vulnerability-scans.yml +++ b/.github/workflows/ci-vulnerability-scans.yml @@ -9,6 +9,7 @@ on: - main paths: - frontend/** + - api/** - .grype.yml - .hadolint.yaml - .trivyignore @@ -18,195 +19,18 @@ on: pull_request: paths: - frontend/** + - api/** - .grype.yml - .hadolint.yaml - .trivyignore - .github/workflows/ci-vulnerability-scans.yml -env: - APP_NAME: frontend - jobs: - hadolint-scan: - name: Hadolint Scan - runs-on: ubuntu-latest - - steps: - - uses: actions/checkout@v3 - - # Scans Dockerfile for any bad practices or issues - - name: Scan Dockerfile by hadolint - uses: hadolint/hadolint-action@v3.1.0 - with: - dockerfile: frontend/Dockerfile - format: tty - failure-threshold: warning - output-file: hadolint-results.txt - - - name: Save output to workflow summary - if: always() # Runs even if there is a failure - run: | - cat hadolint-results.txt >> $GITHUB_STEP_SUMMARY - - build-and-cache: - runs-on: ubuntu-latest - outputs: - image: ${{ steps.shared-output.outputs.image }} - - steps: - - uses: actions/checkout@v3 - - - name: Set up Docker Buildx - uses: docker/setup-buildx-action@master - - - name: Cache Docker layers - id: cache-buildx - uses: actions/cache@v3 - with: - path: /tmp/.buildx-cache - key: ${{ runner.os }}-buildx-${{ github.sha }} - restore-keys: | - ${{ runner.os }}-buildx- - - - name: Ensure Buildx cache exists - run: | - mkdir -p /tmp/.buildx-cache - - - name: Set shared outputs - id: shared-output - run: | - IMAGE_NAME=$(make APP_NAME=${{env.APP_NAME}} release-image-name) - IMAGE_TAG=$(make release-image-tag) - echo "image=$IMAGE_NAME:$IMAGE_TAG" >> $GITHUB_OUTPUT - - - name: Build and tag Docker image for scanning - # If there's an exact match in cache, skip build entirely - if: steps.cache-buildx.outputs.cache-hit != 'true' - run: | - make release-build \ - APP_NAME=${{ env.APP_NAME }} \ - OPTIONAL_BUILD_FLAGS=" \ - --cache-from=type=local,src=/tmp/.buildx-cache \ - --cache-to=type=local,dest=/tmp/.buildx-cache" - - - name: Save Docker image - if: steps.cache-buildx.outputs.cache-hit != 'true' - run: | - docker save ${{ steps.shared-output.outputs.image }} > /tmp/docker-image.tar - - - name: Cache Docker image - if: steps.cache-buildx.outputs.cache-hit != 'true' - uses: actions/cache/save@v3 - with: - path: /tmp/docker-image.tar - key: ${{ runner.os }}-docker-image-${{ github.sha }} - - trivy-scan: - name: Trivy Scan - runs-on: ubuntu-latest - needs: build-and-cache - - steps: - - uses: actions/checkout@v3 - - - name: Restore cached Docker image - uses: actions/cache/restore@v3 - with: - path: /tmp/docker-image.tar - key: ${{ runner.os }}-docker-image-${{ github.sha }} - restore-keys: | - ${{ runner.os 
}}-docker-image- - - - name: Load cached Docker image - run: | - docker load < /tmp/docker-image.tar - - - name: Run Trivy vulnerability scan - uses: aquasecurity/trivy-action@master - with: - scan-type: image - image-ref: ${{ needs.build-and-cache.outputs.image }} - format: table - exit-code: 1 - ignore-unfixed: true - vuln-type: os - scanners: vuln,secret - - - name: Save output to workflow summary - if: always() # Runs even if there is a failure - run: | - echo "View results in GitHub Action logs" >> $GITHUB_STEP_SUMMARY - - anchore-scan: - name: Anchore Scan - runs-on: ubuntu-latest - needs: build-and-cache - - steps: - - uses: actions/checkout@v3 - - - name: Restore cached Docker image - uses: actions/cache/restore@v3 - with: - path: /tmp/docker-image.tar - key: ${{ runner.os }}-docker-image-${{ github.sha }} - restore-keys: | - ${{ runner.os }}-docker-image- - - - name: Load cached Docker image - run: | - docker load < /tmp/docker-image.tar - - - name: Run Anchore vulnerability scan - uses: anchore/scan-action@v3 - with: - image: ${{ needs.build-and-cache.outputs.image }} - output-format: table - - - name: Save output to workflow summary - if: always() # Runs even if there is a failure - run: | - echo "View results in GitHub Action logs" >> $GITHUB_STEP_SUMMARY - - dockle-scan: - name: Dockle Scan - runs-on: ubuntu-latest - needs: build-and-cache - - steps: - - uses: actions/checkout@v3 - - - name: Restore cached Docker image - uses: actions/cache/restore@v3 - with: - path: /tmp/docker-image.tar - key: ${{ runner.os }}-docker-image-${{ github.sha }} - restore-keys: | - ${{ runner.os }}-docker-image- - - - name: Load cached Docker image - run: | - docker load < /tmp/docker-image.tar - - # Dockle doesn't allow you to have an ignore file for the DOCKLE_ACCEPT_FILES - # variable, this will save the variable in this file to env for Dockle - - name: Set any acceptable Dockle files - run: | - if grep -q "^DOCKLE_ACCEPT_FILES=.*" .dockleconfig; then - grep -s '^DOCKLE_ACCEPT_FILES=' .dockleconfig >> $GITHUB_ENV - fi - - - name: Run Dockle container linter - uses: erzz/dockle-action@v1.3.1 - with: - image: ${{ needs.build-and-cache.outputs.image }} - exit-code: "1" - failure-threshold: WARN - accept-filenames: ${{ env.DOCKLE_ACCEPT_FILES }} - - - name: Save output to workflow summary - if: always() # Runs even if there is a failure - run: | - echo "```json" >> $GITHUB_STEP_SUMMARY - cat dockle-report.json >> $GITHUB_STEP_SUMMARY - echo "```" >> $GITHUB_STEP_SUMMARY + vulnerability-scans: + name: Vulnerability Scans + strategy: + matrix: + app_name: ["frontend", "api"] + uses: ./.github/workflows/vulnerability-scans.yml + with: + app_name: ${{ matrix.app_name }} diff --git a/.github/workflows/vulnerability-scans.yml b/.github/workflows/vulnerability-scans.yml new file mode 100644 index 000000000..9eb93cf5a --- /dev/null +++ b/.github/workflows/vulnerability-scans.yml @@ -0,0 +1,199 @@ +# GitHub Actions CI workflow that runs vulnerability scans on the application's Docker image +# to ensure images built are secure before they are deployed. 
+ +name: CI Vulnerability Scans + +on: + workflow_call: + inputs: + app_name: + description: "name of application folder under infra directory" + required: true + type: string + +jobs: + hadolint-scan: + name: Hadolint Scan + runs-on: ubuntu-latest + + steps: + - uses: actions/checkout@v3 + + # Scans Dockerfile for any bad practices or issues + - name: Scan Dockerfile by hadolint + uses: hadolint/hadolint-action@v3.1.0 + with: + dockerfile: ${{ inputs.app_name }}/Dockerfile + format: tty + failure-threshold: warning + output-file: hadolint-results.txt + + - name: Save output to workflow summary + if: always() # Runs even if there is a failure + run: | + cat hadolint-results.txt >> "$GITHUB_STEP_SUMMARY" + + build-and-cache: + runs-on: ubuntu-latest + outputs: + image: ${{ steps.shared-output.outputs.image }} + + steps: + - uses: actions/checkout@v3 + + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@master + + - name: Cache Docker layers + id: cache-buildx + uses: actions/cache@v3 + with: + path: /tmp/.buildx-cache + key: ${{ inputs.app_name }}-buildx-${{ github.sha }} + restore-keys: | + ${{ inputs.app_name }}-buildx- + + - name: Ensure Buildx cache exists + run: | + mkdir -p /tmp/.buildx-cache + + - name: Set shared outputs + id: shared-output + run: | + IMAGE_NAME=$(make APP_NAME=${{ inputs.app_name }} release-image-name) + IMAGE_TAG=$(make release-image-tag) + echo "image=$IMAGE_NAME:$IMAGE_TAG" >> "$GITHUB_OUTPUT" + + - name: Build and tag Docker image for scanning + # If there's an exact match in cache, skip build entirely + if: steps.cache-buildx.outputs.cache-hit != 'true' + run: | + make release-build \ + APP_NAME=${{ inputs.app_name }} \ + OPTIONAL_BUILD_FLAGS=" \ + --cache-from=type=local,src=/tmp/.buildx-cache \ + --cache-to=type=local,dest=/tmp/.buildx-cache" + + - name: Save Docker image + if: steps.cache-buildx.outputs.cache-hit != 'true' + run: | + docker save ${{ steps.shared-output.outputs.image }} > /tmp/docker-image.tar + + - name: Cache Docker image + if: steps.cache-buildx.outputs.cache-hit != 'true' + uses: actions/cache/save@v3 + with: + path: /tmp/docker-image.tar + key: ${{ inputs.app_name }}-docker-image-${{ github.sha }} + + trivy-scan: + name: Trivy Scan + runs-on: ubuntu-latest + needs: build-and-cache + + steps: + - uses: actions/checkout@v3 + + - name: Restore cached Docker image + uses: actions/cache/restore@v3 + with: + path: /tmp/docker-image.tar + key: ${{ inputs.app_name }}-docker-image-${{ github.sha }} + restore-keys: | + ${{ inputs.app_name }}-docker-image- + + - name: Load cached Docker image + run: | + docker load < /tmp/docker-image.tar + + - name: Run Trivy vulnerability scan + uses: aquasecurity/trivy-action@master + with: + scan-type: image + image-ref: ${{ needs.build-and-cache.outputs.image }} + format: table + exit-code: 1 + ignore-unfixed: true + vuln-type: os + scanners: vuln,secret + + - name: Save output to workflow summary + if: always() # Runs even if there is a failure + run: | + echo "View results in GitHub Action logs" >> "$GITHUB_STEP_SUMMARY" + + anchore-scan: + name: Anchore Scan + runs-on: ubuntu-latest + needs: build-and-cache + + steps: + - uses: actions/checkout@v3 + + - name: Restore cached Docker image + uses: actions/cache/restore@v3 + with: + path: /tmp/docker-image.tar + key: ${{ inputs.app_name }}-docker-image-${{ github.sha }} + restore-keys: | + ${{ inputs.app_name }}-docker-image- + + - name: Load cached Docker image + run: | + docker load < /tmp/docker-image.tar + + - name: Run Anchore vulnerability 
scan + uses: anchore/scan-action@v3 + with: + image: ${{ needs.build-and-cache.outputs.image }} + output-format: table + + - name: Save output to workflow summary + if: always() # Runs even if there is a failure + run: | + echo "View results in GitHub Action logs" >> "$GITHUB_STEP_SUMMARY" + + dockle-scan: + name: Dockle Scan + runs-on: ubuntu-latest + needs: build-and-cache + + steps: + - uses: actions/checkout@v3 + + - name: Restore cached Docker image + uses: actions/cache/restore@v3 + with: + path: /tmp/docker-image.tar + key: ${{ inputs.app_name }}-docker-image-${{ github.sha }} + restore-keys: | + ${{ inputs.app_name }}-docker-image- + + - name: Load cached Docker image + run: | + docker load < /tmp/docker-image.tar + + # Dockle doesn't allow you to have an ignore file for the DOCKLE_ACCEPT_FILES + # variable, this will save the variable in this file to env for Dockle + - name: Set any acceptable Dockle files + run: | + if grep -q "^DOCKLE_ACCEPT_FILES=.*" .dockleconfig; then + grep -s '^DOCKLE_ACCEPT_FILES=' .dockleconfig >> "$GITHUB_ENV" + fi + + - name: Run Dockle container linter + uses: erzz/dockle-action@v1.3.1 + with: + image: ${{ needs.build-and-cache.outputs.image }} + exit-code: "1" + failure-threshold: WARN + accept-filenames: ${{ env.DOCKLE_ACCEPT_FILES }} + + - name: Save output to workflow summary + if: always() # Runs even if there is a failure + run: | + { + echo '```json' + cat dockle-report.json + echo '```' + } >> "$GITHUB_STEP_SUMMARY" diff --git a/.grype.yml b/.grype.yml index a2d0a7d15..60117112a 100644 --- a/.grype.yml +++ b/.grype.yml @@ -16,30 +16,3 @@ ignore: - fix-state: not-fixed - fix-state: wont-fix - fix-state: unknown - - # glob-parent before 5.1.2 vulnerable to Regular Expression Denial of Service in enclosure regex - # https://github.com/advisories/GHSA-ww39-953v-wcq6 - # High severity - # Ignoring since this is only a dependency of dev tools: storybook (for storybook docs site), - # eslint (for Linting in CI), and sass (for compiling CSS during CI build phase) - - vulnerability: GHSA-ww39-953v-wcq6 - - # Regular Expression Denial of Service in trim - # https://github.com/advisories/GHSA-w5p7-h5w8-2hfq - # High severity - # Ignoring since this is only used in storybook which is a dev tool - - vulnerability: GHSA-w5p7-h5w8-2hfq - - # Uncontrolled Resource Consumption in trim-newlines - # https://github.com/advisories/GHSA-7p7h-4mm5-852v - # Ignoring since this is only used in storybook which is a dev tool - - vulnerability: GHSA-7p7h-4mm5-852v - - ##################### - ## False positives ## - ##################### - - # http-cache-semantics vulnerable to Regular Expression Denial of Service - # https://github.com/advisories/GHSA-rc47-6667-2j5j - # http-cache-semantics does not exist as a dependency in this app - - vulnerability: GHSA-rc47-6667-2j5j diff --git a/.hadolint.yaml b/.hadolint.yaml index d552e3548..00f76a4a3 100644 --- a/.hadolint.yaml +++ b/.hadolint.yaml @@ -4,3 +4,9 @@ # https://github.com/hadolint/hadolint#configure failure-threshold: warning ignored: [] +override: + info: + # Casts the apt-get install = finding as info + # We have this set since there is no way to specify version for + # build-essentials in the Dockerfile + - DL3008 diff --git a/.template-version b/.template-version new file mode 100644 index 000000000..4a6150bfe --- /dev/null +++ b/.template-version @@ -0,0 +1 @@ +1a5444db0b4d3f85021f729affcf0b014ae3e81c diff --git a/Makefile b/Makefile index 90abc8826..4dbe93cc1 100644 --- a/Makefile +++ b/Makefile @@ -27,33 +27,54 
@@ __check_defined = \ .PHONY : \ - infra-validate-modules \ - infra-validate-env-template \ - infra-check-compliance \ + help \ + infra-check-app-database-roles \ infra-check-compliance-checkov \ infra-check-compliance-tfsec \ - infra-lint \ + infra-check-compliance \ + infra-configure-app-build-repository \ + infra-configure-app-database \ + infra-configure-app-service \ + infra-configure-monitoring-secrets \ + infra-configure-network \ infra-format \ + infra-lint \ + infra-lint-scripts \ + infra-lint-terraform \ + infra-lint-workflows \ + infra-set-up-account \ + infra-test-service \ + infra-update-app-build-repository \ + infra-update-app-database-roles \ + infra-update-app-database \ + infra-update-app-service \ + infra-update-current-account \ + infra-update-network \ + infra-validate-modules \ release-build \ - release-publish \ release-deploy \ - image-registry-login \ - db-migrate \ - db-migrate-down \ - db-migrate-create + release-image-name \ + release-image-tag \ + release-publish \ + release-run-database-migrations + + infra-set-up-account: ## Configure and create resources for current AWS profile and save tfbackend file to infra/accounts/$ACCOUNT_NAME.ACCOUNT_ID.s3.tfbackend @:$(call check_defined, ACCOUNT_NAME, human readable name for account e.g. "prod" or the AWS account alias) ./bin/set-up-current-account.sh $(ACCOUNT_NAME) +infra-configure-network: ## Configure default network + ./bin/create-tfbackend.sh infra/networks default + infra-configure-app-build-repository: ## Configure infra/$APP_NAME/build-repository tfbackend and tfvars files @:$(call check_defined, APP_NAME, the name of subdirectory of /infra that holds the application's infrastructure code) - ./bin/configure-app-build-repository.sh $(APP_NAME) + ./bin/create-tfbackend.sh "infra/$(APP_NAME)/build-repository" shared infra-configure-app-database: ## Configure infra/$APP_NAME/database module's tfbackend and tfvars files for $ENVIRONMENT @:$(call check_defined, APP_NAME, the name of subdirectory of /infra that holds the application's infrastructure code) @:$(call check_defined, ENVIRONMENT, the name of the application environment e.g. "prod" or "staging") - ./bin/configure-app-database.sh $(APP_NAME) $(ENVIRONMENT) + ./bin/create-tfbackend.sh "infra/$(APP_NAME)/database" "$(ENVIRONMENT)" infra-configure-monitoring-secrets: ## Set $APP_NAME's incident management service integration URL for $ENVIRONMENT @:$(call check_defined, APP_NAME, the name of subdirectory of /infra that holds the application's infrastructure code) @@ -64,20 +85,23 @@ infra-configure-monitoring-secrets: ## Set $APP_NAME's incident management servi infra-configure-app-service: ## Configure infra/$APP_NAME/service module's tfbackend and tfvars files for $ENVIRONMENT @:$(call check_defined, APP_NAME, the name of subdirectory of /infra that holds the application's infrastructure code) @:$(call check_defined, ENVIRONMENT, the name of the application environment e.g. 
"prod" or "staging") - ./bin/configure-app-service.sh $(APP_NAME) $(ENVIRONMENT) + ./bin/create-tfbackend.sh "infra/$(APP_NAME)/service" "$(ENVIRONMENT)" infra-update-current-account: ## Update infra resources for current AWS profile ./bin/terraform-init-and-apply.sh infra/accounts `./bin/current-account-config-name.sh` +infra-update-network: ## Update default network + ./bin/terraform-init-and-apply.sh infra/networks default + infra-update-app-build-repository: ## Create or update $APP_NAME's build repository @:$(call check_defined, APP_NAME, the name of subdirectory of /infra that holds the application's infrastructure code) ./bin/terraform-init-and-apply.sh infra/$(APP_NAME)/build-repository shared infra-update-app-database: ## Create or update $APP_NAME's database module for $ENVIRONMENT - # APP_NAME has a default value defined above, but check anyways in case the default is ever removed @:$(call check_defined, APP_NAME, the name of subdirectory of /infra that holds the application's infrastructure code) @:$(call check_defined, ENVIRONMENT, the name of the application environment e.g. "prod" or "staging") - ./bin/terraform-init-and-apply.sh infra/$(APP_NAME)/database $(ENVIRONMENT) + terraform -chdir="infra/$(APP_NAME)/database" init -input=false -reconfigure -backend-config="$(ENVIRONMENT).s3.tfbackend" + terraform -chdir="infra/$(APP_NAME)/database" apply -var="environment_name=$(ENVIRONMENT)" infra-update-app-database-roles: ## Create or update database roles and schemas for $APP_NAME's database in $ENVIRONMENT @:$(call check_defined, APP_NAME, the name of subdirectory of /infra that holds the application's infrastructure code) @@ -88,47 +112,48 @@ infra-update-app-service: ## Create or update $APP_NAME's web service module # APP_NAME has a default value defined above, but check anyways in case the default is ever removed @:$(call check_defined, APP_NAME, the name of subdirectory of /infra that holds the application's infrastructure code) @:$(call check_defined, ENVIRONMENT, the name of the application environment e.g. "prod" or "staging") - ./bin/terraform-init-and-apply.sh infra/$(APP_NAME)/service $(ENVIRONMENT) - - -# Validate all infra root and child modules. -infra-validate: \ - infra-validate-modules \ - # !! Uncomment the following line once you've set up the infra/project-config module - # infra-validate-env-template + terraform -chdir="infra/$(APP_NAME)/service" init -input=false -reconfigure -backend-config="$(ENVIRONMENT).s3.tfbackend" + terraform -chdir="infra/$(APP_NAME)/service" apply -var="environment_name=$(ENVIRONMENT)" -# Validate all infra root and child modules. -# Validate all infra reusable child modules. 
The prerequisite for this rule is obtained by +# The prerequisite for this rule is obtained by # prefixing each module with the string "infra-validate-module-" -infra-validate-modules: $(patsubst %, infra-validate-module-%, $(MODULES)) +infra-validate-modules: $(patsubst %, infra-validate-module-%, $(MODULES)) ## Run terraform validate on reusable child modules infra-validate-module-%: @echo "Validate library module: $*" terraform -chdir=infra/modules/$* init -backend=false terraform -chdir=infra/modules/$* validate -infra-validate-env-template: - @echo "Validate module: env-template" - terraform -chdir=infra/app/env-template init -backend=false - terraform -chdir=infra/app/env-template validate +infra-check-app-database-roles: ## Check that app database roles have been configured properly + @:$(call check_defined, APP_NAME, the name of subdirectory of /infra that holds the application's infrastructure code) + @:$(call check_defined, ENVIRONMENT, the name of the application environment e.g. "prod" or "staging") + ./bin/check-database-roles.sh $(APP_NAME) $(ENVIRONMENT) -infra-check-compliance: infra-check-compliance-checkov infra-check-compliance-tfsec +infra-check-compliance: infra-check-compliance-checkov infra-check-compliance-tfsec ## Run compliance checks -infra-check-compliance-checkov: +infra-check-compliance-checkov: ## Run checkov compliance checks checkov --directory infra -infra-check-compliance-tfsec: +infra-check-compliance-tfsec: ## Run tfsec compliance checks tfsec infra -infra-lint: ## Lint infra code +infra-lint: infra-lint-scripts infra-lint-terraform infra-lint-workflows ## Lint infra code + +infra-lint-scripts: ## Lint shell scripts + shellcheck bin/** + +infra-lint-terraform: ## Lint Terraform code terraform fmt -recursive -check infra +infra-lint-workflows: ## Lint GitHub actions + actionlint + infra-format: ## Format infra code terraform fmt -recursive infra -infra-test: ## Run end-to-end infra Terratest test suite +infra-test-service: ## Run service layer infra test suite @:$(call check_defined, APP_NAME, the name of subdirectory of /infra that holds the application's infrastructure code) - cd infra/test && go test -v -timeout 30m -app_name=$(APP_NAME) + cd infra/test && go test -run TestService -v -timeout 30m -app_name=$(APP_NAME) ######################## ## Release Management ## diff --git a/api/Dockerfile b/api/Dockerfile index 5c14e67bd..246595b73 100644 --- a/api/Dockerfile +++ b/api/Dockerfile @@ -96,7 +96,7 @@ RUN poetry install --no-root --only main # include section in pyproject.toml. Also note that if you change the name or # version section in pyproject.toml, you will need to change the dist/... to match # or the application will not build -RUN poetry build --format wheel && poetry run pip install 'dist/template_application_flask-0.1.0-py3-none-any.whl' +RUN poetry build --format wheel && poetry run pip install 'dist/grants_equity_api-0.1.0-py3-none-any.whl' # Add project's virtual env to the PATH so we can directly run poetry scripts # defiend in pyproject.toml diff --git a/bin/account-ids-by-name.sh b/bin/account-ids-by-name.sh new file mode 100755 index 000000000..b5da95dd5 --- /dev/null +++ b/bin/account-ids-by-name.sh @@ -0,0 +1,24 @@ +#!/bin/bash +# Prints a JSON dictionary that maps account names to account ids for the list +# of accounts given by the terraform backend files of the form +# ..s3.tfbackend in the infra/accounts directory. +set -euo pipefail + + +# We use script dir to make this script agnostic to where it's called from. 
+# This is needed since this script is called from infra/<APP_NAME>/build-repository +# in an external data source +SCRIPT_DIR=$(dirname "$0") + +KEY_VALUE_PAIRS=() +BACKEND_CONFIG_FILE_PATHS=$(ls -1 "$SCRIPT_DIR"/../infra/accounts/*.*.s3.tfbackend) + +for BACKEND_CONFIG_FILE_PATH in $BACKEND_CONFIG_FILE_PATHS; do + BACKEND_CONFIG_FILE=$(basename "$BACKEND_CONFIG_FILE_PATH") + BACKEND_CONFIG_NAME="${BACKEND_CONFIG_FILE/.s3.tfbackend/}" + IFS='.' read -r ACCOUNT_NAME ACCOUNT_ID <<< "$BACKEND_CONFIG_NAME" + KEY_VALUE_PAIRS+=("\"$ACCOUNT_NAME\":\"$ACCOUNT_ID\"") +done + +IFS="," +echo "{${KEY_VALUE_PAIRS[*]}}" diff --git a/bin/check-database-roles.sh b/bin/check-database-roles.sh new file mode 100755 index 000000000..fd1926176 --- /dev/null +++ b/bin/check-database-roles.sh @@ -0,0 +1,48 @@ +#!/bin/bash +# ----------------------------------------------------------------------------- +# Script that invokes the database role-manager AWS Lambda function to check +# that the Postgres users were configured properly. +# +# Positional parameters: +# APP_NAME (required) – the name of subdirectory of /infra that holds the +# application's infrastructure code. +# ENVIRONMENT (required) - the name of the application environment (e.g. dev +# staging, prod) +# ----------------------------------------------------------------------------- +set -euo pipefail + +APP_NAME=$1 +ENVIRONMENT=$2 + +./bin/terraform-init.sh "infra/$APP_NAME/database" "$ENVIRONMENT" +DB_ROLE_MANAGER_FUNCTION_NAME=$(terraform -chdir="infra/$APP_NAME/database" output -raw role_manager_function_name) + +echo "=======================" +echo "Checking database roles" +echo "=======================" +echo "Input parameters" +echo " APP_NAME=$APP_NAME" +echo " ENVIRONMENT=$ENVIRONMENT" +echo +echo "Invoking Lambda function: $DB_ROLE_MANAGER_FUNCTION_NAME" +echo +CLI_RESPONSE=$(aws lambda invoke \ + --function-name "$DB_ROLE_MANAGER_FUNCTION_NAME" \ + --no-cli-pager \ + --log-type Tail \ + --payload "$(echo -n '"check"' | base64)" \ + --output json \ + response.json) + +# Print logs out (they are returned base64 encoded) +echo "$CLI_RESPONSE" | jq -r '.LogResult' | base64 --decode +echo +echo "Lambda function response:" +cat response.json +rm response.json + +# Exit with nonzero status if function failed +FUNCTION_ERROR=$(echo "$CLI_RESPONSE" | jq -r '.FunctionError') +if [ "$FUNCTION_ERROR" != "null" ]; then + exit 1 +fi diff --git a/bin/check-github-actions-auth.sh b/bin/check-github-actions-auth.sh index 0e1746ca6..e9ce94f31 100755 --- a/bin/check-github-actions-auth.sh +++ b/bin/check-github-actions-auth.sh @@ -8,7 +8,7 @@ GITHUB_ACTIONS_ROLE=$1 PREV_RUN_CREATE_TIME=$(gh run list --workflow check-infra-auth.yml --limit 1 --json createdAt --jq ".[].createdAt") echo "Run check-infra-auth workflow with role_to_assume=$GITHUB_ACTIONS_ROLE" -gh workflow run check-infra-auth.yml --field role_to_assume=$GITHUB_ACTIONS_ROLE +gh workflow run check-infra-auth.yml --field role_to_assume="$GITHUB_ACTIONS_ROLE" ######################### ## Get workflow run id ## @@ -45,4 +45,4 @@ echo "Workflow run id: $WORKFLOW_RUN_ID" echo "Watch workflow run until it exits" # --exit-status causes command to exit with non-zero status if run fails -gh run watch $WORKFLOW_RUN_ID --exit-status +gh run watch "$WORKFLOW_RUN_ID" --exit-status diff --git a/bin/configure-app-build-repository.sh b/bin/configure-app-build-repository.sh deleted file mode 100755 index a56eb9651..000000000 --- a/bin/configure-app-build-repository.sh +++ /dev/null @@ -1,51 +0,0 @@ -#!/bin/bash -# 
----------------------------------------------------------------------------- -# This script configures the build-repository module for the specified application. -# It creates a shared.tfvars file and shared.s3.tfbackend in the module directory. -# The configuration will be shared across all of the application's environments. -# -# Positional parameters: -# APP_NAME (required) – the name of subdirectory of /infra that holds the -# application's infrastructure code. -# ----------------------------------------------------------------------------- -set -euo pipefail - - -APP_NAME=$1 - -#-------------------------------------- -# Create terraform backend config file -#-------------------------------------- - -MODULE_DIR="infra/$APP_NAME/build-repository" -BACKEND_CONFIG_NAME="shared" - -./bin/create-tfbackend.sh $MODULE_DIR $BACKEND_CONFIG_NAME - -#-------------------- -# Create tfvars file -#-------------------- - -TF_VARS_FILE="$MODULE_DIR/terraform.tfvars" -REGION=$(terraform -chdir=infra/accounts output -raw region) - -echo "===========================================" -echo "Setting up tfvars file for build-repository" -echo "===========================================" -echo "Input parameters" -echo " APP_NAME=$APP_NAME" -echo - -# Create output file from example file -cp $MODULE_DIR/example.tfvars $TF_VARS_FILE - -# Replace the placeholder values -sed -i.bak "s//$REGION/g" $TF_VARS_FILE - -# Remove the backup file created by sed -rm $TF_VARS_FILE.bak - -echo "Created file: $TF_VARS_FILE" -echo "------------------ file contents ------------------" -cat $TF_VARS_FILE -echo "----------------------- end -----------------------" diff --git a/bin/configure-app-database.sh b/bin/configure-app-database.sh deleted file mode 100755 index d4356a932..000000000 --- a/bin/configure-app-database.sh +++ /dev/null @@ -1,57 +0,0 @@ -#!/bin/bash -# ----------------------------------------------------------------------------- -# This script configures the database module for the specified application -# and environment by creating the .tfvars file and .tfbackend file for the module. -# -# Positional parameters: -# APP_NAME (required) – the name of subdirectory of /infra that holds the -# application's infrastructure code. -# ENVIRONMENT is the name of the application environment (e.g. dev, staging, prod) -# ----------------------------------------------------------------------------- -set -euo pipefail - -APP_NAME=$1 -ENVIRONMENT=$2 - -#-------------------------------------- -# Create terraform backend config file -#-------------------------------------- - -MODULE_DIR="infra/$APP_NAME/database" -BACKEND_CONFIG_NAME="$ENVIRONMENT" - -./bin/create-tfbackend.sh $MODULE_DIR $BACKEND_CONFIG_NAME - -#-------------------- -# Create tfvars file -#-------------------- - -TF_VARS_FILE="$MODULE_DIR/$ENVIRONMENT.tfvars" - -# Get the name of the S3 bucket that was created to store the tf state -# and the name of the DynamoDB table that was created for tf state locks. 
-# This will be used to configure the S3 backends in all the application -# modules -TF_STATE_BUCKET_NAME=$(terraform -chdir=infra/accounts output -raw tf_state_bucket_name) -TF_LOCKS_TABLE_NAME=$(terraform -chdir=infra/accounts output -raw tf_locks_table_name) -TF_STATE_KEY="$MODULE_DIR/$BACKEND_CONFIG_NAME.tfstate" -REGION=$(terraform -chdir=infra/accounts output -raw region) - - -echo "=======================================" -echo "Setting up tfvars file for app database" -echo "=======================================" -echo "Input parameters" -echo " APP_NAME=$APP_NAME" -echo " ENVIRONMENT=$ENVIRONMENT" -echo - -cp $MODULE_DIR/example.tfvars $TF_VARS_FILE -sed -i.bak "s//$ENVIRONMENT/g" $TF_VARS_FILE -sed -i.bak "s//$REGION/g" $TF_VARS_FILE -rm $TF_VARS_FILE.bak - -echo "Created file: $TF_VARS_FILE" -echo "------------------ file contents ------------------" -cat $TF_VARS_FILE -echo "----------------------- end -----------------------" diff --git a/bin/configure-app-service.sh b/bin/configure-app-service.sh deleted file mode 100755 index 6c1347bf1..000000000 --- a/bin/configure-app-service.sh +++ /dev/null @@ -1,54 +0,0 @@ -#!/bin/bash -# ----------------------------------------------------------------------------- -# This script configures the service module for the specified application -# and environment by creating the .tfvars file and .tfbackend file for the module. -# -# Positional parameters: -# APP_NAME (required) – the name of subdirectory of /infra that holds the -# application's infrastructure code. -# ENVIRONMENT is the name of the application environment (e.g. dev, staging, prod) -# ----------------------------------------------------------------------------- -set -euo pipefail - -APP_NAME=$1 -ENVIRONMENT=$2 - -#-------------------------------------- -# Create terraform backend config file -#-------------------------------------- - -MODULE_DIR="infra/$APP_NAME/service" -BACKEND_CONFIG_NAME="$ENVIRONMENT" - -./bin/create-tfbackend.sh $MODULE_DIR $BACKEND_CONFIG_NAME - -#-------------------- -# Create tfvars file -#-------------------- - -TF_VARS_FILE="$MODULE_DIR/$ENVIRONMENT.tfvars" - -# Get values needed to populate the tfvars file (see infra/app/service/example.tfvars) -TF_STATE_BUCKET_NAME=$(terraform -chdir=infra/accounts output -raw tf_state_bucket_name) -TF_STATE_KEY="$MODULE_DIR/$BACKEND_CONFIG_NAME.tfstate" -REGION=$(terraform -chdir=infra/accounts output -raw region) - -echo "======================================" -echo "Setting up tfvars file for app service" -echo "======================================" -echo "Input parameters" -echo " APP_NAME=$APP_NAME" -echo " ENVIRONMENT=$ENVIRONMENT" -echo - -cp $MODULE_DIR/example.tfvars $TF_VARS_FILE -sed -i.bak "s//$ENVIRONMENT/g" $TF_VARS_FILE -sed -i.bak "s//$TF_STATE_BUCKET_NAME/g" $TF_VARS_FILE -sed -i.bak "s||$TF_STATE_KEY|g" $TF_VARS_FILE -sed -i.bak "s//$REGION/g" $TF_VARS_FILE -rm $TF_VARS_FILE.bak - -echo "Created file: $TF_VARS_FILE" -echo "------------------ file contents ------------------" -cat $TF_VARS_FILE -echo "----------------------- end -----------------------" diff --git a/bin/configure-monitoring-secret.sh b/bin/configure-monitoring-secret.sh index b6d51aecb..6bbaaa4ec 100755 --- a/bin/configure-monitoring-secret.sh +++ b/bin/configure-monitoring-secret.sh @@ -16,16 +16,16 @@ APP_NAME=$1 ENVIRONMENT=$2 INTEGRATION_ENDPOINT_URL=$3 -terraform -chdir=infra/$APP_NAME/app-config init > /dev/null -terraform -chdir=infra/$APP_NAME/app-config refresh > /dev/null +terraform 
-chdir="infra/$APP_NAME/app-config" init > /dev/null +terraform -chdir="infra/$APP_NAME/app-config" refresh > /dev/null -HAS_INCIDENT_MANAGEMENT_SERVICE=$(terraform -chdir=infra/$APP_NAME/app-config output -raw has_incident_management_service) -if [ $HAS_INCIDENT_MANAGEMENT_SERVICE = "false" ]; then +HAS_INCIDENT_MANAGEMENT_SERVICE=$(terraform -chdir="infra/$APP_NAME/app-config" output -raw has_incident_management_service) +if [ "$HAS_INCIDENT_MANAGEMENT_SERVICE" = "false" ]; then echo "Application does not have incident management service, no secret to create" exit 0 fi -SECRET_NAME=$(terraform -chdir=infra/$APP_NAME/app-config output -json environment_configs | jq -r ".$ENVIRONMENT.incident_management_service_integration.integration_url_param_name") +SECRET_NAME=$(terraform -chdir="infra/$APP_NAME/app-config" output -json environment_configs | jq -r ".$ENVIRONMENT.incident_management_service_integration.integration_url_param_name") echo "=====================" echo "Setting up SSM secret" diff --git a/bin/create-or-update-database-roles.sh b/bin/create-or-update-database-roles.sh index 1e4df77b9..af98f25ab 100755 --- a/bin/create-or-update-database-roles.sh +++ b/bin/create-or-update-database-roles.sh @@ -16,8 +16,8 @@ set -euo pipefail APP_NAME=$1 ENVIRONMENT=$2 -./bin/terraform-init.sh infra/$APP_NAME/database $ENVIRONMENT -DB_ROLE_MANAGER_FUNCTION_NAME=$(terraform -chdir=infra/$APP_NAME/database output -raw role_manager_function_name) +./bin/terraform-init.sh "infra/$APP_NAME/database" "$ENVIRONMENT" +DB_ROLE_MANAGER_FUNCTION_NAME=$(terraform -chdir="infra/$APP_NAME/database" output -raw role_manager_function_name) echo "================================" echo "Creating/updating database users" @@ -27,7 +27,22 @@ echo " APP_NAME=$APP_NAME" echo " ENVIRONMENT=$ENVIRONMENT" echo echo "Invoking Lambda function: $DB_ROLE_MANAGER_FUNCTION_NAME" -aws lambda invoke --function-name $DB_ROLE_MANAGER_FUNCTION_NAME --no-cli-pager response.json +CLI_RESPONSE=$(aws lambda invoke \ + --function-name "$DB_ROLE_MANAGER_FUNCTION_NAME" \ + --no-cli-pager \ + --log-type Tail \ + --output json \ + response.json) + +# Print logs out (they are returned base64 encoded) +echo "$CLI_RESPONSE" | jq -r '.LogResult' | base64 --decode +echo echo "Lambda function response:" cat response.json rm response.json + +# Exit with nonzero status if function failed +FUNCTION_ERROR=$(echo "$CLI_RESPONSE" | jq -r '.FunctionError') +if [ "$FUNCTION_ERROR" != "null" ]; then + exit 1 +fi diff --git a/bin/create-tfbackend.sh b/bin/create-tfbackend.sh index 5381134c8..b28877580 100755 --- a/bin/create-tfbackend.sh +++ b/bin/create-tfbackend.sh @@ -39,19 +39,19 @@ echo " BACKEND_CONFIG_NAME=$BACKEND_CONFIG_NAME" echo # Create output file from example file -cp infra/example.s3.tfbackend $BACKEND_CONFIG_FILE +cp infra/example.s3.tfbackend "$BACKEND_CONFIG_FILE" # Replace the placeholder values -sed -i.bak "s/<TF_STATE_BUCKET_NAME>/$TF_STATE_BUCKET_NAME/g" $BACKEND_CONFIG_FILE -sed -i.bak "s|<TF_STATE_KEY>|$TF_STATE_KEY|g" $BACKEND_CONFIG_FILE -sed -i.bak "s/<TF_LOCKS_TABLE_NAME>/$TF_LOCKS_TABLE_NAME/g" $BACKEND_CONFIG_FILE -sed -i.bak "s/<REGION>/$REGION/g" $BACKEND_CONFIG_FILE +sed -i.bak "s/<TF_STATE_BUCKET_NAME>/$TF_STATE_BUCKET_NAME/g" "$BACKEND_CONFIG_FILE" +sed -i.bak "s|<TF_STATE_KEY>|$TF_STATE_KEY|g" "$BACKEND_CONFIG_FILE" +sed -i.bak "s/<TF_LOCKS_TABLE_NAME>/$TF_LOCKS_TABLE_NAME/g" "$BACKEND_CONFIG_FILE" +sed -i.bak "s/<REGION>/$REGION/g" "$BACKEND_CONFIG_FILE" # Remove the backup file created by sed -rm $BACKEND_CONFIG_FILE.bak +rm "$BACKEND_CONFIG_FILE.bak" echo "Created file: $BACKEND_CONFIG_FILE" echo "------------------ file contents 
------------------" -cat $BACKEND_CONFIG_FILE +cat "$BACKEND_CONFIG_FILE" echo "----------------------- end -----------------------" diff --git a/bin/current-account-config-name.sh b/bin/current-account-config-name.sh index 7e9a2eaf3..f7bd3b1c5 100755 --- a/bin/current-account-config-name.sh +++ b/bin/current-account-config-name.sh @@ -4,4 +4,8 @@ # infra/accounts that matches "<ACCOUNT_NAME>.<ACCOUNT_ID>.s3.tfbackend". # The config name is "<ACCOUNT_NAME>.<ACCOUNT_ID>"" set -euo pipefail -ls -1 infra/accounts | grep "$(./bin/current-account-id.sh)" | grep s3.tfbackend | sed 's/.s3.tfbackend//' +CURRENT_ACCOUNT_ID=$(./bin/current-account-id.sh) +BACKEND_CONFIG_FILE_PATH=$(ls -1 infra/accounts/*."$CURRENT_ACCOUNT_ID".s3.tfbackend) +BACKEND_CONFIG_FILE=$(basename "$BACKEND_CONFIG_FILE_PATH") +BACKEND_CONFIG_NAME="${BACKEND_CONFIG_FILE/.s3.tfbackend/}" +echo "$BACKEND_CONFIG_NAME" diff --git a/bin/deploy-release.sh b/bin/deploy-release.sh index 00ad10262..a5cf22197 100755 --- a/bin/deploy-release.sh +++ b/bin/deploy-release.sh @@ -15,8 +15,6 @@ echo " ENVIRONMENT=$ENVIRONMENT" echo echo "Starting $APP_NAME deploy of $IMAGE_TAG to $ENVIRONMENT" -MODULE_DIR="infra/$APP_NAME/service" -CONFIG_NAME="$ENVIRONMENT" -TF_CLI_ARGS_apply="-input=false -auto-approve -var=image_tag=$IMAGE_TAG" ./bin/terraform-init-and-apply.sh $MODULE_DIR $CONFIG_NAME +TF_CLI_ARGS_apply="-input=false -auto-approve -var=image_tag=$IMAGE_TAG" make infra-update-app-service APP_NAME="$APP_NAME" ENVIRONMENT="$ENVIRONMENT" echo "Completed $APP_NAME deploy of $IMAGE_TAG to $ENVIRONMENT" diff --git a/bin/publish-release.sh b/bin/publish-release.sh index f5a814202..ce143efd2 100755 --- a/bin/publish-release.sh +++ b/bin/publish-release.sh @@ -15,13 +15,13 @@ echo " IMAGE_NAME=$IMAGE_NAME" echo " IMAGE_TAG=$IMAGE_TAG" # Need to init module when running in CD since GitHub actions does a fresh checkout of repo -terraform -chdir=infra/$APP_NAME/app-config init > /dev/null -terraform -chdir=infra/$APP_NAME/app-config refresh > /dev/null -IMAGE_REPOSITORY_NAME=$(terraform -chdir=infra/$APP_NAME/app-config output -raw image_repository_name) +terraform -chdir="infra/$APP_NAME/app-config" init > /dev/null +terraform -chdir="infra/$APP_NAME/app-config" refresh > /dev/null +IMAGE_REPOSITORY_NAME=$(terraform -chdir="infra/$APP_NAME/app-config" output -raw image_repository_name) REGION=$(./bin/current-region.sh) -read -r IMAGE_REGISTRY_ID IMAGE_REPOSITORY_URL <<< $(aws ecr describe-repositories --repository-names $IMAGE_REPOSITORY_NAME --query "repositories[0].[registryId,repositoryUri]" --output text) -IMAGE_REGISTRY=$IMAGE_REGISTRY_ID.dkr.ecr.$REGION.amazonaws.com +read -r IMAGE_REGISTRY_ID IMAGE_REPOSITORY_URL <<< "$(aws ecr describe-repositories --repository-names "$IMAGE_REPOSITORY_NAME" --query "repositories[0].[registryId,repositoryUri]" --output text)" +IMAGE_REGISTRY="$IMAGE_REGISTRY_ID.dkr.ecr.$REGION.amazonaws.com" echo "Build repository info:" echo " REGION=$REGION" @@ -30,17 +30,17 @@ echo " IMAGE_REPOSITORY_NAME=$IMAGE_REPOSITORY_NAME" echo " IMAGE_REPOSITORY_URL=$IMAGE_REPOSITORY_URL" echo echo "Authenticating Docker with ECR" -aws ecr get-login-password --region $REGION \ - | docker login --username AWS --password-stdin $IMAGE_REGISTRY +aws ecr get-login-password --region "$REGION" \ + | docker login --username AWS --password-stdin "$IMAGE_REGISTRY" echo echo "Check if tag has already been published..." RESULT="" -RESULT=$(aws ecr describe-images --repository-name $IMAGE_REPOSITORY_NAME --image-ids imageTag=$IMAGE_TAG --region $REGION 2> /dev/null ) || true -if [ ! 
-z "$RESULT" ];then +RESULT=$(aws ecr describe-images --repository-name "$IMAGE_REPOSITORY_NAME" --image-ids "imageTag=$IMAGE_TAG" --region "$REGION" 2> /dev/null ) || true +if [ -n "$RESULT" ];then echo "Image with tag $IMAGE_TAG already published" exit 0 fi echo "New tag. Publishing image" -docker tag $IMAGE_NAME:$IMAGE_TAG $IMAGE_REPOSITORY_URL:$IMAGE_TAG -docker push $IMAGE_REPOSITORY_URL:$IMAGE_TAG +docker tag "$IMAGE_NAME:$IMAGE_TAG" "$IMAGE_REPOSITORY_URL:$IMAGE_TAG" +docker push "$IMAGE_REPOSITORY_URL:$IMAGE_TAG" diff --git a/bin/run-command.sh b/bin/run-command.sh index a7e336846..e19c4626e 100755 --- a/bin/run-command.sh +++ b/bin/run-command.sh @@ -15,6 +15,9 @@ # ----------------------------------------------------------------------------- set -euo pipefail +# TODO: Add ability to change task IAM Role. Part 3 of multipart update https://github.com/navapbc/template-infra/issues/354#issuecomment-1693973424 +# TODO: Change to keyword arguments. Part 3 of multipart update https://github.com/navapbc/template-infra/issues/354#issuecomment-1693973424 + APP_NAME="$1" ENVIRONMENT="$2" COMMAND="$3" @@ -31,24 +34,28 @@ echo " ENVIRONMENT_VARIABLES=$ENVIRONMENT_VARIABLES" echo # Use the same cluster, task definition, and network configuration that the application service uses -CLUSTER_NAME=$(terraform -chdir=infra/$APP_NAME/service output -raw service_cluster_name) -SERVICE_NAME=$(terraform -chdir=infra/$APP_NAME/service output -raw service_name) +CLUSTER_NAME=$(terraform -chdir="infra/$APP_NAME/service" output -raw service_cluster_name) +SERVICE_NAME=$(terraform -chdir="infra/$APP_NAME/service" output -raw service_name) + +# Get the log group and log stream prefix so that we can print out the ECS task's logs after running the task +LOG_GROUP=$(terraform -chdir="infra/$APP_NAME/service" output -raw application_log_group) +LOG_STREAM_PREFIX=$(terraform -chdir="infra/$APP_NAME/service" output -raw application_log_stream_prefix) -SERVICE_TASK_DEFINITION_ARN=$(aws ecs describe-services --no-cli-pager --cluster $CLUSTER_NAME --services $SERVICE_NAME --query "services[0].taskDefinition" --output text) +SERVICE_TASK_DEFINITION_ARN=$(aws ecs describe-services --no-cli-pager --cluster "$CLUSTER_NAME" --services "$SERVICE_NAME" --query "services[0].taskDefinition" --output text) # For subsequent commands, use the task definition family rather than the service's task definition ARN # because in the case of migrations, we'll deploy a new task definition revision before updating the # service, so the service will be using an old revision, but we want to use the latest revision. -TASK_DEFINITION_FAMILY=$(aws ecs describe-task-definition --no-cli-pager --task-definition $SERVICE_TASK_DEFINITION_ARN --query "taskDefinition.family" --output text) +TASK_DEFINITION_FAMILY=$(aws ecs describe-task-definition --no-cli-pager --task-definition "$SERVICE_TASK_DEFINITION_ARN" --query "taskDefinition.family" --output text) -NETWORK_CONFIG=$(aws ecs describe-services --no-cli-pager --cluster $CLUSTER_NAME --services $SERVICE_NAME --query "services[0].networkConfiguration") +NETWORK_CONFIG=$(aws ecs describe-services --no-cli-pager --cluster "$CLUSTER_NAME" --services "$SERVICE_NAME" --query "services[0].networkConfiguration") CURRENT_REGION=$(./bin/current-region.sh) AWS_USER_ID=$(aws sts get-caller-identity --no-cli-pager --query UserId --output text) ENVIRONMENT_OVERRIDES="" -if [ ! 
-z "$ENVIRONMENT_VARIABLES" ]; then +if [ -n "$ENVIRONMENT_VARIABLES" ]; then ENVIRONMENT_OVERRIDES="\"environment\": $ENVIRONMENT_VARIABLES," fi -CONTAINER_NAME=$(aws ecs describe-task-definition --task-definition $TASK_DEFINITION_FAMILY --query "taskDefinition.containerDefinitions[0].name" --output text) +CONTAINER_NAME=$(aws ecs describe-task-definition --task-definition "$TASK_DEFINITION_FAMILY" --query "taskDefinition.containerDefinitions[0].name" --output text) OVERRIDES=$(cat << EOF { "containerOverrides": [ @@ -62,35 +69,106 @@ OVERRIDES=$(cat << EOF EOF ) +TASK_START_TIME=$(date +%s) +TASK_START_TIME_MILLIS=$((TASK_START_TIME * 1000)) + AWS_ARGS=( ecs run-task - --region=$CURRENT_REGION - --cluster=$CLUSTER_NAME - --task-definition=$TASK_DEFINITION_FAMILY - --started-by=$AWS_USER_ID + --region="$CURRENT_REGION" + --cluster="$CLUSTER_NAME" + --task-definition="$TASK_DEFINITION_FAMILY" + --started-by="$AWS_USER_ID" --launch-type=FARGATE --platform-version=1.4.0 --network-configuration "$NETWORK_CONFIG" --overrides "$OVERRIDES" ) -echo "Running AWS CLI command" +echo "::group::Running AWS CLI command" printf " ... %s\n" "${AWS_ARGS[@]}" -echo TASK_ARN=$(aws --no-cli-pager "${AWS_ARGS[@]}" --query "tasks[0].taskArn" --output text) +echo "::endgroup::" +echo + +# Get the task id by extracting the substring after the last '/' since the task ARN is of +# the form "arn:aws:ecs:::task//" +ECS_TASK_ID=$(basename "$TASK_ARN") + +# The log stream has the format "prefix-name/container-name/ecs-task-id" +# See https://docs.aws.amazon.com/AmazonECS/latest/userguide/using_awslogs.html +LOG_STREAM="$LOG_STREAM_PREFIX/$CONTAINER_NAME/$ECS_TASK_ID" + +# Wait for log stream to be created before fetching the logs. +# The reason we don't use the `aws ecs wait tasks-running` command is because +# that command can fail on short-lived tasks. In particular, the command polls +# every 6 seconds with describe-tasks until tasks[].lastStatus is RUNNING. A +# task that completes quickly can go from PENDING to STOPPED, causing the wait +# command to error out. +echo "Waiting for log stream to be created" +echo " LOG_STREAM=$LOG_STREAM" +while true; do + IS_LOG_STREAM_CREATED=$(aws logs describe-log-streams --no-cli-pager --log-group-name "$LOG_GROUP" --query "length(logStreams[?logStreamName==\`$LOG_STREAM\`])") + if [ "$IS_LOG_STREAM_CREATED" == "1" ]; then + break + fi + sleep 5 + echo -n "." +done +echo +echo + +# Tail logs until task stops using a loop that polls for new logs. +# The reason why we don't use `aws logs tail` is because that command is meant +# for interactive use. In particular, it will wait forever for new logs, even +# after a task stops, until the user hits Ctrl+C. And the reason why we don't +# wait until the task completes first before fetching logs is so that we can +# show logs in near real-time, which can be useful for long running tasks. 
+echo "::group::Tailing logs until task stops" +echo " LOG_GROUP=$LOG_GROUP" +echo " LOG_STREAM=$LOG_STREAM" +echo " TASK_START_TIME_MILLIS=$TASK_START_TIME_MILLIS" +# Initialize the logs start time filter to the time we started the task +LOGS_START_TIME_MILLIS=$TASK_START_TIME_MILLIS +while true; do + # Print logs with human readable timestamps by fetching the log events as JSON + # then transforming them afterwards using jq + LOG_EVENTS=$(aws logs get-log-events \ + --no-cli-pager \ + --log-group-name "$LOG_GROUP" \ + --log-stream-name "$LOG_STREAM" \ + --start-time "$LOGS_START_TIME_MILLIS" \ + --start-from-head \ + --no-paginate \ + --output json) + # Divide timestamp by 1000 since AWS timestamps are in milliseconds + echo "$LOG_EVENTS" | jq -r '.events[] | ((.timestamp / 1000 | strftime("%Y-%m-%d %H:%M:%S")) + "\t" + .message)' + + # If the task stopped, then stop tailing logs + LAST_TASK_STATUS=$(aws ecs describe-tasks --cluster "$CLUSTER_NAME" --tasks "$TASK_ARN" --query "tasks[0].containers[?name=='$CONTAINER_NAME'].lastStatus" --output text) + if [ "$LAST_TASK_STATUS" = "STOPPED" ]; then + break + fi + + # If there were new logs printed, then update the logs start time filter + # to be the last log's timestamp + 1 + LAST_LOG_TIMESTAMP=$(echo "$LOG_EVENTS" | jq -r '.events[-1].timestamp' ) + if [ "$LAST_LOG_TIMESTAMP" != "null" ]; then + LOGS_START_TIME_MILLIS=$((LAST_LOG_TIMESTAMP + 1)) + fi -echo "Waiting for task to stop" -echo " TASK_ARN=$TASK_ARN" + # Give the application a moment to generate more logs before fetching again + sleep 1 +done +echo "::endgroup::" echo -aws ecs wait tasks-stopped --region $CURRENT_REGION --cluster $CLUSTER_NAME --tasks $TASK_ARN -CONTAINER_EXIT_CODE=$(aws ecs describe-tasks --cluster $CLUSTER_NAME --tasks $TASK_ARN --query "tasks[0].containers[?name=='$CONTAINER_NAME'].exitCode" --output text) +CONTAINER_EXIT_CODE=$(aws ecs describe-tasks --cluster "$CLUSTER_NAME" --tasks "$TASK_ARN" --query "tasks[0].containers[?name=='$CONTAINER_NAME'].exitCode" --output text) if [[ "$CONTAINER_EXIT_CODE" == "null" || "$CONTAINER_EXIT_CODE" != "0" ]]; then echo "Task failed" >&2 # Although we could avoid extra calls to AWS CLI if we just got the full JSON response from # `aws ecs describe-tasks` and parsed it with jq, we are trying to avoid unnecessary dependencies. - CONTAINER_STATUS=$(aws ecs describe-tasks --cluster $CLUSTER_NAME --tasks $TASK_ARN --query "tasks[0].containers[?name=='$CONTAINER_NAME'].[lastStatus,exitCode,reason]" --output text) - TASK_STATUS=$(aws ecs describe-tasks --cluster $CLUSTER_NAME --tasks $TASK_ARN --query "tasks[0].[lastStatus,stopCode,stoppedAt,stoppedReason]" --output text) + CONTAINER_STATUS=$(aws ecs describe-tasks --cluster "$CLUSTER_NAME" --tasks "$TASK_ARN" --query "tasks[0].containers[?name=='$CONTAINER_NAME'].[lastStatus,exitCode,reason]" --output text) + TASK_STATUS=$(aws ecs describe-tasks --cluster "$CLUSTER_NAME" --tasks "$TASK_ARN" --query "tasks[0].[lastStatus,stopCode,stoppedAt,stoppedReason]" --output text) echo "Container status (lastStatus, exitCode, reason): $CONTAINER_STATUS" >&2 echo "Task status (lastStatus, stopCode, stoppedAt, stoppedReason): $TASK_STATUS" >&2 diff --git a/bin/run-database-migrations.sh b/bin/run-database-migrations.sh index c48d79348..046ced9ce 100755 --- a/bin/run-database-migrations.sh +++ b/bin/run-database-migrations.sh @@ -12,6 +12,9 @@ # ENVIRONMENT (required) – the name of the application environment (e.g. 
dev, # staging, prod) # ----------------------------------------------------------------------------- + +# TODO: Use migrator role instead of general role. Part 3 of multipart update https://github.com/navapbc/template-infra/issues/354#issuecomment-1693973424 + set -euo pipefail APP_NAME="$1" @@ -28,26 +31,24 @@ echo " ENVIRONMENT=$ENVIRONMENT" echo echo "Step 0. Check if app has a database" -terraform -chdir=infra/$APP_NAME/app-config init > /dev/null -terraform -chdir=infra/$APP_NAME/app-config refresh > /dev/null -HAS_DATABASE=$(terraform -chdir=infra/$APP_NAME/app-config output -raw has_database) -if [ $HAS_DATABASE = "false" ]; then +terraform -chdir="infra/$APP_NAME/app-config" init > /dev/null +terraform -chdir="infra/$APP_NAME/app-config" refresh > /dev/null +HAS_DATABASE=$(terraform -chdir="infra/$APP_NAME/app-config" output -raw has_database) +if [ "$HAS_DATABASE" = "false" ]; then echo "Application does not have a database, no migrations to run" exit 0 fi -DB_MIGRATOR_USER=$(terraform -chdir=infra/$APP_NAME/app-config output -json environment_configs | jq -r ".$ENVIRONMENT.database_config.migrator_username") +DB_MIGRATOR_USER=$(terraform -chdir="infra/$APP_NAME/app-config" output -json environment_configs | jq -r ".$ENVIRONMENT.database_config.migrator_username") echo echo "::group::Step 1. Update task definition without updating service" -MODULE_DIR="infra/$APP_NAME/service" -CONFIG_NAME="$ENVIRONMENT" -TF_CLI_ARGS_apply="-input=false -auto-approve -target=module.service.aws_ecs_task_definition.app -var=image_tag=$IMAGE_TAG" ./bin/terraform-init-and-apply.sh $MODULE_DIR $CONFIG_NAME +TF_CLI_ARGS_apply="-input=false -auto-approve -target=module.service.aws_ecs_task_definition.app -var=image_tag=$IMAGE_TAG" make infra-update-app-service APP_NAME="$APP_NAME" ENVIRONMENT="$ENVIRONMENT" echo "::endgroup::" echo -echo '::group::Step 2. Run "db-migrate" command' +echo 'Step 2. Run "db-migrate" command' COMMAND='["db-migrate"]' @@ -57,5 +58,4 @@ ENVIRONMENT_VARIABLES=$(cat << EOF EOF ) -./bin/run-command.sh $APP_NAME $ENVIRONMENT "$COMMAND" "$ENVIRONMENT_VARIABLES" -echo "::endgroup::" +./bin/run-command.sh "$APP_NAME" "$ENVIRONMENT" "$COMMAND" "$ENVIRONMENT_VARIABLES" diff --git a/bin/set-up-current-account.sh b/bin/set-up-current-account.sh index eed1532b8..b07ce4cb1 100755 --- a/bin/set-up-current-account.sh +++ b/bin/set-up-current-account.sh @@ -24,8 +24,8 @@ set -euo pipefail ACCOUNT_NAME=$1 -ACCOUNT_ID="$(./bin/current-account-id.sh)" -REGION="$(./bin/current-region.sh)" +ACCOUNT_ID=$(./bin/current-account-id.sh) +REGION=$(./bin/current-region.sh) # Get project name terraform -chdir=infra/project-config refresh > /dev/null @@ -53,10 +53,10 @@ echo "Creating bucket: $TF_STATE_BUCKET_NAME" # For creating buckets in us-east-1, LocationConstraint cannot be set # See https://docs.aws.amazon.com/cli/latest/reference/s3api/create-bucket.html CREATE_BUCKET_CONFIGURATION="" -if [ $REGION != "us-east-1" ]; then +if [ "$REGION" != "us-east-1" ]; then CREATE_BUCKET_CONFIGURATION="--create-bucket-configuration LocationConstraint=$REGION" fi -aws s3api create-bucket --bucket $TF_STATE_BUCKET_NAME --region $REGION $CREATE_BUCKET_CONFIGURATION > /dev/null +aws s3api create-bucket --bucket "$TF_STATE_BUCKET_NAME" --region "$REGION" "$CREATE_BUCKET_CONFIGURATION" > /dev/null echo echo "----------------------------------" echo "Creating rest of account resources" @@ -80,7 +80,7 @@ terraform init \ # But first check if the bucket already exists in the state file. 
If we are # re-running account setup and the bucket already exists then skip the import step if ! terraform state list module.backend.aws_s3_bucket.tf_state; then - terraform import module.backend.aws_s3_bucket.tf_state $TF_STATE_BUCKET_NAME + terraform import module.backend.aws_s3_bucket.tf_state "$TF_STATE_BUCKET_NAME" fi terraform apply \ @@ -91,4 +91,4 @@ cd - MODULE_DIR=infra/accounts BACKEND_CONFIG_NAME="$ACCOUNT_NAME.$ACCOUNT_ID" -./bin/create-tfbackend.sh $MODULE_DIR $BACKEND_CONFIG_NAME $TF_STATE_KEY +./bin/create-tfbackend.sh "$MODULE_DIR" "$BACKEND_CONFIG_NAME" "$TF_STATE_KEY" diff --git a/bin/terraform-apply.sh b/bin/terraform-apply.sh index 013636a67..dc6f8fc0c 100755 --- a/bin/terraform-apply.sh +++ b/bin/terraform-apply.sh @@ -28,14 +28,14 @@ CONFIG_NAME="$2" # 1. Set working directory to the terraform root module directory -cd $MODULE_DIR +cd "$MODULE_DIR" # 2. Run terraform apply with the tfvars file (if it exists) that has the same name as the backend config file TF_VARS_FILE="$CONFIG_NAME.tfvars" TF_VARS_OPTION="" -if [ -f $TF_VARS_FILE ]; then +if [ -f "$TF_VARS_FILE" ]; then TF_VARS_OPTION="-var-file=$TF_VARS_FILE" fi -terraform apply $TF_VARS_OPTION +terraform apply "$TF_VARS_OPTION" diff --git a/bin/terraform-init-and-apply.sh b/bin/terraform-init-and-apply.sh index 8a01de894..70896ceb9 100755 --- a/bin/terraform-init-and-apply.sh +++ b/bin/terraform-init-and-apply.sh @@ -18,6 +18,6 @@ CONFIG_NAME="$2" # For example if a backend config file is named "myaccount.s3.tfbackend", then the CONFIG_NAME would be "myaccount" # MODULE_DIR – the location of the root module to initialize and apply -./bin/terraform-init.sh $MODULE_DIR $CONFIG_NAME +./bin/terraform-init.sh "$MODULE_DIR" "$CONFIG_NAME" -./bin/terraform-apply.sh $MODULE_DIR $CONFIG_NAME +./bin/terraform-apply.sh "$MODULE_DIR" "$CONFIG_NAME" diff --git a/bin/terraform-init.sh b/bin/terraform-init.sh index 845105f86..7cc22fdfe 100755 --- a/bin/terraform-init.sh +++ b/bin/terraform-init.sh @@ -21,7 +21,7 @@ CONFIG_NAME="$2" BACKEND_CONFIG_FILE="$CONFIG_NAME.s3.tfbackend" # Note that the BACKEND_CONFIG_FILE path is relative to MODULE_DIR, not the current working directory -terraform -chdir=$MODULE_DIR init \ +terraform -chdir="$MODULE_DIR" init \ -input=false \ -reconfigure \ - -backend-config=$BACKEND_CONFIG_FILE + -backend-config="$BACKEND_CONFIG_FILE" diff --git a/bin/verify-checks.sh b/bin/verify-checks.sh index 07e4b59dc..0ab6f70c1 100755 --- a/bin/verify-checks.sh +++ b/bin/verify-checks.sh @@ -8,7 +8,7 @@ fi TIMEOUT_MINUTES=5 SLEEP_SECONDS=15 -MAX_ITERATIONS=$(( ($TIMEOUT_MINUTES * 60) / $SLEEP_SECONDS )) +MAX_ITERATIONS=$(( (TIMEOUT_MINUTES * 60) / SLEEP_SECONDS )) iteration=0 @@ -16,10 +16,9 @@ echo "Starting check suite status polling for SHA: $SHA." echo "Maximum iterations: $MAX_ITERATIONS. Time per iteration: $SLEEP_SECONDS seconds." # Fetch the current check_suite_id -CURRENT_CHECK_SUITE_ID=$(gh api repos/$REPO/actions/runs/$CURRENT_GITHUB_RUN_ID --jq '.check_suite_id') - +CURRENT_CHECK_SUITE_ID=$(gh api repos/"$REPO"/actions/runs/"$CURRENT_GITHUB_RUN_ID" --jq '.check_suite_id') while :; do - iteration=$(( $iteration + 1 )) + iteration=$(( iteration + 1 )) echo "Iteration: $iteration of $MAX_ITERATIONS." if (( iteration > MAX_ITERATIONS )); then @@ -28,13 +27,13 @@ while :; do fi echo "Fetching check suite details... 
<3" - CHECK_SUITE_DETAILS=$(gh api repos/$REPO/commits/$SHA/check-suites) + CHECK_SUITE_DETAILS=$(gh api repos/"$REPO"/commits/"$SHA"/check-suites) total_count=$(echo "${CHECK_SUITE_DETAILS}" | jq '.total_count') all_suites_passed=true echo "Total number of check suites: $total_count" - for (( i=0; i<$total_count; i++ )); do + for (( i=0; i `APP_NAME` needs to be the name of the application folder within the `infra` folder. It defaults to `app`. `ENVIRONMENT` needs to be the name of the environment you are creating. This will create a file called `.s3.tfbackend` in the `infra/app/service` module directory. -Depending on the value of `has_database` in the [app-config module](/infra/app/app-config/main.tf), the application will be configured with or without database access. +Depending on the value of `has_database` in the [app-config module](/infra/frontend/app-config/main.tf), the application will be configured with or without database access. ## 2. Build and publish the application to the application build repository @@ -35,10 +37,10 @@ There are two ways to do this: 1. Trigger the "Build and Publish" workflow from your repo's GitHub Actions tab. This option requires that the `role-to-assume` GitHub workflow variable has already been setup as part of the overall infra account setup process. 1. Alternatively, run the following from the root directory. This option can take much longer than the GitHub workflow, depending on your machine's architecture. - ```bash - make release-build - make release-publish - ``` + ```bash + make release-build + make release-publish + ``` Copy the image tag name that was published. You'll need this in the next step. @@ -49,3 +51,8 @@ Now run the following commands to create the resources, using the image tag that ```bash TF_CLI_ARGS_apply="-var=image_tag=" make infra-update-app-service APP_NAME=app ENVIRONMENT= ``` + +## 4. Configure monitoring alerts + +Configure email alerts, external incident management service integration and additional Cloudwatch Alerts. +[Configure monitoring module](./set-up-monitoring-alerts.md) diff --git a/documentation/infra/set-up-aws-account.md b/documentation/infra/set-up-aws-account.md index a65abcf40..76388b9f8 100644 --- a/documentation/infra/set-up-aws-account.md +++ b/documentation/infra/set-up-aws-account.md @@ -7,8 +7,8 @@ The AWS account setup process will: ## Prerequisites -* You'll need to have [set up infrastructure tools](./set-up-infrastructure-tools.md), like Terraform, AWS CLI, and AWS authentication. -* You'll also need to make sure the [project is configured](/infra/project-config/main.tf). +- You'll need to have [set up infrastructure tools](./set-up-infrastructure-tools.md), like Terraform, AWS CLI, and AWS authentication. +- You'll also need to make sure the [project is configured](/infra/project-config/main.tf). ## Overview of Terraform backend management @@ -42,7 +42,7 @@ This command will create the S3 tfstate bucket and the GitHub OIDC provider. It ### 3. Update the account names map in app-config -In [app-config/main.tf](/infra/app/app-config/main.tf), update the `account_names_by_environment` config to reflect the account name you chose. +In [app-config/main.tf](/infra/frontend/app-config/main.tf), update the `account_names_by_environment` config to reflect the account name you chose. 
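If you are unsure which account names have already been configured, here is a minimal sketch for listing them, assuming the `<account-name>.<account-id>.s3.tfbackend` naming convention that `bin/set-up-current-account.sh` uses when it writes backend configs into `infra/accounts`:

```bash
# List configured account names by stripping the ".<account-id>.s3.tfbackend" suffix
for backend_config in infra/accounts/*.s3.tfbackend; do
  basename "$backend_config" | cut -d. -f1
done
```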
## Making changes to the account diff --git a/documentation/infra/set-up-database.md b/documentation/infra/set-up-database.md index 00ce59587..9d90933e5 100644 --- a/documentation/infra/set-up-database.md +++ b/documentation/infra/set-up-database.md @@ -46,11 +46,7 @@ The Lambda function's response should describe the resulting PostgreSQL roles an ```json { - "roles": [ - "postgres", - "migrator", - "app" - ], + "roles": ["postgres", "migrator", "app"], "roles_with_groups": { "rds_superuser": "rds_password", "pg_monitor": "pg_read_all_settings,pg_read_all_stats,pg_stat_scan_tables", @@ -73,10 +69,16 @@ Before creating migrations that create tables, first create a migration that inc ``` ALTER DEFAULT PRIVILEGES GRANT ALL ON TABLES TO app ``` -This will cause all future tables created by the `migrator` user to automatically be accessible by the `app` user. See the [Postgres docs on ALTER DEFAULT PRIVILEGES](https://www.postgresql.org/docs/current/sql-alterdefaultprivileges.html) for more info. As an example see the example app's migrations file [migrations.sql](/app/migrations.sql). +This will cause all future tables created by the `migrator` user to automatically be accessible by the `app` user. See the [Postgres docs on ALTER DEFAULT PRIVILEGES](https://www.postgresql.org/docs/current/sql-alterdefaultprivileges.html) for more info. Why is this needed? The `migrator` role will be used by the migration task to run database migrations (creating tables, altering tables, etc.), while the `app` role will be used by the web service to access the database. Moreover, in Postgres, new tables won't automatically be accessible by roles other than the creator unless specifically granted, even if those other roles have usage access to the schema that the tables are created in. In other words, if the `migrator` user creates a new table `foo` in the `app` schema, the `app` user will not automatically be able to access it. +## 4. Check that database roles have been configured properly + +```bash +make infra-check-app-database-roles APP_NAME=app ENVIRONMENT= +``` + ## Set up application environments Once you set up the deployment process, you can proceed to [set up the application service](./set-up-app-env.md) diff --git a/documentation/infra/set-up-infrastructure-tools.md b/documentation/infra/set-up-infrastructure-tools.md index ecc82a6b1..375a1c141 100644 --- a/documentation/infra/set-up-infrastructure-tools.md +++ b/documentation/infra/set-up-infrastructure-tools.md @@ -36,12 +36,21 @@ The [Go programming language](https://go.dev/dl/) is required to run [Terratest] ### Install GitHub CLI -The [GitHub CLI](https://cli.github.com/) is useful for automating certain operations for GitHub such as with GitHub actions. +The [GitHub CLI](https://cli.github.com/) is useful for automating certain GitHub operations, such as from GitHub Actions workflows. It is also needed to run [check-github-actions-auth.sh](/bin/check-github-actions-auth.sh). ```bash brew install gh ``` +### Install linters + +[Shellcheck](https://github.com/koalaman/shellcheck) and [actionlint](https://github.com/rhysd/actionlint) are optional utilities for running infrastructure linters locally. + +```bash +brew install shellcheck +brew install actionlint +``` + ## AWS Authentication In order for Terraform to authenticate with your accounts you will need to configure your aws credentials using the AWS CLI or manually create your config and credentials file.
If you need to manage multiple credentials or create named profiles for use with different environments you can add the `--profile` option. diff --git a/documentation/infra/set-up-monitoring-alerts.md b/documentation/infra/set-up-monitoring-alerts.md new file mode 100644 index 000000000..4a249a79a --- /dev/null +++ b/documentation/infra/set-up-monitoring-alerts.md @@ -0,0 +1,29 @@ +# Set up monitoring notifications + +## Overview + +The monitoring module defines metric-based alerting policies that provide awareness of issues with the cloud application. The module supports integration with external incident management tools like Splunk-On-Call or Pagerduty. It also supports email alerts. + +### Set up email alerts + +1. Add the `email_alerts_subscription_list` variable to the monitoring module call in the service layer. + +For example: +``` +module "monitoring" { + source = "../../modules/monitoring" + email_alerts_subscription_list = ["email1@email.com", "email2@email.com"] + ... +} +``` +2. Run `make infra-update-app-service APP_NAME= ENVIRONMENT=` to apply the changes to each environment. +When any of the alerts described by the module is triggered, a notification will be sent to every email address listed in `email_alerts_subscription_list`. + +### Set up external incident management service integration + +1. Set `has_incident_management_service = true` in app-config/main.tf. +2. Get the integration URL for the incident management service and store it in AWS SSM Parameter Store by running the following command for each environment: +``` +make infra-configure-monitoring-secrets APP_NAME= ENVIRONMENT= URL= +``` +3. Run `make infra-update-app-service APP_NAME= ENVIRONMENT=` to apply the changes to each environment. diff --git a/documentation/infra/set-up-network.md b/documentation/infra/set-up-network.md new file mode 100644 index 000000000..4d5c3f0f6 --- /dev/null +++ b/documentation/infra/set-up-network.md @@ -0,0 +1,27 @@ +# Set up network + +The network setup process will: + +1. Configure and deploy network resources needed by other modules. If your application has a database, it will create VPC endpoints for the AWS services needed by the database layer and a security group to contain those VPC endpoints. + +## Requirements + +Before setting up the network you'll need to have: + +1. [Set up the AWS account](./set-up-aws-account.md) + +## 1. Configure backend + +To create the tfbackend file for the new application environment, run + +```bash +make infra-configure-network +``` + +## 2. Create network resources + +Now run the following commands to create the resources. Review the Terraform plan before confirming "yes" to apply the changes. + +```bash +make infra-update-network +``` diff --git a/documentation/infra/vulnerability-management.md b/documentation/infra/vulnerability-management.md index 8221ccbfc..8a56e8d81 100644 --- a/documentation/infra/vulnerability-management.md +++ b/documentation/infra/vulnerability-management.md @@ -31,16 +31,16 @@ The workflow will run whenever there is a push to a PR or when merged to main if ### Hadolint -The trivy scanner allows you to ignore or safelist certain findings, which can be specified in the [.hadolint.yaml](/.hadolint.yaml) file. There is a template file here that you can use in your repo. +The hadolint scanner allows you to ignore or safelist certain findings, which can be specified in the [.hadolint.yaml](../../.hadolint.yaml) file. There is a template file here that you can use in your repo.
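If you want to exercise the Hadolint safelist locally before pushing, here is a minimal sketch; the Dockerfile path is hypothetical, and it assumes hadolint is installed (e.g. via brew, alongside the other optional linters):

```bash
# Lint a Dockerfile against the repo's safelist; hadolint also picks up
# .hadolint.yaml automatically when run from the repository root.
hadolint --config .hadolint.yaml api/Dockerfile
```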
### Trivy -The trivy scanner allows you to ignore or safelist certain findings, which can be specified in the [.trivyignore](/.trivyignore) file. There is a template file here that you can use in your repo. +The trivy scanner allows you to ignore or safelist certain findings, which can be specified in the [.trivyignore](../../.trivyignore) file. There is a template file here that you can use in your repo. ### Anchore -The anchore scanner allows you to ignore or safelist certain findings, which can be specified in the [.grype.yml](/.grype.yml) file. There is a template file here that you can use in your repo. There are flags set to ignore findings that are in the state `not-fixed`, `wont-fix`, and `unknown`. +The anchore scanner allows you to ignore or safelist certain findings, which can be specified in the [.grype.yml](../../.grype.yml) file. There is a template file here that you can use in your repo. There are flags set to ignore findings that are in the state `not-fixed`, `wont-fix`, and `unknown`. ### Dockle -The dockle scanner action does not have the ability to use an ignore or safelist findings file, but is able to by specifying an allow file, or `DOCKLE_ACCEPT_FILES`, environmental variable. To get around this, there is a step before the dockle scan is ran to check for a file named [.dockleconfig](/.dockleconfig), and pipe it to the environmental variable if it exists. Note that this will not ignore finding types like the other scanner's ignore file, but ignore the file specified in the list +The dockle scanner action does not support an ignore or safelist file, but it can accept an allowlist of files through the `DOCKLE_ACCEPT_FILES` environment variable. To work around this, there is a step before the dockle scan runs that checks for a file named [.dockleconfig](../../.dockleconfig) and pipes its contents into that environment variable if it exists. Note that, unlike the other scanners' ignore files, this does not ignore finding types; it only ignores the specific files named in the list. diff --git a/infra/.gitignore b/infra/.gitignore index 0b7f907b8..2cafcb4d8 100644 --- a/infra/.gitignore +++ b/infra/.gitignore @@ -1,14 +1,6 @@ # Local .terraform metadata **/.terraform/* -# Ignore Terraform lock files -# As of Feb 2023, Terraform lock files, while well intentioned, have a tendency -# to get into a confusing state that requires recreating the lock file, which -# defeats the purpose. Moreover, lock files are per environment, which can make -# it difficult for people to upgrade dependencies (e.g. upgrade an AWS provider) -# across environments if certain environments are locked down (e.g. production). -.terraform.lock.hcl - # .tfstate files *.tfstate *.tfstate.* diff --git a/infra/accounts/.terraform.lock.hcl b/infra/accounts/.terraform.lock.hcl new file mode 100644 index 000000000..1124b43ff --- /dev/null +++ b/infra/accounts/.terraform.lock.hcl @@ -0,0 +1,25 @@ +# This file is maintained automatically by "terraform init". +# Manual edits may be lost in future updates.
+ +provider "registry.terraform.io/hashicorp/aws" { + version = "5.6.2" + constraints = "~> 5.6.0" + hashes = [ + "h1:lH9eN+oozDX4z/TvhoXg++5MIE6MQznSW5sXvqzXAVQ=", + "zh:25322d7e1f0054550357d5a03fe29168cc179421e5dcf44b28c25a99d8d6e4e7", + "zh:394aa5bff70003e76d1d33ef4fe37c4826918577cf339d35e56ae84d01e86765", + "zh:485b288bf95b5d3014903e386e8ee2d1182e507f746bc988458b9711c7df7171", + "zh:48cf69750681337d64df7e402116a6753a40b6702c49fc9232ff6621947d85af", + "zh:6ab11d052d681b5157e261b9dd9167482acffe2018fffd1204575e9bf6a08522", + "zh:882f22d0e6c16cd5a5f01a0ae817b1e75e928667d21d986b93a4ee74fa62c067", + "zh:9b12af85486a96aedd8d7984b0ff811a4b42e3d88dad1a3fb4c0b580d04fa425", + "zh:ac3403e3ab5c10869b23626467b919e3f010e7cae6e0acf8515e0cefab0dbff0", + "zh:b959a425c9be83838895e8626037656bf5db81397ad0078595d3b72fd1b816bc", + "zh:bf390951f21a5fe6b96b206c5496fda4d8b95823bd00d1c03a4a53dd215d882a", + "zh:c3534972986cd68a421359f07ab86631ffa8731606936276fce18ec8ae9045f4", + "zh:d4cf29d67ead2c5feb999c2882e5365bd4d04c115e98fb1639b747b682507fea", + "zh:dea669eea5bca9b57dae2975ec783d577d58a39eec769d1c9bd7fc4d50f241d0", + "zh:e7a82063d01eb2be3fd192afbad910150fe8054731db20c1b22c714d9391dbe5", + "zh:fdbbf96948e96dfed614ea4daa4f1706859122a3f978c42c37db8727cb55c94f", + ] +} diff --git a/infra/api/app-config/env-config/outputs.tf b/infra/api/app-config/env-config/outputs.tf new file mode 100644 index 000000000..2dea34db0 --- /dev/null +++ b/infra/api/app-config/env-config/outputs.tf @@ -0,0 +1,24 @@ +output "database_config" { + value = var.has_database ? { + region = var.default_region + cluster_name = "${var.app_name}-${var.environment}" + access_policy_name = "${var.app_name}-${var.environment}-db-access" + app_username = "app" + migrator_username = "migrator" + schema_name = var.app_name + app_access_policy_name = "${var.app_name}-${var.environment}-app-access" + migrator_access_policy_name = "${var.app_name}-${var.environment}-migrator-access" + } : null +} + +output "service_config" { + value = { + region = var.default_region + } +} + +output "incident_management_service_integration" { + value = var.has_incident_management_service ? { + integration_url_param_name = "/monitoring/${var.app_name}/${var.environment}/incident-management-integration-url" + } : null +} diff --git a/infra/api/app-config/env-config/variables.tf b/infra/api/app-config/env-config/variables.tf new file mode 100644 index 000000000..12b22c42e --- /dev/null +++ b/infra/api/app-config/env-config/variables.tf @@ -0,0 +1,21 @@ +variable "app_name" { + type = string +} + +variable "environment" { + description = "name of the application environment (e.g. 
dev, staging, prod)" + type = string +} + +variable "default_region" { + description = "default region for the project" + type = string +} + +variable "has_database" { + type = bool +} + +variable "has_incident_management_service" { + type = bool +} diff --git a/infra/api/app-config/main.tf b/infra/api/app-config/main.tf new file mode 100644 index 000000000..ea1723ddf --- /dev/null +++ b/infra/api/app-config/main.tf @@ -0,0 +1,63 @@ +locals { + app_name = "api" + environments = ["dev", "prod"] + project_name = module.project_config.project_name + image_repository_name = "${local.project_name}-${local.app_name}" + has_database = true + has_incident_management_service = false + + environment_configs = { for environment in local.environments : environment => module.env_config[environment] } + + build_repository_config = { + region = module.project_config.default_region + } + # Map from environment name to the account name for the AWS account that + # contains the resources for that environment. Resources that are shared + # across environments use the key "shared". + # The list of configured AWS accounts can be found in /infra/account + # by looking for the backend config files of the form: + # ..s3.tfbackend + # + # Projects/applications that use the same AWS account for all environments + # will refer to the same account for all environments. For example, if the + # project has a single account named "myaccount", then infra/accounts will + # have one tfbackend file myaccount.XXXXX.s3.tfbackend, and the + # account_names_by_environment map will look like: + # + # account_names_by_environment = { + # shared = "myaccount" + # dev = "myaccount" + # staging = "myaccount" + # prod = "myaccount" + # } + # + # Projects/applications that have separate AWS accounts for each environment + # might have a map that looks more like this: + # + # account_names_by_environment = { + # shared = "dev" + # dev = "dev" + # staging = "staging" + # prod = "prod" + # } + account_names_by_environment = { + shared = "grants-equity" + dev = "grants-equity" + prod = "grants-equity" + } +} + +module "project_config" { + source = "../../project-config" +} + +module "env_config" { + for_each = toset(local.environments) + + source = "./env-config" + app_name = local.app_name + default_region = module.project_config.default_region + environment = each.key + has_database = local.has_database + has_incident_management_service = local.has_incident_management_service +} diff --git a/infra/api/app-config/outputs.tf b/infra/api/app-config/outputs.tf new file mode 100644 index 000000000..26f012550 --- /dev/null +++ b/infra/api/app-config/outputs.tf @@ -0,0 +1,31 @@ +output "app_name" { + value = local.app_name +} + +output "account_names_by_environment" { + value = local.account_names_by_environment +} + +output "environments" { + value = local.environments +} + +output "has_database" { + value = local.has_database +} + +output "has_incident_management_service" { + value = local.has_incident_management_service +} + +output "image_repository_name" { + value = local.image_repository_name +} + +output "build_repository_config" { + value = local.build_repository_config +} + +output "environment_configs" { + value = local.environment_configs +} diff --git a/infra/api/build-repository/.terraform.lock.hcl b/infra/api/build-repository/.terraform.lock.hcl new file mode 100644 index 000000000..0f42f0af3 --- /dev/null +++ b/infra/api/build-repository/.terraform.lock.hcl @@ -0,0 +1,41 @@ +# This file is maintained automatically by "terraform 
init". +# Manual edits may be lost in future updates. + +provider "registry.terraform.io/hashicorp/aws" { + version = "4.20.1" + constraints = "~> 4.20.1" + hashes = [ + "h1:1JbjdrwUCLTNVVhlE+acEPnJFJ/FqBTHy5Ooll6nwjI=", + "zh:21d064d8fac08376c633e002e2f36e83eb7958535e251831feaf38f51c49dafd", + "zh:3a37912ff43d89ce8d559ec86265d7506801bccb380c7cfb896e8ff24e3fe79d", + "zh:795eb175c85279ec51dbe12e4d1afa0860c2c0b22e5d36a8e8869f60a93b7931", + "zh:8afb61a18b17f8ff249cb23e9d3b5d2530944001ef1d56c1d53f41b0890c7ab8", + "zh:911701040395e0e4da4b7252279e7cf1593cdd26f22835e1a9eddbdb9691a1a7", + "zh:9b12af85486a96aedd8d7984b0ff811a4b42e3d88dad1a3fb4c0b580d04fa425", + "zh:a46d54a6a5407f569f8178e916af888b2b268f86448c64cad165dc89759c8399", + "zh:c5f71fd5e3519a24fd6af455ef1c26a559cfdde7f626b0afbd2a73bb79f036b1", + "zh:df3b69d6c9b0cdc7e3f90ee08412b22332c32e97ad8ce6ccad528f89f235a7d3", + "zh:e99d6a64c03549d60c2accf792fa04466cfb317f72e895c8f67eff8a02920887", + "zh:eea7a0df8bcb69925c9ce8e15ef403c8bbf16d46c43e8f5607b116531d1bce4a", + "zh:f6a26ce77f7db1d50ce311e32902fd001fb365e5e45e47a9a5cd59d734c89cb6", + ] +} + +provider "registry.terraform.io/hashicorp/external" { + version = "2.3.1" + hashes = [ + "h1:gznGscVJ0USxy4CdihpjRKPsKvyGr/zqPvBoFLJTQDc=", + "zh:001e2886dc81fc98cf17cf34c0d53cb2dae1e869464792576e11b0f34ee92f54", + "zh:2eeac58dd75b1abdf91945ac4284c9ccb2bfb17fa9bdb5f5d408148ff553b3ee", + "zh:2fc39079ba61411a737df2908942e6970cb67ed2f4fb19090cd44ce2082903dd", + "zh:472a71c624952cff7aa98a7b967f6c7bb53153dbd2b8f356ceb286e6743bb4e2", + "zh:4cff06d31272aac8bc35e9b7faec42cf4554cbcbae1092eaab6ab7f643c215d9", + "zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3", + "zh:7ed16ccd2049fa089616b98c0bd57219f407958f318f3c697843e2397ddf70df", + "zh:842696362c92bf2645eb85c739410fd51376be6c488733efae44f4ce688da50e", + "zh:8985129f2eccfd7f1841ce06f3bf2bbede6352ec9e9f926fbaa6b1a05313b326", + "zh:a5f0602d8ec991a5411ef42f872aa90f6347e93886ce67905c53cfea37278e05", + "zh:bf4ab82cbe5256dcef16949973bf6aa1a98c2c73a98d6a44ee7bc40809d002b8", + "zh:e70770be62aa70198fa899526d671643ff99eecf265bf1a50e798fc3480bd417", + ] +} diff --git a/infra/api/build-repository/example.tfvars b/infra/api/build-repository/example.tfvars new file mode 100644 index 000000000..2063158f7 --- /dev/null +++ b/infra/api/build-repository/example.tfvars @@ -0,0 +1,2 @@ +app_environment_account_ids = [] +region = "" diff --git a/infra/api/build-repository/main.tf b/infra/api/build-repository/main.tf new file mode 100644 index 000000000..cb2aebb78 --- /dev/null +++ b/infra/api/build-repository/main.tf @@ -0,0 +1,59 @@ +data "aws_iam_role" "github_actions" { + name = module.project_config.github_actions_role_name +} + +locals { + # Set project tags that will be used to tag all resources. + tags = merge(module.project_config.default_tags, { + application = module.app_config.app_name + application_role = "build-repository" + description = "Backend resources required for storing built release candidate artifacts to be used for deploying to environments." 
+ }) + + # Get list of AWS account ids for the application environments that + # will need access to the build repository + app_account_names = values(module.app_config.account_names_by_environment) + account_ids_by_name = data.external.account_ids_by_name.result + app_account_ids = [for account_name in local.app_account_names : local.account_ids_by_name[account_name] if contains(keys(local.account_ids_by_name), account_name)] +} + +terraform { + required_version = ">= 1.2.0, < 2.0.0" + + required_providers { + aws = { + source = "hashicorp/aws" + version = "~>4.20.1" + } + } + + backend "s3" { + encrypt = "true" + } +} + +provider "aws" { + region = module.app_config.build_repository_config.region + default_tags { + tags = local.tags + } +} + +module "project_config" { + source = "../../project-config" +} + +module "app_config" { + source = "../app-config" +} + +data "external" "account_ids_by_name" { + program = ["../../../bin/account-ids-by-name.sh"] +} + +module "container_image_repository" { + source = "../../modules/container-image-repository" + name = module.app_config.image_repository_name + push_access_role_arn = data.aws_iam_role.github_actions.arn + app_account_ids = local.app_account_ids +} diff --git a/infra/api/build-repository/shared.s3.tfbackend b/infra/api/build-repository/shared.s3.tfbackend new file mode 100644 index 000000000..8cdc7e100 --- /dev/null +++ b/infra/api/build-repository/shared.s3.tfbackend @@ -0,0 +1,4 @@ +bucket = "grants-equity-315341936575-us-east-1-tf" +key = "infra/api/build-repository/shared.tfstate" +dynamodb_table = "grants-equity-315341936575-us-east-1-tf-state-locks" +region = "us-east-1" diff --git a/infra/api/build-repository/terraform.tfvars b/infra/api/build-repository/terraform.tfvars new file mode 100644 index 000000000..336ab1fc6 --- /dev/null +++ b/infra/api/build-repository/terraform.tfvars @@ -0,0 +1,2 @@ +app_environment_account_ids = [] +region = "us-east-1" diff --git a/infra/api/build-repository/variables.tf b/infra/api/build-repository/variables.tf new file mode 100644 index 000000000..45ef60319 --- /dev/null +++ b/infra/api/build-repository/variables.tf @@ -0,0 +1,8 @@ +variable "app_environment_account_ids" { + type = list(string) + description = "List of AWS account ids for the application's environments. This is used to allow environments pull images from the container image repository." +} + +variable "region" { + type = string +} diff --git a/infra/api/database/.terraform.lock.hcl b/infra/api/database/.terraform.lock.hcl new file mode 100644 index 000000000..46e1f1695 --- /dev/null +++ b/infra/api/database/.terraform.lock.hcl @@ -0,0 +1,63 @@ +# This file is maintained automatically by "terraform init". +# Manual edits may be lost in future updates. 
+ +provider "registry.terraform.io/hashicorp/archive" { + version = "2.4.0" + hashes = [ + "h1:cJokkjeH1jfpG4QEHdRx0t2j8rr52H33A7C/oX73Ok4=", + "zh:18e408596dd53048f7fc8229098d0e3ad940b92036a24287eff63e2caec72594", + "zh:392d4216ecd1a1fd933d23f4486b642a8480f934c13e2cae3c13b6b6a7e34a7b", + "zh:655dd1fa5ca753a4ace21d0de3792d96fff429445717f2ce31c125d19c38f3ff", + "zh:70dae36c176aa2b258331ad366a471176417a94dd3b4985a911b8be9ff842b00", + "zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3", + "zh:7d8c8e3925f1e21daf73f85983894fbe8868e326910e6df3720265bc657b9c9c", + "zh:a032ec0f0aee27a789726e348e8ad20778c3a1c9190ef25e7cff602c8d175f44", + "zh:b8e50de62ba185745b0fe9713755079ad0e9f7ac8638d204de6762cc36870410", + "zh:c8ad0c7697a3d444df21ff97f3473a8604c8639be64afe3f31b8ec7ad7571e18", + "zh:df736c5a2a7c3a82c5493665f659437a22f0baf8c2d157e45f4dd7ca40e739fc", + "zh:e8ffbf578a0977074f6d08aa8734e36c726e53dc79894cfc4f25fadc4f45f1df", + "zh:efea57ff23b141551f92b2699024d356c7ffd1a4ad62931da7ed7a386aef7f1f", + ] +} + +provider "registry.terraform.io/hashicorp/aws" { + version = "4.67.0" + constraints = "~> 4.67.0" + hashes = [ + "h1:5Zfo3GfRSWBaXs4TGQNOflr1XaYj6pRnVJLX5VAjFX4=", + "zh:0843017ecc24385f2b45f2c5fce79dc25b258e50d516877b3affee3bef34f060", + "zh:19876066cfa60de91834ec569a6448dab8c2518b8a71b5ca870b2444febddac6", + "zh:24995686b2ad88c1ffaa242e36eee791fc6070e6144f418048c4ce24d0ba5183", + "zh:4a002990b9f4d6d225d82cb2fb8805789ffef791999ee5d9cb1fef579aeff8f1", + "zh:559a2b5ace06b878c6de3ecf19b94fbae3512562f7a51e930674b16c2f606e29", + "zh:6a07da13b86b9753b95d4d8218f6dae874cf34699bca1470d6effbb4dee7f4b7", + "zh:768b3bfd126c3b77dc975c7c0e5db3207e4f9997cf41aa3385c63206242ba043", + "zh:7be5177e698d4b547083cc738b977742d70ed68487ce6f49ecd0c94dbf9d1362", + "zh:8b562a818915fb0d85959257095251a05c76f3467caa3ba95c583ba5fe043f9b", + "zh:9b12af85486a96aedd8d7984b0ff811a4b42e3d88dad1a3fb4c0b580d04fa425", + "zh:9c385d03a958b54e2afd5279cd8c7cbdd2d6ca5c7d6a333e61092331f38af7cf", + "zh:b3ca45f2821a89af417787df8289cb4314b273d29555ad3b2a5ab98bb4816b3b", + "zh:da3c317f1db2469615ab40aa6baba63b5643bae7110ff855277a1fb9d8eb4f2c", + "zh:dc6430622a8dc5cdab359a8704aec81d3825ea1d305bbb3bbd032b1c6adfae0c", + "zh:fac0d2ddeadf9ec53da87922f666e1e73a603a611c57bcbc4b86ac2821619b1d", + ] +} + +provider "registry.terraform.io/hashicorp/random" { + version = "3.5.1" + hashes = [ + "h1:IL9mSatmwov+e0+++YX2V6uel+dV6bn+fC/cnGDK3Ck=", + "zh:04e3fbd610cb52c1017d282531364b9c53ef72b6bc533acb2a90671957324a64", + "zh:119197103301ebaf7efb91df8f0b6e0dd31e6ff943d231af35ee1831c599188d", + "zh:4d2b219d09abf3b1bb4df93d399ed156cadd61f44ad3baf5cf2954df2fba0831", + "zh:6130bdde527587bbe2dcaa7150363e96dbc5250ea20154176d82bc69df5d4ce3", + "zh:6cc326cd4000f724d3086ee05587e7710f032f94fc9af35e96a386a1c6f2214f", + "zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3", + "zh:b6d88e1d28cf2dfa24e9fdcc3efc77adcdc1c3c3b5c7ce503a423efbdd6de57b", + "zh:ba74c592622ecbcef9dc2a4d81ed321c4e44cddf7da799faa324da9bf52a22b2", + "zh:c7c5cde98fe4ef1143bd1b3ec5dc04baf0d4cc3ca2c5c7d40d17c0e9b2076865", + "zh:dac4bad52c940cd0dfc27893507c1e92393846b024c5a9db159a93c534a3da03", + "zh:de8febe2a2acd9ac454b844a4106ed295ae9520ef54dc8ed2faf29f12716b602", + "zh:eab0d0495e7e711cca367f7d4df6e322e6c562fc52151ec931176115b83ed014", + ] +} diff --git a/infra/api/database/dev.s3.tfbackend b/infra/api/database/dev.s3.tfbackend new file mode 100644 index 000000000..15f0341eb --- /dev/null +++ b/infra/api/database/dev.s3.tfbackend @@ -0,0 +1,4 @@ +bucket = 
"grants-equity-315341936575-us-east-1-tf" +key = "infra/api/database/dev.tfstate" +dynamodb_table = "grants-equity-315341936575-us-east-1-tf-state-locks" +region = "us-east-1" diff --git a/infra/api/database/dev.tfvars b/infra/api/database/dev.tfvars new file mode 100644 index 000000000..25fecf16d --- /dev/null +++ b/infra/api/database/dev.tfvars @@ -0,0 +1,2 @@ +environment_name = "dev" +region = "us-east-1" diff --git a/infra/api/database/example.tfvars b/infra/api/database/example.tfvars new file mode 100644 index 000000000..757d768ac --- /dev/null +++ b/infra/api/database/example.tfvars @@ -0,0 +1,2 @@ +environment_name = "" +region = "" diff --git a/infra/api/database/main.tf b/infra/api/database/main.tf new file mode 100644 index 000000000..3f671c26a --- /dev/null +++ b/infra/api/database/main.tf @@ -0,0 +1,90 @@ +# TODO(https://github.com/navapbc/template-infra/issues/152) use non-default VPC +data "aws_vpc" "default" { + default = true +} + +# TODO(https://github.com/navapbc/template-infra/issues/152) use private subnets +data "aws_subnets" "default" { + filter { + name = "default-for-az" + values = [true] + } +} + + +locals { + # The prefix key/value pair is used for Terraform Workspaces, which is useful for projects with multiple infrastructure developers. + # By default, Terraform creates a workspace named “default.” If a non-default workspace is not created this prefix will equal “default”, + # if you choose not to use workspaces set this value to "dev" + prefix = terraform.workspace == "default" ? "" : "${terraform.workspace}-" + + # Add environment specific tags + tags = merge(module.project_config.default_tags, { + environment = var.environment_name + description = "Database resources for the ${var.environment_name} environment" + }) + + environment_config = module.app_config.environment_configs[var.environment_name] + database_config = local.environment_config.database_config +} + +terraform { + required_version = ">=1.4.0" + + required_providers { + aws = { + source = "hashicorp/aws" + version = "~>4.67.0" + } + } + + backend "s3" { + encrypt = "true" + } +} + +provider "aws" { + region = local.database_config.region + default_tags { + tags = local.tags + } +} + +module "project_config" { + source = "../../project-config" +} + +module "app_config" { + source = "../app-config" +} + +data "aws_security_groups" "aws_services" { + filter { + name = "group-name" + values = ["${module.project_config.aws_services_security_group_name_prefix}*"] + } + + filter { + name = "vpc-id" + values = [data.aws_vpc.default.id] + } +} + +module "database" { + source = "../../modules/database" + + name = "${local.prefix}${local.database_config.cluster_name}" + access_policy_name = "${local.prefix}${local.database_config.access_policy_name}" + app_access_policy_name = "${local.prefix}${local.database_config.app_access_policy_name}" + migrator_access_policy_name = "${local.prefix}${local.database_config.migrator_access_policy_name}" + + # The following are not AWS infra resources and therefore do not need to be + # isolated via the terraform workspace prefix + app_username = local.database_config.app_username + migrator_username = local.database_config.migrator_username + schema_name = local.database_config.schema_name + + vpc_id = data.aws_vpc.default.id + private_subnet_ids = data.aws_subnets.default.ids + aws_services_security_group_id = data.aws_security_groups.aws_services.ids[0] +} diff --git a/infra/api/database/outputs.tf b/infra/api/database/outputs.tf new file mode 100644 index 
000000000..927b820a9 --- /dev/null +++ b/infra/api/database/outputs.tf @@ -0,0 +1,3 @@ +output "role_manager_function_name" { + value = module.database.role_manager_function_name +} diff --git a/infra/api/database/prod.s3.tfbackend b/infra/api/database/prod.s3.tfbackend new file mode 100644 index 000000000..189df729e --- /dev/null +++ b/infra/api/database/prod.s3.tfbackend @@ -0,0 +1,4 @@ +bucket = "grants-equity-315341936575-us-east-1-tf" +key = "infra/api/database/prod.tfstate" +dynamodb_table = "grants-equity-315341936575-us-east-1-tf-state-locks" +region = "us-east-1" diff --git a/infra/api/database/prod.tfvars b/infra/api/database/prod.tfvars new file mode 100644 index 000000000..ee5d5421f --- /dev/null +++ b/infra/api/database/prod.tfvars @@ -0,0 +1,2 @@ +environment_name = "prod" +region = "us-east-1" diff --git a/infra/api/database/variables.tf b/infra/api/database/variables.tf new file mode 100644 index 000000000..c142bdf97 --- /dev/null +++ b/infra/api/database/variables.tf @@ -0,0 +1,4 @@ +variable "environment_name" { + type = string + description = "name of the application environment" +} diff --git a/infra/api/service/.terraform.lock.hcl b/infra/api/service/.terraform.lock.hcl new file mode 100644 index 000000000..41c41631e --- /dev/null +++ b/infra/api/service/.terraform.lock.hcl @@ -0,0 +1,25 @@ +# This file is maintained automatically by "terraform init". +# Manual edits may be lost in future updates. + +provider "registry.terraform.io/hashicorp/aws" { + version = "4.67.0" + constraints = ">= 4.56.0, < 5.0.0" + hashes = [ + "h1:5Zfo3GfRSWBaXs4TGQNOflr1XaYj6pRnVJLX5VAjFX4=", + "zh:0843017ecc24385f2b45f2c5fce79dc25b258e50d516877b3affee3bef34f060", + "zh:19876066cfa60de91834ec569a6448dab8c2518b8a71b5ca870b2444febddac6", + "zh:24995686b2ad88c1ffaa242e36eee791fc6070e6144f418048c4ce24d0ba5183", + "zh:4a002990b9f4d6d225d82cb2fb8805789ffef791999ee5d9cb1fef579aeff8f1", + "zh:559a2b5ace06b878c6de3ecf19b94fbae3512562f7a51e930674b16c2f606e29", + "zh:6a07da13b86b9753b95d4d8218f6dae874cf34699bca1470d6effbb4dee7f4b7", + "zh:768b3bfd126c3b77dc975c7c0e5db3207e4f9997cf41aa3385c63206242ba043", + "zh:7be5177e698d4b547083cc738b977742d70ed68487ce6f49ecd0c94dbf9d1362", + "zh:8b562a818915fb0d85959257095251a05c76f3467caa3ba95c583ba5fe043f9b", + "zh:9b12af85486a96aedd8d7984b0ff811a4b42e3d88dad1a3fb4c0b580d04fa425", + "zh:9c385d03a958b54e2afd5279cd8c7cbdd2d6ca5c7d6a333e61092331f38af7cf", + "zh:b3ca45f2821a89af417787df8289cb4314b273d29555ad3b2a5ab98bb4816b3b", + "zh:da3c317f1db2469615ab40aa6baba63b5643bae7110ff855277a1fb9d8eb4f2c", + "zh:dc6430622a8dc5cdab359a8704aec81d3825ea1d305bbb3bbd032b1c6adfae0c", + "zh:fac0d2ddeadf9ec53da87922f666e1e73a603a611c57bcbc4b86ac2821619b1d", + ] +} diff --git a/infra/api/service/dev.s3.tfbackend b/infra/api/service/dev.s3.tfbackend new file mode 100644 index 000000000..594d3f6e0 --- /dev/null +++ b/infra/api/service/dev.s3.tfbackend @@ -0,0 +1,4 @@ +bucket = "grants-equity-315341936575-us-east-1-tf" +key = "infra/api/service/dev.tfstate" +dynamodb_table = "grants-equity-315341936575-us-east-1-tf-state-locks" +region = "us-east-1" diff --git a/infra/api/service/dev.tfvars b/infra/api/service/dev.tfvars new file mode 100644 index 000000000..ba1329bcc --- /dev/null +++ b/infra/api/service/dev.tfvars @@ -0,0 +1,4 @@ +environment_name = "dev" +tfstate_bucket = "grants-equity-315341936575-us-east-1-tf" +tfstate_key = "infra/api/service/dev.tfstate" +region = "us-east-1" diff --git a/infra/api/service/example.tfvars b/infra/api/service/example.tfvars new file mode 100644 
index 000000000..d2d79d550 --- /dev/null +++ b/infra/api/service/example.tfvars @@ -0,0 +1,4 @@ +environment_name = "" +tfstate_bucket = "" +tfstate_key = "" +region = "" diff --git a/infra/api/service/image_tag.tf b/infra/api/service/image_tag.tf new file mode 100644 index 000000000..ce1bc75c2 --- /dev/null +++ b/infra/api/service/image_tag.tf @@ -0,0 +1,56 @@ +# Make the "image_tag" variable optional so that "terraform plan" +# and "terraform apply" work without any required variables. +# +# This works as follows: + +# 1. Accept an optional variable during a terraform plan/apply. (see "image_tag" variable in variables.tf) + +# 2. Read the output used from the last terraform state using "terraform_remote_state". +# Get the backend config by parsing the backend config file +locals { + backend_config_file_path = "${path.module}/${var.environment_name}.s3.tfbackend" + backend_config_file = file("${path.module}/${var.environment_name}.s3.tfbackend") + + # Use regex to parse backend config file to get a map of variables to their + # defined values since there is no built-in terraform function that does that + # + # The backend config file consists of lines that look like + # = " match[1] } + tfstate_bucket = local.backend_config["bucket"] + tfstate_key = local.backend_config["key"] +} +data "terraform_remote_state" "current_image_tag" { + # Don't do a lookup if image_tag is provided explicitly. + # This saves some time and also allows us to do a first deploy, + # where the tfstate file does not yet exist. + count = var.image_tag == null ? 1 : 0 + backend = "s3" + + config = { + bucket = local.tfstate_bucket + key = local.tfstate_key + region = local.service_config.region + } + + defaults = { + image_tag = null + } +} + +# 3. Prefer the given variable if provided, otherwise default to the value from last time. +locals { + image_tag = (var.image_tag == null + ? data.terraform_remote_state.current_image_tag[0].outputs.image_tag + : var.image_tag) +} + +# 4. Store the final value used as a terraform output for next time. +output "image_tag" { + value = local.image_tag +} diff --git a/infra/api/service/main.tf b/infra/api/service/main.tf new file mode 100644 index 000000000..91c68c383 --- /dev/null +++ b/infra/api/service/main.tf @@ -0,0 +1,124 @@ +# TODO(https://github.com/navapbc/template-infra/issues/152) use non-default VPC +data "aws_vpc" "default" { + default = true +} + +# TODO(https://github.com/navapbc/template-infra/issues/152) use private subnets +data "aws_subnets" "default" { + filter { + name = "default-for-az" + values = [true] + } +} + + +locals { + # The prefix key/value pair is used for Terraform Workspaces, which is useful for projects with multiple infrastructure developers. + # By default, Terraform creates a workspace named “default.” If a non-default workspace is not created this prefix will equal “default”, + # if you choose not to use workspaces set this value to "dev" + prefix = terraform.workspace == "default" ? 
"" : "${terraform.workspace}-" + + # Add environment specific tags + tags = merge(module.project_config.default_tags, { + environment = var.environment_name + description = "Application resources created in ${var.environment_name} environment" + }) + + service_name = "${local.prefix}${module.app_config.app_name}-${var.environment_name}" + + environment_config = module.app_config.environment_configs[var.environment_name] + service_config = local.environment_config.service_config + database_config = local.environment_config.database_config + incident_management_service_integration_config = local.environment_config.incident_management_service_integration +} + +terraform { + required_version = ">= 1.2.0, < 2.0.0" + + required_providers { + aws = { + source = "hashicorp/aws" + version = ">= 4.56.0, < 5.0.0" + } + } + + backend "s3" { + encrypt = "true" + } +} + +provider "aws" { + region = local.service_config.region + default_tags { + tags = local.tags + } +} + +module "project_config" { + source = "../../project-config" +} + +module "app_config" { + source = "../app-config" +} + +data "aws_rds_cluster" "db_cluster" { + count = module.app_config.has_database ? 1 : 0 + cluster_identifier = local.database_config.cluster_name +} + +data "aws_iam_policy" "db_access_policy" { + count = module.app_config.has_database ? 1 : 0 + name = local.database_config.access_policy_name +} + +data "aws_iam_policy" "app_db_access_policy" { + count = module.app_config.has_database ? 1 : 0 + name = local.database_config.app_access_policy_name +} + +data "aws_iam_policy" "migrator_db_access_policy" { + count = module.app_config.has_database ? 1 : 0 + name = local.database_config.migrator_access_policy_name +} + +# Retrieve url for external incident management tool (e.g. Pagerduty, Splunk-On-Call) + +data "aws_ssm_parameter" "incident_management_service_integration_url" { + count = module.app_config.has_incident_management_service ? 1 : 0 + name = local.incident_management_service_integration_config.integration_url_param_name +} + +module "service" { + source = "../../modules/service" + service_name = local.service_name + image_repository_name = module.app_config.image_repository_name + image_tag = local.image_tag + vpc_id = data.aws_vpc.default.id + subnet_ids = data.aws_subnets.default.ids + + db_vars = module.app_config.has_database ? { + security_group_ids = data.aws_rds_cluster.db_cluster[0].vpc_security_group_ids + access_policy_arn = data.aws_iam_policy.db_access_policy[0].arn + app_access_policy_arn = data.aws_iam_policy.app_db_access_policy[0].arn + migrator_access_policy_arn = data.aws_iam_policy.migrator_db_access_policy[0].arn + connection_info = { + host = data.aws_rds_cluster.db_cluster[0].endpoint + port = data.aws_rds_cluster.db_cluster[0].port + user = local.database_config.app_username + db_name = data.aws_rds_cluster.db_cluster[0].database_name + schema_name = local.database_config.schema_name + } + } : null +} + +module "monitoring" { + source = "../../modules/monitoring" + #Email subscription list: + #email_alerts_subscription_list = ["email1@email.com", "email2@email.com"] + + # Module takes service and ALB names to link all alerts with corresponding targets + service_name = local.service_name + load_balancer_arn_suffix = module.service.load_balancer_arn_suffix + incident_management_service_integration_url = module.app_config.has_incident_management_service ? 
data.aws_ssm_parameter.incident_management_service_integration_url[0].value : null +} diff --git a/infra/api/service/outputs.tf b/infra/api/service/outputs.tf new file mode 100644 index 000000000..7b3d29c0f --- /dev/null +++ b/infra/api/service/outputs.tf @@ -0,0 +1,20 @@ +output "service_endpoint" { + description = "The public endpoint for the service." + value = module.service.public_endpoint +} + +output "service_cluster_name" { + value = module.service.cluster_name +} + +output "service_name" { + value = local.service_name +} + +output "application_log_group" { + value = module.service.application_log_group +} + +output "application_log_stream_prefix" { + value = module.service.application_log_stream_prefix +} diff --git a/infra/api/service/prod.s3.tfbackend b/infra/api/service/prod.s3.tfbackend new file mode 100644 index 000000000..e93c9406c --- /dev/null +++ b/infra/api/service/prod.s3.tfbackend @@ -0,0 +1,4 @@ +bucket = "grants-equity-315341936575-us-east-1-tf" +key = "infra/api/service/prod.tfstate" +dynamodb_table = "grants-equity-315341936575-us-east-1-tf-state-locks" +region = "us-east-1" diff --git a/infra/api/service/prod.tfvars b/infra/api/service/prod.tfvars new file mode 100644 index 000000000..bf6def6bc --- /dev/null +++ b/infra/api/service/prod.tfvars @@ -0,0 +1,4 @@ +environment_name = "prod" +tfstate_bucket = "grants-equity-315341936575-us-east-1-tf" +tfstate_key = "infra/api/service/prod.tfstate" +region = "us-east-1" diff --git a/infra/api/service/variables.tf b/infra/api/service/variables.tf new file mode 100644 index 000000000..19a5f312f --- /dev/null +++ b/infra/api/service/variables.tf @@ -0,0 +1,10 @@ +variable "environment_name" { + type = string + description = "name of the application environment" +} + +variable "image_tag" { + type = string + description = "image tag to deploy to the environment" + default = null +} diff --git a/infra/frontend/app-config/env-config/outputs.tf b/infra/frontend/app-config/env-config/outputs.tf index 113da908b..2dea34db0 100644 --- a/infra/frontend/app-config/env-config/outputs.tf +++ b/infra/frontend/app-config/env-config/outputs.tf @@ -1,13 +1,22 @@ output "database_config" { value = var.has_database ? { - cluster_name = "${var.app_name}-${var.environment}" - access_policy_name = "${var.app_name}-${var.environment}-db-access" - app_username = "app" - migrator_username = "migrator" - schema_name = var.app_name + region = var.default_region + cluster_name = "${var.app_name}-${var.environment}" + access_policy_name = "${var.app_name}-${var.environment}-db-access" + app_username = "app" + migrator_username = "migrator" + schema_name = var.app_name + app_access_policy_name = "${var.app_name}-${var.environment}-app-access" + migrator_access_policy_name = "${var.app_name}-${var.environment}-migrator-access" } : null } +output "service_config" { + value = { + region = var.default_region + } +} + output "incident_management_service_integration" { value = var.has_incident_management_service ? 
{ integration_url_param_name = "/monitoring/${var.app_name}/${var.environment}/incident-management-integration-url" diff --git a/infra/frontend/app-config/env-config/variables.tf b/infra/frontend/app-config/env-config/variables.tf index 2b7fb1857..12b22c42e 100644 --- a/infra/frontend/app-config/env-config/variables.tf +++ b/infra/frontend/app-config/env-config/variables.tf @@ -7,6 +7,11 @@ variable "environment" { type = string } +variable "default_region" { + description = "default region for the project" + type = string +} + variable "has_database" { type = bool } diff --git a/infra/frontend/app-config/main.tf b/infra/frontend/app-config/main.tf index f153fa762..0b28e2916 100644 --- a/infra/frontend/app-config/main.tf +++ b/infra/frontend/app-config/main.tf @@ -5,8 +5,11 @@ locals { image_repository_name = "${local.project_name}-${local.app_name}" has_database = false has_incident_management_service = false - environment_configs = { for environment in local.environments : environment => module.env_config[environment] } + environment_configs = { for environment in local.environments : environment => module.env_config[environment] } + build_repository_config = { + region = module.project_config.default_region + } # Map from environment name to the account name for the AWS account that # contains the resources for that environment. Resources that are shared # across environments use the key "shared". @@ -52,6 +55,7 @@ module "env_config" { source = "./env-config" app_name = local.app_name + default_region = module.project_config.default_region environment = each.key has_database = local.has_database has_incident_management_service = local.has_incident_management_service diff --git a/infra/frontend/app-config/outputs.tf b/infra/frontend/app-config/outputs.tf index 36741d145..26f012550 100644 --- a/infra/frontend/app-config/outputs.tf +++ b/infra/frontend/app-config/outputs.tf @@ -22,6 +22,10 @@ output "image_repository_name" { value = local.image_repository_name } +output "build_repository_config" { + value = local.build_repository_config +} + output "environment_configs" { value = local.environment_configs } diff --git a/infra/frontend/build-repository/.terraform.lock.hcl b/infra/frontend/build-repository/.terraform.lock.hcl new file mode 100644 index 000000000..0f42f0af3 --- /dev/null +++ b/infra/frontend/build-repository/.terraform.lock.hcl @@ -0,0 +1,41 @@ +# This file is maintained automatically by "terraform init". +# Manual edits may be lost in future updates. 
+ +provider "registry.terraform.io/hashicorp/aws" { + version = "4.20.1" + constraints = "~> 4.20.1" + hashes = [ + "h1:1JbjdrwUCLTNVVhlE+acEPnJFJ/FqBTHy5Ooll6nwjI=", + "zh:21d064d8fac08376c633e002e2f36e83eb7958535e251831feaf38f51c49dafd", + "zh:3a37912ff43d89ce8d559ec86265d7506801bccb380c7cfb896e8ff24e3fe79d", + "zh:795eb175c85279ec51dbe12e4d1afa0860c2c0b22e5d36a8e8869f60a93b7931", + "zh:8afb61a18b17f8ff249cb23e9d3b5d2530944001ef1d56c1d53f41b0890c7ab8", + "zh:911701040395e0e4da4b7252279e7cf1593cdd26f22835e1a9eddbdb9691a1a7", + "zh:9b12af85486a96aedd8d7984b0ff811a4b42e3d88dad1a3fb4c0b580d04fa425", + "zh:a46d54a6a5407f569f8178e916af888b2b268f86448c64cad165dc89759c8399", + "zh:c5f71fd5e3519a24fd6af455ef1c26a559cfdde7f626b0afbd2a73bb79f036b1", + "zh:df3b69d6c9b0cdc7e3f90ee08412b22332c32e97ad8ce6ccad528f89f235a7d3", + "zh:e99d6a64c03549d60c2accf792fa04466cfb317f72e895c8f67eff8a02920887", + "zh:eea7a0df8bcb69925c9ce8e15ef403c8bbf16d46c43e8f5607b116531d1bce4a", + "zh:f6a26ce77f7db1d50ce311e32902fd001fb365e5e45e47a9a5cd59d734c89cb6", + ] +} + +provider "registry.terraform.io/hashicorp/external" { + version = "2.3.1" + hashes = [ + "h1:gznGscVJ0USxy4CdihpjRKPsKvyGr/zqPvBoFLJTQDc=", + "zh:001e2886dc81fc98cf17cf34c0d53cb2dae1e869464792576e11b0f34ee92f54", + "zh:2eeac58dd75b1abdf91945ac4284c9ccb2bfb17fa9bdb5f5d408148ff553b3ee", + "zh:2fc39079ba61411a737df2908942e6970cb67ed2f4fb19090cd44ce2082903dd", + "zh:472a71c624952cff7aa98a7b967f6c7bb53153dbd2b8f356ceb286e6743bb4e2", + "zh:4cff06d31272aac8bc35e9b7faec42cf4554cbcbae1092eaab6ab7f643c215d9", + "zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3", + "zh:7ed16ccd2049fa089616b98c0bd57219f407958f318f3c697843e2397ddf70df", + "zh:842696362c92bf2645eb85c739410fd51376be6c488733efae44f4ce688da50e", + "zh:8985129f2eccfd7f1841ce06f3bf2bbede6352ec9e9f926fbaa6b1a05313b326", + "zh:a5f0602d8ec991a5411ef42f872aa90f6347e93886ce67905c53cfea37278e05", + "zh:bf4ab82cbe5256dcef16949973bf6aa1a98c2c73a98d6a44ee7bc40809d002b8", + "zh:e70770be62aa70198fa899526d671643ff99eecf265bf1a50e798fc3480bd417", + ] +} diff --git a/infra/frontend/build-repository/main.tf b/infra/frontend/build-repository/main.tf index b615f00ac..cb2aebb78 100644 --- a/infra/frontend/build-repository/main.tf +++ b/infra/frontend/build-repository/main.tf @@ -9,6 +9,12 @@ locals { application_role = "build-repository" description = "Backend resources required for storing built release candidate artifacts to be used for deploying to environments." 
}) + +  # Get list of AWS account ids for the application environments that +  # will need access to the build repository +  app_account_names = values(module.app_config.account_names_by_environment) +  account_ids_by_name = data.external.account_ids_by_name.result +  app_account_ids = [for account_name in local.app_account_names : local.account_ids_by_name[account_name] if contains(keys(local.account_ids_by_name), account_name)] } terraform { @@ -27,7 +33,7 @@ terraform { } provider "aws" { -  region = var.region +  region = module.app_config.build_repository_config.region default_tags { tags = local.tags } @@ -41,9 +47,13 @@ module "app_config" { source = "../app-config" } +data "external" "account_ids_by_name" { +  program = ["../../../bin/account-ids-by-name.sh"] +} + module "container_image_repository" { source = "../../modules/container-image-repository" name = module.app_config.image_repository_name push_access_role_arn = data.aws_iam_role.github_actions.arn -  app_account_ids = var.app_environment_account_ids +  app_account_ids = local.app_account_ids } diff --git a/infra/frontend/service/image_tag.tf b/infra/frontend/service/image_tag.tf index 923487e75..ce1bc75c2 100644 --- a/infra/frontend/service/image_tag.tf +++ b/infra/frontend/service/image_tag.tf @@ -6,6 +6,25 @@ # 1. Accept an optional variable during a terraform plan/apply. (see "image_tag" variable in variables.tf) # 2. Read the output used from the last terraform state using "terraform_remote_state". +# Get the backend config by parsing the backend config file +locals { +  backend_config_file_path = "${path.module}/${var.environment_name}.s3.tfbackend" +  backend_config_file = file("${path.module}/${var.environment_name}.s3.tfbackend") + +  # Use regex to parse backend config file to get a map of variables to their +  # defined values since there is no built-in terraform function that does that +  # +  # The backend config file consists of lines that look like +  # <variable_name> = "<variable_value>", so the regex below captures the variable name and the quoted value from each line +  backend_config_regex = "(\\w+)\\s+= \"(.+)\"" +  backend_config = { for match in regexall(local.backend_config_regex, local.backend_config_file) : match[0] => match[1] } +  tfstate_bucket = local.backend_config["bucket"] +  tfstate_key = local.backend_config["key"] +} data "terraform_remote_state" "current_image_tag" { # Don't do a lookup if image_tag is provided explicitly. # This saves some time and also allows us to do a first deploy, @@ -14,9 +33,9 @@ data "terraform_remote_state" "current_image_tag" { backend = "s3" config = { -    bucket = var.tfstate_bucket -    key = var.tfstate_key -    region = var.region +    bucket = local.tfstate_bucket +    key = local.tfstate_key +    region = local.service_config.region } defaults = { diff --git a/infra/frontend/service/main.tf b/infra/frontend/service/main.tf index 4c69841bc..91c68c383 100644 --- a/infra/frontend/service/main.tf +++ b/infra/frontend/service/main.tf @@ -27,6 +27,7 @@ locals { service_name = "${local.prefix}${module.app_config.app_name}-${var.environment_name}" environment_config = module.app_config.environment_configs[var.environment_name] +  service_config = local.environment_config.service_config database_config = local.environment_config.database_config incident_management_service_integration_config = local.environment_config.incident_management_service_integration } @@ -47,7 +48,7 @@ terraform { } provider "aws" { -  region = var.region +  region = local.service_config.region default_tags { tags = local.tags } @@ -71,6 +72,16 @@ data "aws_iam_policy" "db_access_policy" { name = local.database_config.access_policy_name } +data "aws_iam_policy" "app_db_access_policy" { +  count = module.app_config.has_database ? 
1 : 0 + name = local.database_config.app_access_policy_name +} + +data "aws_iam_policy" "migrator_db_access_policy" { + count = module.app_config.has_database ? 1 : 0 + name = local.database_config.migrator_access_policy_name +} + # Retrieve url for external incident management tool (e.g. Pagerduty, Splunk-On-Call) data "aws_ssm_parameter" "incident_management_service_integration_url" { @@ -87,8 +98,10 @@ module "service" { subnet_ids = data.aws_subnets.default.ids db_vars = module.app_config.has_database ? { - security_group_ids = data.aws_rds_cluster.db_cluster[0].vpc_security_group_ids - access_policy_arn = data.aws_iam_policy.db_access_policy[0].arn + security_group_ids = data.aws_rds_cluster.db_cluster[0].vpc_security_group_ids + access_policy_arn = data.aws_iam_policy.db_access_policy[0].arn + app_access_policy_arn = data.aws_iam_policy.app_db_access_policy[0].arn + migrator_access_policy_arn = data.aws_iam_policy.migrator_db_access_policy[0].arn connection_info = { host = data.aws_rds_cluster.db_cluster[0].endpoint port = data.aws_rds_cluster.db_cluster[0].port diff --git a/infra/frontend/service/outputs.tf b/infra/frontend/service/outputs.tf index dfabf49e6..7b3d29c0f 100644 --- a/infra/frontend/service/outputs.tf +++ b/infra/frontend/service/outputs.tf @@ -10,3 +10,11 @@ output "service_cluster_name" { output "service_name" { value = local.service_name } + +output "application_log_group" { + value = module.service.application_log_group +} + +output "application_log_stream_prefix" { + value = module.service.application_log_stream_prefix +} diff --git a/infra/frontend/service/variables.tf b/infra/frontend/service/variables.tf index 0c9be917e..19a5f312f 100644 --- a/infra/frontend/service/variables.tf +++ b/infra/frontend/service/variables.tf @@ -8,15 +8,3 @@ variable "image_tag" { description = "image tag to deploy to the environment" default = null } - -variable "tfstate_bucket" { - type = string -} - -variable "tfstate_key" { - type = string -} - -variable "region" { - type = string -} diff --git a/infra/modules/database/authentication.tf b/infra/modules/database/authentication.tf new file mode 100644 index 000000000..d9fc3906f --- /dev/null +++ b/infra/modules/database/authentication.tf @@ -0,0 +1,66 @@ +# Authentication +# -------------- + + +# TODO: Delete when no longer in use. Part 3 of multipart update https://github.com/navapbc/template-infra/issues/354#issuecomment-1693973424 +resource "aws_iam_policy" "db_access" { + name = var.access_policy_name + policy = data.aws_iam_policy_document.db_access.json +} + +# TODO: Delete when no longer in use. 
Part 3 of multipart update https://github.com/navapbc/template-infra/issues/354#issuecomment-1693973424 +data "aws_iam_policy_document" "db_access" { + # Policy to allow connection to RDS via IAM database authentication + # which is more secure than traditional username/password authentication + # https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/UsingWithRDS.IAMDBAuth.IAMPolicy.html + statement { + actions = [ + "rds-db:connect" + ] + + resources = [ + "${local.db_user_arn_prefix}/${var.app_username}", + "${local.db_user_arn_prefix}/${var.migrator_username}", + ] + } +} + +resource "aws_iam_policy" "app_db_access" { + name = var.app_access_policy_name + policy = data.aws_iam_policy_document.app_db_access.json +} + +data "aws_iam_policy_document" "app_db_access" { + # Policy to allow connection to RDS via IAM database authentication + # which is more secure than traditional username/password authentication + # https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/UsingWithRDS.IAMDBAuth.IAMPolicy.html + statement { + actions = [ + "rds-db:connect" + ] + + resources = [ + "${local.db_user_arn_prefix}/${var.app_username}", + ] + } +} + +resource "aws_iam_policy" "migrator_db_access" { + name = var.migrator_access_policy_name + policy = data.aws_iam_policy_document.migrator_db_access.json +} + +data "aws_iam_policy_document" "migrator_db_access" { + # Policy to allow connection to RDS via IAM database authentication + # which is more secure than traditional username/password authentication + # https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/UsingWithRDS.IAMDBAuth.IAMPolicy.html + statement { + actions = [ + "rds-db:connect" + ] + + resources = [ + "${local.db_user_arn_prefix}/${var.migrator_username}", + ] + } +} diff --git a/infra/modules/database/backups.tf b/infra/modules/database/backups.tf new file mode 100644 index 000000000..bdf8afe61 --- /dev/null +++ b/infra/modules/database/backups.tf @@ -0,0 +1,66 @@ +# Database Backups +# ---------------- + +# Backup plan that defines when and how to backup and which backup vault to store backups in +# See https://docs.aws.amazon.com/aws-backup/latest/devguide/about-backup-plans.html +resource "aws_backup_plan" "backup_plan" { + name = "${var.name}-db-backup-plan" + + rule { + rule_name = "${var.name}-db-backup-rule" + target_vault_name = aws_backup_vault.backup_vault.name + schedule = "cron(0 7 ? 
* SUN *)" # Run Sundays at 12pm (EST) + } +} + +# Backup vault that stores and organizes backups +# See https://docs.aws.amazon.com/aws-backup/latest/devguide/vaults.html +resource "aws_backup_vault" "backup_vault" { + name = "${var.name}-db-backup-vault" + kms_key_arn = data.aws_kms_key.backup_vault_key.arn +} + +# KMS Key for the vault +# This key was created by AWS by default alongside the vault +data "aws_kms_key" "backup_vault_key" { + key_id = "alias/aws/backup" +} + +# Backup selection defines which resources to backup +# See https://docs.aws.amazon.com/aws-backup/latest/devguide/assigning-resources.html +# and https://docs.aws.amazon.com/aws-backup/latest/devguide/API_BackupSelection.html +resource "aws_backup_selection" "db_backup" { + name = "${var.name}-db-backup" + plan_id = aws_backup_plan.backup_plan.id + iam_role_arn = aws_iam_role.db_backup_role.arn + + resources = [ + aws_rds_cluster.db.arn + ] +} + +# Role that AWS Backup uses to authenticate when backing up the target resource +resource "aws_iam_role" "db_backup_role" { + name_prefix = "${var.name}-db-backup-role-" + assume_role_policy = data.aws_iam_policy_document.db_backup_policy.json +} + +data "aws_iam_policy_document" "db_backup_policy" { + statement { + actions = [ + "sts:AssumeRole", + ] + + effect = "Allow" + + principals { + type = "Service" + identifiers = ["backup.amazonaws.com"] + } + } +} + +resource "aws_iam_role_policy_attachment" "db_backup_role_policy_attachment" { + role = aws_iam_role.db_backup_role.name + policy_arn = "arn:aws:iam::aws:policy/service-role/AWSBackupServiceRolePolicyForBackup" +} diff --git a/infra/modules/database/main.tf b/infra/modules/database/main.tf index 28de6a8e2..c3b2251f3 100644 --- a/infra/modules/database/main.tf +++ b/infra/modules/database/main.tf @@ -78,161 +78,6 @@ resource "aws_kms_key" "db" { enable_key_rotation = true } -# Network Configuration -# --------------------- - -resource "aws_security_group" "db" { - name_prefix = "${var.name}-db" - description = "Database layer security group" - vpc_id = var.vpc_id -} - -resource "aws_security_group" "role_manager" { - name_prefix = "${var.name}-role-manager" - description = "Database role manager security group" - vpc_id = var.vpc_id -} - -resource "aws_vpc_security_group_egress_rule" "role_manager_egress_to_db" { - security_group_id = aws_security_group.role_manager.id - description = "Allow role manager to access database" - - from_port = 5432 - to_port = 5432 - ip_protocol = "tcp" - referenced_security_group_id = aws_security_group.db.id -} - -resource "aws_vpc_security_group_ingress_rule" "db_ingress_from_role_manager" { - security_group_id = aws_security_group.db.id - description = "Allow inbound requests to database from role manager" - - from_port = 5432 - to_port = 5432 - ip_protocol = "tcp" - referenced_security_group_id = aws_security_group.role_manager.id -} - -# Authentication -# -------------- - -resource "aws_iam_policy" "db_access" { - name = var.access_policy_name - policy = data.aws_iam_policy_document.db_access.json -} - -data "aws_iam_policy_document" "db_access" { - # Policy to allow connection to RDS via IAM database authentication - # which is more secure than traditional username/password authentication - # https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/UsingWithRDS.IAMDBAuth.IAMPolicy.html - statement { - actions = [ - "rds-db:connect" - ] - - resources = [ - "${local.db_user_arn_prefix}/${var.app_username}", - "${local.db_user_arn_prefix}/${var.migrator_username}", - ] - } -} - -# Database 
Backups -# ---------------- - -# Backup plan that defines when and how to backup and which backup vault to store backups in -# See https://docs.aws.amazon.com/aws-backup/latest/devguide/about-backup-plans.html -resource "aws_backup_plan" "backup_plan" { - name = "${var.name}-db-backup-plan" - - rule { - rule_name = "${var.name}-db-backup-rule" - target_vault_name = aws_backup_vault.backup_vault.name - schedule = "cron(0 7 ? * SUN *)" # Run Sundays at 12pm (EST) - } -} - -# Backup vault that stores and organizes backups -# See https://docs.aws.amazon.com/aws-backup/latest/devguide/vaults.html -resource "aws_backup_vault" "backup_vault" { - name = "${var.name}-db-backup-vault" - kms_key_arn = data.aws_kms_key.backup_vault_key.arn -} - -# KMS Key for the vault -# This key was created by AWS by default alongside the vault -data "aws_kms_key" "backup_vault_key" { - key_id = "alias/aws/backup" -} - -# Backup selection defines which resources to backup -# See https://docs.aws.amazon.com/aws-backup/latest/devguide/assigning-resources.html -# and https://docs.aws.amazon.com/aws-backup/latest/devguide/API_BackupSelection.html -resource "aws_backup_selection" "db_backup" { - name = "${var.name}-db-backup" - plan_id = aws_backup_plan.backup_plan.id - iam_role_arn = aws_iam_role.db_backup_role.arn - - resources = [ - aws_rds_cluster.db.arn - ] -} - -# Role that AWS Backup uses to authenticate when backing up the target resource -resource "aws_iam_role" "db_backup_role" { - name_prefix = "${var.name}-db-backup-role-" - assume_role_policy = data.aws_iam_policy_document.db_backup_policy.json -} - -data "aws_iam_policy_document" "db_backup_policy" { - statement { - actions = [ - "sts:AssumeRole", - ] - - effect = "Allow" - - principals { - type = "Service" - identifiers = ["backup.amazonaws.com"] - } - } -} - -resource "aws_iam_role_policy_attachment" "db_backup_role_policy_attachment" { - role = aws_iam_role.db_backup_role.name - policy_arn = "arn:aws:iam::aws:policy/service-role/AWSBackupServiceRolePolicyForBackup" -} - -#----------------------------------# -# IAM role for enhanced monitoring # -#----------------------------------# - -resource "aws_iam_role" "rds_enhanced_monitoring" { - name_prefix = "${var.name}-enhanced-monitoring-" - assume_role_policy = data.aws_iam_policy_document.rds_enhanced_monitoring.json -} - -resource "aws_iam_role_policy_attachment" "rds_enhanced_monitoring" { - role = aws_iam_role.rds_enhanced_monitoring.name - policy_arn = "arn:aws:iam::aws:policy/service-role/AmazonRDSEnhancedMonitoringRole" -} - -data "aws_iam_policy_document" "rds_enhanced_monitoring" { - statement { - actions = [ - "sts:AssumeRole", - ] - - effect = "Allow" - - principals { - type = "Service" - identifiers = ["monitoring.rds.amazonaws.com"] - } - } -} - # Query Logging # ------------- @@ -253,182 +98,3 @@ resource "aws_rds_cluster_parameter_group" "rds_query_logging" { value = "100" } } - -# Role Manager Lambda Function -# ---------------------------- -# -# Resources for the lambda function that is used for managing database roles -# This includes creating and granting permissions to roles -# as well as viewing existing roles - -resource "aws_lambda_function" "role_manager" { - function_name = local.role_manager_name - - filename = local.role_manager_package - source_code_hash = data.archive_file.role_manager.output_base64sha256 - runtime = "python3.9" - handler = "role_manager.lambda_handler" - role = aws_iam_role.role_manager.arn - kms_key_arn = aws_kms_key.role_manager.arn - - # Only allow 1 
concurrent execution at a time - reserved_concurrent_executions = 1 - - vpc_config { - subnet_ids = var.private_subnet_ids - security_group_ids = [aws_security_group.role_manager.id] - } - - environment { - variables = { - DB_HOST = aws_rds_cluster.db.endpoint - DB_PORT = aws_rds_cluster.db.port - DB_USER = local.master_username - DB_NAME = aws_rds_cluster.db.database_name - DB_PASSWORD_PARAM_NAME = aws_ssm_parameter.random_db_password.name - DB_SCHEMA = var.schema_name - APP_USER = var.app_username - MIGRATOR_USER = var.migrator_username - PYTHONPATH = "vendor" - } - } - - # Ensure AWS Lambda functions with tracing are enabled - # https://docs.bridgecrew.io/docs/bc_aws_serverless_4 - tracing_config { - mode = "Active" - } - - # checkov:skip=CKV_AWS_272:TODO(https://github.com/navapbc/template-infra/issues/283) - - # checkov:skip=CKV_AWS_116:Dead letter queue (DLQ) configuration is only relevant for asynchronous invocations -} - -# Installs python packages needed by the role manager lambda function before -# creating the zip archive. Reinstalls whenever requirements.txt changes -resource "terraform_data" "role_manager_python_vendor_packages" { - triggers_replace = file("${path.module}/role_manager/requirements.txt") - - provisioner "local-exec" { - command = "pip3 install -r ${path.module}/role_manager/requirements.txt -t ${path.module}/role_manager/vendor" - } -} - -data "archive_file" "role_manager" { - type = "zip" - source_dir = "${path.module}/role_manager" - output_path = local.role_manager_package - depends_on = [terraform_data.role_manager_python_vendor_packages] -} - -resource "aws_iam_role" "role_manager" { - name = "${var.name}-manager" - assume_role_policy = data.aws_iam_policy_document.role_manager_assume_role.json - managed_policy_arns = [data.aws_iam_policy.lambda_vpc_access.arn] -} - -resource "aws_iam_role_policy" "ssm_access" { - name = "${var.name}-role-manager-ssm-access" - role = aws_iam_role.role_manager.id - - policy = jsonencode({ - Version = "2012-10-17" - Statement = [ - { - Effect = "Allow" - Action = ["ssm:GetParameter*"] - Resource = "${aws_ssm_parameter.random_db_password.arn}" - }, - { - Effect = "Allow" - Action = ["kms:Decrypt"] - Resource = [data.aws_kms_key.default_ssm_key.arn] - } - ] - }) -} - -data "aws_kms_key" "default_ssm_key" { - key_id = "alias/aws/ssm" -} - -data "aws_iam_policy_document" "role_manager_assume_role" { - statement { - effect = "Allow" - - principals { - type = "Service" - identifiers = ["lambda.amazonaws.com"] - } - - actions = ["sts:AssumeRole"] - } -} - -# AWS managed policy required by Lambda functions in order to access VPC resources -# see https://docs.aws.amazon.com/lambda/latest/dg/configuration-vpc.html -data "aws_iam_policy" "lambda_vpc_access" { - name = "AWSLambdaVPCAccessExecutionRole" -} - -# KMS key used to encrypt role manager's environment variables -resource "aws_kms_key" "role_manager" { - description = "Key for Lambda function ${local.role_manager_name}" - enable_key_rotation = true -} - -# VPC Endpoints for accessing AWS Services -# ---------------------------------------- -# -# Since the role manager Lambda function is in the VPC (which is needed to be -# able to access the database) we need to allow the Lambda function to access -# AWS Systems Manager Parameter Store (to fetch the database password) and -# KMS (to decrypt SecureString parameters from Parameter Store). We can do -# this by either allowing internet access to the Lambda, or by using a VPC -# endpoint. The latter is more secure. 
-# See https://repost.aws/knowledge-center/lambda-vpc-parameter-store -# See https://docs.aws.amazon.com/vpc/latest/privatelink/create-interface-endpoint.html#create-interface-endpoint - -resource "aws_vpc_endpoint" "ssm" { - vpc_id = var.vpc_id - service_name = "com.amazonaws.${data.aws_region.current.name}.ssm" - vpc_endpoint_type = "Interface" - security_group_ids = [aws_security_group.vpc_endpoints.id] - subnet_ids = var.private_subnet_ids - private_dns_enabled = true -} - -resource "aws_vpc_endpoint" "kms" { - vpc_id = var.vpc_id - service_name = "com.amazonaws.${data.aws_region.current.name}.kms" - vpc_endpoint_type = "Interface" - security_group_ids = [aws_security_group.vpc_endpoints.id] - subnet_ids = var.private_subnet_ids - private_dns_enabled = true -} - -resource "aws_security_group" "vpc_endpoints" { - name_prefix = "${var.name}-vpc-endpoints" - description = "VPC endpoints to access SSM and KMS" - vpc_id = var.vpc_id -} - -resource "aws_vpc_security_group_egress_rule" "role_manager_egress_to_vpc_endpoints" { - security_group_id = aws_security_group.role_manager.id - description = "Allow outbound requests from role manager to VPC endpoints" - - from_port = 443 - to_port = 443 - ip_protocol = "tcp" - referenced_security_group_id = aws_security_group.vpc_endpoints.id -} - -resource "aws_vpc_security_group_ingress_rule" "vpc_endpoints_ingress_from_role_manager" { - security_group_id = aws_security_group.vpc_endpoints.id - description = "Allow inbound requests to VPC endpoints from role manager" - - from_port = 443 - to_port = 443 - ip_protocol = "tcp" - referenced_security_group_id = aws_security_group.role_manager.id -} diff --git a/infra/modules/database/monitoring.tf b/infra/modules/database/monitoring.tf new file mode 100644 index 000000000..ee5a6ca0b --- /dev/null +++ b/infra/modules/database/monitoring.tf @@ -0,0 +1,28 @@ +#----------------------------------# +# IAM role for enhanced monitoring # +#----------------------------------# + +resource "aws_iam_role" "rds_enhanced_monitoring" { + name_prefix = "${var.name}-enhanced-monitoring-" + assume_role_policy = data.aws_iam_policy_document.rds_enhanced_monitoring.json +} + +resource "aws_iam_role_policy_attachment" "rds_enhanced_monitoring" { + role = aws_iam_role.rds_enhanced_monitoring.name + policy_arn = "arn:aws:iam::aws:policy/service-role/AmazonRDSEnhancedMonitoringRole" +} + +data "aws_iam_policy_document" "rds_enhanced_monitoring" { + statement { + actions = [ + "sts:AssumeRole", + ] + + effect = "Allow" + + principals { + type = "Service" + identifiers = ["monitoring.rds.amazonaws.com"] + } + } +} diff --git a/infra/modules/database/networking.tf b/infra/modules/database/networking.tf new file mode 100644 index 000000000..afc0e46ef --- /dev/null +++ b/infra/modules/database/networking.tf @@ -0,0 +1,54 @@ +# Network Configuration +# --------------------- + +resource "aws_security_group" "db" { + name_prefix = "${var.name}-db" + description = "Database layer security group" + vpc_id = var.vpc_id +} + +resource "aws_security_group" "role_manager" { + name_prefix = "${var.name}-role-manager" + description = "Database role manager security group" + vpc_id = var.vpc_id +} + +resource "aws_vpc_security_group_egress_rule" "role_manager_egress_to_db" { + security_group_id = aws_security_group.role_manager.id + description = "Allow role manager to access database" + + from_port = 5432 + to_port = 5432 + ip_protocol = "tcp" + referenced_security_group_id = aws_security_group.db.id +} + +resource 
"aws_vpc_security_group_ingress_rule" "db_ingress_from_role_manager" { + security_group_id = aws_security_group.db.id + description = "Allow inbound requests to database from role manager" + + from_port = 5432 + to_port = 5432 + ip_protocol = "tcp" + referenced_security_group_id = aws_security_group.role_manager.id +} + +resource "aws_vpc_security_group_egress_rule" "role_manager_egress_to_vpc_endpoints" { + security_group_id = aws_security_group.role_manager.id + description = "Allow outbound requests from role manager to VPC endpoints" + + from_port = 443 + to_port = 443 + ip_protocol = "tcp" + referenced_security_group_id = var.aws_services_security_group_id +} + +resource "aws_vpc_security_group_ingress_rule" "vpc_endpoints_ingress_from_role_manager" { + security_group_id = var.aws_services_security_group_id + description = "Allow inbound requests to VPC endpoints from role manager" + + from_port = 443 + to_port = 443 + ip_protocol = "tcp" + referenced_security_group_id = aws_security_group.role_manager.id +} diff --git a/infra/modules/database/role-manager.tf b/infra/modules/database/role-manager.tf new file mode 100644 index 000000000..97d8f8c58 --- /dev/null +++ b/infra/modules/database/role-manager.tf @@ -0,0 +1,131 @@ +# Role Manager Lambda Function +# ---------------------------- +# +# Resources for the lambda function that is used for managing database roles +# This includes creating and granting permissions to roles +# as well as viewing existing roles + +resource "aws_lambda_function" "role_manager" { + function_name = local.role_manager_name + + filename = local.role_manager_package + source_code_hash = data.archive_file.role_manager.output_base64sha256 + runtime = "python3.9" + handler = "role_manager.lambda_handler" + role = aws_iam_role.role_manager.arn + kms_key_arn = aws_kms_key.role_manager.arn + + # Only allow 1 concurrent execution at a time + reserved_concurrent_executions = 1 + + vpc_config { + subnet_ids = var.private_subnet_ids + security_group_ids = [aws_security_group.role_manager.id] + } + + environment { + variables = { + DB_HOST = aws_rds_cluster.db.endpoint + DB_PORT = aws_rds_cluster.db.port + DB_USER = local.master_username + DB_NAME = aws_rds_cluster.db.database_name + DB_PASSWORD_PARAM_NAME = aws_ssm_parameter.random_db_password.name + DB_SCHEMA = var.schema_name + APP_USER = var.app_username + MIGRATOR_USER = var.migrator_username + PYTHONPATH = "vendor" + } + } + + # Ensure AWS Lambda functions with tracing are enabled + # https://docs.bridgecrew.io/docs/bc_aws_serverless_4 + tracing_config { + mode = "Active" + } + + # checkov:skip=CKV_AWS_272:TODO(https://github.com/navapbc/template-infra/issues/283) + + # checkov:skip=CKV_AWS_116:Dead letter queue (DLQ) configuration is only relevant for asynchronous invocations +} + +# Installs python packages needed by the role manager lambda function before +# creating the zip archive. 
Reinstalls whenever requirements.txt changes +resource "terraform_data" "role_manager_python_vendor_packages" { +  triggers_replace = file("${path.module}/role_manager/requirements.txt") + +  provisioner "local-exec" { +    command = "pip3 install -r ${path.module}/role_manager/requirements.txt -t ${path.module}/role_manager/vendor" +  } +} + +data "archive_file" "role_manager" { +  type = "zip" +  source_dir = "${path.module}/role_manager" +  output_path = local.role_manager_package +  depends_on = [terraform_data.role_manager_python_vendor_packages] +} + +data "aws_kms_key" "default_ssm_key" { +  key_id = "alias/aws/ssm" +} + +# KMS key used to encrypt role manager's environment variables +resource "aws_kms_key" "role_manager" { +  description = "Key for Lambda function ${local.role_manager_name}" +  enable_key_rotation = true +} + +# IAM for Role Manager lambda function +resource "aws_iam_role" "role_manager" { +  name = "${var.name}-manager" +  assume_role_policy = data.aws_iam_policy_document.role_manager_assume_role.json +  managed_policy_arns = [ +    data.aws_iam_policy.lambda_vpc_access.arn, + +    # Grant the role manager access to the DB as app and migrator users +    # so that it can perform database checks. This is needed by +    # the infra database tests +    aws_iam_policy.app_db_access.arn, +    aws_iam_policy.migrator_db_access.arn +  ] +} + +resource "aws_iam_role_policy" "ssm_access" { +  name = "${var.name}-role-manager-ssm-access" +  role = aws_iam_role.role_manager.id + +  policy = jsonencode({ +    Version = "2012-10-17" +    Statement = [ +      { +        Effect = "Allow" +        Action = ["ssm:GetParameter*"] +        Resource = "${aws_ssm_parameter.random_db_password.arn}" +      }, +      { +        Effect = "Allow" +        Action = ["kms:Decrypt"] +        Resource = [data.aws_kms_key.default_ssm_key.arn] +      } +    ] +  }) +} + +data "aws_iam_policy_document" "role_manager_assume_role" { +  statement { +    effect = "Allow" + +    principals { +      type = "Service" +      identifiers = ["lambda.amazonaws.com"] +    } + +    actions = ["sts:AssumeRole"] +  } +} + +# AWS managed policy required by Lambda functions in order to access VPC resources +# see https://docs.aws.amazon.com/lambda/latest/dg/configuration-vpc.html +data "aws_iam_policy" "lambda_vpc_access" { +  name = "AWSLambdaVPCAccessExecutionRole" +} diff --git a/infra/modules/database/role_manager/role_manager.py b/infra/modules/database/role_manager/role_manager.py index 9914238e8..4b5c222c1 100644 --- a/infra/modules/database/role_manager/role_manager.py +++ b/infra/modules/database/role_manager/role_manager.py @@ -9,10 +9,18 @@ logger.setLevel(logging.INFO) def lambda_handler(event, context): -    conn = connect() +    if event == "check": +        return check() +    else: +        return manage() -    logger.info("Current database configuration") +def manage(): +    """Manage database roles, schema, and privileges""" + +    logger.info("Running command 'manage' to manage database roles, schema, and privileges") +    conn = connect_as_master_user() +    logger.info("Current database configuration") prev_roles = get_roles(conn) print_roles(prev_roles) @@ -40,7 +48,48 @@ def lambda_handler(event, context): }, } -def connect() -> Connection: +def check(): +    """Check that database roles, schema, and privileges were +    properly configured +    """ +    logger.info("Running command 'check' to check database roles, schema, and privileges") +    app_username = os.environ.get("APP_USER") +    migrator_username = os.environ.get("MIGRATOR_USER") +    schema_name = os.environ.get("DB_SCHEMA") +    app_conn = connect_using_iam(app_username) +    migrator_conn = 
connect_using_iam(migrator_username) + + check_search_path(migrator_conn, schema_name) + check_migrator_create_table(migrator_conn, app_username) + check_app_use_table(app_conn) + cleanup_migrator_drop_table(migrator_conn) + + return {"success": True} + + +def check_search_path(migrator_conn: Connection, schema_name: str): + logger.info("Checking that search path is %s", schema_name) + assert migrator_conn.run("SHOW search_path") == [[schema_name]] + + +def check_migrator_create_table(migrator_conn: Connection, app_username: str): + logger.info("Checking that migrator is able to create tables and grant access to app user: %s", app_username) + migrator_conn.run("CREATE TABLE IF NOT EXISTS temporary(created_at TIMESTAMP)") + migrator_conn.run(f"GRANT ALL PRIVILEGES ON temporary TO {identifier(app_username)}") + + +def check_app_use_table(app_conn: Connection): + logger.info("Checking that app is able to read and write from the table") + app_conn.run("INSERT INTO temporary (created_at) VALUES (NOW())") + app_conn.run("SELECT * FROM temporary") + + +def cleanup_migrator_drop_table(migrator_conn: Connection): + logger.info("Cleaning up the table that migrator created") + migrator_conn.run("DROP TABLE IF EXISTS temporary") + + +def connect_as_master_user() -> Connection: user = os.environ["DB_USER"] host = os.environ["DB_HOST"] port = os.environ["DB_PORT"] @@ -48,8 +97,19 @@ def connect() -> Connection: password = get_password() logger.info("Connecting to database: user=%s host=%s port=%s database=%s", user, host, port, database) - return Connection(user=user, host=host, port=port, database=database, password=password) + return Connection(user=user, host=host, port=port, database=database, password=password, ssl_context=True) + +def connect_using_iam(user: str) -> Connection: + client = boto3.client("rds") + host = os.environ["DB_HOST"] + port = os.environ["DB_PORT"] + database = os.environ["DB_NAME"] + token = client.generate_db_auth_token( + DBHostname=host, Port=port, DBUsername=user + ) + logger.info("Connecting to database: user=%s host=%s port=%s database=%s", user, host, port, database) + return Connection(user=user, host=host, port=port, database=database, password=token, ssl_context=True) def get_password() -> str: ssm = boto3.client("ssm") @@ -97,18 +157,26 @@ def configure_database(conn: Connection) -> None: app_username = os.environ.get("APP_USER") migrator_username = os.environ.get("MIGRATOR_USER") schema_name = os.environ.get("DB_SCHEMA") + database_name = os.environ.get("DB_NAME") + + logger.info("Revoking default access on public schema") + conn.run("REVOKE CREATE ON SCHEMA public FROM PUBLIC") + logger.info("Revoking database access from public role") + conn.run(f"REVOKE ALL ON DATABASE {identifier(database_name)} FROM PUBLIC") + logger.info("Setting default search path to schema=%s", schema_name) + conn.run(f"ALTER DATABASE {identifier(database_name)} SET search_path TO {identifier(schema_name)}") - configure_roles(conn, [migrator_username, app_username]) + configure_roles(conn, [migrator_username, app_username], database_name) configure_schema(conn, schema_name, migrator_username, app_username) -def configure_roles(conn: Connection, roles: list[str]) -> None: +def configure_roles(conn: Connection, roles: list[str], database_name: str) -> None: logger.info("Configuring roles") for role in roles: - configure_role(conn, role) + configure_role(conn, role, database_name) -def configure_role(conn: Connection, username: str) -> None: +def configure_role(conn: Connection, username: 
str, database_name: str) -> None: logger.info("Configuring role: username=%s", username) role = "rds_iam" conn.run( @@ -123,6 +191,7 @@ def configure_role(conn: Connection, username: str) -> None: """ ) conn.run(f"GRANT {identifier(role)} TO {identifier(username)}") + conn.run(f"GRANT CONNECT ON DATABASE {identifier(database_name)} TO {identifier(username)}") def configure_schema(conn: Connection, schema_name: str, migrator_username: str, app_username: str) -> None: diff --git a/infra/modules/database/variables.tf b/infra/modules/database/variables.tf index 513fc3668..8ec48dc03 100644 --- a/infra/modules/database/variables.tf +++ b/infra/modules/database/variables.tf @@ -12,6 +12,16 @@ variable "access_policy_name" { type = string } +variable "app_access_policy_name" { + description = "name of the IAM policy to create that will provide the service the ability to connect to the database as a user that will have read/write access." + type = string +} + +variable "migrator_access_policy_name" { + description = "name of the IAM policy to create that will provide the migration task the ability to connect to the database as a user that will have read/write access." + type = string +} + variable "app_username" { description = "name of the database user to create that will be for the application." type = string @@ -50,3 +60,8 @@ variable "private_subnet_ids" { type = list(any) description = "list of private subnet IDs to put the role provisioner and role checker lambda functions in" } + +variable "aws_services_security_group_id" { + type = string + description = "Security group ID for VPC endpoints that access AWS Services" +} diff --git a/infra/modules/service/access-control.tf b/infra/modules/service/access-control.tf new file mode 100644 index 000000000..0b295088f --- /dev/null +++ b/infra/modules/service/access-control.tf @@ -0,0 +1,71 @@ +#---------------- +# Access Control +#---------------- + +resource "aws_iam_role" "task_executor" { + name = local.task_executor_role_name + assume_role_policy = data.aws_iam_policy_document.ecs_tasks_assume_role_policy.json +} + +resource "aws_iam_role" "app_service" { + name = "${var.service_name}-app" + assume_role_policy = data.aws_iam_policy_document.ecs_tasks_assume_role_policy.json +} + +resource "aws_iam_role" "migrator_task" { + count = var.db_vars != null ? 1 : 0 + + name = "${var.service_name}-migrator" + assume_role_policy = data.aws_iam_policy_document.ecs_tasks_assume_role_policy.json +} + +data "aws_iam_policy_document" "ecs_tasks_assume_role_policy" { + statement { + sid = "ECSTasksAssumeRole" + actions = [ + "sts:AssumeRole" + ] + principals { + type = "Service" + identifiers = ["ecs-tasks.amazonaws.com"] + } + } +} + +data "aws_iam_policy_document" "task_executor" { + # Allow ECS to log to Cloudwatch. + statement { + actions = [ + "logs:CreateLogStream", + "logs:PutLogEvents", + "logs:DescribeLogStreams" + ] + resources = ["${aws_cloudwatch_log_group.service_logs.arn}:*"] + } + + # Allow ECS to authenticate with ECR + statement { + sid = "ECRAuth" + actions = [ + "ecr:GetAuthorizationToken", + ] + resources = ["*"] + } + + # Allow ECS to download images. 
+  statement { +    sid = "ECRPullAccess" +    actions = [ +      "ecr:BatchCheckLayerAvailability", +      "ecr:BatchGetImage", +      "ecr:GetDownloadUrlForLayer", +    ] +    resources = [data.aws_ecr_repository.app.arn] +  } +} + +resource "aws_iam_role_policy" "task_executor" { +  name = "${var.service_name}-task-executor-role-policy" +  role = aws_iam_role.task_executor.id +  policy = data.aws_iam_policy_document.task_executor.json +} diff --git a/infra/modules/service/access_logs.tf b/infra/modules/service/access-logs.tf similarity index 95% rename from infra/modules/service/access_logs.tf rename to infra/modules/service/access-logs.tf index 5b330e92e..da10075e2 100644 --- a/infra/modules/service/access_logs.tf +++ b/infra/modules/service/access-logs.tf @@ -26,6 +26,7 @@ resource "aws_s3_bucket" "access_logs" { # checkov:skip=CKV_AWS_18:Access logging was not considered necessary for this bucket # checkov:skip=CKV_AWS_144:Not considered critical to the point of cross region replication # checkov:skip=CKV_AWS_300:Known issue where Checkov gets confused by multiple rules +  # checkov:skip=CKV_AWS_21:Bucket versioning is not worth it in this use case } resource "aws_s3_bucket_public_access_block" "access_logs" { @@ -86,13 +87,6 @@ resource "aws_s3_bucket_lifecycle_configuration" "access_logs" { # checkov:skip=CKV_AWS_300:There is a known issue where this check brings up false positives } -resource "aws_s3_bucket_versioning" "versioning" { -  bucket = aws_s3_bucket.access_logs.id -  versioning_configuration { -    status = "Enabled" -  } -} - resource "aws_s3_bucket_server_side_encryption_configuration" "encryption" { bucket = aws_s3_bucket.access_logs.id diff --git a/infra/modules/service/application-logs.tf b/infra/modules/service/application-logs.tf new file mode 100644 index 000000000..b0a8de471 --- /dev/null +++ b/infra/modules/service/application-logs.tf @@ -0,0 +1,15 @@ +#------ +# Logs +#------ + +# Cloudwatch log group for streaming ECS application logs. +resource "aws_cloudwatch_log_group" "service_logs" { +  name = local.log_group_name + +  # Conservatively retain logs for 5 years. +  # Looser requirements may allow shorter retention periods +  retention_in_days = 1827 + +  # TODO(https://github.com/navapbc/template-infra/issues/164) Encrypt with customer managed KMS key +  # checkov:skip=CKV_AWS_158:Encrypt service logs with customer key in future work +} diff --git a/infra/modules/service/database-access.tf b/infra/modules/service/database-access.tf new file mode 100644 index 000000000..ce791b0a9 --- /dev/null +++ b/infra/modules/service/database-access.tf @@ -0,0 +1,37 @@ +#----------------- +# Database Access +#----------------- + +resource "aws_vpc_security_group_ingress_rule" "db_ingress_from_service" { +  count = var.db_vars != null ? length(var.db_vars.security_group_ids) : 0 + +  security_group_id = var.db_vars.security_group_ids[count.index] +  description = "Allow inbound requests to database from ${var.service_name} service" + +  from_port = tonumber(var.db_vars.connection_info.port) +  to_port = tonumber(var.db_vars.connection_info.port) +  ip_protocol = "tcp" +  referenced_security_group_id = aws_security_group.app.id +} + +resource "aws_iam_role_policy_attachment" "app_service_db_access" { +  count = var.db_vars != null ? 1 : 0 + +  role = aws_iam_role.app_service.name +  policy_arn = var.db_vars.app_access_policy_arn +} + +resource "aws_iam_role_policy_attachment" "migrator_db_access" { +  count = var.db_vars != null ? 
1 : 0 + + role = aws_iam_role.migrator_task[0].name + policy_arn = var.db_vars.migrator_access_policy_arn +} + +# TODO: Delete as part 3 of multipart update https://github.com/navapbc/template-infra/issues/354#issuecomment-1693973424 +resource "aws_iam_role_policy_attachment" "temp_app_migrator_db_access" { + count = var.db_vars != null ? 1 : 0 + + role = aws_iam_role.app_service.name + policy_arn = var.db_vars.migrator_access_policy_arn +} diff --git a/infra/modules/service/load-balancer.tf b/infra/modules/service/load-balancer.tf new file mode 100644 index 000000000..6ec73e06e --- /dev/null +++ b/infra/modules/service/load-balancer.tf @@ -0,0 +1,96 @@ +#--------------- +# Load balancer +#--------------- + +# ALB for an app running in ECS +resource "aws_lb" "alb" { + depends_on = [aws_s3_bucket_policy.access_logs] + name = var.service_name + idle_timeout = "120" + internal = false + security_groups = [aws_security_group.alb.id] + subnets = var.subnet_ids + + # TODO(https://github.com/navapbc/template-infra/issues/163) Implement HTTPS + # checkov:skip=CKV2_AWS_20:Redirect HTTP to HTTPS as part of implementing HTTPS support + + # TODO(https://github.com/navapbc/template-infra/issues/161) Prevent deletion protection + # checkov:skip=CKV_AWS_150:Allow deletion until we can automate deletion for automated tests + # enable_deletion_protection = true + + # TODO(https://github.com/navapbc/template-infra/issues/165) Protect ALB with WAF + # checkov:skip=CKV2_AWS_28:Implement WAF in issue #165 + + # Drop invalid HTTP headers for improved security + # Note that header names cannot contain underscores + # https://docs.bridgecrew.io/docs/ensure-that-alb-drops-http-headers + drop_invalid_header_fields = true + + access_logs { + bucket = aws_s3_bucket.access_logs.id + prefix = "${var.service_name}-lb" + enabled = true + } +} + +# NOTE: for the demo we expose private http endpoint +# due to the complexity of acquiring a valid TLS/SSL cert. 
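For illustration, once a certificate is available an HTTPS listener for this ALB might look roughly like the sketch below. This is a sketch only: the domain_name variable, the ACM data source, and the chosen TLS policy are placeholders and are not part of this change.

# Hypothetical HTTPS listener sketch (assumes an issued ACM certificate and a domain_name variable exist)
data "aws_acm_certificate" "cert" {
  domain   = var.domain_name # placeholder variable, not defined in this module
  statuses = ["ISSUED"]
}

resource "aws_lb_listener" "alb_listener_https" {
  load_balancer_arn = aws_lb.alb.arn
  port              = "443"
  protocol          = "HTTPS"
  ssl_policy        = "ELBSecurityPolicy-TLS13-1-2-2021-06" # one of the AWS-managed TLS policies
  certificate_arn   = data.aws_acm_certificate.cert.arn

  default_action {
    type = "fixed-response"
    fixed_response {
      content_type = "text/plain"
      message_body = "Not Found"
      status_code  = "404"
    }
  }
}

The existing HTTP listener would then typically be changed to redirect to port 443, which is what the CKV2_AWS_20 skip above refers to.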
+# In a production system we would provision an https listener +resource "aws_lb_listener" "alb_listener_http" { + # TODO(https://github.com/navapbc/template-infra/issues/163) Use HTTPS protocol + # checkov:skip=CKV_AWS_2:Implement HTTPS in issue #163 + # checkov:skip=CKV_AWS_103:Require TLS 1.2 as part of implementing HTTPS support + + load_balancer_arn = aws_lb.alb.arn + port = "80" + protocol = "HTTP" + + default_action { + type = "fixed-response" + + fixed_response { + content_type = "text/plain" + message_body = "Not Found" + status_code = "404" + } + } +} + +resource "aws_lb_listener_rule" "app_http_forward" { + listener_arn = aws_lb_listener.alb_listener_http.arn + priority = 100 + + action { + type = "forward" + target_group_arn = aws_lb_target_group.app_tg.arn + } + condition { + path_pattern { + values = ["/*"] + } + } +} + +resource "aws_lb_target_group" "app_tg" { + # you must use a prefix, to facilitate successful tg changes + name_prefix = "app-" + port = var.container_port + protocol = "HTTP" + vpc_id = var.vpc_id + target_type = "ip" + deregistration_delay = "30" + + health_check { + path = "/health" + port = var.container_port + healthy_threshold = 2 + unhealthy_threshold = 10 + interval = 30 + timeout = 29 + matcher = "200-299" + } + + lifecycle { + create_before_destroy = true + } +} diff --git a/infra/modules/service/main.tf b/infra/modules/service/main.tf index b36d0a5ac..aac221d5e 100644 --- a/infra/modules/service/main.tf +++ b/infra/modules/service/main.tf @@ -8,6 +8,7 @@ locals { alb_name = var.service_name cluster_name = var.service_name log_group_name = "service/${var.service_name}" + log_stream_prefix = var.service_name task_executor_role_name = "${var.service_name}-task-executor" image_url = "${data.aws_ecr_repository.app.repository_url}:${var.image_tag}" @@ -25,104 +26,6 @@ locals { environment_variables = concat(local.base_environment_variables, local.db_environment_variables) } - -#--------------- -# Load balancer -#--------------- - -# ALB for an app running in ECS -resource "aws_lb" "alb" { - depends_on = [aws_s3_bucket_policy.access_logs] - name = var.service_name - idle_timeout = "120" - internal = false - security_groups = [aws_security_group.alb.id] - subnets = var.subnet_ids - - # TODO(https://github.com/navapbc/template-infra/issues/163) Implement HTTPS - # checkov:skip=CKV2_AWS_20:Redirect HTTP to HTTPS as part of implementing HTTPS support - - # TODO(https://github.com/navapbc/template-infra/issues/161) Prevent deletion protection - # checkov:skip=CKV_AWS_150:Allow deletion until we can automate deletion for automated tests - # enable_deletion_protection = true - - # TODO(https://github.com/navapbc/template-infra/issues/165) Protect ALB with WAF - # checkov:skip=CKV2_AWS_28:Implement WAF in issue #165 - - # Drop invalid HTTP headers for improved security - # Note that header names cannot contain underscores - # https://docs.bridgecrew.io/docs/ensure-that-alb-drops-http-headers - drop_invalid_header_fields = true - - access_logs { - bucket = aws_s3_bucket.access_logs.id - prefix = "${var.service_name}-lb" - enabled = true - } -} - -# NOTE: for the demo we expose private http endpoint -# due to the complexity of acquiring a valid TLS/SSL cert. 
-# In a production system we would provision an https listener -resource "aws_lb_listener" "alb_listener_http" { - # TODO(https://github.com/navapbc/template-infra/issues/163) Use HTTPS protocol - # checkov:skip=CKV_AWS_2:Implement HTTPS in issue #163 - # checkov:skip=CKV_AWS_103:Require TLS 1.2 as part of implementing HTTPS support - - load_balancer_arn = aws_lb.alb.arn - port = "80" - protocol = "HTTP" - - default_action { - type = "fixed-response" - - fixed_response { - content_type = "text/plain" - message_body = "Not Found" - status_code = "404" - } - } -} - -resource "aws_lb_listener_rule" "app_http_forward" { - listener_arn = aws_lb_listener.alb_listener_http.arn - priority = 100 - - action { - type = "forward" - target_group_arn = aws_lb_target_group.app_tg.arn - } - condition { - path_pattern { - values = ["/*"] - } - } -} - -resource "aws_lb_target_group" "app_tg" { - # you must use a prefix, to facilitate successful tg changes - name_prefix = "app-" - port = var.container_port - protocol = "HTTP" - vpc_id = var.vpc_id - target_type = "ip" - deregistration_delay = "30" - - health_check { - path = "/health" - port = var.container_port - healthy_threshold = 2 - unhealthy_threshold = 10 - interval = 30 - timeout = 29 - matcher = "200-299" - } - - lifecycle { - create_before_destroy = true - } -} - #------------------- # Service Execution #------------------- @@ -158,7 +61,7 @@ resource "aws_ecs_service" "app" { resource "aws_ecs_task_definition" "app" { family = var.service_name execution_role_arn = aws_iam_role.task_executor.arn - task_role_arn = aws_iam_role.service.arn + task_role_arn = aws_iam_role.app_service.arn # when is this needed? # task_role_arn = aws_iam_role.app_service.arn @@ -201,7 +104,7 @@ resource "aws_ecs_task_definition" "app" { options = { "awslogs-group" = aws_cloudwatch_log_group.service_logs.name, "awslogs-region" = data.aws_region.current.name, - "awslogs-stream-prefix" = var.service_name + "awslogs-stream-prefix" = local.log_stream_prefix } } } @@ -224,176 +127,3 @@ resource "aws_ecs_cluster" "cluster" { value = "enabled" } } - -#------ -# Logs -#------ - -# Cloudwatch log group to for streaming ECS application logs. -resource "aws_cloudwatch_log_group" "service_logs" { - name = local.log_group_name - - # Conservatively retain logs for 5 years. - # Looser requirements may allow shorter retention periods - retention_in_days = 1827 - - # TODO(https://github.com/navapbc/template-infra/issues/164) Encrypt with customer managed KMS key - # checkov:skip=CKV_AWS_158:Encrypt service logs with customer key in future work -} - -#---------------- -# Access Control -#---------------- - -resource "aws_iam_role" "task_executor" { - name = local.task_executor_role_name - assume_role_policy = data.aws_iam_policy_document.ecs_tasks_assume_role_policy.json -} - -resource "aws_iam_role" "service" { - name = var.service_name - assume_role_policy = data.aws_iam_policy_document.ecs_tasks_assume_role_policy.json -} - -data "aws_iam_policy_document" "ecs_tasks_assume_role_policy" { - statement { - sid = "ECSTasksAssumeRole" - actions = [ - "sts:AssumeRole" - ] - principals { - type = "Service" - identifiers = ["ecs-tasks.amazonaws.com"] - } - } -} - -data "aws_iam_policy_document" "task_executor" { - # Allow ECS to log to Cloudwatch. 
- statement { - actions = [ - "logs:CreateLogStream", - "logs:PutLogEvents", - "logs:DescribeLogStreams" - ] - resources = ["${aws_cloudwatch_log_group.service_logs.arn}:*"] - } - - # Allow ECS to authenticate with ECR - statement { - sid = "ECRAuth" - actions = [ - "ecr:GetAuthorizationToken", - ] - resources = ["*"] - } - - # Allow ECS to download images. - statement { - sid = "ECRPullAccess" - actions = [ - "ecr:BatchCheckLayerAvailability", - "ecr:BatchGetImage", - "ecr:GetDownloadUrlForLayer", - ] - resources = [data.aws_ecr_repository.app.arn] - } -} - -resource "aws_iam_role_policy" "task_executor" { - name = "${var.service_name}-task-executor-role-policy" - role = aws_iam_role.task_executor.id - policy = data.aws_iam_policy_document.task_executor.json -} - -#----------------------- -# Network Configuration -#----------------------- - -resource "aws_security_group" "alb" { - # Specify name_prefix instead of name because when a change requires creating a new - # security group, sometimes the change requires the new security group to be created - # before the old one is destroyed. In this situation, the new one needs a unique name - name_prefix = "${var.service_name}-alb" - description = "Allow TCP traffic to application load balancer" - - lifecycle { - create_before_destroy = true - - # changing the description is a destructive change - # just ignore it - ignore_changes = [description] - } - - vpc_id = var.vpc_id - - # TODO(https://github.com/navapbc/template-infra/issues/163) Disallow incoming traffic to port 80 - # checkov:skip=CKV_AWS_260:Disallow ingress from 0.0.0.0:0 to port 80 when implementing HTTPS support in issue #163 - ingress { - description = "Allow HTTP traffic from public internet" - from_port = 80 - to_port = 80 - protocol = "tcp" - cidr_blocks = ["0.0.0.0/0"] - } - - egress { - description = "Allow all outgoing traffic" - from_port = 0 - to_port = 0 - protocol = "-1" - cidr_blocks = ["0.0.0.0/0"] - } -} - -# Security group to allow access to Fargate tasks -resource "aws_security_group" "app" { - # Specify name_prefix instead of name because when a change requires creating a new - # security group, sometimes the change requires the new security group to be created - # before the old one is destroyed. In this situation, the new one needs a unique name - name_prefix = "${var.service_name}-app" - description = "Allow inbound TCP access to application container port" - vpc_id = var.vpc_id - lifecycle { - create_before_destroy = true - } - - ingress { - description = "Allow HTTP traffic to application container port" - protocol = "tcp" - from_port = var.container_port - to_port = var.container_port - security_groups = [aws_security_group.alb.id] - } - - egress { - description = "Allow all outgoing traffic from application" - protocol = "-1" - from_port = 0 - to_port = 0 - cidr_blocks = ["0.0.0.0/0"] - } -} - -#----------------- -# Database Access -#----------------- - -resource "aws_vpc_security_group_ingress_rule" "db_ingress_from_service" { - count = var.db_vars != null ? length(var.db_vars.security_group_ids) : 0 - - security_group_id = var.db_vars.security_group_ids[count.index] - description = "Allow inbound requests to database from ${var.service_name} service" - - from_port = tonumber(var.db_vars.connection_info.port) - to_port = tonumber(var.db_vars.connection_info.port) - ip_protocol = "tcp" - referenced_security_group_id = aws_security_group.app.id -} - -resource "aws_iam_role_policy_attachment" "app_db_access" { - count = var.db_vars != null ? 
1 : 0 - - role = aws_iam_role.service.name - policy_arn = var.db_vars.access_policy_arn -} diff --git a/infra/modules/service/networking.tf b/infra/modules/service/networking.tf new file mode 100644 index 000000000..0c74929dd --- /dev/null +++ b/infra/modules/service/networking.tf @@ -0,0 +1,68 @@ +#----------------------- +# Network Configuration +#----------------------- + +resource "aws_security_group" "alb" { + # Specify name_prefix instead of name because when a change requires creating a new + # security group, sometimes the change requires the new security group to be created + # before the old one is destroyed. In this situation, the new one needs a unique name + name_prefix = "${var.service_name}-alb" + description = "Allow TCP traffic to application load balancer" + + lifecycle { + create_before_destroy = true + + # changing the description is a destructive change + # just ignore it + ignore_changes = [description] + } + + vpc_id = var.vpc_id + + # TODO(https://github.com/navapbc/template-infra/issues/163) Disallow incoming traffic to port 80 + # checkov:skip=CKV_AWS_260:Disallow ingress from 0.0.0.0:0 to port 80 when implementing HTTPS support in issue #163 + ingress { + description = "Allow HTTP traffic from public internet" + from_port = 80 + to_port = 80 + protocol = "tcp" + cidr_blocks = ["0.0.0.0/0"] + } + + egress { + description = "Allow all outgoing traffic" + from_port = 0 + to_port = 0 + protocol = "-1" + cidr_blocks = ["0.0.0.0/0"] + } +} + +# Security group to allow access to Fargate tasks +resource "aws_security_group" "app" { + # Specify name_prefix instead of name because when a change requires creating a new + # security group, sometimes the change requires the new security group to be created + # before the old one is destroyed. In this situation, the new one needs a unique name + name_prefix = "${var.service_name}-app" + description = "Allow inbound TCP access to application container port" + vpc_id = var.vpc_id + lifecycle { + create_before_destroy = true + } + + ingress { + description = "Allow HTTP traffic to application container port" + protocol = "tcp" + from_port = var.container_port + to_port = var.container_port + security_groups = [aws_security_group.alb.id] + } + + egress { + description = "Allow all outgoing traffic from application" + protocol = "-1" + from_port = 0 + to_port = 0 + cidr_blocks = ["0.0.0.0/0"] + } +} diff --git a/infra/modules/service/outputs.tf b/infra/modules/service/outputs.tf index adde59cba..9be53806e 100644 --- a/infra/modules/service/outputs.tf +++ b/infra/modules/service/outputs.tf @@ -11,3 +11,16 @@ output "load_balancer_arn_suffix" { description = "The ARN suffix for use with CloudWatch Metrics." value = aws_lb.alb.arn_suffix } + +output "application_log_group" { + value = local.log_group_name +} + +output "application_log_stream_prefix" { + value = local.log_stream_prefix +} + +output "migrator_role_arn" { + description = "ARN for role to use for migration" + value = length(aws_iam_role.migrator_task) > 0 ? 
aws_iam_role.migrator_task[0].arn : null +} diff --git a/infra/modules/service/variables.tf b/infra/modules/service/variables.tf index 7b89db095..5e2b70119 100644 --- a/infra/modules/service/variables.tf +++ b/infra/modules/service/variables.tf @@ -54,8 +54,10 @@ variable "subnet_ids" { variable "db_vars" { description = "Variables for integrating the app service with a database" type = object({ - security_group_ids = list(string) - access_policy_arn = string + security_group_ids = list(string) + access_policy_arn = string + app_access_policy_arn = string + migrator_access_policy_arn = string connection_info = object({ host = string port = string diff --git a/infra/networks/.terraform.lock.hcl b/infra/networks/.terraform.lock.hcl new file mode 100644 index 000000000..1124b43ff --- /dev/null +++ b/infra/networks/.terraform.lock.hcl @@ -0,0 +1,25 @@ +# This file is maintained automatically by "terraform init". +# Manual edits may be lost in future updates. + +provider "registry.terraform.io/hashicorp/aws" { + version = "5.6.2" + constraints = "~> 5.6.0" + hashes = [ + "h1:lH9eN+oozDX4z/TvhoXg++5MIE6MQznSW5sXvqzXAVQ=", + "zh:25322d7e1f0054550357d5a03fe29168cc179421e5dcf44b28c25a99d8d6e4e7", + "zh:394aa5bff70003e76d1d33ef4fe37c4826918577cf339d35e56ae84d01e86765", + "zh:485b288bf95b5d3014903e386e8ee2d1182e507f746bc988458b9711c7df7171", + "zh:48cf69750681337d64df7e402116a6753a40b6702c49fc9232ff6621947d85af", + "zh:6ab11d052d681b5157e261b9dd9167482acffe2018fffd1204575e9bf6a08522", + "zh:882f22d0e6c16cd5a5f01a0ae817b1e75e928667d21d986b93a4ee74fa62c067", + "zh:9b12af85486a96aedd8d7984b0ff811a4b42e3d88dad1a3fb4c0b580d04fa425", + "zh:ac3403e3ab5c10869b23626467b919e3f010e7cae6e0acf8515e0cefab0dbff0", + "zh:b959a425c9be83838895e8626037656bf5db81397ad0078595d3b72fd1b816bc", + "zh:bf390951f21a5fe6b96b206c5496fda4d8b95823bd00d1c03a4a53dd215d882a", + "zh:c3534972986cd68a421359f07ab86631ffa8731606936276fce18ec8ae9045f4", + "zh:d4cf29d67ead2c5feb999c2882e5365bd4d04c115e98fb1639b747b682507fea", + "zh:dea669eea5bca9b57dae2975ec783d577d58a39eec769d1c9bd7fc4d50f241d0", + "zh:e7a82063d01eb2be3fd192afbad910150fe8054731db20c1b22c714d9391dbe5", + "zh:fdbbf96948e96dfed614ea4daa4f1706859122a3f978c42c37db8727cb55c94f", + ] +} diff --git a/infra/networks/default.s3.tfbackend b/infra/networks/default.s3.tfbackend new file mode 100644 index 000000000..175579e58 --- /dev/null +++ b/infra/networks/default.s3.tfbackend @@ -0,0 +1,4 @@ +bucket = "grants-equity-315341936575-us-east-1-tf" +key = "infra/networks/default.tfstate" +dynamodb_table = "grants-equity-315341936575-us-east-1-tf-state-locks" +region = "us-east-1" diff --git a/infra/networks/main.tf b/infra/networks/main.tf new file mode 100644 index 000000000..3741af752 --- /dev/null +++ b/infra/networks/main.tf @@ -0,0 +1,95 @@ +# TODO: This file is is a temporary implementation of the network layer +# that currently just adds resources to the default VPC +# The full network implementation is part of https://github.com/navapbc/template-infra/issues/152 + +data "aws_region" "current" {} + +locals { + tags = merge(module.project_config.default_tags, { + description = "VPC resources" + }) + region = module.project_config.default_region + + # List of AWS services used by this VPC + # This list is used to create VPC endpoints so that the AWS services can + # be accessed without network traffic ever leaving the VPC's private network + # For a list of AWS services that integrate with AWS PrivateLink + # see 
https://docs.aws.amazon.com/vpc/latest/privatelink/aws-services-privatelink-support.html + # + # The database module requires VPC access from private networks to SSM, KMS, and RDS + aws_service_integrations = toset( + module.app_config.has_database ? ["ssm", "kms"] : [] + ) +} + +terraform { + required_version = ">= 1.2.0, < 2.0.0" + + required_providers { + aws = { + source = "hashicorp/aws" + version = "~>5.6.0" + } + } + + backend "s3" { + encrypt = "true" + } +} + +provider "aws" { + region = local.region + default_tags { + tags = local.tags + } +} + +module "project_config" { + source = "../project-config" +} + +module "app_config" { + source = "../api/app-config" +} + +data "aws_vpc" "default" { + default = true +} + +data "aws_subnets" "default" { + filter { + name = "default-for-az" + values = [true] + } +} + +# VPC Endpoints for accessing AWS Services +# ---------------------------------------- +# +# Since the role manager Lambda function is in the VPC (which is needed to be +# able to access the database) we need to allow the Lambda function to access +# AWS Systems Manager Parameter Store (to fetch the database password) and +# KMS (to decrypt SecureString parameters from Parameter Store). We can do +# this by either allowing internet access to the Lambda, or by using a VPC +# endpoint. The latter is more secure. +# See https://repost.aws/knowledge-center/lambda-vpc-parameter-store +# See https://docs.aws.amazon.com/vpc/latest/privatelink/create-interface-endpoint.html#create-interface-endpoint + +resource "aws_security_group" "aws_services" { + count = length(local.aws_service_integrations) > 0 ? 1 : 0 + + name_prefix = module.project_config.aws_services_security_group_name_prefix + description = "VPC endpoints to access AWS services from the VPCs private subnets" + vpc_id = data.aws_vpc.default.id +} + +resource "aws_vpc_endpoint" "aws_service" { + for_each = local.aws_service_integrations + + vpc_id = data.aws_vpc.default.id + service_name = "com.amazonaws.${data.aws_region.current.name}.${each.key}" + vpc_endpoint_type = "Interface" + security_group_ids = [aws_security_group.aws_services[0].id] + subnet_ids = data.aws_subnets.default.ids + private_dns_enabled = true +} diff --git a/infra/project-config/main.tf b/infra/project-config/main.tf index c69b9360a..e9db7d1e3 100644 --- a/infra/project-config/main.tf +++ b/infra/project-config/main.tf @@ -14,5 +14,6 @@ locals { # otherwise us-east-1 is a good default default_region = "us-east-1" - github_actions_role_name = "${local.project_name}-github-actions" + github_actions_role_name = "${local.project_name}-github-actions" + aws_services_security_group_name_prefix = "aws-service-vpc-endpoints" } diff --git a/infra/project-config/outputs.tf b/infra/project-config/outputs.tf index e00c0be2d..690f1eb22 100644 --- a/infra/project-config/outputs.tf +++ b/infra/project-config/outputs.tf @@ -34,3 +34,7 @@ output "default_tags" { output "github_actions_role_name" { value = local.github_actions_role_name } + +output "aws_services_security_group_name_prefix" { + value = local.aws_services_security_group_name_prefix +} diff --git a/infra/test/go.mod b/infra/test/go.mod index 47d1ca7f7..4719ab068 100644 --- a/infra/test/go.mod +++ b/infra/test/go.mod @@ -2,54 +2,55 @@ module navapbc/template-infra go 1.19 +require github.com/gruntwork-io/terratest v0.41.0 + require ( - cloud.google.com/go v0.83.0 // indirect - cloud.google.com/go/storage v1.10.0 // indirect + cloud.google.com/go v0.104.0 // indirect + cloud.google.com/go/compute v1.10.0 // 
indirect + cloud.google.com/go/iam v0.5.0 // indirect + cloud.google.com/go/storage v1.27.0 // indirect github.com/agext/levenshtein v1.2.3 // indirect github.com/apparentlymart/go-textseg/v13 v13.0.0 // indirect - github.com/aws/aws-sdk-go v1.40.56 // indirect + github.com/aws/aws-sdk-go v1.44.122 // indirect github.com/bgentry/go-netrc v0.0.0-20140422174119-9fd32a8b3d3d // indirect github.com/davecgh/go-spew v1.1.1 // indirect - github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e // indirect + github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect github.com/golang/protobuf v1.5.2 // indirect - github.com/golang/snappy v0.0.3 // indirect - github.com/googleapis/gax-go/v2 v2.0.5 // indirect - github.com/gruntwork-io/terratest v0.41.0 // indirect + github.com/google/go-cmp v0.5.9 // indirect + github.com/google/uuid v1.3.0 // indirect + github.com/googleapis/enterprise-certificate-proxy v0.2.0 // indirect + github.com/googleapis/gax-go/v2 v2.6.0 // indirect github.com/hashicorp/errwrap v1.0.0 // indirect github.com/hashicorp/go-cleanhttp v0.5.2 // indirect - github.com/hashicorp/go-getter v1.6.1 // indirect + github.com/hashicorp/go-getter v1.7.0 // indirect github.com/hashicorp/go-multierror v1.1.0 // indirect github.com/hashicorp/go-safetemp v1.0.0 // indirect - github.com/hashicorp/go-version v1.3.0 // indirect + github.com/hashicorp/go-version v1.6.0 // indirect github.com/hashicorp/hcl/v2 v2.9.1 // indirect github.com/hashicorp/terraform-json v0.13.0 // indirect github.com/jinzhu/copier v0.0.0-20190924061706-b57f9002281a // indirect github.com/jmespath/go-jmespath v0.4.0 // indirect - github.com/jstemmer/go-junit-report v0.9.1 // indirect - github.com/klauspost/compress v1.13.0 // indirect + github.com/klauspost/compress v1.15.11 // indirect github.com/mattn/go-zglob v0.0.2-0.20190814121620-e3c945676326 // indirect github.com/mitchellh/go-homedir v1.1.0 // indirect - github.com/mitchellh/go-testing-interface v1.0.0 // indirect + github.com/mitchellh/go-testing-interface v1.14.1 // indirect github.com/mitchellh/go-wordwrap v1.0.1 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect github.com/stretchr/testify v1.7.0 // indirect github.com/tmccombs/hcl2json v0.3.3 // indirect - github.com/ulikunitz/xz v0.5.8 // indirect + github.com/ulikunitz/xz v0.5.10 // indirect github.com/zclconf/go-cty v1.9.1 // indirect go.opencensus.io v0.23.0 // indirect - golang.org/x/crypto v0.0.0-20210513164829-c07d793c2f9a // indirect - golang.org/x/lint v0.0.0-20210508222113-6edffad5e616 // indirect - golang.org/x/mod v0.4.2 // indirect - golang.org/x/net v0.0.0-20210614182718-04defd469f4e // indirect - golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c // indirect - golang.org/x/sys v0.0.0-20220517195934-5e4e11fc645e // indirect - golang.org/x/text v0.3.6 // indirect - golang.org/x/tools v0.1.2 // indirect - golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 // indirect - google.golang.org/api v0.47.0 // indirect + golang.org/x/crypto v0.0.0-20210921155107-089bfa567519 // indirect + golang.org/x/net v0.1.0 // indirect + golang.org/x/oauth2 v0.1.0 // indirect + golang.org/x/sys v0.1.0 // indirect + golang.org/x/text v0.4.0 // indirect + golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 // indirect + google.golang.org/api v0.100.0 // indirect google.golang.org/appengine v1.6.7 // indirect - google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c // indirect - google.golang.org/grpc v1.38.0 // indirect - google.golang.org/protobuf v1.26.0 // 
indirect + google.golang.org/genproto v0.0.0-20221025140454-527a21cfbd71 // indirect + google.golang.org/grpc v1.50.1 // indirect + google.golang.org/protobuf v1.28.1 // indirect gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect ) diff --git a/infra/test/go.sum b/infra/test/go.sum index 80865778a..ed2b5def8 100644 --- a/infra/test/go.sum +++ b/infra/test/go.sum @@ -18,42 +18,185 @@ cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmW cloud.google.com/go v0.78.0/go.mod h1:QjdrLG0uq+YwhjoVOLsS1t7TW8fs36kLs4XO5R5ECHg= cloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb8= cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0= -cloud.google.com/go v0.83.0 h1:bAMqZidYkmIsUqe6PtkEPT7Q+vfizScn+jfNA6jwK9c= cloud.google.com/go v0.83.0/go.mod h1:Z7MJUsANfY0pYPdw0lbnivPx4/vhy/e2FEkSkF7vAVY= +cloud.google.com/go v0.84.0/go.mod h1:RazrYuxIK6Kb7YrzzhPoLmCVzl7Sup4NrbKPg8KHSUM= +cloud.google.com/go v0.87.0/go.mod h1:TpDYlFy7vuLzZMMZ+B6iRiELaY7z/gJPaqbMx6mlWcY= +cloud.google.com/go v0.90.0/go.mod h1:kRX0mNRHe0e2rC6oNakvwQqzyDmg57xJ+SZU1eT2aDQ= +cloud.google.com/go v0.93.3/go.mod h1:8utlLll2EF5XMAV15woO4lSbWQlk8rer9aLOfLh7+YI= +cloud.google.com/go v0.94.1/go.mod h1:qAlAugsXlC+JWO+Bke5vCtc9ONxjQT3drlTTnAplMW4= +cloud.google.com/go v0.97.0/go.mod h1:GF7l59pYBVlXQIBLx3a761cZ41F9bBH3JUlihCt2Udc= +cloud.google.com/go v0.99.0/go.mod h1:w0Xx2nLzqWJPuozYQX+hFfCSI8WioryfRDzkoI/Y2ZA= +cloud.google.com/go v0.100.2/go.mod h1:4Xra9TjzAeYHrl5+oeLlzbM2k3mjVhZh4UqTZ//w99A= +cloud.google.com/go v0.102.0/go.mod h1:oWcCzKlqJ5zgHQt9YsaeTY9KzIvjyy0ArmiBUgpQ+nc= +cloud.google.com/go v0.102.1/go.mod h1:XZ77E9qnTEnrgEOvr4xzfdX5TRo7fB4T2F4O6+34hIU= +cloud.google.com/go v0.104.0 h1:gSmWO7DY1vOm0MVU6DNXM11BWHHsTUmsC5cv1fuW5X8= +cloud.google.com/go v0.104.0/go.mod h1:OO6xxXdJyvuJPcEPBLN9BJPD+jep5G1+2U5B5gkRYtA= +cloud.google.com/go/aiplatform v1.22.0/go.mod h1:ig5Nct50bZlzV6NvKaTwmplLLddFx0YReh9WfTO5jKw= +cloud.google.com/go/aiplatform v1.24.0/go.mod h1:67UUvRBKG6GTayHKV8DBv2RtR1t93YRu5B1P3x99mYY= +cloud.google.com/go/analytics v0.11.0/go.mod h1:DjEWCu41bVbYcKyvlws9Er60YE4a//bK6mnhWvQeFNI= +cloud.google.com/go/analytics v0.12.0/go.mod h1:gkfj9h6XRf9+TS4bmuhPEShsh3hH8PAZzm/41OOhQd4= +cloud.google.com/go/area120 v0.5.0/go.mod h1:DE/n4mp+iqVyvxHN41Vf1CR602GiHQjFPusMFW6bGR4= +cloud.google.com/go/area120 v0.6.0/go.mod h1:39yFJqWVgm0UZqWTOdqkLhjoC7uFfgXRC8g/ZegeAh0= +cloud.google.com/go/artifactregistry v1.6.0/go.mod h1:IYt0oBPSAGYj/kprzsBjZ/4LnG/zOcHyFHjWPCi6SAQ= +cloud.google.com/go/artifactregistry v1.7.0/go.mod h1:mqTOFOnGZx8EtSqK/ZWcsm/4U8B77rbcLP6ruDU2Ixk= +cloud.google.com/go/asset v1.5.0/go.mod h1:5mfs8UvcM5wHhqtSv8J1CtxxaQq3AdBxxQi2jGW/K4o= +cloud.google.com/go/asset v1.7.0/go.mod h1:YbENsRK4+xTiL+Ofoj5Ckf+O17kJtgp3Y3nn4uzZz5s= +cloud.google.com/go/asset v1.8.0/go.mod h1:mUNGKhiqIdbr8X7KNayoYvyc4HbbFO9URsjbytpUaW0= +cloud.google.com/go/assuredworkloads v1.5.0/go.mod h1:n8HOZ6pff6re5KYfBXcFvSViQjDwxFkAkmUFffJRbbY= +cloud.google.com/go/assuredworkloads v1.6.0/go.mod h1:yo2YOk37Yc89Rsd5QMVECvjaMKymF9OP+QXWlKXUkXw= +cloud.google.com/go/assuredworkloads v1.7.0/go.mod h1:z/736/oNmtGAyU47reJgGN+KVoYoxeLBoj4XkKYscNI= +cloud.google.com/go/automl v1.5.0/go.mod h1:34EjfoFGMZ5sgJ9EoLsRtdPSNZLcfflJR39VbVNS2M0= +cloud.google.com/go/automl v1.6.0/go.mod h1:ugf8a6Fx+zP0D59WLhqgTDsQI9w07o64uf/Is3Nh5p8= cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= cloud.google.com/go/bigquery v1.3.0/go.mod 
h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= +cloud.google.com/go/bigquery v1.42.0/go.mod h1:8dRTJxhtG+vwBKzE5OseQn/hiydoQN3EedCaOdYmxRA= +cloud.google.com/go/billing v1.4.0/go.mod h1:g9IdKBEFlItS8bTtlrZdVLWSSdSyFUZKXNS02zKMOZY= +cloud.google.com/go/billing v1.5.0/go.mod h1:mztb1tBc3QekhjSgmpf/CV4LzWXLzCArwpLmP2Gm88s= +cloud.google.com/go/binaryauthorization v1.1.0/go.mod h1:xwnoWu3Y84jbuHa0zd526MJYmtnVXn0syOjaJgy4+dM= +cloud.google.com/go/binaryauthorization v1.2.0/go.mod h1:86WKkJHtRcv5ViNABtYMhhNWRrD1Vpi//uKEy7aYEfI= +cloud.google.com/go/cloudtasks v1.5.0/go.mod h1:fD92REy1x5woxkKEkLdvavGnPJGEn8Uic9nWuLzqCpY= +cloud.google.com/go/cloudtasks v1.6.0/go.mod h1:C6Io+sxuke9/KNRkbQpihnW93SWDU3uXt92nu85HkYI= +cloud.google.com/go/compute v0.1.0/go.mod h1:GAesmwr110a34z04OlxYkATPBEfVhkymfTBXtfbBFow= +cloud.google.com/go/compute v1.3.0/go.mod h1:cCZiE1NHEtai4wiufUhW8I8S1JKkAnhnQJWM7YD99wM= +cloud.google.com/go/compute v1.5.0/go.mod h1:9SMHyhJlzhlkJqrPAc839t2BZFTSk6Jdj6mkzQJeu0M= +cloud.google.com/go/compute v1.6.0/go.mod h1:T29tfhtVbq1wvAPo0E3+7vhgmkOYeXjhFvz/FMzPu0s= +cloud.google.com/go/compute v1.6.1/go.mod h1:g85FgpzFvNULZ+S8AYq87axRKuf2Kh7deLqV/jJ3thU= +cloud.google.com/go/compute v1.7.0/go.mod h1:435lt8av5oL9P3fv1OEzSbSUe+ybHXGMPQHHZWZxy9U= +cloud.google.com/go/compute v1.10.0 h1:aoLIYaA1fX3ywihqpBk2APQKOo20nXsp1GEZQbx5Jk4= +cloud.google.com/go/compute v1.10.0/go.mod h1:ER5CLbMxl90o2jtNbGSbtfOpQKR0t15FOtRsugnLrlU= +cloud.google.com/go/containeranalysis v0.5.1/go.mod h1:1D92jd8gRR/c0fGMlymRgxWD3Qw9C1ff6/T7mLgVL8I= +cloud.google.com/go/containeranalysis v0.6.0/go.mod h1:HEJoiEIu+lEXM+k7+qLCci0h33lX3ZqoYFdmPcoO7s4= +cloud.google.com/go/datacatalog v1.3.0/go.mod h1:g9svFY6tuR+j+hrTw3J2dNcmI0dzmSiyOzm8kpLq0a0= +cloud.google.com/go/datacatalog v1.5.0/go.mod h1:M7GPLNQeLfWqeIm3iuiruhPzkt65+Bx8dAKvScX8jvs= +cloud.google.com/go/datacatalog v1.6.0/go.mod h1:+aEyF8JKg+uXcIdAmmaMUmZ3q1b/lKLtXCmXdnc0lbc= +cloud.google.com/go/dataflow v0.6.0/go.mod h1:9QwV89cGoxjjSR9/r7eFDqqjtvbKxAK2BaYU6PVk9UM= +cloud.google.com/go/dataflow v0.7.0/go.mod h1:PX526vb4ijFMesO1o202EaUmouZKBpjHsTlCtB4parQ= +cloud.google.com/go/dataform v0.3.0/go.mod h1:cj8uNliRlHpa6L3yVhDOBrUXH+BPAO1+KFMQQNSThKo= +cloud.google.com/go/dataform v0.4.0/go.mod h1:fwV6Y4Ty2yIFL89huYlEkwUPtS7YZinZbzzj5S9FzCE= +cloud.google.com/go/datalabeling v0.5.0/go.mod h1:TGcJ0G2NzcsXSE/97yWjIZO0bXj0KbVlINXMG9ud42I= +cloud.google.com/go/datalabeling v0.6.0/go.mod h1:WqdISuk/+WIGeMkpw/1q7bK/tFEZxsrFJOJdY2bXvTQ= +cloud.google.com/go/dataqna v0.5.0/go.mod h1:90Hyk596ft3zUQ8NkFfvICSIfHFh1Bc7C4cK3vbhkeo= +cloud.google.com/go/dataqna v0.6.0/go.mod h1:1lqNpM7rqNLVgWBJyk5NF6Uen2PHym0jtVJonplVsDA= cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= +cloud.google.com/go/datastream v1.2.0/go.mod h1:i/uTP8/fZwgATHS/XFu0TcNUhuA0twZxxQ3EyCUQMwo= +cloud.google.com/go/datastream v1.3.0/go.mod h1:cqlOX8xlyYF/uxhiKn6Hbv6WjwPPuI9W2M9SAXwaLLQ= +cloud.google.com/go/dialogflow v1.15.0/go.mod h1:HbHDWs33WOGJgn6rfzBW1Kv807BE3O1+xGbn59zZWI4= +cloud.google.com/go/dialogflow v1.16.1/go.mod 
h1:po6LlzGfK+smoSmTBnbkIZY2w8ffjz/RcGSS+sh1el0= +cloud.google.com/go/dialogflow v1.17.0/go.mod h1:YNP09C/kXA1aZdBgC/VtXX74G/TKn7XVCcVumTflA+8= +cloud.google.com/go/documentai v1.7.0/go.mod h1:lJvftZB5NRiFSX4moiye1SMxHx0Bc3x1+p9e/RfXYiU= +cloud.google.com/go/documentai v1.8.0/go.mod h1:xGHNEB7CtsnySCNrCFdCyyMz44RhFEEX2Q7UD0c5IhU= +cloud.google.com/go/domains v0.6.0/go.mod h1:T9Rz3GasrpYk6mEGHh4rymIhjlnIuB4ofT1wTxDeT4Y= +cloud.google.com/go/domains v0.7.0/go.mod h1:PtZeqS1xjnXuRPKE/88Iru/LdfoRyEHYA9nFQf4UKpg= +cloud.google.com/go/edgecontainer v0.1.0/go.mod h1:WgkZ9tp10bFxqO8BLPqv2LlfmQF1X8lZqwW4r1BTajk= +cloud.google.com/go/edgecontainer v0.2.0/go.mod h1:RTmLijy+lGpQ7BXuTDa4C4ssxyXT34NIuHIgKuP4s5w= +cloud.google.com/go/functions v1.6.0/go.mod h1:3H1UA3qiIPRWD7PeZKLvHZ9SaQhR26XIJcC0A5GbvAk= +cloud.google.com/go/functions v1.7.0/go.mod h1:+d+QBcWM+RsrgZfV9xo6KfA1GlzJfxcfZcRPEhDDfzg= +cloud.google.com/go/gaming v1.5.0/go.mod h1:ol7rGcxP/qHTRQE/RO4bxkXq+Fix0j6D4LFPzYTIrDM= +cloud.google.com/go/gaming v1.6.0/go.mod h1:YMU1GEvA39Qt3zWGyAVA9bpYz/yAhTvaQ1t2sK4KPUA= +cloud.google.com/go/gkeconnect v0.5.0/go.mod h1:c5lsNAg5EwAy7fkqX/+goqFsU1Da/jQFqArp+wGNr/o= +cloud.google.com/go/gkeconnect v0.6.0/go.mod h1:Mln67KyU/sHJEBY8kFZ0xTeyPtzbq9StAVvEULYK16A= +cloud.google.com/go/gkehub v0.9.0/go.mod h1:WYHN6WG8w9bXU0hqNxt8rm5uxnk8IH+lPY9J2TV7BK0= +cloud.google.com/go/gkehub v0.10.0/go.mod h1:UIPwxI0DsrpsVoWpLB0stwKCP+WFVG9+y977wO+hBH0= +cloud.google.com/go/grafeas v0.2.0/go.mod h1:KhxgtF2hb0P191HlY5besjYm6MqTSTj3LSI+M+ByZHc= +cloud.google.com/go/iam v0.3.0/go.mod h1:XzJPvDayI+9zsASAFO68Hk07u3z+f+JrT2xXNdp4bnY= +cloud.google.com/go/iam v0.5.0 h1:fz9X5zyTWBmamZsqvqZqD7khbifcZF/q+Z1J8pfhIUg= +cloud.google.com/go/iam v0.5.0/go.mod h1:wPU9Vt0P4UmCux7mqtRu6jcpPAb74cP1fh50J3QpkUc= +cloud.google.com/go/language v1.4.0/go.mod h1:F9dRpNFQmJbkaop6g0JhSBXCNlO90e1KWx5iDdxbWic= +cloud.google.com/go/language v1.6.0/go.mod h1:6dJ8t3B+lUYfStgls25GusK04NLh3eDLQnWM3mdEbhI= +cloud.google.com/go/lifesciences v0.5.0/go.mod h1:3oIKy8ycWGPUyZDR/8RNnTOYevhaMLqh5vLUXs9zvT8= +cloud.google.com/go/lifesciences v0.6.0/go.mod h1:ddj6tSX/7BOnhxCSd3ZcETvtNr8NZ6t/iPhY2Tyfu08= +cloud.google.com/go/mediatranslation v0.5.0/go.mod h1:jGPUhGTybqsPQn91pNXw0xVHfuJ3leR1wj37oU3y1f4= +cloud.google.com/go/mediatranslation v0.6.0/go.mod h1:hHdBCTYNigsBxshbznuIMFNe5QXEowAuNmmC7h8pu5w= +cloud.google.com/go/memcache v1.4.0/go.mod h1:rTOfiGZtJX1AaFUrOgsMHX5kAzaTQ8azHiuDoTPzNsE= +cloud.google.com/go/memcache v1.5.0/go.mod h1:dk3fCK7dVo0cUU2c36jKb4VqKPS22BTkf81Xq617aWM= +cloud.google.com/go/metastore v1.5.0/go.mod h1:2ZNrDcQwghfdtCwJ33nM0+GrBGlVuh8rakL3vdPY3XY= +cloud.google.com/go/metastore v1.6.0/go.mod h1:6cyQTls8CWXzk45G55x57DVQ9gWg7RiH65+YgPsNh9s= +cloud.google.com/go/networkconnectivity v1.4.0/go.mod h1:nOl7YL8odKyAOtzNX73/M5/mGZgqqMeryi6UPZTk/rA= +cloud.google.com/go/networkconnectivity v1.5.0/go.mod h1:3GzqJx7uhtlM3kln0+x5wyFvuVH1pIBJjhCpjzSt75o= +cloud.google.com/go/networksecurity v0.5.0/go.mod h1:xS6fOCoqpVC5zx15Z/MqkfDwH4+m/61A3ODiDV1xmiQ= +cloud.google.com/go/networksecurity v0.6.0/go.mod h1:Q5fjhTr9WMI5mbpRYEbiexTzROf7ZbDzvzCrNl14nyU= +cloud.google.com/go/notebooks v1.2.0/go.mod h1:9+wtppMfVPUeJ8fIWPOq1UnATHISkGXGqTkxeieQ6UY= +cloud.google.com/go/notebooks v1.3.0/go.mod h1:bFR5lj07DtCPC7YAAJ//vHskFBxA5JzYlH68kXVdk34= +cloud.google.com/go/osconfig v1.7.0/go.mod h1:oVHeCeZELfJP7XLxcBGTMBvRO+1nQ5tFG9VQTmYS2Fs= +cloud.google.com/go/osconfig v1.8.0/go.mod h1:EQqZLu5w5XA7eKizepumcvWx+m8mJUhEwiPqWiZeEdg= +cloud.google.com/go/oslogin v1.4.0/go.mod 
h1:YdgMXWRaElXz/lDk1Na6Fh5orF7gvmJ0FGLIs9LId4E= +cloud.google.com/go/oslogin v1.5.0/go.mod h1:D260Qj11W2qx/HVF29zBg+0fd6YCSjSqLUkY/qEenQU= +cloud.google.com/go/phishingprotection v0.5.0/go.mod h1:Y3HZknsK9bc9dMi+oE8Bim0lczMU6hrX0UpADuMefr0= +cloud.google.com/go/phishingprotection v0.6.0/go.mod h1:9Y3LBLgy0kDTcYET8ZH3bq/7qni15yVUoAxiFxnlSUA= +cloud.google.com/go/privatecatalog v0.5.0/go.mod h1:XgosMUvvPyxDjAVNDYxJ7wBW8//hLDDYmnsNcMGq1K0= +cloud.google.com/go/privatecatalog v0.6.0/go.mod h1:i/fbkZR0hLN29eEWiiwue8Pb+GforiEIBnV9yrRUOKI= cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= +cloud.google.com/go/recaptchaenterprise v1.3.1/go.mod h1:OdD+q+y4XGeAlxRaMn1Y7/GveP6zmq76byL6tjPE7d4= +cloud.google.com/go/recaptchaenterprise/v2 v2.1.0/go.mod h1:w9yVqajwroDNTfGuhmOjPDN//rZGySaf6PtFVcSCa7o= +cloud.google.com/go/recaptchaenterprise/v2 v2.2.0/go.mod h1:/Zu5jisWGeERrd5HnlS3EUGb/D335f9k51B/FVil0jk= +cloud.google.com/go/recaptchaenterprise/v2 v2.3.0/go.mod h1:O9LwGCjrhGHBQET5CA7dd5NwwNQUErSgEDit1DLNTdo= +cloud.google.com/go/recommendationengine v0.5.0/go.mod h1:E5756pJcVFeVgaQv3WNpImkFP8a+RptV6dDLGPILjvg= +cloud.google.com/go/recommendationengine v0.6.0/go.mod h1:08mq2umu9oIqc7tDy8sx+MNJdLG0fUi3vaSVbztHgJ4= +cloud.google.com/go/recommender v1.5.0/go.mod h1:jdoeiBIVrJe9gQjwd759ecLJbxCDED4A6p+mqoqDvTg= +cloud.google.com/go/recommender v1.6.0/go.mod h1:+yETpm25mcoiECKh9DEScGzIRyDKpZ0cEhWGo+8bo+c= +cloud.google.com/go/redis v1.7.0/go.mod h1:V3x5Jq1jzUcg+UNsRvdmsfuFnit1cfe3Z/PGyq/lm4Y= +cloud.google.com/go/redis v1.8.0/go.mod h1:Fm2szCDavWzBk2cDKxrkmWBqoCiL1+Ctwq7EyqBCA/A= +cloud.google.com/go/retail v1.8.0/go.mod h1:QblKS8waDmNUhghY2TI9O3JLlFk8jybHeV4BF19FrE4= +cloud.google.com/go/retail v1.9.0/go.mod h1:g6jb6mKuCS1QKnH/dpu7isX253absFl6iE92nHwlBUY= +cloud.google.com/go/scheduler v1.4.0/go.mod h1:drcJBmxF3aqZJRhmkHQ9b3uSSpQoltBPGPxGAWROx6s= +cloud.google.com/go/scheduler v1.5.0/go.mod h1:ri073ym49NW3AfT6DZi21vLZrG07GXr5p3H1KxN5QlI= +cloud.google.com/go/secretmanager v1.6.0/go.mod h1:awVa/OXF6IiyaU1wQ34inzQNc4ISIDIrId8qE5QGgKA= +cloud.google.com/go/security v1.5.0/go.mod h1:lgxGdyOKKjHL4YG3/YwIL2zLqMFCKs0UbQwgyZmfJl4= +cloud.google.com/go/security v1.7.0/go.mod h1:mZklORHl6Bg7CNnnjLH//0UlAlaXqiG7Lb9PsPXLfD0= +cloud.google.com/go/security v1.8.0/go.mod h1:hAQOwgmaHhztFhiQ41CjDODdWP0+AE1B3sX4OFlq+GU= +cloud.google.com/go/securitycenter v1.13.0/go.mod h1:cv5qNAqjY84FCN6Y9z28WlkKXyWsgLO832YiWwkCWcU= +cloud.google.com/go/securitycenter v1.14.0/go.mod h1:gZLAhtyKv85n52XYWt6RmeBdydyxfPeTrpToDPw4Auc= +cloud.google.com/go/servicedirectory v1.4.0/go.mod h1:gH1MUaZCgtP7qQiI+F+A+OpeKF/HQWgtAddhTbhL2bs= +cloud.google.com/go/servicedirectory v1.5.0/go.mod h1:QMKFL0NUySbpZJ1UZs3oFAmdvVxhhxB6eJ/Vlp73dfg= +cloud.google.com/go/speech v1.6.0/go.mod h1:79tcr4FHCimOp56lwC01xnt/WPJZc4v3gzyT7FoBkCM= +cloud.google.com/go/speech v1.7.0/go.mod h1:KptqL+BAQIhMsj1kOP2la5DSEEerPDuOP/2mmkhHhZQ= cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= cloud.google.com/go/storage v1.8.0/go.mod 
h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= -cloud.google.com/go/storage v1.10.0 h1:STgFzyU5/8miMl0//zKh2aQeTyeaUH3WN9bSUiJ09bA= cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= +cloud.google.com/go/storage v1.22.1/go.mod h1:S8N1cAStu7BOeFfE8KAQzmyyLkK8p/vmRq6kuBTW58Y= +cloud.google.com/go/storage v1.23.0/go.mod h1:vOEEDNFnciUMhBeT6hsJIn3ieU5cFRmzeLgDvXzfIXc= +cloud.google.com/go/storage v1.27.0 h1:YOO045NZI9RKfCj1c5A/ZtuuENUc8OAW+gHdGnDgyMQ= +cloud.google.com/go/storage v1.27.0/go.mod h1:x9DOL8TK/ygDUMieqwfhdpQryTeEkhGKMi80i/iqR2s= +cloud.google.com/go/talent v1.1.0/go.mod h1:Vl4pt9jiHKvOgF9KoZo6Kob9oV4lwd/ZD5Cto54zDRw= +cloud.google.com/go/talent v1.2.0/go.mod h1:MoNF9bhFQbiJ6eFD3uSsg0uBALw4n4gaCaEjBw9zo8g= +cloud.google.com/go/videointelligence v1.6.0/go.mod h1:w0DIDlVRKtwPCn/C4iwZIJdvC69yInhW0cfi+p546uU= +cloud.google.com/go/videointelligence v1.7.0/go.mod h1:k8pI/1wAhjznARtVT9U1llUaFNPh7muw8QyOUpavru4= +cloud.google.com/go/vision v1.2.0/go.mod h1:SmNwgObm5DpFBme2xpyOyasvBc1aPdjvMk2bBk0tKD0= +cloud.google.com/go/vision/v2 v2.2.0/go.mod h1:uCdV4PpN1S0jyCyq8sIM42v2Y6zOLkZs+4R9LrGYwFo= +cloud.google.com/go/vision/v2 v2.3.0/go.mod h1:UO61abBx9QRMFkNBbf1D8B1LXdS2cGiiCRx0vSpZoUo= +cloud.google.com/go/webrisk v1.4.0/go.mod h1:Hn8X6Zr+ziE2aNd8SliSDWpEnSS1u4R9+xXZmFiHmGE= +cloud.google.com/go/webrisk v1.5.0/go.mod h1:iPG6fr52Tv7sGk0H6qUFzmL3HHZev1htXuWDEEsqMTg= +cloud.google.com/go/workflows v1.6.0/go.mod h1:6t9F5h/unJz41YqfBmqSASJSXccBLtD1Vwf+KmJENM0= +cloud.google.com/go/workflows v1.7.0/go.mod h1:JhSrZuVZWuiDfKEFxU0/F1PQjmpnpcoISEXH2bcHC3M= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= +github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= github.com/agext/levenshtein v1.2.1/go.mod h1:JEDfjyjHDjOF/1e4FlBE/PkbqA9OfWu2ki2W0IB5558= github.com/agext/levenshtein v1.2.3 h1:YB2fHEn0UJagG8T1rrWknE3ZQzWM06O8AMAatNn7lmo= github.com/agext/levenshtein v1.2.3/go.mod h1:JEDfjyjHDjOF/1e4FlBE/PkbqA9OfWu2ki2W0IB5558= +github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= github.com/apparentlymart/go-dump v0.0.0-20180507223929-23540a00eaa3/go.mod h1:oL81AME2rN47vu18xqj1S1jPIPuN7afo62yKTNn3XMM= github.com/apparentlymart/go-textseg v1.0.0/go.mod h1:z96Txxhf3xSFMPmb5X/1W05FF/Nj9VFpLOpjS5yuumk= github.com/apparentlymart/go-textseg/v13 v13.0.0 h1:Y+KvPE1NYz0xl601PVImeQfFyEy6iT90AvPUL1NNfNw= github.com/apparentlymart/go-textseg/v13 v13.0.0/go.mod h1:ZK2fH7c4NqDTLtiYLvIkEghdlcqw7yxLeM89kiTRPUo= -github.com/aws/aws-sdk-go v1.15.78/go.mod h1:E3/ieXAlvM0XWO57iftYVDLLvQ824smPP3ATZkfNZeM= -github.com/aws/aws-sdk-go v1.40.56 h1:FM2yjR0UUYFzDTMx+mH9Vyw1k1EUUxsAFzk+BjkzANA= -github.com/aws/aws-sdk-go v1.40.56/go.mod h1:585smgzpB/KqRA+K3y/NL/oYRqQvpNJYvLm+LY1U59Q= +github.com/aws/aws-sdk-go v1.44.122 h1:p6mw01WBaNpbdP2xrisz5tIkcNwzj/HysobNoaAHjgo= +github.com/aws/aws-sdk-go v1.44.122/go.mod h1:y4AeaBuwd2Lk+GepC1E9v0qOiTws0MIWAX4oIKwKHZo= github.com/bgentry/go-netrc v0.0.0-20140422174119-9fd32a8b3d3d h1:xDfNPAt8lFiC1UJrqV3uuy861HCTo708pDMbjHHdCas= github.com/bgentry/go-netrc v0.0.0-20140422174119-9fd32a8b3d3d/go.mod h1:6QX/PXZ00z/TKoufEY6K/a0k6AhaJrQKdFe6OfVXsa4= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod 
h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= +github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cheggaaa/pb v1.0.27/go.mod h1:pQciLPpbU0oxA0h+VJYYLxO+XeDQb5pZijXscXHm81s= github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= @@ -62,6 +205,12 @@ github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDk github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= +github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= +github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20211001041855-01bcc9b48dfe/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= @@ -72,18 +221,24 @@ github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1m github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po= github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= +github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= +github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= +github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go.mod h1:KJwIaB5Mv44NWtYuAOFCVOjcI94vtpEz2JU/D2v6IjE= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= +github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-test/deep v1.0.3/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA= +github.com/go-test/deep v1.0.7 
h1:/VSMRlnY/JSyqxQUzQLKVMAskpY/NZKFA5j2P+0pP2M= github.com/go-test/deep v1.0.7/go.mod h1:QV8Hv/iy04NyLBxAdO9njL0iVPN1S4d/A3NVv1V36o8= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e h1:1r7pUrabqp18hOBcwBwiTsbnFeTZHV9eER/QT5JVZxY= github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= @@ -92,6 +247,7 @@ github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= github.com/golang/mock v1.5.0/go.mod h1:CWnOUgYIOo4TcNZ0wHX3YZCqsaM1I1Jvs6v3mP3KVu8= +github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs= github.com/golang/protobuf v1.1.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= @@ -111,7 +267,6 @@ github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaS github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM= github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= -github.com/golang/snappy v0.0.3 h1:fHPg5GQYlCeLIPB9BZqMVR5nR9A+IM5zcgeTdjMYmLA= github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= @@ -127,9 +282,15 @@ github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= +github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= +github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/martian v2.1.0+incompatible h1:/CP5g8u/VJHijgedC/Legn3BAbAaWPgecwXBIDzw5no= github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= 
github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= +github.com/google/martian/v3 v3.2.1 h1:d8MncMlErDFTwQGBK1xhv026j9kqhvw1Qv9IbWT1VLQ= github.com/google/martian/v3 v3.2.1/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk= github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= @@ -143,26 +304,43 @@ github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLe github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= +github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/googleapis/enterprise-certificate-proxy v0.0.0-20220520183353-fd19c99a87aa/go.mod h1:17drOmN3MwGY7t0e+Ei9b45FFGA3fBs3x36SsCg1hq8= +github.com/googleapis/enterprise-certificate-proxy v0.1.0/go.mod h1:17drOmN3MwGY7t0e+Ei9b45FFGA3fBs3x36SsCg1hq8= +github.com/googleapis/enterprise-certificate-proxy v0.2.0 h1:y8Yozv7SZtlU//QXbezB6QkpuE6jMD2/gfzk4AftXjs= +github.com/googleapis/enterprise-certificate-proxy v0.2.0/go.mod h1:8C0jb7/mgJe/9KK8Lm7X9ctZC2t60YyIpYEI16jx0Qg= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= -github.com/googleapis/gax-go/v2 v2.0.5 h1:sjZBwGj9Jlw33ImPtvFviGYvseOtDM7hkSKB7+Tv3SM= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= +github.com/googleapis/gax-go/v2 v2.1.0/go.mod h1:Q3nei7sK6ybPYH7twZdmQpAd1MKb7pfu6SK+H1/DsU0= +github.com/googleapis/gax-go/v2 v2.1.1/go.mod h1:hddJymUZASv3XPyGkUpKj8pPO47Rmb0eJc8R6ouapiM= +github.com/googleapis/gax-go/v2 v2.2.0/go.mod h1:as02EH8zWkzwUoLbBaFeQ+arQaj/OthfcblKl4IGNaM= +github.com/googleapis/gax-go/v2 v2.3.0/go.mod h1:b8LNqSzNabLiUpXKkY7HAR5jr6bIT99EXz9pXxye9YM= +github.com/googleapis/gax-go/v2 v2.4.0/go.mod h1:XOTVJ59hdnfJLIP/dh8n5CGryZR2LxK9wbMD5+iXC6c= +github.com/googleapis/gax-go/v2 v2.5.1/go.mod h1:h6B0KMMFNtI2ddbGJn3T3ZbwkeT6yqEF02fYlzkUCyo= +github.com/googleapis/gax-go/v2 v2.6.0 h1:SXk3ABtQYDT/OH8jAyvEOQ58mgawq5C4o/4/89qN2ZU= +github.com/googleapis/gax-go/v2 v2.6.0/go.mod h1:1mjbznJAPHFpesgE5ucqfYEscaz5kMdcIDwU/6+DDoY= +github.com/googleapis/go-type-adapters v1.0.0/go.mod h1:zHW75FOG2aur7gAO2B+MLby+cLsWGBF62rFAi7WjWO4= +github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= github.com/gruntwork-io/terratest v0.41.0 h1:QKFK6m0EMVnrV7lw2L06TlG+Ha3t0CcOXuBVywpeNRU= github.com/gruntwork-io/terratest v0.41.0/go.mod h1:qH1xkPTTGx30XkMHw8jAVIbzqheSjIa5IyiTwSV2vKI= github.com/hashicorp/errwrap v1.0.0 
h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ= github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48= -github.com/hashicorp/go-getter v1.6.1 h1:NASsgP4q6tL94WH6nJxKWj8As2H/2kop/bB1d8JMyRY= -github.com/hashicorp/go-getter v1.6.1/go.mod h1:IZCrswsZPeWv9IkVnLElzRU/gz/QPi6pZHn4tv6vbwA= +github.com/hashicorp/go-getter v1.7.0 h1:bzrYP+qu/gMrL1au7/aDvkoOVGUJpeKBgbqRHACAFDY= +github.com/hashicorp/go-getter v1.7.0/go.mod h1:W7TalhMmbPmsSMdNjD0ZskARur/9GJ17cfHTRtXV744= github.com/hashicorp/go-multierror v1.1.0 h1:B9UzwGQJehnUY1yNrnwREHc3fGbC2xefo8g4TbElacI= github.com/hashicorp/go-multierror v1.1.0/go.mod h1:spPvp8C1qA32ftKqdAHm4hHTbPw+vmowP0z+KUhOZdA= github.com/hashicorp/go-safetemp v1.0.0 h1:2HR189eFNrjHQyENnQMMpCiBAsRxzbTMIgBhEyExpmo= github.com/hashicorp/go-safetemp v1.0.0/go.mod h1:oaerMy3BhqiTbVye6QuFhFtIceqFoDHxNAB65b+Rj1I= -github.com/hashicorp/go-version v1.1.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= -github.com/hashicorp/go-version v1.3.0 h1:McDWVJIU/y+u1BRV06dPaLfLCaT7fUTJLp5r04x7iNw= github.com/hashicorp/go-version v1.3.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= +github.com/hashicorp/go-version v1.6.0 h1:feTTfFNnjP967rlCxM/I9g701jU+RN74YKx2mOkIeek= +github.com/hashicorp/go-version v1.6.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/hcl/v2 v2.9.1 h1:eOy4gREY0/ZQHNItlfuEZqtcQbXIxzojlP301hDpnac= @@ -173,23 +351,24 @@ github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1: github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/jinzhu/copier v0.0.0-20190924061706-b57f9002281a h1:zPPuIq2jAWWPTrGt70eK/BSch+gFAGrNzecsoENgu2o= github.com/jinzhu/copier v0.0.0-20190924061706-b57f9002281a/go.mod h1:yL958EeXv8Ylng6IfnvG4oflryUi3vgA3xPs9hmII1s= -github.com/jmespath/go-jmespath v0.0.0-20160202185014-0b12d6b521d8/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= +github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8= github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= -github.com/jstemmer/go-junit-report v0.9.1 h1:6QPYqodiu3GuPL+7mfx+NwDdp2eTkp9IfEUpgAwUN0o= github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/klauspost/compress v1.11.2/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= -github.com/klauspost/compress v1.13.0 h1:2T7tUoQrQT+fQWdaY5rjWztFGAFwbGD04iPJg90ZiOs= -github.com/klauspost/compress v1.13.0/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg= +github.com/klauspost/compress v1.15.11 h1:Lcadnb3RKGin4FYM/orgq0qde+nc15E5Cbqg4B9Sx9c= 
+github.com/klauspost/compress v1.15.11/go.mod h1:QPwzmACJjUTFsnSHH934V6woptycfrDDJnH7hvFVbGM= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pretty v0.2.1 h1:Fmg33tUaq4/8ym9TJN1x7sLJnHVwhP33CNkpYV/7rwI= github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/kylelemons/godebug v0.0.0-20170820004349-d65d576e9348/go.mod h1:B69LEHPfb2qLo0BaaOLcbitczOKLWTsrBG9LczfCD4k= +github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= @@ -197,11 +376,10 @@ github.com/mattn/go-runewidth v0.0.4/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzp github.com/mattn/go-zglob v0.0.2-0.20190814121620-e3c945676326 h1:ofNAzWCcyTALn2Zv40+8XitdzCgXY6e9qvXwN9W0YXg= github.com/mattn/go-zglob v0.0.2-0.20190814121620-e3c945676326/go.mod h1:9fxibJccNxU2cnpIKLRRFA7zX7qhkJIQWBb449FYHOo= github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s= -github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= -github.com/mitchellh/go-testing-interface v1.0.0 h1:fzU/JVNcaqHQEcVFAKeR41fkiLdIPrefOvVG1VZ96U0= -github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= +github.com/mitchellh/go-testing-interface v1.14.1 h1:jrgshOhYAUVNMAJiKbEu7EqAwgJJ2JqpQmpLJOu07cU= +github.com/mitchellh/go-testing-interface v1.14.1/go.mod h1:gfgS7OtZj6MA4U1UrDRp04twqAjfvlZyCfX3sDjEym8= github.com/mitchellh/go-wordwrap v0.0.0-20150314170334-ad45545899c7/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo= github.com/mitchellh/go-wordwrap v1.0.1 h1:TLuKupo69TCn6TQSyGxwI1EblZZEsQ0vMlAFQflz0v0= github.com/mitchellh/go-wordwrap v1.0.1/go.mod h1:R62XHJLzvMFRBbcrT7m7WgmE1eOyTSsCt+hzestvNj0= @@ -210,9 +388,11 @@ github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/sebdah/goldie v1.0.0/go.mod h1:jXP4hmWywNEwZzhMuv2ccnqTSFpuq8iyQhtQdkkZBH4= github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= +github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/spf13/pflag v1.0.2/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/stretchr/objx 
v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= @@ -224,8 +404,8 @@ github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5Cc github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/tmccombs/hcl2json v0.3.3 h1:+DLNYqpWE0CsOQiEZu+OZm5ZBImake3wtITYxQ8uLFQ= github.com/tmccombs/hcl2json v0.3.3/go.mod h1:Y2chtz2x9bAeRTvSibVRVgbLJhLJXKlUeIvjeVdnm4w= -github.com/ulikunitz/xz v0.5.8 h1:ERv8V6GKqVi23rgu5cj9pVfVzJbOqAY2Ntl88O6c2nQ= -github.com/ulikunitz/xz v0.5.8/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14= +github.com/ulikunitz/xz v0.5.10 h1:t92gobL9l3HE202wg3rlk19F6X+JOxl9BBrCCMYEYd8= +github.com/ulikunitz/xz v0.5.10/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14= github.com/vmihailenco/msgpack v3.3.3+incompatible/go.mod h1:fy3FlTQTDXWkZ7Bh6AcGMlsjHatGryHQYUTf1ShIgkk= github.com/vmihailenco/msgpack/v4 v4.3.12/go.mod h1:gborTTJjAo/GWTqqRjrLCn9pgNN+NXzzngzBKDPIqw4= github.com/vmihailenco/tagparser v0.1.1/go.mod h1:OeAg3pn3UbLjkWt+rN9oFYB6u/cQgqMEUPoW2WPyhdI= @@ -234,6 +414,7 @@ github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9de github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= +github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= github.com/zclconf/go-cty v1.2.0/go.mod h1:hOPWgoHbaTUnI5k4D2ld+GRpFJSCe6bCM7m1q/N4PQ8= github.com/zclconf/go-cty v1.8.0/go.mod h1:vVKLxnk3puL4qRAv72AO+W99LUD4da90g3uUAzyuvAk= github.com/zclconf/go-cty v1.8.1/go.mod h1:vVKLxnk3puL4qRAv72AO+W99LUD4da90g3uUAzyuvAk= @@ -248,14 +429,15 @@ go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M= go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= +go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190426145343-a29dc8fdc734/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20210513164829-c07d793c2f9a h1:kr2P4QFmQr29mSLA43kwrOcgcReGTfbE9N577tCTuBc= -golang.org/x/crypto v0.0.0-20210513164829-c07d793c2f9a/go.mod h1:P+XmwS30IXTQdn5tA2iutPOUgjI07+tq3H3K9MVA1s8= +golang.org/x/crypto v0.0.0-20210921155107-089bfa567519 h1:7I4JAnoQBe7ZtJcBaYHi5UtiO8tQHbUSXxL+pnGRANg= +golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod 
h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -279,7 +461,6 @@ golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRu golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/lint v0.0.0-20210508222113-6edffad5e616 h1:VLliZ0d+/avPrXXH+OakdXhpJuEoBZuwh1m2j7U6Iug= golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= @@ -291,8 +472,8 @@ golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.4.2 h1:Gz96sIWK3OalVv/I/qNygP42zyoKp3xptRVCWRFEBvo= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180811021610-c39426892332/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -329,8 +510,19 @@ golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20210614182718-04defd469f4e h1:XpT3nA5TvE525Ne3hInMh6+GETgn27Zfm9dxsThnX2Q= -golang.org/x/net v0.0.0-20210614182718-04defd469f4e/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220325170049-de3da57026de/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220412020605-290c469a71a5/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220425223048-2871e0cb64e4/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220607020251-c690dde0001d/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.0.0-20220617184016-355a448f1bc9/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.0.0-20220624214902-1bab6f366d9e/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net 
v0.0.0-20220909164309-bea034e7d591/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= +golang.org/x/net v0.0.0-20221014081412-f15817d10f9b/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= +golang.org/x/net v0.1.0 h1:hZ/3BUoy5aId7sCpA/Tc5lt8DkFgdVS2onTpJsZ/fl0= +golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -342,8 +534,21 @@ golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c h1:pkQiBZBvdos9qq4wBAHqlzuZHEXo07pqV06ef90u1WI= golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210628180205-a41e5a781914/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210805134026-6f1e6394065a/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= +golang.org/x/oauth2 v0.0.0-20220309155454-6242fa91716a/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= +golang.org/x/oauth2 v0.0.0-20220411215720-9780585627b5/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= +golang.org/x/oauth2 v0.0.0-20220608161450-d0670ef3b1eb/go.mod h1:jaDAt6Dkxork7LmZnYtzbRWj0W47D86a3TGe0YHBvmE= +golang.org/x/oauth2 v0.0.0-20220622183110-fd043fe589d2/go.mod h1:jaDAt6Dkxork7LmZnYtzbRWj0W47D86a3TGe0YHBvmE= +golang.org/x/oauth2 v0.0.0-20220822191816-0ebed06d0094/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= +golang.org/x/oauth2 v0.0.0-20220909003341-f21342109be1/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= +golang.org/x/oauth2 v0.0.0-20221014153046-6fdb5e3db783/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= +golang.org/x/oauth2 v0.1.0 h1:isLCZuhj4v+tYv7eskaN4v/TM+A1begWWgyVJDdl1+Y= +golang.org/x/oauth2 v0.1.0/go.mod h1:G9FE4dLTsbXUu90h/Pf85g4w1D+SSAgR+q46nJZ8M4A= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -355,6 +560,9 @@ golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod 
h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220929204114-8fcdb60fdcc0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -395,9 +603,35 @@ golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220517195934-5e4e11fc645e h1:w36l2Uw3dRan1K3TyXriXvY+6T56GNmlKGcqiQUJDfM= -golang.org/x/sys v0.0.0-20220517195934-5e4e11fc645e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210603125802-9665404d3644/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210908233432-aa78b53d3365/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211124211545-fe61309f8881/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211210111614-af8b64212486/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220209214540-3681064d5158/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220227234510-4e6760a101f9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220328115105-d36c6a25d886/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220502124256-b6088ccd6cba/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220610221304-9f5ed59c137d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220615213510-4f61da869c0c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220624220833-87e55d714810/go.mod 
h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.1.0 h1:kunALQeHf1/185U1i0GOB/fy1IPRDDpuoOOqRReG57U= +golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.1.0 h1:g6Z6vPFA9dYBAF7DWcH6sCcOntplXsDKcliusYijMlw= +golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -405,8 +639,10 @@ golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.6 h1:aRYxNxv6iGQlyVaZmk6ZgYEDa+Jg18DxebPSrd6bg1M= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/text v0.4.0 h1:BrVqGRd7+k1DiOgtnFvAkoQEWQvBc25ouMJM6429SFg= +golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -457,13 +693,20 @@ golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4f golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.1.2 h1:kRBLX7v7Af8W7Gdbbc908OJcdgtK8bOz9Uaj8/F1ACA= golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= 
+golang.org/x/xerrors v0.0.0-20220411194840-2f41105eb62f/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= +golang.org/x/xerrors v0.0.0-20220609144429-65e65417b02f/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= +golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 h1:H2TDz8ibqkAF6YGhCdN3jS9O0/s90v0rJh3X/OLHEUk= +golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= @@ -485,8 +728,34 @@ google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34q google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8= google.golang.org/api v0.41.0/go.mod h1:RkxM5lITDfTzmyKFPt+wGrCJbVfniCr2ool8kTBzRTU= google.golang.org/api v0.43.0/go.mod h1:nQsDGjRXMo4lvh5hP0TKqF244gqhGcr/YSIykhUk/94= -google.golang.org/api v0.47.0 h1:sQLWZQvP6jPGIP4JGPkJu4zHswrv81iobiyszr3b/0I= google.golang.org/api v0.47.0/go.mod h1:Wbvgpq1HddcWVtzsVLyfLp8lDg6AA241LmgIL59tHXo= +google.golang.org/api v0.48.0/go.mod h1:71Pr1vy+TAZRPkPs/xlCf5SsU8WjuAWv1Pfjbtukyy4= +google.golang.org/api v0.50.0/go.mod h1:4bNT5pAuq5ji4SRZm+5QIkjny9JAyVD/3gaSihNefaw= +google.golang.org/api v0.51.0/go.mod h1:t4HdrdoNgyN5cbEfm7Lum0lcLDLiise1F8qDKX00sOU= +google.golang.org/api v0.54.0/go.mod h1:7C4bFFOvVDGXjfDTAsgGwDgAxRDeQ4X8NvUedIt6z3k= +google.golang.org/api v0.55.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE= +google.golang.org/api v0.56.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE= +google.golang.org/api v0.57.0/go.mod h1:dVPlbZyBo2/OjBpmvNdpn2GRm6rPy75jyU7bmhdrMgI= +google.golang.org/api v0.61.0/go.mod h1:xQRti5UdCmoCEqFxcz93fTl338AVqDgyaDRuOZ3hg9I= +google.golang.org/api v0.63.0/go.mod h1:gs4ij2ffTRXwuzzgJl/56BdwJaA194ijkfn++9tDuPo= +google.golang.org/api v0.67.0/go.mod h1:ShHKP8E60yPsKNw/w8w+VYaj9H6buA5UqDp8dhbQZ6g= +google.golang.org/api v0.70.0/go.mod h1:Bs4ZM2HGifEvXwd50TtW70ovgJffJYw2oRCOFU/SkfA= +google.golang.org/api v0.71.0/go.mod h1:4PyU6e6JogV1f9eA4voyrTY2batOLdgZ5qZ5HOCc4j8= +google.golang.org/api v0.74.0/go.mod h1:ZpfMZOVRMywNyvJFeqL9HRWBgAuRfSjJFpe9QtRRyDs= +google.golang.org/api v0.75.0/go.mod h1:pU9QmyHLnzlpar1Mjt4IbapUCy8J+6HD6GeELN69ljA= +google.golang.org/api v0.77.0/go.mod h1:pU9QmyHLnzlpar1Mjt4IbapUCy8J+6HD6GeELN69ljA= +google.golang.org/api v0.78.0/go.mod h1:1Sg78yoMLOhlQTeF+ARBoytAcH1NNyyl390YMy6rKmw= +google.golang.org/api v0.80.0/go.mod h1:xY3nI94gbvBrE0J6NHXhxOmW97HG7Khjkku6AFB3Hyg= +google.golang.org/api v0.84.0/go.mod h1:NTsGnUFJMYROtiquksZHBWtHfeMC7iYthki7Eq3pa8o= +google.golang.org/api v0.85.0/go.mod h1:AqZf8Ep9uZ2pyTvgL+x0D3Zt0eoT9b5E8fmzfu6FO2g= +google.golang.org/api v0.90.0/go.mod h1:+Sem1dnrKlrXMR/X0bPnMWyluQe4RsNoYfmNLhOIkzw= +google.golang.org/api v0.93.0/go.mod h1:+Sem1dnrKlrXMR/X0bPnMWyluQe4RsNoYfmNLhOIkzw= +google.golang.org/api v0.95.0/go.mod h1:eADj+UBuxkh5zlrSntJghuNeg8HwQ1w5lTKkuqaETEI= +google.golang.org/api v0.96.0/go.mod h1:w7wJQLTM+wvQpNf5JyEcBoxK0RH7EDrh/L4qfsuJ13s= +google.golang.org/api v0.97.0/go.mod h1:w7wJQLTM+wvQpNf5JyEcBoxK0RH7EDrh/L4qfsuJ13s= +google.golang.org/api v0.98.0/go.mod h1:w7wJQLTM+wvQpNf5JyEcBoxK0RH7EDrh/L4qfsuJ13s= +google.golang.org/api v0.100.0 
h1:LGUYIrbW9pzYQQ8NWXlaIVkgnfubVBZbMFb9P8TK374= +google.golang.org/api v0.100.0/go.mod h1:ZE3Z2+ZOr87Rx7dqFsdRQkRBk36kDtp/h+QpHbB7a70= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= @@ -518,6 +787,7 @@ google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfG google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= @@ -533,10 +803,69 @@ google.golang.org/genproto v0.0.0-20210222152913-aa3ee6e6a81c/go.mod h1:FWY/as6D google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210329143202-679c6ae281ee/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= google.golang.org/genproto v0.0.0-20210513213006-bf773b8c8384/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A= -google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c h1:wtujag7C+4D6KMoulW9YauvK2lgdvCMS260jsqqBXr0= google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= +google.golang.org/genproto v0.0.0-20210604141403-392c879c8b08/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= +google.golang.org/genproto v0.0.0-20210608205507-b6d2f5bf0d7d/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= +google.golang.org/genproto v0.0.0-20210624195500-8bfb893ecb84/go.mod h1:SzzZ/N+nwJDaO1kznhnlzqS8ocJICar6hYhVyhi++24= +google.golang.org/genproto v0.0.0-20210713002101-d411969a0d9a/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k= +google.golang.org/genproto v0.0.0-20210716133855-ce7ef5c701ea/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k= +google.golang.org/genproto v0.0.0-20210728212813-7823e685a01f/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48= +google.golang.org/genproto v0.0.0-20210805201207-89edb61ffb67/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48= +google.golang.org/genproto v0.0.0-20210813162853-db860fec028c/go.mod h1:cFeNkxwySK631ADgubI+/XFU/xp8FD5KIVV4rj8UC5w= +google.golang.org/genproto v0.0.0-20210821163610-241b8fcbd6c8/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210828152312-66f60bf46e71/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= 
+google.golang.org/genproto v0.0.0-20210831024726-fe130286e0e2/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210903162649-d08c68adba83/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210909211513-a8c4777a87af/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210924002016-3dee208752a0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211206160659-862468c7d6e0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211221195035-429b39de9b1c/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20220126215142-9970aeb2e350/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20220207164111-0872dc986b00/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20220218161850-94dd64e39d7c/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= +google.golang.org/genproto v0.0.0-20220222213610-43724f9ea8cf/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= +google.golang.org/genproto v0.0.0-20220304144024-325a89244dc8/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= +google.golang.org/genproto v0.0.0-20220310185008-1973136f34c6/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= +google.golang.org/genproto v0.0.0-20220324131243-acbaeb5b85eb/go.mod h1:hAL49I2IFola2sVEjAn7MEwsja0xp51I0tlGAf9hz4E= +google.golang.org/genproto v0.0.0-20220407144326-9054f6ed7bac/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20220413183235-5e96e2839df9/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20220414192740-2d67ff6cf2b4/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20220421151946-72621c1f0bd3/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20220429170224-98d788798c3e/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20220502173005-c8bf987b8c21/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= +google.golang.org/genproto v0.0.0-20220505152158-f39f71e6c8f3/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= +google.golang.org/genproto v0.0.0-20220518221133-4f43b3371335/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= +google.golang.org/genproto v0.0.0-20220523171625-347a074981d8/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= +google.golang.org/genproto v0.0.0-20220608133413-ed9918b62aac/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= +google.golang.org/genproto v0.0.0-20220616135557-88e70c0c3a90/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= +google.golang.org/genproto v0.0.0-20220617124728-180714bec0ad/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= +google.golang.org/genproto v0.0.0-20220624142145-8cd45d7dbd1f/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= +google.golang.org/genproto v0.0.0-20220628213854-d9e0b6570c03/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= +google.golang.org/genproto v0.0.0-20220722212130-b98a9ff5e252/go.mod h1:GkXuJDJ6aQ7lnJcRF+SJVgFdQhypqgl3LB1C9vabdRE= 
+google.golang.org/genproto v0.0.0-20220801145646-83ce21fca29f/go.mod h1:iHe1svFLAZg9VWz891+QbRMwUv9O/1Ww+/mngYeThbc= +google.golang.org/genproto v0.0.0-20220815135757-37a418bb8959/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= +google.golang.org/genproto v0.0.0-20220817144833-d7fd3f11b9b1/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= +google.golang.org/genproto v0.0.0-20220822174746-9e6da59bd2fc/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= +google.golang.org/genproto v0.0.0-20220829144015-23454907ede3/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= +google.golang.org/genproto v0.0.0-20220829175752-36a9c930ecbf/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= +google.golang.org/genproto v0.0.0-20220913154956-18f8339a66a5/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= +google.golang.org/genproto v0.0.0-20220914142337-ca0e39ece12f/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= +google.golang.org/genproto v0.0.0-20220915135415-7fd63a7952de/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= +google.golang.org/genproto v0.0.0-20220916172020-2692e8806bfa/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= +google.golang.org/genproto v0.0.0-20220919141832-68c03719ef51/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= +google.golang.org/genproto v0.0.0-20220920201722-2b89144ce006/go.mod h1:ht8XFiar2npT/g4vkk7O0WYS1sHOHbdujxbEp7CJWbw= +google.golang.org/genproto v0.0.0-20220926165614-551eb538f295/go.mod h1:woMGP53BroOrRY3xTxlbr8Y3eB/nzAvvFM83q7kG2OI= +google.golang.org/genproto v0.0.0-20220926220553-6981cbe3cfce/go.mod h1:woMGP53BroOrRY3xTxlbr8Y3eB/nzAvvFM83q7kG2OI= +google.golang.org/genproto v0.0.0-20221010155953-15ba04fc1c0e/go.mod h1:3526vdqwhZAwq4wsRUaVG555sVgsNmIjRtO7t/JH29U= +google.golang.org/genproto v0.0.0-20221014173430-6e2ab493f96b/go.mod h1:1vXfmgAz9N9Jx0QA82PqRVauvCz1SGSz739p0f183jM= +google.golang.org/genproto v0.0.0-20221014213838-99cd37c6964a/go.mod h1:1vXfmgAz9N9Jx0QA82PqRVauvCz1SGSz739p0f183jM= +google.golang.org/genproto v0.0.0-20221025140454-527a21cfbd71 h1:GEgb2jF5zxsFJpJfg9RoDDWm7tiwc/DDSTE2BtLUkXU= +google.golang.org/genproto v0.0.0-20221025140454-527a21cfbd71/go.mod h1:9qHF0xnpdSfF6knlcsnpzUu5y+rpwgbvsyGAZPBMg4s= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= @@ -550,6 +879,7 @@ google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3Iji google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8= google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= @@ -557,8 +887,21 @@ google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAG google.golang.org/grpc v1.36.1/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= google.golang.org/grpc v1.37.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= google.golang.org/grpc v1.37.1/go.mod 
h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= -google.golang.org/grpc v1.38.0 h1:/9BgsAsa5nWe26HqOlvlgJnqBuktYOLCgjCPqsa56W0= google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= +google.golang.org/grpc v1.39.0/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= +google.golang.org/grpc v1.39.1/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= +google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= +google.golang.org/grpc v1.40.1/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= +google.golang.org/grpc v1.44.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= +google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ= +google.golang.org/grpc v1.46.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= +google.golang.org/grpc v1.46.2/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= +google.golang.org/grpc v1.47.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= +google.golang.org/grpc v1.48.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= +google.golang.org/grpc v1.49.0/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI= +google.golang.org/grpc v1.50.0/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI= +google.golang.org/grpc v1.50.1 h1:DS/BukOZWp8s6p4Dt/tOaJaTQyPyOoCcrjroHuCeLzY= +google.golang.org/grpc v1.50.1/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI= google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= @@ -571,14 +914,20 @@ google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpAD google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= -google.golang.org/protobuf v1.26.0 h1:bxAC2xTBsZGibn2RTntX0oH50xLsqy1OxA9tTL3p/lk= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +google.golang.org/protobuf v1.28.1 h1:d0NfwRgPtno5B1Wa6L2DAG+KivqkdutMf1UhdNx175w= +google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/cheggaaa/pb.v1 v1.0.27/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod 
h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b h1:h8qDotaEPuJATrMmW04NCwg7v22aHH28wwpauUhK9Oo= gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/infra/test/helpers.go b/infra/test/helpers.go new file mode 100644 index 000000000..2c3c3a328 --- /dev/null +++ b/infra/test/helpers.go @@ -0,0 +1,20 @@ +// Common functions used by test files +package test + +import ( + "fmt" + "testing" + + "github.com/gruntwork-io/terratest/modules/terraform" +) + +// Wrapper function for terraform init using a passed in backend config file. This is needed since +// terratest currently does not support passing a file as the -backend-config option +// so we need to manually call terraform rather than using terraform.Init +// see https://github.com/gruntwork-io/terratest/issues/517 +// it looks like this PR would add functionality for this: https://github.com/gruntwork-io/terratest/pull/558 +// after which we add BackendConfig: []string{"dev.s3.tfbackend": terraform.KeyOnly} to terraformOptions +// and replace the call to terraform.RunTerraformCommand with terraform.Init +func TerraformInit(t *testing.T, terraformOptions *terraform.Options, backendConfig string) { + terraform.RunTerraformCommand(t, terraformOptions, "init", fmt.Sprintf("-backend-config=%s", backendConfig)) +} diff --git a/infra/test/infra_test.go b/infra/test/infra_test.go index 47dc002ad..4f68a9bcb 100644 --- a/infra/test/infra_test.go +++ b/infra/test/infra_test.go @@ -13,13 +13,13 @@ import ( "github.com/gruntwork-io/terratest/modules/terraform" ) +var uniqueId = strings.ToLower(random.UniqueId()) +var workspaceName = fmt.Sprintf("t-%s", uniqueId) var appName = flag.String("app_name", "", "name of subdirectory that holds the app's infrastructure code") -func TestDev(t *testing.T) { +func TestService(t *testing.T) { BuildAndPublish(t) - uniqueId := strings.ToLower(random.UniqueId()) - workspaceName := fmt.Sprintf("t-%s", uniqueId) imageTag := shell.RunCommandAndGetOutput(t, shell.Command{ Command: "git", Args: []string{"rev-parse", "HEAD"}, @@ -28,61 +28,65 @@ func TestDev(t *testing.T) { terraformOptions := terraform.WithDefaultRetryableErrors(t, &terraform.Options{ Reconfigure: true, TerraformDir: fmt.Sprintf("../%s/service/", *appName), - VarFiles: []string{"dev.tfvars"}, Vars: map[string]interface{}{ - "image_tag": imageTag, + "environment_name": "dev", + "image_tag": imageTag, }, }) - defer DestroyDevEnvironmentAndWorkspace(t, terraformOptions, workspaceName) - CreateDevEnvironmentInWorkspace(t, terraformOptions, workspaceName) + fmt.Println("::group::Initialize service module") + TerraformInit(t, terraformOptions, "dev.s3.tfbackend") + fmt.Println("::endgroup::") + + defer terraform.WorkspaceDelete(t, terraformOptions, workspaceName) + fmt.Println("::group::Select new terraform workspace") + terraform.WorkspaceSelectOrNew(t, terraformOptions, workspaceName) + fmt.Println("::endgroup::") + + defer DestroyService(t, terraformOptions) + fmt.Println("::group::Create service layer") + terraform.Apply(t, terraformOptions) + fmt.Println("::endgroup::") + WaitForServiceToBeStable(t, workspaceName) RunEndToEndTests(t, terraformOptions) } func BuildAndPublish(t *testing.T) { + fmt.Println("::group::Initialize build-repository module") // terratest currently does not support passing a file as the -backend-config option // so we need to manually call terraform rather than using terraform.Init // see 
https://github.com/gruntwork-io/terratest/issues/517 // it looks like this PR would add functionality for this: https://github.com/gruntwork-io/terratest/pull/558 // after which we add BackendConfig: []string{"dev.s3.tfbackend": terraform.KeyOnly} to terraformOptions // and replace the call to terraform.RunTerraformCommand with terraform.Init - terraform.RunTerraformCommand(t, &terraform.Options{ + TerraformInit(t, &terraform.Options{ TerraformDir: fmt.Sprintf("../%s/build-repository/", *appName), - }, "init", "-backend-config=shared.s3.tfbackend") + }, "shared.s3.tfbackend") + fmt.Println("::endgroup::") + fmt.Println("::group::Build release") shell.RunCommand(t, shell.Command{ Command: "make", Args: []string{"release-build", fmt.Sprintf("APP_NAME=%s", *appName)}, WorkingDir: "../../", }) + fmt.Println("::endgroup::") + fmt.Println("::group::Publish release") shell.RunCommand(t, shell.Command{ Command: "make", Args: []string{"release-publish", fmt.Sprintf("APP_NAME=%s", *appName)}, WorkingDir: "../../", }) -} - -func CreateDevEnvironmentInWorkspace(t *testing.T, terraformOptions *terraform.Options, workspaceName string) { - fmt.Printf("::group::Create dev environment in new workspace '%s\n'", workspaceName) - - // terratest currently does not support passing a file as the -backend-config option - // so we need to manually call terraform rather than using terraform.Init - // see https://github.com/gruntwork-io/terratest/issues/517 - // it looks like this PR would add functionality for this: https://github.com/gruntwork-io/terratest/pull/558 - // after which we add BackendConfig: []string{"dev.s3.tfbackend": terraform.KeyOnly} to terraformOptions - // and replace the call to terraform.RunTerraformCommand with terraform.Init - terraform.RunTerraformCommand(t, terraformOptions, "init", "-backend-config=dev.s3.tfbackend") - terraform.WorkspaceSelectOrNew(t, terraformOptions, workspaceName) - terraform.Apply(t, terraformOptions) fmt.Println("::endgroup::") } func WaitForServiceToBeStable(t *testing.T, workspaceName string) { fmt.Println("::group::Wait for service to be stable") + appName := *appName environmentName := "dev" - serviceName := fmt.Sprintf("%s-%s-%s", workspaceName, *appName, environmentName) + serviceName := fmt.Sprintf("%s-%s-%s", workspaceName, appName, environmentName) shell.RunCommand(t, shell.Command{ Command: "aws", Args: []string{"ecs", "wait", "services-stable", "--cluster", serviceName, "--services", serviceName}, @@ -100,14 +104,14 @@ func RunEndToEndTests(t *testing.T, terraformOptions *terraform.Options) { fmt.Println("::endgroup::") } -func EnableDestroy(t *testing.T, terraformOptions *terraform.Options, workspaceName string) { - fmt.Println("::group::Setting force_destroy = true and prevent_destroy = false for s3 buckets") +func EnableDestroyService(t *testing.T, terraformOptions *terraform.Options) { + fmt.Println("::group::Set force_destroy = true and prevent_destroy = false for s3 buckets in service layer") shell.RunCommand(t, shell.Command{ Command: "sed", Args: []string{ "-i.bak", "s/force_destroy = false/force_destroy = true/g", - "infra/modules/service/access_logs.tf", + "infra/modules/service/access-logs.tf", }, WorkingDir: "../../", }) @@ -116,19 +120,17 @@ func EnableDestroy(t *testing.T, terraformOptions *terraform.Options, workspaceN Args: []string{ "-i.bak", "s/prevent_destroy = true/prevent_destroy = false/g", - "infra/modules/service/access_logs.tf", + "infra/modules/service/access-logs.tf", }, WorkingDir: "../../", }) - 
terraform.RunTerraformCommand(t, terraformOptions, "init", "-backend-config=dev.s3.tfbackend") terraform.Apply(t, terraformOptions) + fmt.Println("::endgroup::") } -func DestroyDevEnvironmentAndWorkspace(t *testing.T, terraformOptions *terraform.Options, workspaceName string) { - EnableDestroy(t, terraformOptions, workspaceName) - fmt.Println("::group::Destroy environment and workspace") - terraform.RunTerraformCommand(t, terraformOptions, "init", "-backend-config=dev.s3.tfbackend") +func DestroyService(t *testing.T, terraformOptions *terraform.Options) { + EnableDestroyService(t, terraformOptions) + fmt.Println("::group::Destroy service layer") terraform.Destroy(t, terraformOptions) - terraform.WorkspaceDelete(t, terraformOptions, workspaceName) fmt.Println("::endgroup::") }
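For reference, the refactored test above follows an init -> workspace select -> apply -> destroy -> workspace delete lifecycle, using the new TerraformInit wrapper from infra/test/helpers.go to work around terratest's lack of support for file-based -backend-config values. The sketch below is a minimal, illustrative example of reusing that wrapper in another Terratest case; the module path, backend config file name, variables, and test name are hypothetical placeholders and are not part of this change.

// Illustrative sketch only, assuming it lives in the same package as
// infra/test/helpers.go. The module directory, backend config file, and
// variables below are hypothetical.
package test

import (
	"fmt"
	"strings"
	"testing"

	"github.com/gruntwork-io/terratest/modules/random"
	"github.com/gruntwork-io/terratest/modules/terraform"
)

func TestExampleServiceLifecycle(t *testing.T) {
	// Isolate this run in its own workspace so concurrent CI runs do not collide.
	workspaceName := fmt.Sprintf("t-%s", strings.ToLower(random.UniqueId()))

	terraformOptions := terraform.WithDefaultRetryableErrors(t, &terraform.Options{
		Reconfigure:  true,
		TerraformDir: "../example/service/", // hypothetical module path
		Vars: map[string]interface{}{
			"environment_name": "dev",
		},
	})

	// Run terraform init with a file-based backend config through the
	// TerraformInit wrapper, since terratest's terraform.Init cannot take
	// a file for -backend-config.
	TerraformInit(t, terraformOptions, "dev.s3.tfbackend")

	// Deferred calls run last-in-first-out: resources are destroyed first,
	// then the temporary workspace is deleted.
	defer terraform.WorkspaceDelete(t, terraformOptions, workspaceName)
	terraform.WorkspaceSelectOrNew(t, terraformOptions, workspaceName)

	defer terraform.Destroy(t, terraformOptions)
	terraform.Apply(t, terraformOptions)

	// Assertions against the deployed service would go here.
}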