From e179ae1d42c63d14bacf7585c29c2c5a2c23aca2 Mon Sep 17 00:00:00 2001
From: Alpha
Date: Sat, 16 Nov 2019 08:21:40 -0500
Subject: [PATCH 001/128] Docker support & fix terraform template
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Added a dockerfile that builds a docker image to use npk. This provides two benefits.

1. If CoalfireLabs decides to publish and maintain the npk image, then users can just run npk by using a single command line in docker.
2. If CoalfireLabs does not publish a docker image, users can still use this dockerfile to build the container themselves.

This prevents everyone from dealing with multiple terraform, python, pip and node versions that do not quite work together.

Still to be done:
- Add ARGs, ENVs or a startup script (à la MySQL) so that the container can set up the aws credential profile when running, and it does not need to be a manual process.
- Add ARGs, ENVs or a startup script (à la MySQL) so that the container can set up the /npk/terraform/npk-settings.json file, and it does not need to be a manual process.
- After these are done, we can stop installing `nano` (or any text editor), making the image smaller and faster to create.

This change also fixes the error in the `terraform/cognito_iam_roles.tf` template, which just had an extra comma.

Fixes #49.
---
 Dockerfile                     | 50 ++++++++++++++++++++++++++++++++++
 build-docker-image.sh          |  5 ++++
 terraform/cognito_iam_roles.tf |  2 +-
 3 files changed, 56 insertions(+), 1 deletion(-)
 create mode 100644 Dockerfile
 create mode 100644 build-docker-image.sh

diff --git a/Dockerfile b/Dockerfile
new file mode 100644
index 0000000..a70acaa
--- /dev/null
+++ b/Dockerfile
@@ -0,0 +1,50 @@
+# Inspired in https://andrewlock.net/packaging-cli-programs-into-docker-images-to-avoid-dependency-hell/
+FROM alpine:3.10
+
+# Building Jsonnet
+RUN mkdir /build && \
+    cd build && \
+    apk -v --no-cache add git make gcc g++ && \
+    git clone https://github.com/google/jsonnet.git && \
+    cd jsonnet && \
+    make && \
+    apk -v --purge del git gcc && \
+    ln -s /build/jsonnet/jsonnet /usr/bin/jsonnet && \
+    ln -s /build/jsonnet/jsonnetfmt /usr/bin/jsonnetfmt
+
+## Installing AWS CLI
+RUN apk -v --no-cache add python py-pip groff less mailcap && \
+    pip install --upgrade awscli s3cmd python-magic && \
+    apk -v --purge del py-pip && \
+    ## Installing other dependencies
+    apk -v --no-cache add jq npm
+
+# Installing terraform
+RUN mkdir /build/terraform && \
+    cd /build/terraform && \
+    wget https://releases.hashicorp.com/terraform/0.11.15-oci/terraform_0.11.15-oci_linux_amd64.zip && \
+    unzip terraform_0.11.15-oci_linux_amd64.zip && \
+    ln -s /build/terraform/terraform /usr/bin/terraform
+
+# Installing a text editor
+RUN apk -v --no-cache add nano
+
+RUN mkdir /npk
+WORKDIR /npk
+ADD . /npk
+
+RUN mkdir -p /root/.aws
+RUN echo "[npk]" >> /root/.aws/credentials
+RUN echo "aws_access_key_id = ..." >> /root/.aws/credentials
+RUN echo "aws_secret_access_key = ..." >> /root/.aws/credentials
+RUN cp /npk/terraform/npk-settings.json.sample /npk/terraform/npk-settings.json
+
+ENTRYPOINT [ "sh" ]
+
+# docker run -it npk:latest
+
+# To run once inside the container:
+# nano /root/.aws/credentials
+# nano /npk/terraform/npk-settings.json
+# cd /npk/terraform
+# sh deploy.sh
\ No newline at end of file
diff --git a/build-docker-image.sh b/build-docker-image.sh
new file mode 100644
index 0000000..b0eb81b
--- /dev/null
+++ b/build-docker-image.sh
@@ -0,0 +1,5 @@
+#!/bin/sh
+
+docker build -t coalfirelabs/npk:latest .
+
+# missing: docker push coalfirelabs/npk:latest
\ No newline at end of file
diff --git a/terraform/cognito_iam_roles.tf b/terraform/cognito_iam_roles.tf
index 57c2b8e..3aa88af 100644
--- a/terraform/cognito_iam_roles.tf
+++ b/terraform/cognito_iam_roles.tf
@@ -36,7 +36,7 @@ data "aws_iam_policy_document" "cognito_authenticated" {
     sid = "3"
 
     actions = [
-      "s3:GetObject",,
+      "s3:GetObject",
       "s3:ListObjectVersions",
       "s3:DeleteObject"
     ]

From d7b0a5cd0a77f655db985e444cf97127c0e788df Mon Sep 17 00:00:00 2001
From: Brad Woodward
Date: Fri, 16 Apr 2021 16:48:57 -0600
Subject: [PATCH 002/128] Fixed wiki reference

---
 README.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/README.md b/README.md
index dd8a8ea..912451c 100644
--- a/README.md
+++ b/README.md
@@ -107,7 +107,7 @@ After that, run the deploy!
 npk/terraform$ ./deploy.sh
 ```
 
-For more details about each setting, their effects, and allowed values, check out [the wiki](https://github.com/Coalfire-Research/npk/wiki/Detailed-NPK-Settings). For more details around custom installations, see [Detailed Instructions](https://github.com/Coalfire-Research/npk/wiki/Detailed-Usage-Instructions).
+For more details about each setting, their effects, and allowed values, check out [the wiki](https://github.com/c6fc/npk/wiki/Detailed-NPK-Settings). For more details around custom installations, see [Detailed Instructions](https://github.com/c6fc/npk/wiki/Detailed-Usage-Instructions).
 
 NPK will use the specified AWS cli profile to fully deploy NPK and provision the first user. If you'd like to change the configuration, simply run `./deploy.sh` again afterward. While it's deploying, pay a visit to https://aws.amazon.com/marketplace/pp/B07S5G9S1Z to subscribe and accept the terms of NVidia's AMIs. NPK uses these to ensure compatability with the GPUs. There is no cost associated with this step, but allows NPK to use these AMIs on your behalf.
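
For readers following the install steps referenced above: a filled-in `npk-settings.json` ends up looking roughly like the sketch below. The field set mirrors what the quickdeploy wizard introduced in the next patch writes out; the bucket name, profile, email address, and phone number shown here are placeholder values, not settings taken from the patches.

```json
{
  "backend_bucket": "npk-terraform-examplebucket",
  "campaign_data_ttl": 604800,
  "campaign_max_price": 50,
  "georestrictions": [],
  "useCustomDNS": false,
  "route53Zone": "",
  "dnsNames": { "www": [], "api": [] },
  "awsProfile": "default",
  "criticalEventsSMS": "+13035551234",
  "adminEmail": "admin@example.com",
  "debug_lambda": false,
  "useSAML": false
}
```

The quickdeploy wizard generates the `backend_bucket` name with a random suffix so it is globally unique; when writing the file by hand, any S3 bucket name that is not already taken should work.
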
From 7ecbe1418ec1d5c762a7fcf30a35769aa013c492 Mon Sep 17 00:00:00 2001
From: Brad Woodward
Date: Mon, 19 Apr 2021 12:21:24 -0600
Subject: [PATCH 003/128] Added quick deploy

---
 .gitignore               |  3 +-
 Dockerfile               | 20 +++-------
 README.md                | 31 +++++++++++++++++--
 build-docker-image.sh    |  5 ++--
 terraform/deploy.sh      |  2 +-
 terraform/quickdeploy.sh | 59 ++++++++++++++++++++++++++++++++++
 6 files changed, 96 insertions(+), 24 deletions(-)
 mode change 100644 => 100755 build-docker-image.sh
 create mode 100755 terraform/quickdeploy.sh

diff --git a/.gitignore b/.gitignore
index ec639aa..eeabd48 100644
--- a/.gitignore
+++ b/.gitignore
@@ -27,4 +27,5 @@ tools/rules/*
 tools/wordlists/*
 terraform-selfhost/upload_npkfile.sh
 terraform-selfhost/upload_npkcomponents.sh
-terraform-selfhost/sync_npkcomponents.sh
\ No newline at end of file
+terraform-selfhost/sync_npkcomponents.sh
+quickdeployed
\ No newline at end of file
diff --git a/Dockerfile b/Dockerfile
index a70acaa..9afcbde 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -17,7 +17,7 @@ RUN apk -v --no-cache add python py-pip groff less mailcap && \
     pip install --upgrade awscli s3cmd python-magic && \
     apk -v --purge del py-pip && \
     ## Installing other dependencies
-    apk -v --no-cache add jq npm
+    apk -v --no-cache add jq npm pwgen bash
 
 # Installing terraform
 RUN mkdir /build/terraform && \
@@ -30,21 +30,9 @@ RUN mkdir /build/terraform && \
 RUN apk -v --no-cache add nano
 
 RUN mkdir /npk
-WORKDIR /npk
-ADD . /npk
+VOLUME /npk
 
 RUN mkdir -p /root/.aws
-RUN echo "[npk]" >> /root/.aws/credentials
-RUN echo "aws_access_key_id = ..." >> /root/.aws/credentials
-RUN echo "aws_secret_access_key = ..." >> /root/.aws/credentials
-RUN cp /npk/terraform/npk-settings.json.sample /npk/terraform/npk-settings.json
+VOLUME /root/.aws/
 
-ENTRYPOINT [ "sh" ]
-
-# docker run -it npk:latest
-
-# To run once inside the container:
-# nano /root/.aws/credentials
-# nano /npk/terraform/npk-settings.json
-# cd /npk/terraform
-# sh deploy.sh
\ No newline at end of file
+ENTRYPOINT [ "bash" ]
\ No newline at end of file
diff --git a/README.md b/README.md
index 60977f2..f7735f8 100644
--- a/README.md
+++ b/README.md
@@ -16,7 +16,7 @@ If you'd like to see it in action, check out the video here: https://www.youtube
 
 ### 1. Super easy install
 
-One config file, one command to run. That's about it.
+Build the Docker container and run the wizard. That's about it.
 
 ### 2. Intuitive campaign builder
 
@@ -42,7 +42,20 @@ NPK supports multiple users, with strict separation of data, campaigns, and resu
 
 Configure how long data will stay in NPK with configurable lifecycle durations during installation. Hashfiles and results are automatically removed after this much time to keep things nicely cleaned up.
 
-## Install
+## Easy Install (Docker)
+
+```sh
+$ git clone https://github.com/c6fc/npk
+$ cd npk
+npk$ ./build-docker-image.sh
+... Docker builds and runs.
+bash-5.0# cd /npk/terraform
+bash-5.0# ./quickdeploy.sh
+```
+
+The quickdeploy wizard will ask for a few basic things, then kick off the install on your behalf.
+
+## Advanced Install
 
 NPK requires that you have the following installed:
 * **awscli** (v2)
 * **terraform** (v0.11.x)
 * **jq**
 * **jsonnet**
 * **npm**
 
+You can skip these prerequisites by using the provided Docker image.
+```sh
+# Build the container if you haven't already;
+$ docker build -t c6fc/npk:latest .
+
+# Run the container.
+$ docker run -it -v `pwd`:/npk -v ~/.aws/:/root/.aws c6fc/npk:latest + +# Your 'npk' folder is passed through to the container at '/npk' +bash-5.0# cd /npk/ +``` + **ProTip:** To keep things clean and distinct from other things you may have in AWS, it's STRONGLY recommended that you deploy NPK in a fresh account. You can create a new account easily from the 'Organizations' console in AWS. **By 'STRONGLY recommended', I mean 'seriously don't install this next to other stuff'.** ```sh -$ git clone npk . +$ git clone https://github.com/c6fc/npk $ cd npk/terraform/ npk/terraform$ cp npk-settings.json.sample npk-settings.json ``` diff --git a/build-docker-image.sh b/build-docker-image.sh old mode 100644 new mode 100755 index b0eb81b..4ed8bf2 --- a/build-docker-image.sh +++ b/build-docker-image.sh @@ -1,5 +1,4 @@ #!/bin/sh -docker build -t coalfirelabs/npk:latest . - -# missing: docker push coalfirelabs/npk:latest \ No newline at end of file +docker build -t c6fc/npk:latest . +docker run -it -v `pwd`:/npk -v ~/.aws/:/root/.aws c6fc/npk:latest \ No newline at end of file diff --git a/terraform/deploy.sh b/terraform/deploy.sh index e426916..3b86644 100755 --- a/terraform/deploy.sh +++ b/terraform/deploy.sh @@ -73,7 +73,7 @@ if [[ $(aws --version | grep "aws-cli/2" | wc -l) -ge 1 ]]; then export AWS_PAGER=""; fi -BUCKET=$(jq -r '.backend_bucket' npk-settings.json) +BUCKET=$(jq -r '.backend_bucket' npk-settings.json 2> /dev/null) if [[ "$BUCKET" == "" ]]; then echo "No backend bucket is specified in npk-settings.json. This is best practice and required for NPKv2." diff --git a/terraform/quickdeploy.sh b/terraform/quickdeploy.sh new file mode 100755 index 0000000..ff49bbb --- /dev/null +++ b/terraform/quickdeploy.sh @@ -0,0 +1,59 @@ +#! /bin/bash + +if [[ -f quickdeployed ]]; then + "[+] You've already run the quickdeploy wizard." +fi + +if [[ -f npk-settings.json ]]; then + read -r -p "[!] quickdeploy will overwrite your existing npk-settings.json. Type 'Yes' to proceed: " key + + if [[ "$key" != "Yes" ]]; then + echo "Only 'Yes' will be accepted." + echo "" + + exit 1 + fi +fi + +read -r -p "[?] Which AWS profile do you want to deploy NPK with? [e.g. default]: " profile + +echo "[*] Testing the profile..." +aws --profile $profile sts get-caller-identity > /dev/null +if [[ $? -ne 0 ]]; then + echo "[!] The provided profile is not valid. Try again." + exit 1 +fi + +echo "NPK will provision an administrative account for the first user, and send login details via email." +read -r -p "[?] What is the email address of the admin user?: " email +echo +echo "NPK sends critical event notifications via SMS. These are exceptionally rare," +echo " but could indicate a cost overrun. As such, you should use a real number." +echo " The number must include the country code and a '+' at the front." +read -r -p "[?] What number should SMS notifications be sent to? 
[e.g: +13035551234]: " sms +echo + +BUCKET="npk-terraform-quickdeploy-$(pwgen -A -0 12 1)" + +jq -n --arg profile $profile --arg email $email --arg sms $sms --arg bucket $BUCKET '{ + "backend_bucket": $bucket, + "campaign_data_ttl": 604800, + "campaign_max_price": 50, + "georestrictions": [], + "useCustomDNS": false, + "route53Zone": "", + "dnsNames": { + "www": [], + "api": [] + }, + "awsProfile": $profile, + "criticalEventsSMS": $sms, + "adminEmail": $email, + "debug_lambda": false, + + "useSAML": false, +}' >> npk-settings.json + +touch quickdeployed + +./deploy.sh \ No newline at end of file From 2e85b3bf57ab0b60c85478072b73d74af8d5e1d7 Mon Sep 17 00:00:00 2001 From: Robert Ellegate Date: Sun, 16 May 2021 11:43:59 -0400 Subject: [PATCH 004/128] Pin AWSCLI version and update dependency checks MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ๐Ÿ“Œ Pin AWSCLI to version 2 ๐Ÿ“Œ Use `grep -c` for terraform dependency check The `-c` flag counts the number of matches. A bit cleaner than `wc -l` --- terraform-selfhost/deploy-selfhost.sh | 6 +++--- terraform/deploy.sh | 12 ++++++++---- 2 files changed, 11 insertions(+), 7 deletions(-) diff --git a/terraform-selfhost/deploy-selfhost.sh b/terraform-selfhost/deploy-selfhost.sh index ede97ae..654a73e 100755 --- a/terraform-selfhost/deploy-selfhost.sh +++ b/terraform-selfhost/deploy-selfhost.sh @@ -22,9 +22,9 @@ if [[ ! -f $(which aws) ]]; then echo "Error: Must have AWSCLI installed."; fi -if [[ $(aws --version | grep "aws-cli/2" | wc -l) -lt 1 ]]; then +if [[ $(aws --version | grep -c "aws-cli/2") != 1 ]]; then ERR=1; - echo "Error: NPK Selfhost requires AWSCLI version 2."; + echo "Error: Wrong version of Terraform is installed. NPK requires AWSCLI version 2."; fi if [[ ! -f $(which npm) ]]; then @@ -37,7 +37,7 @@ if [[ ! -f $(which terraform) ]]; then echo "Error: Must have Terraform installed."; fi -if [[ "$(terraform -v | grep v0.11 | wc -l)" != "1" ]]; then +if [[ $($TERBIN -v | grep -c "Terraform v0.11") != 1 ]]; then ERR=1; echo "Error: Wrong version of Terraform is installed. NPK requires Terraform v0.11."; echo "-> Note: A non-default binary can be specified as a positional script parameter:" diff --git a/terraform/deploy.sh b/terraform/deploy.sh index e426916..d876417 100755 --- a/terraform/deploy.sh +++ b/terraform/deploy.sh @@ -22,6 +22,11 @@ if [[ ! -f $(which aws) ]]; then echo "Error: Must have AWSCLI installed."; fi +if [[ $(aws --version | grep -c "aws-cli/2") != 1 ]]; then + ERR=1; + echo "Error: Wrong version of Terraform is installed. NPK requires AWSCLI version 2."; +fi + if [[ ! -f $(which npm) ]]; then ERR=1; echo "Error: Must have NPM installed."; @@ -32,10 +37,10 @@ if [[ ! -f $(which terraform) ]]; then echo "Error: Must have Terraform installed."; fi -if [[ "$($TERBIN -v | grep v0.11 | wc -l)" != "1" ]]; then +if [[ $($TERBIN -v | grep -c "Terraform v0.11") != 1 ]]; then ERR=1; echo "Error: Wrong version of Terraform is installed. NPK requires Terraform v0.11."; - echo "-> Note: A non-default binary can be specified as positional script parameter:" + echo "-> Note: A non-default binary can be specified as a positional script parameter:" echo "-> e.g: ./deploy-selfhost.sh " echo "" fi @@ -213,7 +218,6 @@ fi # remove old configs silently: rm -f *.tf.json - echo "[*] Generating Terraform configurations" jsonnet -m . terraform.jsonnet @@ -236,4 +240,4 @@ if [[ ! 
-d .terraform || $ISINIT -ne 0 ]]; then fi fi -terraform apply -auto-approve \ No newline at end of file + $TERBIN apply -auto-approve \ No newline at end of file From fca4e654400aa2821df34e4b31ab8fcdbcf97b96 Mon Sep 17 00:00:00 2001 From: Brad Woodward Date: Tue, 27 Jul 2021 09:07:02 -0600 Subject: [PATCH 005/128] Removed inappropriate 'then' case --- terraform/lambda_functions/proxy_api_handler/main.js | 2 -- 1 file changed, 2 deletions(-) diff --git a/terraform/lambda_functions/proxy_api_handler/main.js b/terraform/lambda_functions/proxy_api_handler/main.js index fd9d759..2dc4db3 100644 --- a/terraform/lambda_functions/proxy_api_handler/main.js +++ b/terraform/lambda_functions/proxy_api_handler/main.js @@ -1127,8 +1127,6 @@ exports.main = function(event, context, callback) { console.log("Finished with message", data); }, (err) => { console.log("Finished with error", err); - }).then(() => { - respond(500, "Events occurred out of order. This is a bug.", false); }); } catch (e) { From 7b209978f6b22db438941ea3fb50253f64c9fca0 Mon Sep 17 00:00:00 2001 From: Brad Woodward Date: Sat, 4 Sep 2021 11:53:10 -0600 Subject: [PATCH 006/128] api rewrite progress --- terraform/cloudwatch-policy.tf | 14 - terraform/cloudwatch-policy.tf.bak | 22 + terraform/jsonnet/acm-old.libsonnet | 33 + terraform/jsonnet/acm.libsonnet | 59 +- terraform/jsonnet/api_gateway_map.libsonnet | 117 +++ terraform/jsonnet/cloudfront.libsonnet | 102 +- terraform/jsonnet/cognito.libsonnet | 172 ++-- terraform/jsonnet/iam.libsonnet | 40 + terraform/jsonnet/lambda.libsonnet | 133 +++ terraform/jsonnet/routetable.libsonnet | 14 +- terraform/jsonnet/templates.libsonnet | 72 +- .../lambda_functions/delete_campaign/main.js | 323 ++++++ .../delete_campaign/package-lock.json | 102 ++ .../delete_campaign/package.json | 15 + .../lambda_functions/execute_campaign/main.js | 323 ++++++ .../execute_campaign/package-lock.json | 102 ++ .../execute_campaign/package.json | 15 + .../lambda_functions/get_campaign/main.js | 323 ++++++ .../get_campaign/package-lock.json | 102 ++ .../get_campaign/package.json | 15 + .../lambda_functions/list_campaigns/main.js | 323 ++++++ .../list_campaigns/package-lock.json | 102 ++ .../list_campaigns/package.json | 15 + terraform/npk-settings.json.sample | 9 +- terraform/quickdeploy.sh | 10 - terraform/s3_policies.tf | 18 - terraform/s3_policies.tf.bak | 27 + terraform/terraform.jsonnet | 933 +++++++++++++----- 28 files changed, 3033 insertions(+), 502 deletions(-) delete mode 100644 terraform/cloudwatch-policy.tf create mode 100644 terraform/cloudwatch-policy.tf.bak create mode 100644 terraform/jsonnet/acm-old.libsonnet create mode 100644 terraform/jsonnet/api_gateway_map.libsonnet create mode 100644 terraform/jsonnet/iam.libsonnet create mode 100644 terraform/jsonnet/lambda.libsonnet create mode 100644 terraform/lambda_functions/delete_campaign/main.js create mode 100644 terraform/lambda_functions/delete_campaign/package-lock.json create mode 100644 terraform/lambda_functions/delete_campaign/package.json create mode 100644 terraform/lambda_functions/execute_campaign/main.js create mode 100644 terraform/lambda_functions/execute_campaign/package-lock.json create mode 100644 terraform/lambda_functions/execute_campaign/package.json create mode 100644 terraform/lambda_functions/get_campaign/main.js create mode 100644 terraform/lambda_functions/get_campaign/package-lock.json create mode 100644 terraform/lambda_functions/get_campaign/package.json create mode 100644 terraform/lambda_functions/list_campaigns/main.js create 
mode 100644 terraform/lambda_functions/list_campaigns/package-lock.json create mode 100644 terraform/lambda_functions/list_campaigns/package.json delete mode 100644 terraform/s3_policies.tf create mode 100644 terraform/s3_policies.tf.bak diff --git a/terraform/cloudwatch-policy.tf b/terraform/cloudwatch-policy.tf deleted file mode 100644 index 799e665..0000000 --- a/terraform/cloudwatch-policy.tf +++ /dev/null @@ -1,14 +0,0 @@ -data "aws_iam_policy_document" "cloudwatch_invoke_spot_monitor" { - - statement { - sid = "1" - - actions = [ - "lambda:InvokeFunction" - ] - - resources = [ - "${aws_lambda_function.spot_monitor.arn}" - ] - } -} \ No newline at end of file diff --git a/terraform/cloudwatch-policy.tf.bak b/terraform/cloudwatch-policy.tf.bak new file mode 100644 index 0000000..894f5aa --- /dev/null +++ b/terraform/cloudwatch-policy.tf.bak @@ -0,0 +1,22 @@ +data "aws_iam_policy_document" "cloudwatch_invoke_spot_monitor" { + statement { + sid = "1" + + actions = [ + "lambda:InvokeFunction", + ] + + # TF-UPGRADE-TODO: In Terraform v0.10 and earlier, it was sometimes necessary to + # force an interpolation expression to be interpreted as a list by wrapping it + # in an extra set of list brackets. That form was supported for compatibility in + # v0.11, but is no longer supported in Terraform v0.12. + # + # If the expression in the following list itself returns a list, remove the + # brackets to avoid interpretation as a list of lists. If the expression + # returns a single list item then leave it as-is and remove this TODO comment. + resources = [ + aws_lambda_function.spot_monitor.arn, + ] + } +} + diff --git a/terraform/jsonnet/acm-old.libsonnet b/terraform/jsonnet/acm-old.libsonnet new file mode 100644 index 0000000..b3b3091 --- /dev/null +++ b/terraform/jsonnet/acm-old.libsonnet @@ -0,0 +1,33 @@ +local certificate(dns_name) = { + "provider": "aws.us-east-1", + "domain_name": dns_name, + "validation_method": "DNS" +}; + +local certificate_validation(arn, fqdns) = { + "provider": "aws.us-east-1" , + "certificate_arn": arn, + "validation_record_fqdns": [fqdns] +}; + +local route53_record(name, type, records, zone) = { + "name": name, + "type": type, + "zone_id": zone, + "records": [records], + "ttl": 60 +}; + +local manual_record(name, type, records) = { + "value": "Create [" + type + "] record at [" + name + "] with value [" + records + "]" +}; + +{ + "certificate": certificate, + "certificate_validation": certificate_validation, + "route53_record": route53_record, + "manual_record": manual_record, + "lifecycle": { + "create_before_destroy": true + } +} \ No newline at end of file diff --git a/terraform/jsonnet/acm.libsonnet b/terraform/jsonnet/acm.libsonnet index b3b3091..5f69c0d 100644 --- a/terraform/jsonnet/acm.libsonnet +++ b/terraform/jsonnet/acm.libsonnet @@ -1,33 +1,32 @@ -local certificate(dns_name) = { - "provider": "aws.us-east-1", - "domain_name": dns_name, - "validation_method": "DNS" -}; - -local certificate_validation(arn, fqdns) = { - "provider": "aws.us-east-1" , - "certificate_arn": arn, - "validation_record_fqdns": [fqdns] -}; - -local route53_record(name, type, records, zone) = { - "name": name, - "type": type, - "zone_id": zone, - "records": [records], - "ttl": 60 -}; - -local manual_record(name, type, records) = { - "value": "Create [" + type + "] record at [" + name + "] with value [" + records + "]" -}; - { - "certificate": certificate, - "certificate_validation": certificate_validation, - "route53_record": route53_record, - "manual_record": manual_record, - 
"lifecycle": { - "create_before_destroy": true + certificate(name, dns_name, san, zone): { + aws_acm_certificate: { + [name]: { + provider: "aws.us-east-1", + domain_name: dns_name, + subject_alternative_names: san, + validation_method: "DNS", + + lifecycle: { + create_before_destroy: true + } + } + }, + aws_route53_record: { + [name + "-acm"]: { + name: "${tolist(aws_acm_certificate." + name + ".domain_validation_options)[0].resource_record_name}", + type: "${tolist(aws_acm_certificate." + name + ".domain_validation_options)[0].resource_record_type}", + records: ["${tolist(aws_acm_certificate." + name + ".domain_validation_options)[0].resource_record_value}"], + zone_id: zone, + ttl: 60 + } + }, + aws_acm_certificate_validation: { + [name]: { + provider: "aws.us-east-1" , + certificate_arn: "${aws_acm_certificate." + name + ".arn}", + validation_record_fqdns: ["${aws_route53_record." + name + "-acm.fqdn}"] + } + } } } \ No newline at end of file diff --git a/terraform/jsonnet/api_gateway_map.libsonnet b/terraform/jsonnet/api_gateway_map.libsonnet new file mode 100644 index 0000000..1a5af65 --- /dev/null +++ b/terraform/jsonnet/api_gateway_map.libsonnet @@ -0,0 +1,117 @@ +local join_objects(objs) = + local aux(arr, i, running) = + if i >= std.length(arr) then + running + else + aux(arr, i + 1, std.mergePatch(running, arr[i])) tailstrict; + aux(objs, 0, {}); + +local rest_api_map(api, pathParts) = { + foo(api, parent, path, object):: + local thispath = std.strReplace(std.strReplace(std.strReplace("%s-%s" % [path, object.pathPart], "}", ""), "{", ""), "+", ""); + std.mergePatch({ + + aws_api_gateway_resource: { + [thispath]: { + rest_api_id: "${aws_api_gateway_rest_api.%s.id}" % [api], + parent_id: parent, + path_part: object.pathPart + } + }, + aws_api_gateway_method: { + [std.asciiLower("%s_%s" % [thispath, method])]: { + rest_api_id: "${aws_api_gateway_rest_api.%s.id}" % [api], + resource_id: "${aws_api_gateway_resource.%s.id}" % [thispath], + http_method: method + } + object.methods[method].parameters + for method in std.objectFields(object.methods) + }, + aws_api_gateway_integration: { + [std.asciiLower("%s_%s" % [thispath, method])]: { + rest_api_id: "${aws_api_gateway_rest_api.%s.id}" % [api], + resource_id: "${aws_api_gateway_resource.%s.id}" % [thispath], + http_method: "${aws_api_gateway_method.%s.http_method}" % [std.asciiLower("%s_%s" % [thispath, method])] + } + + if (std.objectHas(object.methods[method], "lambdaIntegration")) then { + integration_http_method: "POST", + type: "AWS_PROXY", + uri: "arn:aws:apigateway:${var.region}:lambda:path/2015-03-31/functions/${aws_lambda_function.%s.arn}/invocations" % [object.methods[method].lambdaIntegration] + } else {} + + if (std.objectHas(object.methods[method], "optionsIntegration")) then { + type: "MOCK", + request_templates: { + "application/json": "{\"statusCode\": 200}", + } + } else {} + for method in std.objectFields(object.methods) + }, + aws_api_gateway_method_response: std.prune({ + [std.asciiLower("%s_%s" % [thispath, method])]: if (std.objectHas(object.methods[method], "optionsIntegration")) then { + rest_api_id: "${aws_api_gateway_rest_api.%s.id}" % [api], + resource_id: "${aws_api_gateway_resource.%s.id}" % [thispath], + http_method: method, + status_code: 200, + response_models: { + "application/json": "Empty" + }, + response_parameters: { + "method.response.header.Access-Control-Allow-Headers": true, + "method.response.header.Access-Control-Allow-Methods": true, + "method.response.header.Access-Control-Allow-Origin": 
true + }, + depends_on: ["aws_api_gateway_method.%s" % [std.asciiLower("%s_%s" % [thispath, method])]] + } else null + for method in std.objectFields(object.methods) + }), + aws_api_gateway_integration_response: std.prune({ + [std.asciiLower("%s_%s" % [thispath, method])]: if (std.objectHas(object.methods[method], "optionsIntegration")) then { + rest_api_id: "${aws_api_gateway_rest_api.%s.id}" % [api], + resource_id: "${aws_api_gateway_resource.%s.id}" % [thispath], + http_method: "${aws_api_gateway_method_response.%s.http_method}" % [std.asciiLower("%s_%s" % [thispath, method])], + status_code: 200, + response_parameters: { + "method.response.header.Access-Control-Allow-Headers": "'Content-Type,X-Amz-Date,Authorization,X-Api-Key,X-Amz-Security-Token'", + "method.response.header.Access-Control-Allow-Methods": "'GET,OPTIONS,POST,PUT,DELETE'", + "method.response.header.Access-Control-Allow-Origin": "'*'" + } + } else null + for method in std.objectFields(object.methods) + }), + aws_lambda_permission: std.prune({ + [std.asciiLower("%s_%s" % [thispath, method])]: if (std.objectHas(object.methods[method], "lambdaIntegration")) then { + statement_id: "AllowExecutionFromAPIGateway-%s" % [thispath], + action: "lambda:InvokeFunction", + function_name: "${aws_lambda_function.%s.arn}" % [object.methods[method].lambdaIntegration], + principal: "apigateway.amazonaws.com", + source_arn: "arn:aws:execute-api:${var.region}:${data.aws_caller_identity.current.account_id}:${aws_api_gateway_rest_api.%s.id}/*" % [api] + } else null + for method in std.objectFields(object.methods) + }) + }, if (std.objectHas(object, 'children')) then + join_objects([self.foo(api, "${" + std.strReplace(std.strReplace(std.strReplace("aws_api_gateway_resource.%s-%s.id" % [path, object.pathPart], "}", ""), "{", ""), "+", "") + "}", thispath, item) for item in object.children]) + else { }), + + resource: join_objects([self.foo(api, "${aws_api_gateway_rest_api.%s.root_resource_id}" % [api], "root", item) for item in pathParts.root.children]), +}; + +local rest_api(name, map) = + local api_map = rest_api_map(name, map) tailstrict; + std.mergePatch({ + resource: { + aws_api_gateway_rest_api: { + [name]: { + name: name, + } + map.parameters + }, + aws_api_gateway_deployment: { + [name]: map.deployment + { + rest_api_id: "${aws_api_gateway_rest_api.%s.id}" % [name], + depends_on: ["aws_api_gateway_integration.%s" % [integration] for integration in std.objectFields(api_map.resource.aws_api_gateway_integration)] + } + } + } + }, api_map); + +{ + rest_api: rest_api +} \ No newline at end of file diff --git a/terraform/jsonnet/cloudfront.libsonnet b/terraform/jsonnet/cloudfront.libsonnet index e9eb9f8..f90f896 100644 --- a/terraform/jsonnet/cloudfront.libsonnet +++ b/terraform/jsonnet/cloudfront.libsonnet @@ -1,79 +1,79 @@ { resource(settings): { - "aws_cloudfront_origin_access_identity": { - "npk": { - "comment": "OAI for NPK", + aws_cloudfront_origin_access_identity: { + npk: { + comment: "OAI for NPK", }, }, - "aws_cloudfront_distribution": { - "npk": { - "comment": "NPK", - "enabled": true, - "is_ipv6_enabled": false, - "default_root_object": "index.html", - "logging_config": { - "include_cookies": false, - "bucket": "${aws_s3_bucket.logs.bucket_domain_name}", - "prefix": "cloudfront", + aws_cloudfront_distribution: { + npk: { + comment: "NPK", + enabled: true, + is_ipv6_enabled: false, + default_root_object: "index.html", + logging_config: { + include_cookies: false, + bucket: "${aws_s3_bucket.logs.bucket_domain_name}", + prefix: 
"cloudfront", }, - "origin": { - "domain_name": "${aws_s3_bucket.static_site.bucket_regional_domain_name}", - "origin_id": "static", + origin: { + domain_name: "${aws_s3_bucket.static_site.bucket_regional_domain_name}", + origin_id: "static", - "s3_origin_config": { - "origin_access_identity": "${aws_cloudfront_origin_access_identity.npk.cloudfront_access_identity_path}", + s3_origin_config: { + origin_access_identity: "${aws_cloudfront_origin_access_identity.npk.cloudfront_access_identity_path}", } }, - "default_cache_behavior": { - "allowed_methods": ["DELETE", "GET", "HEAD", "OPTIONS", "PATCH", "POST", "PUT"], - "cached_methods": ["GET", "HEAD"], - "target_origin_id": "static", - "forwarded_values": { - "query_string": false, - "headers": ["Origin","Access-Control-Allow-Origin","Access-Control-Request-Method","Access-Control-Request-Headers"], - "cookies": { - "forward": "none", + default_cache_behavior: { + allowed_methods: ["DELETE", "GET", "HEAD", "OPTIONS", "PATCH", "POST", "PUT"], + cached_methods: ["GET", "HEAD"], + target_origin_id: "static", + forwarded_values: { + query_string: false, + headers: ["Origin","Access-Control-Allow-Origin","Access-Control-Request-Method","Access-Control-Request-Headers"], + cookies: { + forward: "none", } }, - "viewer_protocol_policy": "redirect-to-https", - "min_ttl": 0, - "max_ttl": 300, - "default_ttl": 0, + viewer_protocol_policy: "redirect-to-https", + min_ttl: 0, + max_ttl: 300, + default_ttl: 0, }, - "price_class": "PriceClass_100", - "tags": { - "Project": "NPK", + price_class: "PriceClass_100", + tags: { + Project: "NPK", }, - "viewer_certificate": { - "cloudfront_default_certificate": true, + viewer_certificate: { + cloudfront_default_certificate: true, } } + if std.objectHas(settings, "georestrictions") && std.length(settings.georestrictions) > 0 then { - "restrictions": { - "geo_restriction": { - "restriction_type": "whitelist", - "locations" : settings.georestrictions, + restrictions: { + geo_restriction: { + restriction_type: "whitelist", + locations: settings.georestrictions, } } } else { - "restrictions": { - "geo_restriction": { - "restriction_type": "none", + restrictions: { + geo_restriction: { + restriction_type: "none", } } } + if settings.useCustomDNS then { - "aliases": [i for i in settings.dnsNames.www], - "viewer_certificate": { - "cloudfront_default_certificate": false, - "acm_certificate_arn": "${aws_acm_certificate.www-0.arn}", - "ssl_support_method": "sni-only", + aliases: ["www.%s" % [settings.dnsBaseName]], + viewer_certificate: { + cloudfront_default_certificate: false, + acm_certificate_arn: "${aws_acm_certificate.main.arn}", + ssl_support_method: "sni-only", } } else { } } }, - "output": { - "cloudfront_url": { - "value": "${aws_cloudfront_distribution.npk.domain_name}" + output: { + cloudfront_url: { + value: "${aws_cloudfront_distribution.npk.domain_name}" } } } \ No newline at end of file diff --git a/terraform/jsonnet/cognito.libsonnet b/terraform/jsonnet/cognito.libsonnet index f61c4f2..28338d9 100644 --- a/terraform/jsonnet/cognito.libsonnet +++ b/terraform/jsonnet/cognito.libsonnet @@ -1,132 +1,132 @@ { resource(settings): { - "aws_cognito_user_pool": { - "npk": { - "name": "NPK", - "mfa_configuration": "${var.cognito_user_mfa}", - "password_policy": { - "minimum_length": 12, - "require_lowercase": true, - "require_uppercase": true, - "require_symbols": false, - "require_numbers": true + aws_cognito_user_pool: { + npk: { + name: "NPK", + mfa_configuration: "${var.cognito_user_mfa}", + password_policy: { + 
minimum_length: 12, + require_lowercase: true, + require_uppercase: true, + require_symbols: false, + require_numbers: true }, - "admin_create_user_config": { - "allow_admin_create_user_only": true, - "invite_message_template": { - "email_subject": "NPK Invitation", - "email_message": "You've been invited to join an NPK deployment at https://${aws_cloudfront_distribution.npk.domain_name}. Use {username} and {####} to log in.", - "sms_message": "NPK user created. Use {username} and {####} to log in." + admin_create_user_config: { + allow_admin_create_user_only: true, + invite_message_template: { + email_subject: "NPK Invitation", + email_message: "You've been invited to join an NPK deployment at https://${aws_cloudfront_distribution.npk.domain_name}. Use {username} and {####} to log in.", + sms_message: "NPK user created. Use {username} and {####} to log in." } + if settings.useCustomDNS then { - "email_message": "You've been invited to join an NPK deployment at https://" + settings.dnsNames.www[0] + ". Use {username} and {####} to log in." + email_message: "You've been invited to join an NPK deployment at https://" + settings.wwwEndpoint + ". Use {username} and {####} to log in." } else { } }, - "auto_verified_attributes": ["email"], - "username_attributes": ["email"] + auto_verified_attributes: ["email"], + username_attributes: ["email"] } }, - "aws_cognito_user_pool_client": { - "npk": { - "name": "npk_client", - "user_pool_id": "${aws_cognito_user_pool.npk.id}", - "generate_secret": false - } + if settings.useSAML == true then { - "allowed_oauth_flows_user_pool_client": "true", - "supported_identity_providers": ["${aws_cognito_identity_provider.saml.provider_name}"], - "allowed_oauth_scopes": ["email", "openid"], - "allowed_oauth_flows": ["code"], - "callback_urls": if settings.useCustomDNS == true then [ - "https://" + settings.dnsNames.www[0] + aws_cognito_user_pool_client: { + npk: { + name: "npk_client", + user_pool_id: "${aws_cognito_user_pool.npk.id}", + generate_secret: false + } + if settings.useSAML then { + allowed_oauth_flows_user_pool_client: "true", + supported_identity_providers: ["${aws_cognito_identity_provider.saml.provider_name}"], + allowed_oauth_scopes: ["email", "openid"], + allowed_oauth_flows: ["code"], + callback_urls: if settings.useCustomDNS then [ + "https://" + settings.wwwEndpoint ] else [ "https://${aws_cloudfront_distribution.npk.domain_name}" ], - "logout_urls": if settings.useCustomDNS == true then [ - "https://" + settings.dnsNames.www[0] + logout_urls: if settings.useCustomDNS then [ + "https://" + settings.wwwEndpoint ] else [ "https://${aws_cloudfront_distribution.npk.domain_name}" ] } else {} }, - "aws_cognito_identity_pool": { - "main": { - "identity_pool_name": "NPK Identity Pool", - "allow_unauthenticated_identities": false, - "cognito_identity_providers": { - "client_id": "${aws_cognito_user_pool_client.npk.id}", - "provider_name": "${aws_cognito_user_pool.npk.endpoint}", - "server_side_token_check": false, + aws_cognito_identity_pool: { + main: { + identity_pool_name: "NPK Identity Pool", + allow_unauthenticated_identities: false, + cognito_identity_providers: { + client_id: "${aws_cognito_user_pool_client.npk.id}", + provider_name: "${aws_cognito_user_pool.npk.endpoint}", + server_side_token_check: false, }, - "provisioner": { + provisioner: { "local-exec": { - "command": "aws --region " + settings.defaultRegion + " --profile " + settings.awsProfile + " cognito-idp admin-create-user --user-pool-id ${aws_cognito_user_pool.npk.id} --username 
${random_string.admin_password.keepers.admin_email} --user-attributes '[{\"Name\": \"email\", \"Value\": \"${random_string.admin_password.keepers.admin_email}\"}, {\"Name\": \"email_verified\", \"Value\": \"true\"}]' --temporary-password ${random_string.admin_password.result}", - "on_failure": "continue" + command: "aws --region " + settings.defaultRegion + " --profile " + settings.awsProfile + " cognito-idp admin-create-user --user-pool-id ${aws_cognito_user_pool.npk.id} --username ${random_string.admin_password.keepers.admin_email} --user-attributes '[{\"Name\": \"email\", \"Value\": \"${random_string.admin_password.keepers.admin_email}\"}, {\"Name\": \"email_verified\", \"Value\": \"true\"}]' --temporary-password ${random_string.admin_password.result}", + on_failure: "continue" } } } }, - "aws_cognito_user_group": { + aws_cognito_user_group: { "npk-admins": { - "name": "npk-admins", - "user_pool_id": "${aws_cognito_user_pool.npk.id}", - "description": "Administrators of NPK", - "precedence": "0", - "role_arn": "${aws_iam_role.cognito_admins.arn}" + name: "npk-admins", + user_pool_id: "${aws_cognito_user_pool.npk.id}", + description: "Administrators of NPK", + precedence: "0", + role_arn: "${aws_iam_role.cognito_admins.arn}" } } - } + (if settings.useSAML == true then { - "aws_cognito_identity_provider": { - "saml": { - "user_pool_id": "${aws_cognito_user_pool.npk.id}", - "provider_name": "NPKSAML", - "provider_type": "SAML", + } + (if settings.useSAML then { + aws_cognito_identity_provider: { + saml: { + user_pool_id: "${aws_cognito_user_pool.npk.id}", + provider_name: "NPKSAML", + provider_type: "SAML", - "provider_details": { - "IDPSignout": "false" + provider_details: { + IDPSignout: "false" } + if std.objectHas(settings, "sAMLMetadataUrl") then { - "MetadataURL": settings.sAMLMetadataUrl + MetadataURL: settings.sAMLMetadataUrl } else {} + if std.objectHas(settings, "sAMLMetadataFile") then { - "MetadataFile": "${file(\"" + settings.sAMLMetadataFile + "\")}" + MetadataFile: "${file(\"" + settings.sAMLMetadataFile + "\")}" } else {}, - "attribute_mapping": { - "email": "http://schemas.xmlsoap.org/ws/2005/05/identity/claims/emailaddress" + attribute_mapping: { + email: "http://schemas.xmlsoap.org/ws/2005/05/identity/claims/emailaddress" } } } - } else {}) + (if settings.useSAML == true && settings.useCustomDNS == false then { - "aws_cognito_user_pool_domain": { - "saml": { - "domain": "${random_string.saml_domain.result}", - "user_pool_id": "${aws_cognito_user_pool.npk.id}" + } else {}) + (if settings.useSAML && !settings.useCustomDNS then { + aws_cognito_user_pool_domain: { + saml: { + domain: "${random_string.saml_domain.result}", + user_pool_id: "${aws_cognito_user_pool.npk.id}" } } - } else {}) + (if settings.useSAML == true && settings.useCustomDNS == true then { - "aws_cognito_user_pool_domain": { - "saml": { - "domain": "auth." + settings.dnsNames.www[0], - "certificate_arn": "${aws_acm_certificate.saml.arn}", - "user_pool_id": "${aws_cognito_user_pool.npk.id}" + } else {}) + (if settings.useSAML && settings.useCustomDNS then { + aws_cognito_user_pool_domain: { + saml: { + domain: "auth." 
+ settings.wwwEndpoint, + certificate_arn: "${aws_acm_certificate.saml.arn}", + user_pool_id: "${aws_cognito_user_pool.npk.id}" } } } else {}), output(settings): { - "admin_create_user_command": { - "value": "aws --region " + settings.defaultRegion + " --profile " + settings.awsProfile + " cognito-idp admin-create-user --user-pool-id ${aws_cognito_user_pool.npk.id} --username ${random_string.admin_password.keepers.admin_email} --user-attributes '[{\"Name\": \"email\", \"Value\": \"${random_string.admin_password.keepers.admin_email}\"}, {\"Name\": \"email_verified\", \"Value\": \"true\"}]' --temporary-password ${random_string.admin_password.result}" + admin_create_user_command: { + value: "aws --region " + settings.defaultRegion + " --profile " + settings.awsProfile + " cognito-idp admin-create-user --user-pool-id ${aws_cognito_user_pool.npk.id} --username ${random_string.admin_password.keepers.admin_email} --user-attributes '[{\"Name\": \"email\", \"Value\": \"${random_string.admin_password.keepers.admin_email}\"}, {\"Name\": \"email_verified\", \"Value\": \"true\"}]' --temporary-password ${random_string.admin_password.result}" }, - "admin_join_group_command": { - "value": "aws --region " + settings.defaultRegion + " --profile " + settings.awsProfile + " cognito-idp admin-add-user-to-group --user-pool-id ${aws_cognito_user_pool.npk.id} --username ${random_string.admin_password.keepers.admin_email} --group npk-admins --user-attributes '[{\"Name\": \"email\", \"Value\": \"${random_string.admin_password.keepers.admin_email}\"}, {\"Name\": \"email_verified\", \"Value\": \"true\"}]' --temporary-password ${random_string.admin_password.result}" + admin_join_group_command: { + value: "aws --region " + settings.defaultRegion + " --profile " + settings.awsProfile + " cognito-idp admin-add-user-to-group --user-pool-id ${aws_cognito_user_pool.npk.id} --username ${random_string.admin_password.keepers.admin_email} --group npk-admins --user-attributes '[{\"Name\": \"email\", \"Value\": \"${random_string.admin_password.keepers.admin_email}\"}, {\"Name\": \"email_verified\", \"Value\": \"true\"}]' --temporary-password ${random_string.admin_password.result}" } - } + (if settings.useSAML == true then { - "saml_entity_id": { - "value": "urn:amazon:cognito:sp:${aws_cognito_user_pool.npk.id}" + } + (if settings.useSAML then { + saml_entity_id: { + value: "urn:amazon:cognito:sp:${aws_cognito_user_pool.npk.id}" } - } else {}) + (if settings.useSAML == true && settings.useCustomDNS == false then { - "saml_acs_url": { - "value": "https://${random_string.saml_domain.result}.auth.us-west-2.amazoncognito.com/saml2/idpresponse" + } else {}) + (if settings.useSAML && !settings.useCustomDNS then { + saml_acs_url: { + value: "https://${random_string.saml_domain.result}.auth.us-west-2.amazoncognito.com/saml2/idpresponse" } - } else {}) + (if settings.useSAML == true && settings.useCustomDNS == true then { - "saml_acs_url": { - "value": "https://auth." + settings.dnsNames.www[0] + "/saml2/idpresponse" + } else {}) + (if settings.useSAML && settings.useCustomDNS then { + saml_acs_url: { + value: "https://auth." 
+ settings.wwwEndpoint + "/saml2/idpresponse" } } else {}) } \ No newline at end of file diff --git a/terraform/jsonnet/iam.libsonnet b/terraform/jsonnet/iam.libsonnet new file mode 100644 index 0000000..e406707 --- /dev/null +++ b/terraform/jsonnet/iam.libsonnet @@ -0,0 +1,40 @@ +// name: The name of the IAM role +// policy_attachments: an object as { "name": } to attach to the role +// inline_policies: an objects as { "name": } to attach inline. +// trust_policy: a raw trust policy string or 'statements' array. + + +local iam_role(name, description, policy_attachments, inline_policies, trust_policy) = std.prune({ + aws_iam_role: { + [name]: { + name: name, + description: description, + assume_role_policy: if std.isArray(trust_policy) then std.manifestJsonEx({ + Version: "2012-10-17", + Statement: trust_policy + }, " ") else trust_policy, + } + }, + aws_iam_role_policy_attachment: { + [name + "-" + i]: { + role: "${aws_iam_role." + name + ".id}", + policy_arn: policy_attachments[i] + } + for i in std.objectFields(policy_attachments) + }, + aws_iam_role_policy: { + [name + "-" + i]: { + name: i, + role: "${aws_iam_role." + name + ".id}", + policy: if std.isArray(inline_policies[i]) then std.manifestJsonEx({ + Version: "2012-10-17", + Statement: inline_policies[i] + }, " ") else inline_policies[i] + } + for i in std.objectFields(inline_policies) + } +}); + +{ + iam_role: iam_role +} \ No newline at end of file diff --git a/terraform/jsonnet/lambda.libsonnet b/terraform/jsonnet/lambda.libsonnet new file mode 100644 index 0000000..335ab2b --- /dev/null +++ b/terraform/jsonnet/lambda.libsonnet @@ -0,0 +1,133 @@ +local lambda_function(name, config, role_policy) = { + resource: { + aws_lambda_function: { + [name]: config + { + function_name: name, + filename: "./lambda_functions/zip_files/" + name + ".zip", + source_code_hash: "${data.archive_file." + name + ".output_base64sha256}", + runtime: "nodejs12.x", + role: "${aws_iam_role.lambda-" + name + ".arn}", + depends_on: ["data.archive_file." 
+ name, "aws_iam_role_policy.lambda-" + name], + } + }, + null_resource: { + ["npm_install-" + name]: { + provisioner: [{ + "local-exec": { + command: "cd ${path.module}/lambda_functions/" + name + "/ && npm install", + } + }] + } + }, + aws_iam_role: { + ["lambda-" + name]: { + name: "lambda_" + name, + description: "Lambda Role for " + name, + assume_role_policy: '{"Version": "2012-10-17","Statement": [{ + "Effect": "Allow","Principal": {"Service": ["lambda.amazonaws.com"]}, + "Action": "sts:AssumeRole" + }]}' + } + }, + aws_iam_role_policy: { + ["lambda-" + name]: { + name: "lambda_" + name, + role: "${aws_iam_role.lambda-" + name + ".id}", + + policy: "${data.aws_iam_policy_document.lambda-" + name + ".json}", + } + }, + aws_iam_policy_attachment:: { + ["lambda-" + name + "-xray"]: { + name: "lambda-" + name + "-xray", + roles: ["${aws_iam_role.lambda-" + name + ".id}"], + + policy_arn: "arn:aws:iam::aws:policy/AWSXRayDaemonWriteAccess" + } + }, + local_file: { + ["lambda-" + name + "_envvars"]: { + content: std.join("\n", [ + "export %s=%s" % [key, config.environment.variables[key]] + for key in std.objectFields(config.environment.variables) + ]), + filename: "${path.module}/lambda_functions/" + name + "/ENVVARS", + file_permission: "0664" + } + } + }, + data: { + archive_file: { + [name]: { + depends_on: [ + "null_resource.npm_install-" + name + ], + type: "zip", + source_dir: "${path.module}/lambda_functions/" + name + "/", + output_path: "${path.module}/lambda_functions/zip_files/" + name + ".zip", + } + }, + aws_iam_policy_document: { + ["lambda-" + name]: { + statement: std.flattenArrays([ + role_policy.statement, + [{ + sid: "logs", + actions: [ + "logs:CreateLogGroup", + "logs:CreateLogStream", + "logs:PutLogEvents", + ], + resources: [ + "arn:aws:logs:*:*:*" + ] + }, { + sid: "xray", + actions: [ + "xray:PutTraceSegments", + "xray:PutTelemetryRecords", + "xray:GetSamplingRules", + "xray:GetSamplingTargets", + "xray:GetSamplingStatisticSummaries" + ], + resources: [ + "*" + ] + }] + ]) + } + } + } +}; + +local cloudwatch_trigger(name, schedule_expression) = { + resource: { + aws_lambda_permission: { + [name]: { + statement_id: "AllowExecutionFromCloudWatch", + action: "lambda:InvokeFunction", + function_name: "${aws_lambda_function." + name + ".function_name}", + principal: "events.amazonaws.com", + source_arn: "${aws_cloudwatch_event_rule." + name + ".arn}" + } + }, + aws_cloudwatch_event_rule: { + [name]: { + name: name, + schedule_expression: schedule_expression + } + }, + aws_cloudwatch_event_target: { + [name]: { + rule: name, + target_id: name, + arn: "${aws_lambda_function." + name + ".arn}" + } + } + } +}; + +{ + lambda_function: lambda_function, + cloudwatch_trigger: cloudwatch_trigger +} \ No newline at end of file diff --git a/terraform/jsonnet/routetable.libsonnet b/terraform/jsonnet/routetable.libsonnet index de133df..03fc1d9 100644 --- a/terraform/jsonnet/routetable.libsonnet +++ b/terraform/jsonnet/routetable.libsonnet @@ -1,10 +1,18 @@ local routetable(region) = { provider: "aws." + region, vpc_id: "${aws_vpc." + region + ".id}", - route: { + route: [{ cidr_block: "0.0.0.0/0", - gateway_id: "${aws_internet_gateway." + region + ".id}" - } + gateway_id: "${aws_internet_gateway." 
+ region + ".id}", + egress_only_gateway_id: "", + instance_id: "", + ipv6_cidr_block: "", + local_gateway_id: "", + nat_gateway_id: "", + network_interface_id: "", + transit_gateway_id: "", + vpc_peering_connection_id: "" + }] }; local endpoint(region, endpoint) = { diff --git a/terraform/jsonnet/templates.libsonnet b/terraform/jsonnet/templates.libsonnet index 7217f93..a24fa80 100644 --- a/terraform/jsonnet/templates.libsonnet +++ b/terraform/jsonnet/templates.libsonnet @@ -7,57 +7,57 @@ local az(region) = { data(settings):: local regionKeys = std.objectFields(settings.regions); { - "template_file": { - "npk_config": { - "template": "${file(\"${path.module}/templates/npk_config.tpl\")}", + template_file: { + npk_config: { + template: "${file(\"${path.module}/templates/npk_config.tpl\")}", - "vars": { - "aws_region": "${var.region}", - "client_id": "${aws_cognito_user_pool_client.npk.id}", - "user_pool_id": "${aws_cognito_user_pool.npk.id}", - "identity_pool_id": "${aws_cognito_identity_pool.main.id}", - "userdata_bucket": "${aws_s3_bucket.user_data.id}", - "use_SAML": "${var.useSAML}", - "saml_domain": "", - "saml_redirect": "", - "g_quota": settings.quotas.gquota, - "p_quota": settings.quotas.pquota, - "api_gateway_url": if settings.useCustomDNS then - settings.dnsNames.api[0] + vars: { + aws_region: "${var.region}", + client_id: "${aws_cognito_user_pool_client.npk.id}", + user_pool_id: "${aws_cognito_user_pool.npk.id}", + identity_pool_id: "${aws_cognito_identity_pool.main.id}", + userdata_bucket: "${aws_s3_bucket.user_data.id}", + use_SAML: "${var.useSAML}", + saml_domain: "", + saml_redirect: "", + g_quota: settings.quotas.gquota, + p_quota: settings.quotas.pquota, + api_gateway_url: if settings.useCustomDNS then + settings.apiEndpoint else "${element(split(\"/\", aws_api_gateway_deployment.npk.invoke_url), 2)}" - } + (if settings.useSAML == true && settings.useCustomDNS == false then { - "saml_domain": "${aws_cognito_user_pool_domain.saml.domain}.auth.us-west-2.amazoncognito.com", - "saml_redirect": "https://${aws_cloudfront_distribution.npk.domain_name}" - } else {}) + (if settings.useSAML == true && settings.useCustomDNS == true then { - "saml_domain": "auth." 
+ settings.dnsNames.www[0], - "saml_redirect": "https://" + settings.dnsNames.www[0] + } + (if settings.useSAML && !settings.useCustomDNS then { + saml_domain: "${aws_cognito_user_pool_domain.saml.domain}.auth.us-west-2.amazoncognito.com", + saml_redirect: "https://${aws_cloudfront_distribution.npk.domain_name}" + } else {}) + (if settings.useSAML && settings.useCustomDNS then { + saml_domain: settings.authEndpoint, + saml_redirect: "https://" + settings.wwwEndpoint } else {}) }, - "userdata_template": { - "template": "${file(\"${path.module}/templates/userdata.tpl\")}", + userdata_template: { + template: "${file(\"${path.module}/templates/userdata.tpl\")}", - "vars": { - "dictionaryBuckets": std.strReplace(std.manifestJsonEx({ + vars: { + dictionaryBuckets: std.strReplace(std.manifestJsonEx({ [regionKeys[i]]: "${var.dictionary-" + regionKeys[i] + "-id}" for i in std.range(0, std.length(regionKeys) - 1) }, ""), "\n", ""), - "userdata": "${aws_s3_bucket.user_data.id}" + userdata: "${aws_s3_bucket.user_data.id}" } } } }, - "resource": { - "local_file": { - "npk_config": { - "content": "${data.template_file.npk_config.rendered}", - "filename": "${path.module}/../site-content/angular/npk_config.js", + resource: { + local_file: { + npk_config: { + content: "${data.template_file.npk_config.rendered}", + filename: "${path.module}/../site-content/angular/npk_config.js", }, - "userdata_template": { - "content": "${data.template_file.userdata_template.rendered}", - "filename": "${path.module}/lambda_functions/proxy_api_handler/userdata.sh", + userdata_template: { + content: "${data.template_file.userdata_template.rendered}", + filename: "${path.module}/lambda_functions/proxy_api_handler/userdata.sh", } } }, - "az": az + az: az } \ No newline at end of file diff --git a/terraform/lambda_functions/delete_campaign/main.js b/terraform/lambda_functions/delete_campaign/main.js new file mode 100644 index 0000000..4951c00 --- /dev/null +++ b/terraform/lambda_functions/delete_campaign/main.js @@ -0,0 +1,323 @@ +const aws = require('aws-sdk'); +const ddb = new aws.DynamoDB({ region: "us-west-2" }); +const s3 = new aws.S3({ region: "us-west-2" }); + +let cb = ""; +let variables = {}; + +exports.main = async function(event, context, callback) { + + // Hand off the callback function for later. + cb = callback; + + // Get the available envvars into a usable format. + variables = JSON.parse(JSON.stringify(process.env)); + + try { + + console.log("Received event: " + JSON.stringify(event)); + + // Hand off the origin, too. Fix for weird case + origin = event?.headers?.origin ?? event?.headers?.Origin; + + var allowed_characters = /^[a-zA-Z0-9'"%\.\[\]\{\}\(\)\-\:\\\/\;\=\?\#\_+\s,!@#\$\^\*&]+$/; + if (!allowed_characters.test(JSON.stringify(event))) { + console.log("Request contains illegal characters"); + return respond(400, {}, "Request contains illegal characters", false); + } + + if (event?.requestContext?.identity?.cognitoAuthenticationType != "authenticated") { + console.log(`cognitoAuthenticationType ${event?.requestContext?.identity?.cognitoAuthenticationType} != "authenticated"`) + return respond(401, {}, "Authentication Required", false); + } + + var body = {}; + // Unencode the body if necessary + if (!!event?.body) { + body = (event.requestContext.isBase64Encoded) ? atob(event.body) : event.body; + + // Body will always be a JSON object. + try { + body = JSON.parse(body); + } catch (e) { + return respond(400, "Body must be JSON object", false); + } + } + + // Associate the user identity. 
+ const [ UserPoolId,, Username ] = event?.requestContext?.identity?.cognitoAuthenticationProvider?.split('/')[2]?.split(':'); + + if (!UserPoolId || !Username) { + console.log(`UserPoolId or Username is missing from ${event?.requestContext?.identity?.cognitoAuthenticationProvider}`); + respond(401, "Authorization Required"); + } + + } catch (e) { + console.log("Failed to process request.", e); + return respond(500, {}, "Failed to process request.", false); + } + + try { + const user = await cognito.adminGetUser({ UserPoolId, Username }).promise(); + + // Restructure UserAttributes as an k:v + user.UserAttributes = user.UserAttributes.reduce((attrs, entry) => { + attrs[entry.Name] = entry.Value + }, {}); + + if (!user?.UserAttributes?.email) { + return respond(401, {}, "Unable to obtain user properties.", false); + } + + } catch (e) { + console.log("Unable to retrieve user context.", e); + return respond(500, {}, "Unable to retrieve user context.", false); + } + + console.log(event.pathParameters) + + const campaignId = event?.pathParameters?.campaign; + + // Get the campaign entry from DynamoDB, and manifest from S3. + // * In parallel, to save, like, some milliseconds. + + try { + const [campaign, manifestObject] = await Promise.all([ + ddb.query({ + ExpressionAttributeValues: { + ':id': {S: entity}, + ':keyid': {S: `campaigns:${campaignId}`} + }, + KeyConditionExpression: 'userid = :id and keyid = :keyid', + TableName: "Campaigns" + }).promise(), + + s3.getObject({ + Bucket: variables.userdata_bucket, + Key: `${entity}/campaigns/${campaign}/manifest.json` + }).promise() + ]); + + const manifest = JSON.parse(manifestObject.Body.toString('ascii')); + } catch (e) { + console.log("Failed to retrieve campaign details.", e); + return respond(500, {}, "Failed to retrieve campaign details."); + } + + if (campaign.Items?.[0]?.status?.S != "AVAILABLE") { + return respond(404, {}, "Campaign doesn't exist or is not in 'AVAILABLE' status."); + } + + console.log(campaign, manifest); + + // Test whether the provided presigned URL is expired. + + try { + var expires = /Expires=([\d]+)&/.exec(manifest.hashFileUrl)[1]; + } catch (e) { + return respond(400, "Invalid hashFileUrl; missing expiration"); + } + + var duration = expires - (new Date().getTime() / 1000); + if (duration < 900) { + return respond(400, {} `hashFileUrl must be valid for at least 900 seconds, got ${Math.floor(duration)}`); + } + + // Campaign is valid. Get AZ pricing and Image AMI + // * Again in parallel, to save, like, some more milliseconds. + + try { + const ec2 = new aws.EC2({region: manifest.region}); + const [pricing, image] = await Promise.all([ + ec2.describeSpotPriceHistory({ + EndTime: Math.round(Date.now() / 1000), + ProductDescriptions: [ "Linux/UNIX (Amazon VPC)" ], + InstanceTypes: [ manifest.instanceType ], + StartTime: Math.round(Date.now() / 1000) + }), + + ec2.describeImages({ + Filters: [{ + Name: "virtualization-type", + Values: ["hvm"] + },{ + Name: "name", + Values: ["amzn2-ami-graphics-hvm-2*"] + },{ + Name: "root-device-type", + Values: ["ebs"] + },{ + Name: "owner-id", + Values: ["679593333241"] + }] + }) + ]); + } catch (e) { + console.log("Failed to retrieve price and image details.", e); + return respond(500, {}, "Failed to retrieve price and image details."); + } + + try { + + // Calculate the necessary volume size + + const volumeSize = Math.ceil(manifest.wordlistSize / 1073741824) + 1; + console.log(`Wordlist is ${manifest.wordlistSize / 1073741824}GiB. 
Allocating ${volumeSize}GiB`); + + // Build a launchSpecification for each AZ in the target region. + + const instance_userdata = new Buffer(fs.readFileSync(__dirname + '/userdata.sh', 'utf-8') + .replace("{{APIGATEWAY}}", process.env.apigateway)) + .toString('base64'); + + const launchSpecificationTemplate = { + IamInstanceProfile: { + Arn: variables.instanceProfile + }, + ImageId: image.ImageId, + KeyName: "npk-key", + InstanceType: manifest.instanceType, + BlockDeviceMappings: [{ + DeviceName: '/dev/xvdb', + Ebs: { + DeleteOnTermination: true, + Encrypted: false, + VolumeSize: volumeSize, + VolumeType: "gp2" + } + }], + NetworkInterfaces: [{ + AssociatePublicIpAddress: true, + DeviceIndex: 0, + // SubnetId: Gets populated below. + }], + Placement: { + // AvailabilityZone: Gets populated below. + }, + TagSpecifications: [{ + ResourceType: "instance", + Tags: [{ + Key: "MaxCost", + Value: ((manifest.priceTarget < variables.campaign_max_price) ? manifest.priceTarget : variables.campaign_max_price).toString() + }, { + Key: "ManifestPath", + Value: `${entity}/campaigns/${campaignId}` + }] + }], + UserData: instance_userdata + }; + + // Create a copy of the launchSpecificationTemplate for each AvailabilityZone in the campaign's region. + + const launchSpecifications = Object.keys(variables.availabilityZones[manifest.region]).reduce((specs, entry) => { + const az = JSON.parse(JSON.stringify(launchSpecificationTemplate)); // Have to deep-copy to avoid referential overrides. + + az.Placement.AvailabilityZone = entry; + az.NetworkInterfaces[0].SubnetId = variables.availabilityZones[manifest.region][entry]; + + return specs.concat(az); + }, []); + + // Get the average spot price across all AZs in the region. + const spotPrice = pricing.reduce((average, entry) => average + (entry / pricing.length), 0); + const maxDuration = (Number(manifest.instanceDuration) < variables.campaign_max_price / spotPrice) ? Number(manifest.instanceDuration) : variables.campaign_max_price / spotPrice; + + console.log(spotPrice, maxDuration, variables.campaign_max_price); + + const spotFleetParams = { + SpotFleetRequestConfig: { + AllocationStrategy: "lowestPrice", + IamFleetRole: variables.iamFleetRole, + InstanceInterruptionBehavior: "terminate", + LaunchSpecifications: launchSpecifications, + SpotPrice: (manifest.priceTarget / (manifest.instanceCount * manifest.instanceDuration) * 2).toString(), + TargetCapacity: manifest.instanceCount, + ReplaceUnhealthyInstances: false, + TerminateInstancesWithExpiration: true, + Type: "request", + ValidFrom: (new Date().getTime() / 1000), + ValidUntil: (new Date().getTime() / 1000) + (maxDuration * 3600) + } + }; + + console.log(JSON.stringify(spotFleetParams)); + } catch (e) { + console.log("Failed to generate launch specifications.", e); + return respond(500, {}, "Failed to generate launch specifications."); + } + + try { + const spotFleetRequest = await ec2.requestSpotFleet(spotFleetParams).promise(); + } catch (e) { + console.log("Failed to request spot fleet.", e); + return respond(500, {}, "Failed to request spot fleet."); + } + + // Campaign created successfully. 
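+	// Persist the spot fleet request ID and flip the campaign's status to 'STARTING' in DynamoDB.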
+ + console.log(`Successfully requested spot fleet ${spotFleetRequest.SpotFleetRequestId}`); + + try { + const updateCampaign = await ddb.updateItem({ + Key: { + userid: {S: entity}, + keyid: {S: `campaigns:${campaign}`} + }, + TableName: "Campaigns", + AttributeUpdates: { + active: { Action: "PUT", Value: { BOOL: true }}, + status: { Action: "PUT", Value: { S: "STARTING" }}, + spotFleetRequestId: { Action: "PUT", Value: { S: data.SpotFleetRequestId }}, + startTime: { Action: "PUT", Value: { N: Math.floor(new Date().getTime() / 1000) }}, + eventType: { Action: "PUT", Value: { S: "CampaignStarted" }} + } + }).promise(); + } catch (e) { + console.log("Spot fleet submitted, but failed to mark Campaign as 'STARTING'. This is a catastrophic error.", e); + return respond(500, {}, "Spot fleet submitted, but failed to mark Campaign as 'STARTING'. This is a catastrophic error.", false); + } + + return respond(200, {}, { msg: "Campaign started successfully", campaignId: campaignId, spotFleetRequestId: spotFleetRequest.SpotFleetRequestId }, true); +} + +function respond(statusCode, headers, body, success) { + + // Include terraform dns names as allowed origins, as well as localhost. + const allowed_origins = JSON.parse(variables.www_dns_names); + allowed_origins.push("https://localhost"); + + headers['Content-Type'] = 'text/plain'; + + if (allowed_origins.indexOf(origin) !== false) { + // Echo the origin back. I guess this is the best way to support multiple origins + headers['Access-Control-Allow-Origin'] = origin; + } else { + console.log("Invalid origin received.", origin); + } + + switch (typeof body) { + case "string": + body = { msg: body, success: success }; + break; + + case "object": + body.success = success; + break; + } + + const response = { + statusCode: statusCode, + headers: headers, + body: JSON.stringify(body), + } + + console.log(JSON.stringify(response)); + + cb(null, response); + + if (success == true) { + return Promise.resolve(body.msg); + } else { + return Promise.reject(body.msg); + } +} \ No newline at end of file diff --git a/terraform/lambda_functions/delete_campaign/package-lock.json b/terraform/lambda_functions/delete_campaign/package-lock.json new file mode 100644 index 0000000..7c8e64a --- /dev/null +++ b/terraform/lambda_functions/delete_campaign/package-lock.json @@ -0,0 +1,102 @@ +{ + "name": "execute_campaign", + "version": "1.0.0", + "lockfileVersion": 1, + "requires": true, + "dependencies": { + "aws-sdk": { + "version": "2.982.0", + "resolved": "https://registry.npmjs.org/aws-sdk/-/aws-sdk-2.982.0.tgz", + "integrity": "sha512-5w+m8Ia35NqB4TOZHEKts5zSV+FTdc7hTYbN4N4lZ4YU3cLTMt496ojh5UI3Deo8IIlqgTf3UVuq6Y6cPpVxkg==", + "requires": { + "buffer": "4.9.2", + "events": "1.1.1", + "ieee754": "1.1.13", + "jmespath": "0.15.0", + "querystring": "0.2.0", + "sax": "1.2.1", + "url": "0.10.3", + "uuid": "3.3.2", + "xml2js": "0.4.19" + } + }, + "base64-js": { + "version": "1.5.1", + "resolved": "https://registry.npmjs.org/base64-js/-/base64-js-1.5.1.tgz", + "integrity": "sha512-AKpaYlHn8t4SVbOHCy+b5+KKgvR4vrsD8vbvrbiQJps7fKDTkjkDry6ji0rUJjC0kzbNePLwzxq8iypo41qeWA==" + }, + "buffer": { + "version": "4.9.2", + "resolved": "https://registry.npmjs.org/buffer/-/buffer-4.9.2.tgz", + "integrity": "sha512-xq+q3SRMOxGivLhBNaUdC64hDTQwejJ+H0T/NB1XMtTVEwNTrfFF3gAxiyW0Bu/xWEGhjVKgUcMhCrUy2+uCWg==", + "requires": { + "base64-js": "^1.0.2", + "ieee754": "^1.1.4", + "isarray": "^1.0.0" + } + }, + "events": { + "version": "1.1.1", + "resolved": 
"https://registry.npmjs.org/events/-/events-1.1.1.tgz", + "integrity": "sha1-nr23Y1rQmccNzEwqH1AEKI6L2SQ=" + }, + "ieee754": { + "version": "1.1.13", + "resolved": "https://registry.npmjs.org/ieee754/-/ieee754-1.1.13.tgz", + "integrity": "sha512-4vf7I2LYV/HaWerSo3XmlMkp5eZ83i+/CDluXi/IGTs/O1sejBNhTtnxzmRZfvOUqj7lZjqHkeTvpgSFDlWZTg==" + }, + "isarray": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/isarray/-/isarray-1.0.0.tgz", + "integrity": "sha1-u5NdSFgsuhaMBoNJV6VKPgcSTxE=" + }, + "jmespath": { + "version": "0.15.0", + "resolved": "https://registry.npmjs.org/jmespath/-/jmespath-0.15.0.tgz", + "integrity": "sha1-o/Iiqarp+Wb10nx5ZRDigJF2Qhc=" + }, + "punycode": { + "version": "1.3.2", + "resolved": "https://registry.npmjs.org/punycode/-/punycode-1.3.2.tgz", + "integrity": "sha1-llOgNvt8HuQjQvIyXM7v6jkmxI0=" + }, + "querystring": { + "version": "0.2.0", + "resolved": "https://registry.npmjs.org/querystring/-/querystring-0.2.0.tgz", + "integrity": "sha1-sgmEkgO7Jd+CDadW50cAWHhSFiA=" + }, + "sax": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/sax/-/sax-1.2.1.tgz", + "integrity": "sha1-e45lYZCyKOgaZq6nSEgNgozS03o=" + }, + "url": { + "version": "0.10.3", + "resolved": "https://registry.npmjs.org/url/-/url-0.10.3.tgz", + "integrity": "sha1-Ah5NnHcF8hu/N9A861h2dAJ3TGQ=", + "requires": { + "punycode": "1.3.2", + "querystring": "0.2.0" + } + }, + "uuid": { + "version": "3.3.2", + "resolved": "https://registry.npmjs.org/uuid/-/uuid-3.3.2.tgz", + "integrity": "sha512-yXJmeNaw3DnnKAOKJE51sL/ZaYfWJRl1pK9dr19YFCu0ObS231AB1/LbqTKRAQ5kw8A90rA6fr4riOUpTZvQZA==" + }, + "xml2js": { + "version": "0.4.19", + "resolved": "https://registry.npmjs.org/xml2js/-/xml2js-0.4.19.tgz", + "integrity": "sha512-esZnJZJOiJR9wWKMyuvSE1y6Dq5LCuJanqhxslH2bxM6duahNZ+HMpCLhBQGZkbX6xRf8x1Y2eJlgt2q3qo49Q==", + "requires": { + "sax": ">=0.6.0", + "xmlbuilder": "~9.0.1" + } + }, + "xmlbuilder": { + "version": "9.0.7", + "resolved": "https://registry.npmjs.org/xmlbuilder/-/xmlbuilder-9.0.7.tgz", + "integrity": "sha1-Ey7mPS7FVlxVfiD0wi35rKaGsQ0=" + } + } +} diff --git a/terraform/lambda_functions/delete_campaign/package.json b/terraform/lambda_functions/delete_campaign/package.json new file mode 100644 index 0000000..bd0b67b --- /dev/null +++ b/terraform/lambda_functions/delete_campaign/package.json @@ -0,0 +1,15 @@ +{ + "name": "execute_campaign", + "version": "1.0.0", + "description": "NPK execute_campaign Lambda", + "main": "main.js", + "dependencies": { + "aws-sdk": "^2.599.0" + }, + "devDependencies": {}, + "scripts": { + "test": "echo \"Error: no test specified\" && exit 1" + }, + "author": "Brad Woodward (brad@bradwoodward.io)", + "license": "MIT" +} diff --git a/terraform/lambda_functions/execute_campaign/main.js b/terraform/lambda_functions/execute_campaign/main.js new file mode 100644 index 0000000..4951c00 --- /dev/null +++ b/terraform/lambda_functions/execute_campaign/main.js @@ -0,0 +1,323 @@ +const aws = require('aws-sdk'); +const ddb = new aws.DynamoDB({ region: "us-west-2" }); +const s3 = new aws.S3({ region: "us-west-2" }); + +let cb = ""; +let variables = {}; + +exports.main = async function(event, context, callback) { + + // Hand off the callback function for later. + cb = callback; + + // Get the available envvars into a usable format. + variables = JSON.parse(JSON.stringify(process.env)); + + try { + + console.log("Received event: " + JSON.stringify(event)); + + // Hand off the origin, too. Fix for weird case + origin = event?.headers?.origin ?? 
event?.headers?.Origin; + + var allowed_characters = /^[a-zA-Z0-9'"%\.\[\]\{\}\(\)\-\:\\\/\;\=\?\#\_+\s,!@#\$\^\*&]+$/; + if (!allowed_characters.test(JSON.stringify(event))) { + console.log("Request contains illegal characters"); + return respond(400, {}, "Request contains illegal characters", false); + } + + if (event?.requestContext?.identity?.cognitoAuthenticationType != "authenticated") { + console.log(`cognitoAuthenticationType ${event?.requestContext?.identity?.cognitoAuthenticationType} != "authenticated"`) + return respond(401, {}, "Authentication Required", false); + } + + var body = {}; + // Unencode the body if necessary + if (!!event?.body) { + body = (event.requestContext.isBase64Encoded) ? atob(event.body) : event.body; + + // Body will always be a JSON object. + try { + body = JSON.parse(body); + } catch (e) { + return respond(400, "Body must be JSON object", false); + } + } + + // Associate the user identity. + const [ UserPoolId,, Username ] = event?.requestContext?.identity?.cognitoAuthenticationProvider?.split('/')[2]?.split(':'); + + if (!UserPoolId || !Username) { + console.log(`UserPoolId or Username is missing from ${event?.requestContext?.identity?.cognitoAuthenticationProvider}`); + respond(401, "Authorization Required"); + } + + } catch (e) { + console.log("Failed to process request.", e); + return respond(500, {}, "Failed to process request.", false); + } + + try { + const user = await cognito.adminGetUser({ UserPoolId, Username }).promise(); + + // Restructure UserAttributes as an k:v + user.UserAttributes = user.UserAttributes.reduce((attrs, entry) => { + attrs[entry.Name] = entry.Value + }, {}); + + if (!user?.UserAttributes?.email) { + return respond(401, {}, "Unable to obtain user properties.", false); + } + + } catch (e) { + console.log("Unable to retrieve user context.", e); + return respond(500, {}, "Unable to retrieve user context.", false); + } + + console.log(event.pathParameters) + + const campaignId = event?.pathParameters?.campaign; + + // Get the campaign entry from DynamoDB, and manifest from S3. + // * In parallel, to save, like, some milliseconds. + + try { + const [campaign, manifestObject] = await Promise.all([ + ddb.query({ + ExpressionAttributeValues: { + ':id': {S: entity}, + ':keyid': {S: `campaigns:${campaignId}`} + }, + KeyConditionExpression: 'userid = :id and keyid = :keyid', + TableName: "Campaigns" + }).promise(), + + s3.getObject({ + Bucket: variables.userdata_bucket, + Key: `${entity}/campaigns/${campaign}/manifest.json` + }).promise() + ]); + + const manifest = JSON.parse(manifestObject.Body.toString('ascii')); + } catch (e) { + console.log("Failed to retrieve campaign details.", e); + return respond(500, {}, "Failed to retrieve campaign details."); + } + + if (campaign.Items?.[0]?.status?.S != "AVAILABLE") { + return respond(404, {}, "Campaign doesn't exist or is not in 'AVAILABLE' status."); + } + + console.log(campaign, manifest); + + // Test whether the provided presigned URL is expired. + + try { + var expires = /Expires=([\d]+)&/.exec(manifest.hashFileUrl)[1]; + } catch (e) { + return respond(400, "Invalid hashFileUrl; missing expiration"); + } + + var duration = expires - (new Date().getTime() / 1000); + if (duration < 900) { + return respond(400, {} `hashFileUrl must be valid for at least 900 seconds, got ${Math.floor(duration)}`); + } + + // Campaign is valid. Get AZ pricing and Image AMI + // * Again in parallel, to save, like, some more milliseconds. 
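+	// Two lookups run side by side: recent spot price history for the requested instance type,
+	// and the Amazon Linux 2 GPU ("graphics") AMI matching the owner and name filters below.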
+ + try { + const ec2 = new aws.EC2({region: manifest.region}); + const [pricing, image] = await Promise.all([ + ec2.describeSpotPriceHistory({ + EndTime: Math.round(Date.now() / 1000), + ProductDescriptions: [ "Linux/UNIX (Amazon VPC)" ], + InstanceTypes: [ manifest.instanceType ], + StartTime: Math.round(Date.now() / 1000) + }), + + ec2.describeImages({ + Filters: [{ + Name: "virtualization-type", + Values: ["hvm"] + },{ + Name: "name", + Values: ["amzn2-ami-graphics-hvm-2*"] + },{ + Name: "root-device-type", + Values: ["ebs"] + },{ + Name: "owner-id", + Values: ["679593333241"] + }] + }) + ]); + } catch (e) { + console.log("Failed to retrieve price and image details.", e); + return respond(500, {}, "Failed to retrieve price and image details."); + } + + try { + + // Calculate the necessary volume size + + const volumeSize = Math.ceil(manifest.wordlistSize / 1073741824) + 1; + console.log(`Wordlist is ${manifest.wordlistSize / 1073741824}GiB. Allocating ${volumeSize}GiB`); + + // Build a launchSpecification for each AZ in the target region. + + const instance_userdata = new Buffer(fs.readFileSync(__dirname + '/userdata.sh', 'utf-8') + .replace("{{APIGATEWAY}}", process.env.apigateway)) + .toString('base64'); + + const launchSpecificationTemplate = { + IamInstanceProfile: { + Arn: variables.instanceProfile + }, + ImageId: image.ImageId, + KeyName: "npk-key", + InstanceType: manifest.instanceType, + BlockDeviceMappings: [{ + DeviceName: '/dev/xvdb', + Ebs: { + DeleteOnTermination: true, + Encrypted: false, + VolumeSize: volumeSize, + VolumeType: "gp2" + } + }], + NetworkInterfaces: [{ + AssociatePublicIpAddress: true, + DeviceIndex: 0, + // SubnetId: Gets populated below. + }], + Placement: { + // AvailabilityZone: Gets populated below. + }, + TagSpecifications: [{ + ResourceType: "instance", + Tags: [{ + Key: "MaxCost", + Value: ((manifest.priceTarget < variables.campaign_max_price) ? manifest.priceTarget : variables.campaign_max_price).toString() + }, { + Key: "ManifestPath", + Value: `${entity}/campaigns/${campaignId}` + }] + }], + UserData: instance_userdata + }; + + // Create a copy of the launchSpecificationTemplate for each AvailabilityZone in the campaign's region. + + const launchSpecifications = Object.keys(variables.availabilityZones[manifest.region]).reduce((specs, entry) => { + const az = JSON.parse(JSON.stringify(launchSpecificationTemplate)); // Have to deep-copy to avoid referential overrides. + + az.Placement.AvailabilityZone = entry; + az.NetworkInterfaces[0].SubnetId = variables.availabilityZones[manifest.region][entry]; + + return specs.concat(az); + }, []); + + // Get the average spot price across all AZs in the region. + const spotPrice = pricing.reduce((average, entry) => average + (entry / pricing.length), 0); + const maxDuration = (Number(manifest.instanceDuration) < variables.campaign_max_price / spotPrice) ? 
Number(manifest.instanceDuration) : variables.campaign_max_price / spotPrice; + + console.log(spotPrice, maxDuration, variables.campaign_max_price); + + const spotFleetParams = { + SpotFleetRequestConfig: { + AllocationStrategy: "lowestPrice", + IamFleetRole: variables.iamFleetRole, + InstanceInterruptionBehavior: "terminate", + LaunchSpecifications: launchSpecifications, + SpotPrice: (manifest.priceTarget / (manifest.instanceCount * manifest.instanceDuration) * 2).toString(), + TargetCapacity: manifest.instanceCount, + ReplaceUnhealthyInstances: false, + TerminateInstancesWithExpiration: true, + Type: "request", + ValidFrom: (new Date().getTime() / 1000), + ValidUntil: (new Date().getTime() / 1000) + (maxDuration * 3600) + } + }; + + console.log(JSON.stringify(spotFleetParams)); + } catch (e) { + console.log("Failed to generate launch specifications.", e); + return respond(500, {}, "Failed to generate launch specifications."); + } + + try { + const spotFleetRequest = await ec2.requestSpotFleet(spotFleetParams).promise(); + } catch (e) { + console.log("Failed to request spot fleet.", e); + return respond(500, {}, "Failed to request spot fleet."); + } + + // Campaign created successfully. + + console.log(`Successfully requested spot fleet ${spotFleetRequest.SpotFleetRequestId}`); + + try { + const updateCampaign = await ddb.updateItem({ + Key: { + userid: {S: entity}, + keyid: {S: `campaigns:${campaign}`} + }, + TableName: "Campaigns", + AttributeUpdates: { + active: { Action: "PUT", Value: { BOOL: true }}, + status: { Action: "PUT", Value: { S: "STARTING" }}, + spotFleetRequestId: { Action: "PUT", Value: { S: data.SpotFleetRequestId }}, + startTime: { Action: "PUT", Value: { N: Math.floor(new Date().getTime() / 1000) }}, + eventType: { Action: "PUT", Value: { S: "CampaignStarted" }} + } + }).promise(); + } catch (e) { + console.log("Spot fleet submitted, but failed to mark Campaign as 'STARTING'. This is a catastrophic error.", e); + return respond(500, {}, "Spot fleet submitted, but failed to mark Campaign as 'STARTING'. This is a catastrophic error.", false); + } + + return respond(200, {}, { msg: "Campaign started successfully", campaignId: campaignId, spotFleetRequestId: spotFleetRequest.SpotFleetRequestId }, true); +} + +function respond(statusCode, headers, body, success) { + + // Include terraform dns names as allowed origins, as well as localhost. + const allowed_origins = JSON.parse(variables.www_dns_names); + allowed_origins.push("https://localhost"); + + headers['Content-Type'] = 'text/plain'; + + if (allowed_origins.indexOf(origin) !== false) { + // Echo the origin back. 
I guess this is the best way to support multiple origins + headers['Access-Control-Allow-Origin'] = origin; + } else { + console.log("Invalid origin received.", origin); + } + + switch (typeof body) { + case "string": + body = { msg: body, success: success }; + break; + + case "object": + body.success = success; + break; + } + + const response = { + statusCode: statusCode, + headers: headers, + body: JSON.stringify(body), + } + + console.log(JSON.stringify(response)); + + cb(null, response); + + if (success == true) { + return Promise.resolve(body.msg); + } else { + return Promise.reject(body.msg); + } +} \ No newline at end of file diff --git a/terraform/lambda_functions/execute_campaign/package-lock.json b/terraform/lambda_functions/execute_campaign/package-lock.json new file mode 100644 index 0000000..7c8e64a --- /dev/null +++ b/terraform/lambda_functions/execute_campaign/package-lock.json @@ -0,0 +1,102 @@ +{ + "name": "execute_campaign", + "version": "1.0.0", + "lockfileVersion": 1, + "requires": true, + "dependencies": { + "aws-sdk": { + "version": "2.982.0", + "resolved": "https://registry.npmjs.org/aws-sdk/-/aws-sdk-2.982.0.tgz", + "integrity": "sha512-5w+m8Ia35NqB4TOZHEKts5zSV+FTdc7hTYbN4N4lZ4YU3cLTMt496ojh5UI3Deo8IIlqgTf3UVuq6Y6cPpVxkg==", + "requires": { + "buffer": "4.9.2", + "events": "1.1.1", + "ieee754": "1.1.13", + "jmespath": "0.15.0", + "querystring": "0.2.0", + "sax": "1.2.1", + "url": "0.10.3", + "uuid": "3.3.2", + "xml2js": "0.4.19" + } + }, + "base64-js": { + "version": "1.5.1", + "resolved": "https://registry.npmjs.org/base64-js/-/base64-js-1.5.1.tgz", + "integrity": "sha512-AKpaYlHn8t4SVbOHCy+b5+KKgvR4vrsD8vbvrbiQJps7fKDTkjkDry6ji0rUJjC0kzbNePLwzxq8iypo41qeWA==" + }, + "buffer": { + "version": "4.9.2", + "resolved": "https://registry.npmjs.org/buffer/-/buffer-4.9.2.tgz", + "integrity": "sha512-xq+q3SRMOxGivLhBNaUdC64hDTQwejJ+H0T/NB1XMtTVEwNTrfFF3gAxiyW0Bu/xWEGhjVKgUcMhCrUy2+uCWg==", + "requires": { + "base64-js": "^1.0.2", + "ieee754": "^1.1.4", + "isarray": "^1.0.0" + } + }, + "events": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/events/-/events-1.1.1.tgz", + "integrity": "sha1-nr23Y1rQmccNzEwqH1AEKI6L2SQ=" + }, + "ieee754": { + "version": "1.1.13", + "resolved": "https://registry.npmjs.org/ieee754/-/ieee754-1.1.13.tgz", + "integrity": "sha512-4vf7I2LYV/HaWerSo3XmlMkp5eZ83i+/CDluXi/IGTs/O1sejBNhTtnxzmRZfvOUqj7lZjqHkeTvpgSFDlWZTg==" + }, + "isarray": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/isarray/-/isarray-1.0.0.tgz", + "integrity": "sha1-u5NdSFgsuhaMBoNJV6VKPgcSTxE=" + }, + "jmespath": { + "version": "0.15.0", + "resolved": "https://registry.npmjs.org/jmespath/-/jmespath-0.15.0.tgz", + "integrity": "sha1-o/Iiqarp+Wb10nx5ZRDigJF2Qhc=" + }, + "punycode": { + "version": "1.3.2", + "resolved": "https://registry.npmjs.org/punycode/-/punycode-1.3.2.tgz", + "integrity": "sha1-llOgNvt8HuQjQvIyXM7v6jkmxI0=" + }, + "querystring": { + "version": "0.2.0", + "resolved": "https://registry.npmjs.org/querystring/-/querystring-0.2.0.tgz", + "integrity": "sha1-sgmEkgO7Jd+CDadW50cAWHhSFiA=" + }, + "sax": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/sax/-/sax-1.2.1.tgz", + "integrity": "sha1-e45lYZCyKOgaZq6nSEgNgozS03o=" + }, + "url": { + "version": "0.10.3", + "resolved": "https://registry.npmjs.org/url/-/url-0.10.3.tgz", + "integrity": "sha1-Ah5NnHcF8hu/N9A861h2dAJ3TGQ=", + "requires": { + "punycode": "1.3.2", + "querystring": "0.2.0" + } + }, + "uuid": { + "version": "3.3.2", + "resolved": 
"https://registry.npmjs.org/uuid/-/uuid-3.3.2.tgz", + "integrity": "sha512-yXJmeNaw3DnnKAOKJE51sL/ZaYfWJRl1pK9dr19YFCu0ObS231AB1/LbqTKRAQ5kw8A90rA6fr4riOUpTZvQZA==" + }, + "xml2js": { + "version": "0.4.19", + "resolved": "https://registry.npmjs.org/xml2js/-/xml2js-0.4.19.tgz", + "integrity": "sha512-esZnJZJOiJR9wWKMyuvSE1y6Dq5LCuJanqhxslH2bxM6duahNZ+HMpCLhBQGZkbX6xRf8x1Y2eJlgt2q3qo49Q==", + "requires": { + "sax": ">=0.6.0", + "xmlbuilder": "~9.0.1" + } + }, + "xmlbuilder": { + "version": "9.0.7", + "resolved": "https://registry.npmjs.org/xmlbuilder/-/xmlbuilder-9.0.7.tgz", + "integrity": "sha1-Ey7mPS7FVlxVfiD0wi35rKaGsQ0=" + } + } +} diff --git a/terraform/lambda_functions/execute_campaign/package.json b/terraform/lambda_functions/execute_campaign/package.json new file mode 100644 index 0000000..bd0b67b --- /dev/null +++ b/terraform/lambda_functions/execute_campaign/package.json @@ -0,0 +1,15 @@ +{ + "name": "execute_campaign", + "version": "1.0.0", + "description": "NPK execute_campaign Lambda", + "main": "main.js", + "dependencies": { + "aws-sdk": "^2.599.0" + }, + "devDependencies": {}, + "scripts": { + "test": "echo \"Error: no test specified\" && exit 1" + }, + "author": "Brad Woodward (brad@bradwoodward.io)", + "license": "MIT" +} diff --git a/terraform/lambda_functions/get_campaign/main.js b/terraform/lambda_functions/get_campaign/main.js new file mode 100644 index 0000000..4951c00 --- /dev/null +++ b/terraform/lambda_functions/get_campaign/main.js @@ -0,0 +1,323 @@ +const aws = require('aws-sdk'); +const ddb = new aws.DynamoDB({ region: "us-west-2" }); +const s3 = new aws.S3({ region: "us-west-2" }); + +let cb = ""; +let variables = {}; + +exports.main = async function(event, context, callback) { + + // Hand off the callback function for later. + cb = callback; + + // Get the available envvars into a usable format. + variables = JSON.parse(JSON.stringify(process.env)); + + try { + + console.log("Received event: " + JSON.stringify(event)); + + // Hand off the origin, too. Fix for weird case + origin = event?.headers?.origin ?? event?.headers?.Origin; + + var allowed_characters = /^[a-zA-Z0-9'"%\.\[\]\{\}\(\)\-\:\\\/\;\=\?\#\_+\s,!@#\$\^\*&]+$/; + if (!allowed_characters.test(JSON.stringify(event))) { + console.log("Request contains illegal characters"); + return respond(400, {}, "Request contains illegal characters", false); + } + + if (event?.requestContext?.identity?.cognitoAuthenticationType != "authenticated") { + console.log(`cognitoAuthenticationType ${event?.requestContext?.identity?.cognitoAuthenticationType} != "authenticated"`) + return respond(401, {}, "Authentication Required", false); + } + + var body = {}; + // Unencode the body if necessary + if (!!event?.body) { + body = (event.requestContext.isBase64Encoded) ? atob(event.body) : event.body; + + // Body will always be a JSON object. + try { + body = JSON.parse(body); + } catch (e) { + return respond(400, "Body must be JSON object", false); + } + } + + // Associate the user identity. 
+ const [ UserPoolId,, Username ] = event?.requestContext?.identity?.cognitoAuthenticationProvider?.split('/')[2]?.split(':'); + + if (!UserPoolId || !Username) { + console.log(`UserPoolId or Username is missing from ${event?.requestContext?.identity?.cognitoAuthenticationProvider}`); + respond(401, "Authorization Required"); + } + + } catch (e) { + console.log("Failed to process request.", e); + return respond(500, {}, "Failed to process request.", false); + } + + try { + const user = await cognito.adminGetUser({ UserPoolId, Username }).promise(); + + // Restructure UserAttributes as an k:v + user.UserAttributes = user.UserAttributes.reduce((attrs, entry) => { + attrs[entry.Name] = entry.Value + }, {}); + + if (!user?.UserAttributes?.email) { + return respond(401, {}, "Unable to obtain user properties.", false); + } + + } catch (e) { + console.log("Unable to retrieve user context.", e); + return respond(500, {}, "Unable to retrieve user context.", false); + } + + console.log(event.pathParameters) + + const campaignId = event?.pathParameters?.campaign; + + // Get the campaign entry from DynamoDB, and manifest from S3. + // * In parallel, to save, like, some milliseconds. + + try { + const [campaign, manifestObject] = await Promise.all([ + ddb.query({ + ExpressionAttributeValues: { + ':id': {S: entity}, + ':keyid': {S: `campaigns:${campaignId}`} + }, + KeyConditionExpression: 'userid = :id and keyid = :keyid', + TableName: "Campaigns" + }).promise(), + + s3.getObject({ + Bucket: variables.userdata_bucket, + Key: `${entity}/campaigns/${campaign}/manifest.json` + }).promise() + ]); + + const manifest = JSON.parse(manifestObject.Body.toString('ascii')); + } catch (e) { + console.log("Failed to retrieve campaign details.", e); + return respond(500, {}, "Failed to retrieve campaign details."); + } + + if (campaign.Items?.[0]?.status?.S != "AVAILABLE") { + return respond(404, {}, "Campaign doesn't exist or is not in 'AVAILABLE' status."); + } + + console.log(campaign, manifest); + + // Test whether the provided presigned URL is expired. + + try { + var expires = /Expires=([\d]+)&/.exec(manifest.hashFileUrl)[1]; + } catch (e) { + return respond(400, "Invalid hashFileUrl; missing expiration"); + } + + var duration = expires - (new Date().getTime() / 1000); + if (duration < 900) { + return respond(400, {} `hashFileUrl must be valid for at least 900 seconds, got ${Math.floor(duration)}`); + } + + // Campaign is valid. Get AZ pricing and Image AMI + // * Again in parallel, to save, like, some more milliseconds. + + try { + const ec2 = new aws.EC2({region: manifest.region}); + const [pricing, image] = await Promise.all([ + ec2.describeSpotPriceHistory({ + EndTime: Math.round(Date.now() / 1000), + ProductDescriptions: [ "Linux/UNIX (Amazon VPC)" ], + InstanceTypes: [ manifest.instanceType ], + StartTime: Math.round(Date.now() / 1000) + }), + + ec2.describeImages({ + Filters: [{ + Name: "virtualization-type", + Values: ["hvm"] + },{ + Name: "name", + Values: ["amzn2-ami-graphics-hvm-2*"] + },{ + Name: "root-device-type", + Values: ["ebs"] + },{ + Name: "owner-id", + Values: ["679593333241"] + }] + }) + ]); + } catch (e) { + console.log("Failed to retrieve price and image details.", e); + return respond(500, {}, "Failed to retrieve price and image details."); + } + + try { + + // Calculate the necessary volume size + + const volumeSize = Math.ceil(manifest.wordlistSize / 1073741824) + 1; + console.log(`Wordlist is ${manifest.wordlistSize / 1073741824}GiB. 
Allocating ${volumeSize}GiB`); + + // Build a launchSpecification for each AZ in the target region. + + const instance_userdata = new Buffer(fs.readFileSync(__dirname + '/userdata.sh', 'utf-8') + .replace("{{APIGATEWAY}}", process.env.apigateway)) + .toString('base64'); + + const launchSpecificationTemplate = { + IamInstanceProfile: { + Arn: variables.instanceProfile + }, + ImageId: image.ImageId, + KeyName: "npk-key", + InstanceType: manifest.instanceType, + BlockDeviceMappings: [{ + DeviceName: '/dev/xvdb', + Ebs: { + DeleteOnTermination: true, + Encrypted: false, + VolumeSize: volumeSize, + VolumeType: "gp2" + } + }], + NetworkInterfaces: [{ + AssociatePublicIpAddress: true, + DeviceIndex: 0, + // SubnetId: Gets populated below. + }], + Placement: { + // AvailabilityZone: Gets populated below. + }, + TagSpecifications: [{ + ResourceType: "instance", + Tags: [{ + Key: "MaxCost", + Value: ((manifest.priceTarget < variables.campaign_max_price) ? manifest.priceTarget : variables.campaign_max_price).toString() + }, { + Key: "ManifestPath", + Value: `${entity}/campaigns/${campaignId}` + }] + }], + UserData: instance_userdata + }; + + // Create a copy of the launchSpecificationTemplate for each AvailabilityZone in the campaign's region. + + const launchSpecifications = Object.keys(variables.availabilityZones[manifest.region]).reduce((specs, entry) => { + const az = JSON.parse(JSON.stringify(launchSpecificationTemplate)); // Have to deep-copy to avoid referential overrides. + + az.Placement.AvailabilityZone = entry; + az.NetworkInterfaces[0].SubnetId = variables.availabilityZones[manifest.region][entry]; + + return specs.concat(az); + }, []); + + // Get the average spot price across all AZs in the region. + const spotPrice = pricing.reduce((average, entry) => average + (entry / pricing.length), 0); + const maxDuration = (Number(manifest.instanceDuration) < variables.campaign_max_price / spotPrice) ? Number(manifest.instanceDuration) : variables.campaign_max_price / spotPrice; + + console.log(spotPrice, maxDuration, variables.campaign_max_price); + + const spotFleetParams = { + SpotFleetRequestConfig: { + AllocationStrategy: "lowestPrice", + IamFleetRole: variables.iamFleetRole, + InstanceInterruptionBehavior: "terminate", + LaunchSpecifications: launchSpecifications, + SpotPrice: (manifest.priceTarget / (manifest.instanceCount * manifest.instanceDuration) * 2).toString(), + TargetCapacity: manifest.instanceCount, + ReplaceUnhealthyInstances: false, + TerminateInstancesWithExpiration: true, + Type: "request", + ValidFrom: (new Date().getTime() / 1000), + ValidUntil: (new Date().getTime() / 1000) + (maxDuration * 3600) + } + }; + + console.log(JSON.stringify(spotFleetParams)); + } catch (e) { + console.log("Failed to generate launch specifications.", e); + return respond(500, {}, "Failed to generate launch specifications."); + } + + try { + const spotFleetRequest = await ec2.requestSpotFleet(spotFleetParams).promise(); + } catch (e) { + console.log("Failed to request spot fleet.", e); + return respond(500, {}, "Failed to request spot fleet."); + } + + // Campaign created successfully. 
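+	// Record the resulting SpotFleetRequestId against the campaign and mark it 'STARTING'.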
+ + console.log(`Successfully requested spot fleet ${spotFleetRequest.SpotFleetRequestId}`); + + try { + const updateCampaign = await ddb.updateItem({ + Key: { + userid: {S: entity}, + keyid: {S: `campaigns:${campaign}`} + }, + TableName: "Campaigns", + AttributeUpdates: { + active: { Action: "PUT", Value: { BOOL: true }}, + status: { Action: "PUT", Value: { S: "STARTING" }}, + spotFleetRequestId: { Action: "PUT", Value: { S: data.SpotFleetRequestId }}, + startTime: { Action: "PUT", Value: { N: Math.floor(new Date().getTime() / 1000) }}, + eventType: { Action: "PUT", Value: { S: "CampaignStarted" }} + } + }).promise(); + } catch (e) { + console.log("Spot fleet submitted, but failed to mark Campaign as 'STARTING'. This is a catastrophic error.", e); + return respond(500, {}, "Spot fleet submitted, but failed to mark Campaign as 'STARTING'. This is a catastrophic error.", false); + } + + return respond(200, {}, { msg: "Campaign started successfully", campaignId: campaignId, spotFleetRequestId: spotFleetRequest.SpotFleetRequestId }, true); +} + +function respond(statusCode, headers, body, success) { + + // Include terraform dns names as allowed origins, as well as localhost. + const allowed_origins = JSON.parse(variables.www_dns_names); + allowed_origins.push("https://localhost"); + + headers['Content-Type'] = 'text/plain'; + + if (allowed_origins.indexOf(origin) !== false) { + // Echo the origin back. I guess this is the best way to support multiple origins + headers['Access-Control-Allow-Origin'] = origin; + } else { + console.log("Invalid origin received.", origin); + } + + switch (typeof body) { + case "string": + body = { msg: body, success: success }; + break; + + case "object": + body.success = success; + break; + } + + const response = { + statusCode: statusCode, + headers: headers, + body: JSON.stringify(body), + } + + console.log(JSON.stringify(response)); + + cb(null, response); + + if (success == true) { + return Promise.resolve(body.msg); + } else { + return Promise.reject(body.msg); + } +} \ No newline at end of file diff --git a/terraform/lambda_functions/get_campaign/package-lock.json b/terraform/lambda_functions/get_campaign/package-lock.json new file mode 100644 index 0000000..7c8e64a --- /dev/null +++ b/terraform/lambda_functions/get_campaign/package-lock.json @@ -0,0 +1,102 @@ +{ + "name": "execute_campaign", + "version": "1.0.0", + "lockfileVersion": 1, + "requires": true, + "dependencies": { + "aws-sdk": { + "version": "2.982.0", + "resolved": "https://registry.npmjs.org/aws-sdk/-/aws-sdk-2.982.0.tgz", + "integrity": "sha512-5w+m8Ia35NqB4TOZHEKts5zSV+FTdc7hTYbN4N4lZ4YU3cLTMt496ojh5UI3Deo8IIlqgTf3UVuq6Y6cPpVxkg==", + "requires": { + "buffer": "4.9.2", + "events": "1.1.1", + "ieee754": "1.1.13", + "jmespath": "0.15.0", + "querystring": "0.2.0", + "sax": "1.2.1", + "url": "0.10.3", + "uuid": "3.3.2", + "xml2js": "0.4.19" + } + }, + "base64-js": { + "version": "1.5.1", + "resolved": "https://registry.npmjs.org/base64-js/-/base64-js-1.5.1.tgz", + "integrity": "sha512-AKpaYlHn8t4SVbOHCy+b5+KKgvR4vrsD8vbvrbiQJps7fKDTkjkDry6ji0rUJjC0kzbNePLwzxq8iypo41qeWA==" + }, + "buffer": { + "version": "4.9.2", + "resolved": "https://registry.npmjs.org/buffer/-/buffer-4.9.2.tgz", + "integrity": "sha512-xq+q3SRMOxGivLhBNaUdC64hDTQwejJ+H0T/NB1XMtTVEwNTrfFF3gAxiyW0Bu/xWEGhjVKgUcMhCrUy2+uCWg==", + "requires": { + "base64-js": "^1.0.2", + "ieee754": "^1.1.4", + "isarray": "^1.0.0" + } + }, + "events": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/events/-/events-1.1.1.tgz", + 
"integrity": "sha1-nr23Y1rQmccNzEwqH1AEKI6L2SQ=" + }, + "ieee754": { + "version": "1.1.13", + "resolved": "https://registry.npmjs.org/ieee754/-/ieee754-1.1.13.tgz", + "integrity": "sha512-4vf7I2LYV/HaWerSo3XmlMkp5eZ83i+/CDluXi/IGTs/O1sejBNhTtnxzmRZfvOUqj7lZjqHkeTvpgSFDlWZTg==" + }, + "isarray": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/isarray/-/isarray-1.0.0.tgz", + "integrity": "sha1-u5NdSFgsuhaMBoNJV6VKPgcSTxE=" + }, + "jmespath": { + "version": "0.15.0", + "resolved": "https://registry.npmjs.org/jmespath/-/jmespath-0.15.0.tgz", + "integrity": "sha1-o/Iiqarp+Wb10nx5ZRDigJF2Qhc=" + }, + "punycode": { + "version": "1.3.2", + "resolved": "https://registry.npmjs.org/punycode/-/punycode-1.3.2.tgz", + "integrity": "sha1-llOgNvt8HuQjQvIyXM7v6jkmxI0=" + }, + "querystring": { + "version": "0.2.0", + "resolved": "https://registry.npmjs.org/querystring/-/querystring-0.2.0.tgz", + "integrity": "sha1-sgmEkgO7Jd+CDadW50cAWHhSFiA=" + }, + "sax": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/sax/-/sax-1.2.1.tgz", + "integrity": "sha1-e45lYZCyKOgaZq6nSEgNgozS03o=" + }, + "url": { + "version": "0.10.3", + "resolved": "https://registry.npmjs.org/url/-/url-0.10.3.tgz", + "integrity": "sha1-Ah5NnHcF8hu/N9A861h2dAJ3TGQ=", + "requires": { + "punycode": "1.3.2", + "querystring": "0.2.0" + } + }, + "uuid": { + "version": "3.3.2", + "resolved": "https://registry.npmjs.org/uuid/-/uuid-3.3.2.tgz", + "integrity": "sha512-yXJmeNaw3DnnKAOKJE51sL/ZaYfWJRl1pK9dr19YFCu0ObS231AB1/LbqTKRAQ5kw8A90rA6fr4riOUpTZvQZA==" + }, + "xml2js": { + "version": "0.4.19", + "resolved": "https://registry.npmjs.org/xml2js/-/xml2js-0.4.19.tgz", + "integrity": "sha512-esZnJZJOiJR9wWKMyuvSE1y6Dq5LCuJanqhxslH2bxM6duahNZ+HMpCLhBQGZkbX6xRf8x1Y2eJlgt2q3qo49Q==", + "requires": { + "sax": ">=0.6.0", + "xmlbuilder": "~9.0.1" + } + }, + "xmlbuilder": { + "version": "9.0.7", + "resolved": "https://registry.npmjs.org/xmlbuilder/-/xmlbuilder-9.0.7.tgz", + "integrity": "sha1-Ey7mPS7FVlxVfiD0wi35rKaGsQ0=" + } + } +} diff --git a/terraform/lambda_functions/get_campaign/package.json b/terraform/lambda_functions/get_campaign/package.json new file mode 100644 index 0000000..bd0b67b --- /dev/null +++ b/terraform/lambda_functions/get_campaign/package.json @@ -0,0 +1,15 @@ +{ + "name": "execute_campaign", + "version": "1.0.0", + "description": "NPK execute_campaign Lambda", + "main": "main.js", + "dependencies": { + "aws-sdk": "^2.599.0" + }, + "devDependencies": {}, + "scripts": { + "test": "echo \"Error: no test specified\" && exit 1" + }, + "author": "Brad Woodward (brad@bradwoodward.io)", + "license": "MIT" +} diff --git a/terraform/lambda_functions/list_campaigns/main.js b/terraform/lambda_functions/list_campaigns/main.js new file mode 100644 index 0000000..4951c00 --- /dev/null +++ b/terraform/lambda_functions/list_campaigns/main.js @@ -0,0 +1,323 @@ +const aws = require('aws-sdk'); +const ddb = new aws.DynamoDB({ region: "us-west-2" }); +const s3 = new aws.S3({ region: "us-west-2" }); + +let cb = ""; +let variables = {}; + +exports.main = async function(event, context, callback) { + + // Hand off the callback function for later. + cb = callback; + + // Get the available envvars into a usable format. + variables = JSON.parse(JSON.stringify(process.env)); + + try { + + console.log("Received event: " + JSON.stringify(event)); + + // Hand off the origin, too. Fix for weird case + origin = event?.headers?.origin ?? 
event?.headers?.Origin; + + var allowed_characters = /^[a-zA-Z0-9'"%\.\[\]\{\}\(\)\-\:\\\/\;\=\?\#\_+\s,!@#\$\^\*&]+$/; + if (!allowed_characters.test(JSON.stringify(event))) { + console.log("Request contains illegal characters"); + return respond(400, {}, "Request contains illegal characters", false); + } + + if (event?.requestContext?.identity?.cognitoAuthenticationType != "authenticated") { + console.log(`cognitoAuthenticationType ${event?.requestContext?.identity?.cognitoAuthenticationType} != "authenticated"`) + return respond(401, {}, "Authentication Required", false); + } + + var body = {}; + // Unencode the body if necessary + if (!!event?.body) { + body = (event.requestContext.isBase64Encoded) ? atob(event.body) : event.body; + + // Body will always be a JSON object. + try { + body = JSON.parse(body); + } catch (e) { + return respond(400, "Body must be JSON object", false); + } + } + + // Associate the user identity. + const [ UserPoolId,, Username ] = event?.requestContext?.identity?.cognitoAuthenticationProvider?.split('/')[2]?.split(':'); + + if (!UserPoolId || !Username) { + console.log(`UserPoolId or Username is missing from ${event?.requestContext?.identity?.cognitoAuthenticationProvider}`); + respond(401, "Authorization Required"); + } + + } catch (e) { + console.log("Failed to process request.", e); + return respond(500, {}, "Failed to process request.", false); + } + + try { + const user = await cognito.adminGetUser({ UserPoolId, Username }).promise(); + + // Restructure UserAttributes as an k:v + user.UserAttributes = user.UserAttributes.reduce((attrs, entry) => { + attrs[entry.Name] = entry.Value + }, {}); + + if (!user?.UserAttributes?.email) { + return respond(401, {}, "Unable to obtain user properties.", false); + } + + } catch (e) { + console.log("Unable to retrieve user context.", e); + return respond(500, {}, "Unable to retrieve user context.", false); + } + + console.log(event.pathParameters) + + const campaignId = event?.pathParameters?.campaign; + + // Get the campaign entry from DynamoDB, and manifest from S3. + // * In parallel, to save, like, some milliseconds. + + try { + const [campaign, manifestObject] = await Promise.all([ + ddb.query({ + ExpressionAttributeValues: { + ':id': {S: entity}, + ':keyid': {S: `campaigns:${campaignId}`} + }, + KeyConditionExpression: 'userid = :id and keyid = :keyid', + TableName: "Campaigns" + }).promise(), + + s3.getObject({ + Bucket: variables.userdata_bucket, + Key: `${entity}/campaigns/${campaign}/manifest.json` + }).promise() + ]); + + const manifest = JSON.parse(manifestObject.Body.toString('ascii')); + } catch (e) { + console.log("Failed to retrieve campaign details.", e); + return respond(500, {}, "Failed to retrieve campaign details."); + } + + if (campaign.Items?.[0]?.status?.S != "AVAILABLE") { + return respond(404, {}, "Campaign doesn't exist or is not in 'AVAILABLE' status."); + } + + console.log(campaign, manifest); + + // Test whether the provided presigned URL is expired. + + try { + var expires = /Expires=([\d]+)&/.exec(manifest.hashFileUrl)[1]; + } catch (e) { + return respond(400, "Invalid hashFileUrl; missing expiration"); + } + + var duration = expires - (new Date().getTime() / 1000); + if (duration < 900) { + return respond(400, {} `hashFileUrl must be valid for at least 900 seconds, got ${Math.floor(duration)}`); + } + + // Campaign is valid. Get AZ pricing and Image AMI + // * Again in parallel, to save, like, some more milliseconds. 
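+	// describeSpotPriceHistory and describeImages are issued together; their results feed the
+	// launch specifications and the max-duration calculation further down.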
+ + try { + const ec2 = new aws.EC2({region: manifest.region}); + const [pricing, image] = await Promise.all([ + ec2.describeSpotPriceHistory({ + EndTime: Math.round(Date.now() / 1000), + ProductDescriptions: [ "Linux/UNIX (Amazon VPC)" ], + InstanceTypes: [ manifest.instanceType ], + StartTime: Math.round(Date.now() / 1000) + }), + + ec2.describeImages({ + Filters: [{ + Name: "virtualization-type", + Values: ["hvm"] + },{ + Name: "name", + Values: ["amzn2-ami-graphics-hvm-2*"] + },{ + Name: "root-device-type", + Values: ["ebs"] + },{ + Name: "owner-id", + Values: ["679593333241"] + }] + }) + ]); + } catch (e) { + console.log("Failed to retrieve price and image details.", e); + return respond(500, {}, "Failed to retrieve price and image details."); + } + + try { + + // Calculate the necessary volume size + + const volumeSize = Math.ceil(manifest.wordlistSize / 1073741824) + 1; + console.log(`Wordlist is ${manifest.wordlistSize / 1073741824}GiB. Allocating ${volumeSize}GiB`); + + // Build a launchSpecification for each AZ in the target region. + + const instance_userdata = new Buffer(fs.readFileSync(__dirname + '/userdata.sh', 'utf-8') + .replace("{{APIGATEWAY}}", process.env.apigateway)) + .toString('base64'); + + const launchSpecificationTemplate = { + IamInstanceProfile: { + Arn: variables.instanceProfile + }, + ImageId: image.ImageId, + KeyName: "npk-key", + InstanceType: manifest.instanceType, + BlockDeviceMappings: [{ + DeviceName: '/dev/xvdb', + Ebs: { + DeleteOnTermination: true, + Encrypted: false, + VolumeSize: volumeSize, + VolumeType: "gp2" + } + }], + NetworkInterfaces: [{ + AssociatePublicIpAddress: true, + DeviceIndex: 0, + // SubnetId: Gets populated below. + }], + Placement: { + // AvailabilityZone: Gets populated below. + }, + TagSpecifications: [{ + ResourceType: "instance", + Tags: [{ + Key: "MaxCost", + Value: ((manifest.priceTarget < variables.campaign_max_price) ? manifest.priceTarget : variables.campaign_max_price).toString() + }, { + Key: "ManifestPath", + Value: `${entity}/campaigns/${campaignId}` + }] + }], + UserData: instance_userdata + }; + + // Create a copy of the launchSpecificationTemplate for each AvailabilityZone in the campaign's region. + + const launchSpecifications = Object.keys(variables.availabilityZones[manifest.region]).reduce((specs, entry) => { + const az = JSON.parse(JSON.stringify(launchSpecificationTemplate)); // Have to deep-copy to avoid referential overrides. + + az.Placement.AvailabilityZone = entry; + az.NetworkInterfaces[0].SubnetId = variables.availabilityZones[manifest.region][entry]; + + return specs.concat(az); + }, []); + + // Get the average spot price across all AZs in the region. + const spotPrice = pricing.reduce((average, entry) => average + (entry / pricing.length), 0); + const maxDuration = (Number(manifest.instanceDuration) < variables.campaign_max_price / spotPrice) ? 
Number(manifest.instanceDuration) : variables.campaign_max_price / spotPrice; + + console.log(spotPrice, maxDuration, variables.campaign_max_price); + + const spotFleetParams = { + SpotFleetRequestConfig: { + AllocationStrategy: "lowestPrice", + IamFleetRole: variables.iamFleetRole, + InstanceInterruptionBehavior: "terminate", + LaunchSpecifications: launchSpecifications, + SpotPrice: (manifest.priceTarget / (manifest.instanceCount * manifest.instanceDuration) * 2).toString(), + TargetCapacity: manifest.instanceCount, + ReplaceUnhealthyInstances: false, + TerminateInstancesWithExpiration: true, + Type: "request", + ValidFrom: (new Date().getTime() / 1000), + ValidUntil: (new Date().getTime() / 1000) + (maxDuration * 3600) + } + }; + + console.log(JSON.stringify(spotFleetParams)); + } catch (e) { + console.log("Failed to generate launch specifications.", e); + return respond(500, {}, "Failed to generate launch specifications."); + } + + try { + const spotFleetRequest = await ec2.requestSpotFleet(spotFleetParams).promise(); + } catch (e) { + console.log("Failed to request spot fleet.", e); + return respond(500, {}, "Failed to request spot fleet."); + } + + // Campaign created successfully. + + console.log(`Successfully requested spot fleet ${spotFleetRequest.SpotFleetRequestId}`); + + try { + const updateCampaign = await ddb.updateItem({ + Key: { + userid: {S: entity}, + keyid: {S: `campaigns:${campaign}`} + }, + TableName: "Campaigns", + AttributeUpdates: { + active: { Action: "PUT", Value: { BOOL: true }}, + status: { Action: "PUT", Value: { S: "STARTING" }}, + spotFleetRequestId: { Action: "PUT", Value: { S: data.SpotFleetRequestId }}, + startTime: { Action: "PUT", Value: { N: Math.floor(new Date().getTime() / 1000) }}, + eventType: { Action: "PUT", Value: { S: "CampaignStarted" }} + } + }).promise(); + } catch (e) { + console.log("Spot fleet submitted, but failed to mark Campaign as 'STARTING'. This is a catastrophic error.", e); + return respond(500, {}, "Spot fleet submitted, but failed to mark Campaign as 'STARTING'. This is a catastrophic error.", false); + } + + return respond(200, {}, { msg: "Campaign started successfully", campaignId: campaignId, spotFleetRequestId: spotFleetRequest.SpotFleetRequestId }, true); +} + +function respond(statusCode, headers, body, success) { + + // Include terraform dns names as allowed origins, as well as localhost. + const allowed_origins = JSON.parse(variables.www_dns_names); + allowed_origins.push("https://localhost"); + + headers['Content-Type'] = 'text/plain'; + + if (allowed_origins.indexOf(origin) !== false) { + // Echo the origin back. 
I guess this is the best way to support multiple origins + headers['Access-Control-Allow-Origin'] = origin; + } else { + console.log("Invalid origin received.", origin); + } + + switch (typeof body) { + case "string": + body = { msg: body, success: success }; + break; + + case "object": + body.success = success; + break; + } + + const response = { + statusCode: statusCode, + headers: headers, + body: JSON.stringify(body), + } + + console.log(JSON.stringify(response)); + + cb(null, response); + + if (success == true) { + return Promise.resolve(body.msg); + } else { + return Promise.reject(body.msg); + } +} \ No newline at end of file diff --git a/terraform/lambda_functions/list_campaigns/package-lock.json b/terraform/lambda_functions/list_campaigns/package-lock.json new file mode 100644 index 0000000..7c8e64a --- /dev/null +++ b/terraform/lambda_functions/list_campaigns/package-lock.json @@ -0,0 +1,102 @@ +{ + "name": "execute_campaign", + "version": "1.0.0", + "lockfileVersion": 1, + "requires": true, + "dependencies": { + "aws-sdk": { + "version": "2.982.0", + "resolved": "https://registry.npmjs.org/aws-sdk/-/aws-sdk-2.982.0.tgz", + "integrity": "sha512-5w+m8Ia35NqB4TOZHEKts5zSV+FTdc7hTYbN4N4lZ4YU3cLTMt496ojh5UI3Deo8IIlqgTf3UVuq6Y6cPpVxkg==", + "requires": { + "buffer": "4.9.2", + "events": "1.1.1", + "ieee754": "1.1.13", + "jmespath": "0.15.0", + "querystring": "0.2.0", + "sax": "1.2.1", + "url": "0.10.3", + "uuid": "3.3.2", + "xml2js": "0.4.19" + } + }, + "base64-js": { + "version": "1.5.1", + "resolved": "https://registry.npmjs.org/base64-js/-/base64-js-1.5.1.tgz", + "integrity": "sha512-AKpaYlHn8t4SVbOHCy+b5+KKgvR4vrsD8vbvrbiQJps7fKDTkjkDry6ji0rUJjC0kzbNePLwzxq8iypo41qeWA==" + }, + "buffer": { + "version": "4.9.2", + "resolved": "https://registry.npmjs.org/buffer/-/buffer-4.9.2.tgz", + "integrity": "sha512-xq+q3SRMOxGivLhBNaUdC64hDTQwejJ+H0T/NB1XMtTVEwNTrfFF3gAxiyW0Bu/xWEGhjVKgUcMhCrUy2+uCWg==", + "requires": { + "base64-js": "^1.0.2", + "ieee754": "^1.1.4", + "isarray": "^1.0.0" + } + }, + "events": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/events/-/events-1.1.1.tgz", + "integrity": "sha1-nr23Y1rQmccNzEwqH1AEKI6L2SQ=" + }, + "ieee754": { + "version": "1.1.13", + "resolved": "https://registry.npmjs.org/ieee754/-/ieee754-1.1.13.tgz", + "integrity": "sha512-4vf7I2LYV/HaWerSo3XmlMkp5eZ83i+/CDluXi/IGTs/O1sejBNhTtnxzmRZfvOUqj7lZjqHkeTvpgSFDlWZTg==" + }, + "isarray": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/isarray/-/isarray-1.0.0.tgz", + "integrity": "sha1-u5NdSFgsuhaMBoNJV6VKPgcSTxE=" + }, + "jmespath": { + "version": "0.15.0", + "resolved": "https://registry.npmjs.org/jmespath/-/jmespath-0.15.0.tgz", + "integrity": "sha1-o/Iiqarp+Wb10nx5ZRDigJF2Qhc=" + }, + "punycode": { + "version": "1.3.2", + "resolved": "https://registry.npmjs.org/punycode/-/punycode-1.3.2.tgz", + "integrity": "sha1-llOgNvt8HuQjQvIyXM7v6jkmxI0=" + }, + "querystring": { + "version": "0.2.0", + "resolved": "https://registry.npmjs.org/querystring/-/querystring-0.2.0.tgz", + "integrity": "sha1-sgmEkgO7Jd+CDadW50cAWHhSFiA=" + }, + "sax": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/sax/-/sax-1.2.1.tgz", + "integrity": "sha1-e45lYZCyKOgaZq6nSEgNgozS03o=" + }, + "url": { + "version": "0.10.3", + "resolved": "https://registry.npmjs.org/url/-/url-0.10.3.tgz", + "integrity": "sha1-Ah5NnHcF8hu/N9A861h2dAJ3TGQ=", + "requires": { + "punycode": "1.3.2", + "querystring": "0.2.0" + } + }, + "uuid": { + "version": "3.3.2", + "resolved": 
"https://registry.npmjs.org/uuid/-/uuid-3.3.2.tgz", + "integrity": "sha512-yXJmeNaw3DnnKAOKJE51sL/ZaYfWJRl1pK9dr19YFCu0ObS231AB1/LbqTKRAQ5kw8A90rA6fr4riOUpTZvQZA==" + }, + "xml2js": { + "version": "0.4.19", + "resolved": "https://registry.npmjs.org/xml2js/-/xml2js-0.4.19.tgz", + "integrity": "sha512-esZnJZJOiJR9wWKMyuvSE1y6Dq5LCuJanqhxslH2bxM6duahNZ+HMpCLhBQGZkbX6xRf8x1Y2eJlgt2q3qo49Q==", + "requires": { + "sax": ">=0.6.0", + "xmlbuilder": "~9.0.1" + } + }, + "xmlbuilder": { + "version": "9.0.7", + "resolved": "https://registry.npmjs.org/xmlbuilder/-/xmlbuilder-9.0.7.tgz", + "integrity": "sha1-Ey7mPS7FVlxVfiD0wi35rKaGsQ0=" + } + } +} diff --git a/terraform/lambda_functions/list_campaigns/package.json b/terraform/lambda_functions/list_campaigns/package.json new file mode 100644 index 0000000..bd0b67b --- /dev/null +++ b/terraform/lambda_functions/list_campaigns/package.json @@ -0,0 +1,15 @@ +{ + "name": "execute_campaign", + "version": "1.0.0", + "description": "NPK execute_campaign Lambda", + "main": "main.js", + "dependencies": { + "aws-sdk": "^2.599.0" + }, + "devDependencies": {}, + "scripts": { + "test": "echo \"Error: no test specified\" && exit 1" + }, + "author": "Brad Woodward (brad@bradwoodward.io)", + "license": "MIT" +} diff --git a/terraform/npk-settings.json.sample b/terraform/npk-settings.json.sample index 1b06fff..8d4298d 100644 --- a/terraform/npk-settings.json.sample +++ b/terraform/npk-settings.json.sample @@ -6,14 +6,7 @@ "useCustomDNS": false, "route53Zone": "E10SDSEFH1102DF", - "dnsNames": { - "www": [ - "dev.npkproject.io" - ], - "api": [ - "api.dev.npkproject.io" - ] - }, + "dnsName": "dev.npkproject.io", "awsProfile": "npk", "criticalEventsSMS": "+13035551234", diff --git a/terraform/quickdeploy.sh b/terraform/quickdeploy.sh index ff49bbb..d330509 100755 --- a/terraform/quickdeploy.sh +++ b/terraform/quickdeploy.sh @@ -39,19 +39,9 @@ jq -n --arg profile $profile --arg email $email --arg sms $sms --arg bucket $BUC "backend_bucket": $bucket, "campaign_data_ttl": 604800, "campaign_max_price": 50, - "georestrictions": [], - "useCustomDNS": false, - "route53Zone": "", - "dnsNames": { - "www": [], - "api": [] - }, "awsProfile": $profile, "criticalEventsSMS": $sms, "adminEmail": $email, - "debug_lambda": false, - - "useSAML": false, }' >> npk-settings.json touch quickdeployed diff --git a/terraform/s3_policies.tf b/terraform/s3_policies.tf deleted file mode 100644 index 546def5..0000000 --- a/terraform/s3_policies.tf +++ /dev/null @@ -1,18 +0,0 @@ -/* Static Site Bucket Policy */ - -data "aws_iam_policy_document" "s3_static_site" { - statement { - actions = ["s3:GetObject"] - resources = ["${aws_s3_bucket.static_site.arn}/*"] - - principals { - type = "AWS" - identifiers = ["${aws_cloudfront_origin_access_identity.npk.iam_arn}"] - } - } -} - -resource "aws_s3_bucket_policy" "s3_static_site" { - bucket = "${aws_s3_bucket.static_site.id}" - policy = "${data.aws_iam_policy_document.s3_static_site.json}" -} \ No newline at end of file diff --git a/terraform/s3_policies.tf.bak b/terraform/s3_policies.tf.bak new file mode 100644 index 0000000..ba8ce27 --- /dev/null +++ b/terraform/s3_policies.tf.bak @@ -0,0 +1,27 @@ +/* Static Site Bucket Policy */ + +data "aws_iam_policy_document" "s3_static_site" { + statement { + actions = ["s3:GetObject"] + resources = ["${aws_s3_bucket.static_site.arn}/*"] + + principals { + type = "AWS" + # TF-UPGRADE-TODO: In Terraform v0.10 and earlier, it was sometimes necessary to + # force an interpolation expression to be interpreted as a list by 
wrapping it + # in an extra set of list brackets. That form was supported for compatibility in + # v0.11, but is no longer supported in Terraform v0.12. + # + # If the expression in the following list itself returns a list, remove the + # brackets to avoid interpretation as a list of lists. If the expression + # returns a single list item then leave it as-is and remove this TODO comment. + identifiers = [aws_cloudfront_origin_access_identity.npk.iam_arn] + } + } +} + +resource "aws_s3_bucket_policy" "s3_static_site" { + bucket = aws_s3_bucket.static_site.id + policy = data.aws_iam_policy_document.s3_static_site.json +} + diff --git a/terraform/terraform.jsonnet b/terraform/terraform.jsonnet index 4f18529..e6eeea2 100644 --- a/terraform/terraform.jsonnet +++ b/terraform/terraform.jsonnet @@ -1,354 +1,795 @@ -local npksettings = import 'npk-settings.json'; -local regions = import 'regions.json'; -local quotas = import 'quotas.json'; local backend = import 'jsonnet/backend.libsonnet'; local provider = import 'jsonnet/provider.libsonnet'; -local vpc = import 'jsonnet/vpc.libsonnet'; -local subnet = import 'jsonnet/subnet.libsonnet'; -local route = import 'jsonnet/routetable.libsonnet'; + +local iam = import 'jsonnet/iam.libsonnet'; local igw = import 'jsonnet/igw.libsonnet'; -local dynamodb = import 'jsonnet/dynamodb.libsonnet'; -local dynamodb_settings = import 'jsonnet/dynamodb_settings.libsonnet'; -local cognito_iam_roles = import 'jsonnet/cognito_iam_roles.libsonnet'; -local route53 = import 'jsonnet/route53.libsonnet'; -local s3 = import 'jsonnet/s3.libsonnet'; local acm = import 'jsonnet/acm.libsonnet'; -local api_gateway = import 'jsonnet/api_gateway.libsonnet'; +local api_gateway_map = import 'jsonnet/api_gateway_map.libsonnet'; local cloudfront = import 'jsonnet/cloudfront.libsonnet'; local cloudwatch = import 'jsonnet/cloudwatch.libsonnet'; local cognito = import 'jsonnet/cognito.libsonnet'; +local cognito_iam_roles = import 'jsonnet/cognito_iam_roles.libsonnet'; +local dynamodb = import 'jsonnet/dynamodb.libsonnet'; +local dynamodb_settings = import 'jsonnet/dynamodb_settings.libsonnet'; local ec2_iam_roles = import 'jsonnet/ec2_iam_roles.libsonnet'; local keepers = import 'jsonnet/keepers.libsonnet'; -local lambda_functions = import 'jsonnet/lambda_functions.libsonnet'; -local lambda_iam_roles = import 'jsonnet/lambda_iam_roles.libsonnet'; +local lambda = import 'jsonnet/lambda.libsonnet'; +// local lambda_functions = import 'jsonnet/lambda_functions.libsonnet'; +// local lambda_iam_roles = import 'jsonnet/lambda_iam_roles.libsonnet'; +local null_resources = import 'jsonnet/null_resources.libsonnet'; +local route = import 'jsonnet/routetable.libsonnet'; +local route53 = import 'jsonnet/route53.libsonnet'; +local s3 = import 'jsonnet/s3.libsonnet'; +local subnet = import 'jsonnet/subnet.libsonnet'; local templates = import 'jsonnet/templates.libsonnet'; local variables = import 'jsonnet/variables.libsonnet'; -local templates = import 'jsonnet/templates.libsonnet'; -local null_resources = import 'jsonnet/null_resources.libsonnet'; +local vpc = import 'jsonnet/vpc.libsonnet'; + +local npksettings = import 'npk-settings.json'; +local regions = import 'regions.json'; +local quotas = import 'quotas.json'; local settings = npksettings + { defaultRegion: "us-west-2", regions: regions, - quotas: quotas -}; - -local defaultResource = { - "tags": { - "Project": "NPK" - } + quotas: quotas, + useCustomDNS: std.objectHas(npksettings, 'dnsBaseName'), + [if std.objectHas(npksettings, 
'dnsBaseName') then 'wwwEndpoint']: "www.%s" % [npksettings.dnsBaseName], + [if std.objectHas(npksettings, 'dnsBaseName') then 'apiEndpoint']: "api.%s" % [npksettings.dnsBaseName], + [if std.objectHas(npksettings, 'dnsBaseName') then 'authEndpoint']: "auth.%s" % [npksettings.dnsBaseName], + useSAML: std.objectHas(npksettings, 'sAMLMetadataFile') || std.objectHas(npksettings, 'sAMLMetadataUrl') }; local regionKeys = std.objectFields(settings.regions); { - 'acm.tf.json': if settings.useCustomDNS then { - // TODO: Fix this to get 1 cert for all names. - "resource": { - "aws_acm_certificate": { - ["www-" + i]: defaultResource + acm.certificate(settings.dnsNames.www[i]) for i in std.range(0, std.length(settings.dnsNames.www) - 1) - } + { - ["api-" + i]: defaultResource + acm.certificate(settings.dnsNames.api[i]) for i in std.range(0, std.length(settings.dnsNames.api) - 1) - } + if settings.useSAML == true && settings.useCustomDNS == true then { - "saml": acm.certificate("auth." + settings.dnsNames.www[0]) - } else {} - } + if std.type(settings.route53Zone) == "string" then { - "aws_route53_record": { - ["acm-validation-www-" + i]: acm.route53_record( - "${aws_acm_certificate.www-" + i + ".domain_validation_options.0.resource_record_name}", - "${aws_acm_certificate.www-" + i + ".domain_validation_options.0.resource_record_type}", - "${aws_acm_certificate.www-" + i + ".domain_validation_options.0.resource_record_value}", - settings.route53Zone - ) for i in std.range(0, std.length(settings.dnsNames.www) - 1) - } + { - ["acm-validation-api-" + i]: acm.route53_record( - "${aws_acm_certificate.api-" + i + ".domain_validation_options.0.resource_record_name}", - "${aws_acm_certificate.api-" + i + ".domain_validation_options.0.resource_record_type}", - "${aws_acm_certificate.api-" + i + ".domain_validation_options.0.resource_record_value}", - settings.route53Zone - ) for i in std.range(0, std.length(settings.dnsNames.api) - 1) - } + (if settings.useSAML == true && settings.useCustomDNS == true then { - "acm-validation-saml": acm.route53_record( - "${aws_acm_certificate.saml.domain_validation_options.0.resource_record_name}", - "${aws_acm_certificate.saml.domain_validation_options.0.resource_record_type}", - "${aws_acm_certificate.saml.domain_validation_options.0.resource_record_value}", - settings.route53Zone - ) - } else {}), - "aws_acm_certificate_validation": { - ["www-" + i]: acm.certificate_validation( - "${aws_acm_certificate.www-" + i + ".arn}", - "${aws_route53_record.acm-validation-www-" + i + ".fqdn}" - ) for i in std.range(0, std.length(settings.dnsNames.www) - 1) - } + { - ["api-" + i]: acm.certificate_validation( - "${aws_acm_certificate.api-" + i + ".arn}", - "${aws_route53_record.acm-validation-api-" + i + ".fqdn}" - ) for i in std.range(0, std.length(settings.dnsNames.api) - 1) - } + (if settings.useSAML == true && settings.useCustomDNS == true then { - "saml": acm.certificate_validation( - "${aws_acm_certificate.saml.arn}", - "${aws_route53_record.acm-validation-saml.fqdn}" - ) - } else {}) - } else {} - } + if std.type(settings.route53Zone) != "string" then { - "output": { - ["acm-validation-www-" + i]: acm.manual_record( - "${aws_acm_certificate.www-" + i + ".domain_validation_options.0.resource_record_name}", - "${aws_acm_certificate.www-" + i + ".domain_validation_options.0.resource_record_type}", - "${aws_acm_certificate.www-" + i + ".domain_validation_options.0.resource_record_value}" - ) for i in std.range(0, std.length(settings.dnsNames.www) - 1) - } + { - ["acm-validation-api-" 
+ i]: acm.manual_record( - "${aws_acm_certificate.api-" + i + ".domain_validation_options.0.resource_record_name}", - "${aws_acm_certificate.api-" + i + ".domain_validation_options.0.resource_record_type}", - "${aws_acm_certificate.api-" + i + ".domain_validation_options.0.resource_record_value}" - ) for i in std.range(0, std.length(settings.dnsNames.api) - 1) - } - } else {} - else {}, - 'api_gateway.tf.json': { - "resource": api_gateway.resource + if settings.useCustomDNS then { - "aws_api_gateway_domain_name": { - ["api-url-" + i]: api_gateway.domain_name( - settings.dnsNames.api[i], - "${aws_acm_certificate.api-" + i + ".arn}" - ) for i in std.range(0, std.length(settings.dnsNames.api) - 1) + [if settings.useCustomDNS then 'acm.tf.json' else null]: { + resource: acm.certificate("main", "*.%s" % [settings.dnsBaseName], [settings.dnsBaseName], settings.route53Zone) + }, + 'api_gateway.tf.json': api_gateway_map.rest_api('npk', { + parameters: { + endpoint_configuration: { + types: ["EDGE"] + } + }, + deployment: { + stage_name: "v1" + }, + root: { + children: [{ + pathPart: "api", + methods: { + OPTIONS: { + optionsIntegration: true, + parameters: { + authorization: "NONE", + request_parameters: { + "method.request.path.proxy": true + } + } + } + }, + children: [{ + pathPart: "campaign", + methods: { + GET: { + lambdaIntegration: "list_campaigns", + parameters: { + authorization: "AWS_IAM" + } + }, + PUT: { + lambdaIntegration: "execute_campaign", + parameters: { + authorization: "AWS_IAM" + } + }, + OPTIONS: { + optionsIntegration: true, + parameters: { + authorization: "NONE", + request_parameters: { + "method.request.path.proxy": true + } + } + } + }, + children: [{ + pathPart: "{campaign}", + methods: { + DELETE: { + lambdaIntegration: "delete_campaign", + parameters: { + authorization: "AWS_IAM", + request_parameters: { + "method.request.path.campaign": true + } + } + }, + GET: { + lambdaIntegration: "get_campaign", + parameters: { + authorization: "AWS_IAM", + request_parameters: { + "method.request.path.campaign": true + } + } + }, + OPTIONS: { + optionsIntegration: true, + parameters: { + authorization: "NONE", + request_parameters: { + "method.request.path.proxy": true + } + } + } + }, + }] + }] + }, { + pathPart: "statusreport", + methods: { + OPTIONS: { + optionsIntegration: true, + parameters: { + authorization: "NONE", + request_parameters: { + "method.request.path.proxy": true + } + } + } + }, + children: [{ + pathPart: "{proxy+}", + methods: { + POST: { + lambdaIntegration: "status_reporter", + parameters: { + authorization: "AWS_IAM" + } + }, + OPTIONS: { + optionsIntegration: true, + parameters: { + authorization: "NONE", + request_parameters: { + "method.request.path.proxy": true + } + } + } + } + }] + }] + } + }), + 'api_gateway_addons.tf.json': { + resource: { + aws_api_gateway_account: { + "us-west-2": { + cloudwatch_role_arn: "${aws_iam_role.npk-apigateway_cloudwatch.arn}" + } }, - "aws_api_gateway_base_path_mapping": { - ["api-url-" + i]: api_gateway.base_path("${aws_api_gateway_domain_name.api-url-" + i + ".domain_name}") - for i in std.range(0, std.length(settings.dnsNames.api) - 1) + aws_api_gateway_authorizer: { + npk: { + name: "npk", + type: "COGNITO_USER_POOLS", + rest_api_id: "${aws_api_gateway_rest_api.npk.id}", + provider_arns: [ + "${aws_cognito_user_pool.npk.arn}" + ] + } }, - } else {} + } + }, + [if settings.useCustomDNS then 'api_gateway_addons-useCustomDNS.tf.json' else null]: { + resource: { + aws_api_gateway_base_path_mapping: { + npk: { + api_id: 
"${aws_api_gateway_rest_api.npk.id}", + stage_name: "${aws_api_gateway_deployment.npk.stage_name}", + domain_name: "${aws_api_gateway_domain_name.npk.domain_name}", + base_path: "v1" + } + }, + aws_api_gateway_domain_name: { + npk: { + certificate_arn: "${aws_acm_certificate.main.arn}", + domain_name: "api.%s" % [settings.dnsBaseName], + depends_on: ["aws_acm_certificate.main"] + } + } + } }, 'backend.tf.json': backend(settings), 'cloudfront.tf.json': { - "resource": cloudfront.resource(settings), - "output": cloudfront.output + resource: cloudfront.resource(settings), + output: cloudfront.output }, 'cloudwatch.tf.json': cloudwatch, + 'cloudwatch-policy.tf.json': { + data: { + aws_iam_policy_document: { + cloudwatch_invoke_spot_monitor: { + statement: { + actions: ["lambda:Invoke"], + resources: ["${aws_lambda_function.spot_monitor.arn}"], + principals: { + type: "AWS", + identifiers: ["${aws_cloudfront_origin_access_identity.npk.iam_arn}"] + } + } + } + } + } + }, + 'cloudwatch-api-gateway-role.tf.json': { + resource: iam.iam_role( + "npk-apigateway_cloudwatch", + "Allow APIGateway to write to CloudWatch Logs", + {}, + { + CloudWatchPut: [{ + Sid: "logs", + Effect: "Allow", + Action: [ + "logs:CreateLogGroup", + "logs:CreateLogStream", + "logs:DescribeLogGroups", + "logs:DescribeLogStreams", + "logs:PutLogEvents", + "logs:GetLogEvents", + "logs:FilterLogEvents" + ], + Resource: "arn:aws:logs:*" + }] + }, + [{ + Effect: "Allow", + Principal: { + Service: "apigateway.amazonaws.com" + }, + Action: "sts:AssumeRole" + }] + ) + }, 'cognito_iam_roles.tf.json': { - "resource": cognito_iam_roles.resource, - "data": cognito_iam_roles.data(settings) + resource: cognito_iam_roles.resource, + data: cognito_iam_roles.data(settings) }, 'cognito.tf.json': { - "resource": cognito.resource(settings), - "output": cognito.output(settings) + resource: cognito.resource(settings), + output: cognito.output(settings) }, 'data.tf.json': { - "data": { - "aws_caller_identity": { - "current": {} + data: { + aws_caller_identity: { + current: {} } } }, 'dynamodb.tf.json': { - "resource": { - "aws_dynamodb_table": { - [i]: defaultResource + dynamodb[i] for i in std.objectFields(dynamodb) + resource: { + aws_dynamodb_table: { + [i]: dynamodb[i] for i in std.objectFields(dynamodb) } } }, 'dynamodb_settings.tf.json': { - "resource": dynamodb_settings + resource: dynamodb_settings }, 'ec2_iam_roles.tf.json': ec2_iam_roles, 'igw.tf.json': { - "resource": { - "aws_internet_gateway": { - [regionKeys[i]]: defaultResource + igw(regionKeys[i]) for i in std.range(0, std.length(regionKeys) - 1) + resource: { + aws_internet_gateway: { + [regionKeys[i]]: igw(regionKeys[i]) for i in std.range(0, std.length(regionKeys) - 1) } } }, 'keepers.tf.json': { - "resource": keepers.resource(settings.adminEmail), - "output": keepers.output + resource: keepers.resource(settings.adminEmail), + output: keepers.output }, 'keys.tf.json': { - "resource": { - "tls_private_key": { - "ssh": { - "algorithm": "RSA", - "rsa_bits": 4096 + resource: { + tls_private_key: { + ssh: { + algorithm: "RSA", + rsa_bits: 4096 } }, - "aws_key_pair": { + aws_key_pair: { [region]: { - "provider": "aws." + region, - "key_name": "npk-key", - "public_key": "${tls_private_key.ssh.public_key_openssh}" + provider: "aws." 
+ region, + key_name: "npk-key", + public_key: "${tls_private_key.ssh.public_key_openssh}" } for region in regionKeys }, - "local_file": { - "ssh_key": { - "sensitive_content": "${tls_private_key.ssh.private_key_pem}", - "filename": "${path.module}/npk.pem", - "file_permission": "0600" + local_file: { + ssh_key: { + sensitive_content: "${tls_private_key.ssh.private_key_pem}", + filename: "${path.module}/npk.pem", + file_permission: "0600" } } } }, - 'lambda_functions.tf.json': { - "resource": lambda_functions.resources(settings), - "data": lambda_functions.data - }, - 'lambda_iam_roles.tf.json': { - "resource": lambda_iam_roles.resource(settings), - "data": lambda_iam_roles.data(settings) - }, - 'null_resources.tf.json': null_resources.resource(settings), - 'provider-aws.tf.json': { - "provider": [ - provider.aws_provider - ] + [ - provider.aws_alias(region) for region in regionKeys - ] - }, - 'provider-other.tf.json': { - "provider": { - "archive": {} + 'lambda-delete_campaign.tf.json': lambda.lambda_function("delete_campaign", { + handler: "main.main", + timeout: 20, + memory_size: 512, + + environment: { + variables: { + www_dns_names: std.toString([settings.wwwEndpoint]), + } + } + }, { + statement: [{ + sid: "ec2", + actions: [ + "ec2:CancelSpotFleetRequests", + "ec2:CreateTags", + "ec2:DescribeImages", + "ec2:DescribeSpotPriceHistory", + "ec2:DescribeSpotFleetRequests" + ], + resources: ["*"] + },{ + sid: "ddb", + actions: [ + "dynamodb:Query", + "dynamodb:UpdateItem", + "dynamodb:DeleteItem" + ], + resources: [ + "${aws_dynamodb_table.campaigns.arn}" + ] + },{ + sid: "adminGetUser", + actions: [ + "cognito-idp:AdminGetUser" + ], + resources: [ + "${aws_cognito_user_pool.npk.arn}" + ] + }] + }), + 'lambda-execute_campaign.tf.json': lambda.lambda_function("execute_campaign", { + handler: "main.main", + timeout: 60, + memory_size: 512, + + environment: { + variables: { + www_dns_names: std.toString([settings.wwwEndpoint]), + campaign_max_price: "${var.campaign_max_price}", + userdata_bucket: "${aws_s3_bucket.user_data.id}", + instanceProfile: "${aws_iam_instance_profile.npk_node.arn}", + iamFleetRole: "${aws_iam_role.npk_fleet_role.arn}", + availabilityZones: std.manifestJsonEx({ + [regionKeys[i]]: { + [settings.regions[regionKeys[i]][azi]]: "${aws_subnet." 
+ settings.regions[regionKeys[i]][azi] + ".id}" + for azi in std.range(0, std.length(settings.regions[regionKeys[i]]) - 1) + } + for i in std.range(0, std.length(regionKeys) - 1) + }, "") + } } + }, { + statement: [{ + sid: "s3", + actions: [ + "s3:GetObject" + ], + resources: [ + "${aws_s3_bucket.user_data.arn}/*" + ] + },{ + sid: "ec2", + actions: [ + "ec2:DescribeImages", + "ec2:DescribeSpotFleetRequests", + "ec2:DescribeSpotPriceHistory", + "ec2:RequestSpotFleet", + "ec2:RunInstances", + "ec2:CreateTags" + ], + resources: ["*"] + },{ + sid: "ddb", + actions: [ + "dynamodb:Query", + "dynamodb:UpdateItem", + "dynamodb:DeleteItem" + ], + resources: [ + "${aws_dynamodb_table.campaigns.arn}" + ] + },{ + sid: "passrole", + actions: [ + "iam:PassRole" + ], + resources: [ + "${aws_iam_role.npk_instance_role.arn}", + "${aws_iam_role.npk_fleet_role.arn}" + ] + },{ + sid: "adminGetUser", + actions: [ + "cognito-idp:AdminGetUser" + ], + resources: [ + "${aws_cognito_user_pool.npk.arn}" + ] + }] + }), + 'lambda-get_campaign.tf.json': lambda.lambda_function("get_campaign", { + handler: "main.main", + timeout: 20, + memory_size: 512, + + environment: { + variables: { + www_dns_names: std.toString([settings.wwwEndpoint]), + } + } + }, { + statement: [{ + sid: "ddb", + actions: [ + "dynamodb:Query", + "dynamodb:UpdateItem" + ], + resources: [ + "${aws_dynamodb_table.campaigns.arn}" + ] + },{ + sid: "adminGetUser", + actions: [ + "cognito-idp:AdminGetUser" + ], + resources: [ + "${aws_cognito_user_pool.npk.arn}" + ] + }] + }), + 'lambda-list_campaigns.tf.json': lambda.lambda_function("list_campaigns", { + handler: "main.main", + timeout: 20, + memory_size: 512, + + environment: { + variables: { + www_dns_names: std.toString([settings.wwwEndpoint]), + } + } + }, { + statement: [{ + sid: "ddb", + actions: [ + "dynamodb:Query", + "dynamodb:UpdateItem" + ], + resources: [ + "${aws_dynamodb_table.campaigns.arn}" + ] + },{ + sid: "adminGetUser", + actions: [ + "cognito-idp:AdminGetUser" + ], + resources: [ + "${aws_cognito_user_pool.npk.arn}" + ] + }] + }), + 'lambda-spot_monitor.tf.json': lambda.lambda_function("spot_monitor", { + handler: "main.main", + timeout: 10, + memory_size: 512, + + environment: { + variables: { + www_dns_name: std.toString(settings.wwwEndpoint), + region: "${var.region}", + campaign_max_price: "${var.campaign_max_price}", + critical_events_sns_topic: "${aws_sns_topic.critical_events.id}", + availabilityZones: std.manifestJsonEx({ + [regionKeys[i]]: { + [settings.regions[regionKeys[i]][azi]]: "${aws_subnet." 
+ settings.regions[regionKeys[i]][azi] + ".id}" + for azi in std.range(0, std.length(settings.regions[regionKeys[i]]) - 1) + } + for i in std.range(0, std.length(regionKeys) - 1) + }, "") + } + }, + + dead_letter_config: { + target_arn: "${aws_sns_topic.critical_events.arn}" + } + }, { + statement: [{ + sid: "sns", + actions: [ + "sns:Publish" + ], + resources: [ + "${aws_sns_topic.critical_events.arn}" + ] + },{ + sid: "ec2", + actions: [ + "ec2:CancelSpotFleetRequests", + "ec2:DescribeTags", + "ec2:DescribeInstances", + "ec2:DescribeInstanceStatus", + "ec2:DescribeSpotFleetRequests", + "ec2:DescribeSpotFleetRequestHistory", + "ec2:DescribeSpotFleetInstances", + "ec2:DescribeSpotInstanceRequests", + "ec2:DescribeSpotPriceHistory" + ], + resources: ["*"] + },{ + sid: "ddb", + actions: [ + "dynamodb:GetItem", + "dynamodb:UpdateItem", + "dynamodb:Query" + ], + resources: [ + "${aws_dynamodb_table.campaigns.arn}", + "${aws_dynamodb_table.campaigns.arn}/index/SpotFleetRequests" + ] + }] + }), + 'lambda-status_reporter.tf.json': lambda.lambda_function("status_reporter", { + handler: "main.main", + timeout: 10, + memory_size: 512, + + environment: { + variables: { + www_dns_name: std.toString(settings.wwwEndpoint), + region: "${var.region}", + campaign_max_price: "${var.campaign_max_price}", + critical_events_sns_topic: "${aws_sns_topic.critical_events.id}", + availabilityZones: std.manifestJsonEx({ + [regionKeys[i]]: { + [settings.regions[regionKeys[i]][azi]]: "${aws_subnet." + settings.regions[regionKeys[i]][azi] + ".id}" + for azi in std.range(0, std.length(settings.regions[regionKeys[i]]) - 1) + } + for i in std.range(0, std.length(regionKeys) - 1) + }, "") + } + }, + + dead_letter_config: { + target_arn: "${aws_sqs_queue.status_reporter_dlq.arn}" + }, + }, { + statement: [{ + sid: "s3Put", + actions: [ + "s3:PutObject" + ], + resources: [ + "${aws_s3_bucket.user_data.arn}/*", + "${aws_s3_bucket.logs.arn}/api_gateway_proxy/*", + ] + },{ + sid: "s3GetDelete", + actions: [ + "s3:GetObject", + "s3:DeleteObject" + ], + resources: [ + "${aws_s3_bucket.user_data.arn}/*" + ] + },{ + sid: "ddb", + actions: [ + "dynamodb:Query", + "dynamodb:UpdateItem" + ], + resources: [ + "${aws_dynamodb_table.campaigns.arn}" + ] + },{ + sid: "sqs", + actions: [ + "sqs:SendMessage" + ], + resources: [ + "${aws_sqs_queue.status_reporter_dlq.arn}" + ] + }] + }), + 'null_resources.tf.json': null_resources.resource(settings), + 'provider.tf.json': { + terraform: { + required_providers: { + aws: { + source: "hashicorp/aws", + version: "~> 3.57.0" + }, + archive: { + source: "hashicorp/archive", + version: "~> 2.2.0" + } + } + }, + provider: [{ + aws: { + profile: settings.awsProfile, + region: "us-west-2" + } + }, { + archive: {} + }] + [{ + aws: { + alias: region, + profile: settings.awsProfile, + region: region + } + } for region in regionKeys] }, - 'route53.tf.json': if settings.useCustomDNS && std.type(settings.route53Zone) == "string" then - { - "resource": { - "aws_route53_record": { - ["www-record-" + i]: route53.record( - settings.dnsNames.www[i], - settings.route53Zone, - route53.alias( - "${aws_cloudfront_distribution.npk.domain_name}", - "${aws_cloudfront_distribution.npk.hosted_zone_id}" - ) - ) for i in std.range(0, std.length(settings.dnsNames.www) - 1) - } + { - ["api-record-" + i]: route53.record( - settings.dnsNames.api[i], - settings.route53Zone, - route53.alias( - "${aws_api_gateway_domain_name.api-url-" + i + ".cloudfront_domain_name}", - "${aws_api_gateway_domain_name.api-url-" + i + 
".cloudfront_zone_id}" - ) - ) for i in std.range(0, std.length(settings.dnsNames.api) - 1) - } + (if settings.useSAML == true && settings.useCustomDNS == true then { - "saml": route53.record( - "auth." + settings.dnsNames.www[0], - settings.route53Zone, - route53.alias( - "${aws_cognito_user_pool_domain.saml.cloudfront_distribution_arn}", - "Z2FDTNDATAQYW2" - ) + # 'provider-aws.tf.json': { + # provider: [ + # provider.aws_provider + # ] + [ + # provider.aws_alias(region) for region in regionKeys + # ] + # }, + # 'provider-other.tf.json': { + # provider: { + # archive: {} + # } + # }, + [if settings.useCustomDNS then 'route53-main.tf.json' else null]: { + resource: { + aws_route53_record: { + www: route53.record( + settings.wwwEndpoint, + settings.route53Zone, + route53.alias( + "${aws_cloudfront_distribution.npk.domain_name}", + "${aws_cloudfront_distribution.npk.hosted_zone_id}" + ) + ) + } + { + api: route53.record( + "api.%s" % [settings.dnsBaseName], + settings.route53Zone, + route53.alias( + "${aws_api_gateway_domain_name.npk.cloudfront_domain_name}", + "${aws_api_gateway_domain_name.npk.cloudfront_zone_id}" ) - } else {}) + ) } - } else {}, + } + }, + [if settings.useSAML then 'route53-saml.tf.json' else null]: { + resource: { + saml: route53.record( + settings.authEndpoint, + settings.route53Zone, + route53.alias( + "${aws_cognito_user_pool_domain.saml.cloudfront_distribution_arn}", + "Z2FDTNDATAQYW2" + ) + ) + } + }, 'routetable.tf.json': { - "resource": { - "aws_route_table": { - [regionKeys[i]]: defaultResource + route.routetable(regionKeys[i]) for i in std.range(0, std.length(regionKeys) - 1) + resource: { + aws_route_table: { + [regionKeys[i]]: route.routetable(regionKeys[i]) for i in std.range(0, std.length(regionKeys) - 1) }, - "aws_route_table_association": { + aws_route_table_association: { [settings.regions[regionKeys[i]][azi]]: route.association(regionKeys[i], settings.regions[regionKeys[i]][azi]) for i in std.range(0, std.length(regionKeys) - 1) for azi in std.range(0, std.length(settings.regions[regionKeys[i]]) - 1) }, - "aws_vpc_endpoint_route_table_association": { + aws_vpc_endpoint_route_table_association: { [regionKeys[i]]: route.endpoint(regionKeys[i], "s3-" + regionKeys[i]) for i in std.range(0, std.length(regionKeys) - 1) }, } }, 's3.tf.json': { - "resource": { - "aws_s3_bucket": { - "user_data": defaultResource + s3.bucket( + resource: { + aws_s3_bucket: { + user_data: s3.bucket( "npk-user-data-", std.map( s3.cors_rule, [ "http://localhost" ] + if settings.useCustomDNS then - [ "https://" + i for i in settings.dnsNames.www ] + [ "https://www.%s" % [settings.dnsBaseName] ] else [ "https://${aws_cloudfront_distribution.npk.domain_name}" ] ) ) + { - "lifecycle_rule": { - "enabled": "true", - "expiration": { - "days": 7 + lifecycle_rule: { + enabled: "true", + expiration: { + days: 7 }, - "abort_incomplete_multipart_upload_days": 1 + abort_incomplete_multipart_upload_days: 1 } }, - "static_site": defaultResource + s3.bucket("npk-site-content-"), - "logs": defaultResource + s3.bucket("npk-logs-") + { - "acl": "log-delivery-write" + static_site: s3.bucket("npk-site-content-"), + logs: s3.bucket("npk-logs-") + { + acl: "log-delivery-write" } } }, - "output": { - "s3_static_site_sync_command": { - "value": "aws --profile " + settings.awsProfile + " s3 --region " + settings.defaultRegion + " sync ${path.module}/../site-content/ s3://${aws_s3_bucket.static_site.id}" + output: { + s3_static_site_sync_command: { + value: "aws --profile " + settings.awsProfile + " s3 
--region " + settings.defaultRegion + " sync ${path.module}/../site-content/ s3://${aws_s3_bucket.static_site.id}" } } }, - 's3_policies.tf.json':: { - "data": { - "aws_iam_policy_document": { - "s3_static_site": { - "statement": { - "actions": ["s3:GetObject"], - "resources": ["${aws_s3_bucket.static_site.arn}/*"], - "principals": { - "type": "AWS", - "identifiers": ["${aws_cloudfront_origin_access_identity.npk.iam_arn}"] + 's3_policies.tf.json': { + data: { + aws_iam_policy_document: { + s3_static_site: { + statement: { + actions: ["s3:GetObject"], + resources: ["${aws_s3_bucket.static_site.arn}/*"], + principals: { + type: "AWS", + identifiers: ["${aws_cloudfront_origin_access_identity.npk.iam_arn}"] } } } } }, - "resource": { - "aws_s3_bucket_policy": { - "s3_static_site": { - "bucket": "${aws_s3_bucket.static_site.id}", - "policy": "${data.aws_iam_policy_document.s3_static_site.json}" + resource: { + aws_s3_bucket_policy: { + s3_static_site: { + bucket: "${aws_s3_bucket.static_site.id}", + policy: "${data.aws_iam_policy_document.s3_static_site.json}" } } } }, 'sns.tf.json': { - "resource": { - "aws_sns_topic": { - "critical_events": { - "name": "critical_events" + resource: { + aws_sns_topic: { + critical_events: { + name: "critical_events" } }, - "aws_sns_topic_subscription": { - "critical_events_sms": { - "depends_on": ["aws_cloudfront_distribution.npk"], + aws_sns_topic_subscription: { + critical_events_sms: { + depends_on: ["aws_cloudfront_distribution.npk"], - "topic_arn": "${aws_sns_topic.critical_events.arn}", - "protocol": "sms", - "endpoint": settings.criticalEventsSMS + topic_arn: "${aws_sns_topic.critical_events.arn}", + protocol: "sms", + endpoint: settings.criticalEventsSMS } } } }, 'sqs.tf.json': { - "resource": { - "aws_sqs_queue": { - "api_handler_dlq": { - "name": "api_handler_dlq" + resource: { + aws_sqs_queue: { + api_handler_dlq: { + name: "api_handler_dlq" }, - "status_reporter_dlq": { - "name": "status_reporter_dlq" + status_reporter_dlq: { + name: "status_reporter_dlq" } } } }, 'subnet.tf.json': { - "resource": { - "aws_subnet": { + resource: { + aws_subnet: { [settings.regions[regionKeys[i]][azi]]: subnet(regionKeys[i], settings.regions[regionKeys[i]][azi], azi) for i in std.range(0, std.length(regionKeys) - 1) for azi in std.range(0, std.length(settings.regions[regionKeys[i]]) - 1) @@ -356,28 +797,28 @@ local regionKeys = std.objectFields(settings.regions); } }, 'templates.tf.json': { - "data": templates.data(settings), - "resource": templates.resource + data: templates.data(settings), + resource: templates.resource }, 'template-inject_api_handler.json': { [regionKeys[i]]: templates.az(settings.regions[regionKeys[i]]) for i in std.range(0, std.length(regionKeys) - 1) }, 'variables.tf.json': { - "variable": variables.variables(settings) + { - "profile": { "default": settings.awsProfile }, - "region": { "default": settings.defaultRegion }, - "campaign_data_ttl": { "default": settings.campaign_data_ttl }, - "campaign_max_price": { "default": settings.campaign_max_price }, - "useSAML": { "default": settings.useSAML } + variable: variables.variables(settings) + { + profile: { default: settings.awsProfile }, + region: { default: settings.defaultRegion }, + campaign_data_ttl: { default: settings.campaign_data_ttl }, + campaign_max_price: { default: settings.campaign_max_price }, + useSAML: { default: settings.useSAML } } }, 'vpc.tf.json': { - "resource": { - "aws_vpc": { + resource: { + aws_vpc: { [regionKeys[i]]: vpc.vpc(regionKeys[i], i) for i in std.range(0, 
std.length(regionKeys) - 1) }, - "aws_vpc_endpoint": { + aws_vpc_endpoint: { ["s3-" + regionKeys[i]]: vpc.endpoint(regionKeys[i]) for i in std.range(0, std.length(regionKeys) - 1) }, } From e1247b8e4c428670daae6faf661092716fa03952 Mon Sep 17 00:00:00 2001 From: Brad Woodward Date: Sun, 5 Sep 2021 17:42:31 -0600 Subject: [PATCH 007/128] Migrate to Terraform 0.15, break up API. --- .gitignore | 4 +- .../angular/controllers/npkMainCtrl.js | 6 +- terraform/deploy.sh | 120 ++-- terraform/jsonnet/backend.libsonnet | 2 +- terraform/jsonnet/cognito_iam_roles.libsonnet | 228 +++---- terraform/jsonnet/lambda.libsonnet | 2 +- terraform/jsonnet/routetable.libsonnet | 5 +- .../lambda_functions/create_campaign/main.js | 555 ++++++++++++++++++ .../package-lock.json | 0 .../package.json | 3 +- .../lambda_functions/delete_campaign/main.js | 4 + .../lambda_functions/execute_campaign/main.js | 4 + .../lambda_functions/get_campaign/main.js | 323 ---------- .../get_campaign/package.json | 15 - .../lambda_functions/list_campaigns/main.js | 323 ---------- .../list_campaigns/package-lock.json | 102 ---- terraform/quota_warning_accepted | 0 terraform/terraform.jsonnet | 178 +++--- 18 files changed, 849 insertions(+), 1025 deletions(-) create mode 100644 terraform/lambda_functions/create_campaign/main.js rename terraform/lambda_functions/{get_campaign => create_campaign}/package-lock.json (100%) rename terraform/lambda_functions/{list_campaigns => create_campaign}/package.json (87%) delete mode 100644 terraform/lambda_functions/get_campaign/main.js delete mode 100644 terraform/lambda_functions/get_campaign/package.json delete mode 100644 terraform/lambda_functions/list_campaigns/main.js delete mode 100644 terraform/lambda_functions/list_campaigns/package-lock.json create mode 100644 terraform/quota_warning_accepted diff --git a/.gitignore b/.gitignore index eeabd48..5310fe7 100644 --- a/.gitignore +++ b/.gitignore @@ -16,6 +16,7 @@ terraform/regions.json terraform/*.tf.json terraform-selfhost/*.tf.json terraform/lambda_functions/zip_files/* +terraform/lambda_functions/*/ENVVARS terraform/lambda_functions/proxy_api_handler/api_handler_variables.js terraform/lambda_functions/status_reporter/npk_settings.js terraform/lambda_functions/spot_monitor/npk_settings.js @@ -28,4 +29,5 @@ tools/wordlists/* terraform-selfhost/upload_npkfile.sh terraform-selfhost/upload_npkcomponents.sh terraform-selfhost/sync_npkcomponents.sh -quickdeployed \ No newline at end of file +quickdeployed +quotas \ No newline at end of file diff --git a/site-content/angular/controllers/npkMainCtrl.js b/site-content/angular/controllers/npkMainCtrl.js index 0172766..6954cda 100755 --- a/site-content/angular/controllers/npkMainCtrl.js +++ b/site-content/angular/controllers/npkMainCtrl.js @@ -1582,8 +1582,9 @@ angular $scope.campaignId = data.campaignId; $scope.$digest(); - - }).fail((data) => { + }) + .fail((data) => { + $('#orderResponseModal').modal('hide'); $scope.submittingOrder = false; var response = {}; @@ -1594,7 +1595,6 @@ angular response = {msg: "Unable to parse response as JSON", success: false}; } - $('#orderResponseModal').modal('hide'); $scope.orderErrors = [response.msg]; $scope.orderWarnings = []; $scope.$digest(); diff --git a/terraform/deploy.sh b/terraform/deploy.sh index 7f9eb30..fdfc3e2 100755 --- a/terraform/deploy.sh +++ b/terraform/deploy.sh @@ -37,18 +37,18 @@ if [[ ! 
-f $(which terraform) ]]; then echo "Error: Must have Terraform installed."; fi -if [[ $($TERBIN -v | grep -c "Terraform v0.11") != 1 ]]; then +if [[ $($TERBIN -v | grep -c "Terraform v0.15") != 1 ]]; then ERR=1; - echo "Error: Wrong version of Terraform is installed. NPK requires Terraform v0.11."; + echo "Error: Wrong version of Terraform is installed. NPK requires Terraform v0.15."; echo "-> Note: A non-default binary can be specified as a positional script parameter:" - echo "-> e.g: ./deploy-selfhost.sh " + echo "-> e.g: ./deploy.sh " echo "" fi if [[ -f $(which snap) ]]; then if [[ $(snap list | grep $TERBIN | wc -l) -ne 0 ]]; then ERR=1; - echo "Error: Terraform cannot be installed via snap. Download the v0.11 binary manually and place it in your path." + echo "Error: Terraform cannot be installed via snap. Download the v0.15 binary manually and place it in your path." fi if [[ $(snap list | grep jsonnet | wc -l) -ne 0 ]]; then @@ -106,77 +106,79 @@ fi echo "[*] Checking account quotas..." -PQUOTA=$(aws service-quotas list-service-quotas --service-code ec2 | jq '.Quotas[] | select(.QuotaCode == "L-7212CCBC") | .Value') -GQUOTA=$(aws service-quotas list-service-quotas --service-code ec2 | jq '.Quotas[] | select(.QuotaCode == "L-3819A6DF") | .Value') +if [[ ! -f quotas.json ]]; then -if [[ $PQUOTA -eq 0 ]]; then - PQUOTA=384 -fi + PQUOTA=$(aws service-quotas list-service-quotas --service-code ec2 | jq '.Quotas[] | select(.QuotaCode == "L-7212CCBC") | .Value') + GQUOTA=$(aws service-quotas list-service-quotas --service-code ec2 | jq '.Quotas[] | select(.QuotaCode == "L-3819A6DF") | .Value') -if [[ $GQUOTA -eq 0 ]]; then - GQUOTA=384 -fi + if [[ $PQUOTA -eq 0 ]]; then + PQUOTA=384 + fi -QUOTAERR=0 -if [[ $PQUOTA -lt 16 ]]; then - QUOTAERR=1 - echo "The target account is limited to fewer than 384 vCPUs in us-west-2 for P-type instances." - echo "-> Current limit: $PQUOTA" - echo "" -fi + if [[ $GQUOTA -eq 0 ]]; then + GQUOTA=384 + fi -if [[ $GQUOTA -lt 16 ]]; then - QUOTAERR=1 - echo "The target account is limited to fewer than 16 vCPUs in us-west-2 for G-type instances." - echo "-> Current limit: $GQUOTA" - echo "" -fi + QUOTAERR=0 + if [[ $PQUOTA -lt 16 ]]; then + QUOTAERR=1 + echo "The target account is limited to fewer than 384 vCPUs in us-west-2 for P-type instances." + echo "-> Current limit: $PQUOTA" + echo "" + fi -if [[ $QUOTAERR -eq 1 ]]; then - echo "You cannot proceed without increasing your limits." - echo "-> A limit of at least 16 is required for minimal capacity." - echo "-> A limit of 384 is required for full capacity." - echo "" - exit 1 -fi + if [[ $GQUOTA -lt 16 ]]; then + QUOTAERR=1 + echo "The target account is limited to fewer than 16 vCPUs in us-west-2 for G-type instances." + echo "-> Current limit: $GQUOTA" + echo "" + fi -QUOTAWARN=0 -if [[ $PQUOTA -lt 384 ]]; then - QUOTAWARN=1 - echo "The target account is limited to fewer than 384 vCPUs in us-west-2 for P-type instances." - echo "-> Current limit: $PQUOTA" - echo "" -fi + if [[ $QUOTAERR -eq 1 ]]; then + echo "You cannot proceed without increasing your limits." + echo "-> A limit of at least 16 is required for minimal capacity." + echo "-> A limit of 384 is required for full capacity." + echo "" + exit 1 + fi -if [[ $GQUOTA -lt 384 ]]; then - QUOTAWARN=1 - echo "The target account is limited to fewer than 384 vCPUs in us-west-2 for G-type instances." 
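(Aside on the quota handling reworked in this hunk: the whole check is now wrapped in `if [[ ! -f quotas.json ]]` and the detected limits are cached via the `jq -n ... > quotas.json` line further down, so later runs skip the service-quotas lookups. Since `--arg` always binds strings, the cached file would look roughly like the sketch below; the numbers are placeholders, not output from a real account.)

	{
	  "pquota": "384",
	  "gquota": "384"
	}
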
- echo "-> Current limit: $GQUOTA" - echo "" -fi + QUOTAWARN=0 + if [[ $PQUOTA -lt 384 ]]; then + QUOTAWARN=1 + echo "The target account is limited to fewer than 384 vCPUs in us-west-2 for P-type instances." + echo "-> Current limit: $PQUOTA" + echo "" + fi -if [[ $QUOTAWARN -eq 1 ]]; then - echo "1. Attempting to create campaigns in excess of these limits will fail". - echo "2. The UI will not prevent you from requesting campaigns in excess of these limits." - echo "3. The UI does not yet indicate when requests fail due to exceeded limits." - echo "" - echo "tl;dr: You can ignore this warning, but probably don't." - echo "" - read -r -p " Do you understand? [Yes]: " key + if [[ $GQUOTA -lt 384 ]]; then + QUOTAWARN=1 + echo "The target account is limited to fewer than 384 vCPUs in us-west-2 for G-type instances." + echo "-> Current limit: $GQUOTA" + echo "" + fi - if [[ "$key" != "Yes" ]]; then - echo "You must accept the campaign size warning with 'Yes' in order to continue." + if [[ $QUOTAWARN -eq 1 ]]; then + echo "1. Attempting to create campaigns in excess of these limits will fail". + echo "2. The UI will not prevent you from requesting campaigns in excess of these limits." + echo "3. The UI does not yet indicate when requests fail due to exceeded limits." echo "" + echo "tl;dr: You can ignore this warning, but probably don't." + echo "" + read -r -p " Do you understand? [Yes]: " key - exit 1 + if [[ "$key" != "Yes" ]]; then + echo "You must accept the campaign size warning with 'Yes' in order to continue." + echo "" + + exit 1 + fi + + jq -n --arg PQUOTA "$PQUOTA" --arg GQUOTA "$GQUOTA" '{pquota: $PQUOTA, gquota: $GQUOTA}' > quotas.json fi fi - echo "[*] Preparing to deploy NPK." -jq -n --arg PQUOTA "$PQUOTA" --arg GQUOTA "$GQUOTA" '{pquota: $PQUOTA, gquota: $GQUOTA}' > quotas.json - # Get the availability zones for each region if [ ! 
-f regions.json ]; then echo "[*] Getting availability zones from AWS" diff --git a/terraform/jsonnet/backend.libsonnet b/terraform/jsonnet/backend.libsonnet index fd31882..dde85e2 100644 --- a/terraform/jsonnet/backend.libsonnet +++ b/terraform/jsonnet/backend.libsonnet @@ -3,7 +3,7 @@ local backend(settings) = { backend: { s3: { bucket: settings.backend_bucket, - key: "c6fc.io/npk/terraform.tfstate", + key: "c6fc.io/npk-2.5/terraform.tfstate", profile: settings.awsProfile, region: settings.defaultRegion } diff --git a/terraform/jsonnet/cognito_iam_roles.libsonnet b/terraform/jsonnet/cognito_iam_roles.libsonnet index 620296c..0ea4976 100644 --- a/terraform/jsonnet/cognito_iam_roles.libsonnet +++ b/terraform/jsonnet/cognito_iam_roles.libsonnet @@ -1,9 +1,9 @@ { - "resource": { - "aws_iam_role": { - "cognito_admins": { - "name_prefix": "cognito_admin_role_", - "assume_role_policy": '{"Version": "2012-10-17","Statement": [{ + resource: { + aws_iam_role: { + cognito_admins: { + name_prefix: "cognito_admin_role_", + assume_role_policy: '{"Version": "2012-10-17","Statement": [{ "Effect": "Allow", "Principal": {"Federated": "cognito-identity.amazonaws.com"}, "Action": "sts:AssumeRoleWithWebIdentity", @@ -12,9 +12,9 @@ "ForAnyValue:StringLike": {"cognito-identity.amazonaws.com:amr": "authenticated"} }}]}' }, - "cognito_authenticated": { - "name_prefix": "cognito_authenticated_role_", - "assume_role_policy": '{"Version": "2012-10-17","Statement": [{ + cognito_authenticated: { + name_prefix: "cognito_authenticated_role_", + assume_role_policy: '{"Version": "2012-10-17","Statement": [{ "Effect": "Allow", "Principal": {"Federated": "cognito-identity.amazonaws.com"}, "Action": "sts:AssumeRoleWithWebIdentity", @@ -23,54 +23,54 @@ "ForAnyValue:StringLike": {"cognito-identity.amazonaws.com:amr": "authenticated"} }}]}' }, - "cognito_unauthenticated": { - "name_prefix": "cognito_unauthenticated_role_", - "assume_role_policy": '{"Version": "2012-10-17","Statement": [{ + cognito_unauthenticated: { + name_prefix: "cognito_unauthenticated_role_", + assume_role_policy: '{"Version": "2012-10-17","Statement": [{ "Effect": "Allow","Principal": {"Federated": "cognito-identity.amazonaws.com"}, "Action": "sts:AssumeRoleWithWebIdentity"} ]}' }, }, - "aws_iam_role_policy": { - "cognito_admins": { - "name_prefix": "cognito_admins_policy_", - "role": "${aws_iam_role.cognito_admins.id}", - "policy": "${data.aws_iam_policy_document.cognito_admins.json}" + aws_iam_role_policy: { + cognito_admins: { + name_prefix: "cognito_admins_policy_", + role: "${aws_iam_role.cognito_admins.id}", + policy: "${data.aws_iam_policy_document.cognito_admins.json}" }, - "cognito_admins_baseline": { - "name_prefix": "cognito_baseline_policy_", - "role": "${aws_iam_role.cognito_admins.id}", - "policy": "${data.aws_iam_policy_document.cognito_authenticated.json}" + cognito_admins_baseline: { + name_prefix: "cognito_baseline_policy_", + role: "${aws_iam_role.cognito_admins.id}", + policy: "${data.aws_iam_policy_document.cognito_authenticated.json}" }, - "cognito_authenticated": { - "name_prefix": "cognito_authenticated_policy_", - "role": "${aws_iam_role.cognito_authenticated.id}", - "policy": "${data.aws_iam_policy_document.cognito_authenticated.json}" + cognito_authenticated: { + name_prefix: "cognito_authenticated_policy_", + role: "${aws_iam_role.cognito_authenticated.id}", + policy: "${data.aws_iam_policy_document.cognito_authenticated.json}" }, - "cognito_unauthenticated": { - "name_prefix": "cognito_authenticated_policy_", - "role": 
"${aws_iam_role.cognito_unauthenticated.id}", - "policy": "${data.aws_iam_policy_document.cognito_unauthenticated.json}" + cognito_unauthenticated: { + name_prefix: "cognito_authenticated_policy_", + role: "${aws_iam_role.cognito_unauthenticated.id}", + policy: "${data.aws_iam_policy_document.cognito_unauthenticated.json}" } }, - "aws_cognito_identity_pool_roles_attachment": { - "default": { - "identity_pool_id": "${aws_cognito_identity_pool.main.id}", - "roles": { - "authenticated": "${aws_iam_role.cognito_authenticated.arn}", - "unauthenticated": "${aws_iam_role.cognito_unauthenticated.arn}" + aws_cognito_identity_pool_roles_attachment: { + default: { + identity_pool_id: "${aws_cognito_identity_pool.main.id}", + roles: { + authenticated: "${aws_iam_role.cognito_authenticated.arn}", + unauthenticated: "${aws_iam_role.cognito_unauthenticated.arn}" }, - "role_mapping": { - "identity_provider": "${aws_cognito_user_pool.npk.endpoint}:${aws_cognito_user_pool_client.npk.id}", - "ambiguous_role_resolution": "AuthenticatedRole", - "type": "Rules", + role_mapping: { + identity_provider: "${aws_cognito_user_pool.npk.endpoint}:${aws_cognito_user_pool_client.npk.id}", + ambiguous_role_resolution: "AuthenticatedRole", + type: "Rules", - "mapping_rule": [{ - "claim": "cognito:groups", - "match_type": "Contains", - "value": "npk-admins", - "role_arn": "${aws_iam_role.cognito_admins.arn}" + mapping_rule: [{ + claim: "cognito:groups", + match_type: "Contains", + value: "npk-admins", + role_arn: "${aws_iam_role.cognito_admins.arn}" }] } } @@ -79,44 +79,44 @@ data(settings):: local regionKeys = std.objectFields(settings.regions); { - "aws_iam_policy_document": { - "cognito_admins": { - "statement": [{ - "sid": "adminSettings", - "actions": [ + aws_iam_policy_document: { + cognito_admins: { + statement: [{ + sid: "adminSettings", + actions: [ "dynamodb:PutItem", ], - "resources": [ + resources: [ "${aws_dynamodb_table.settings.arn}" ], - "condition": [{ - "test": "ForAllValues:StringEquals", - "variable": "dynamodb:LeadingKeys", + condition: [{ + test: "ForAllValues:StringEquals", + variable: "dynamodb:LeadingKeys", - "values": [ + values: [ "admin" ] }, { - "test": "ForAllValues:StringEquals", - "variable": "dynamodb:Attributes", + test: "ForAllValues:StringEquals", + variable: "dynamodb:Attributes", - "values": [ + values: [ "userid", "keyid", "value", ] }] }, { - "sid": "events", - "actions": [ + sid: "events", + actions: [ "dynamodb:Query", ], - "resources": [ + resources: [ "${aws_dynamodb_table.campaigns.arn}/index/Events" ] }, { - "sid": "cognitoAdmin", - "actions": [ + sid: "cognitoAdmin", + actions: [ "cognito-idp:AdminAddUserToGroup", "cognito-idp:AdminCreateUser", "cognito-idp:AdminDeleteUser", @@ -128,152 +128,152 @@ "cognito-idp:ListUsers", "cognito-idp:ListUsersInGroup" ], - "resources": [ + resources: [ "${aws_cognito_user_pool.npk.arn}" ] }, { - "sid": "cognitoIdentities", - "actions": [ + sid: "cognitoIdentities", + actions: [ "cognito-idp:ListIdentities", "cognito-idp:DescribeIdentity", ], - "resources": [ + resources: [ "${aws_cognito_identity_pool.main.arn}" ] }] }, - "cognito_authenticated": { - "statement": [{ - "sid": "1", - "actions": [ + cognito_authenticated: { + statement: [{ + sid: "1", + actions: [ "cognito-identity:*", "mobileanalytics:PutEvents", "cognito-sync:*", "ec2:describeSpotPriceHistory", "pricing:*" ], - "resources": [ + resources: [ "*" ] },{ - "sid": "2", - "actions": [ + sid: "2", + actions: [ "s3:PutObject" ], - "resources": [ + resources: [ 
"${aws_s3_bucket.user_data.arn}/&{cognito-identity.amazonaws.com:sub}/uploads/*" ] },{ - "sid": "3", - "actions": [ + sid: "3", + actions: [ "s3:GetObject", "s3:ListObjectVersions", "s3:DeleteObject" ], - "resources": [ + resources: [ "${aws_s3_bucket.user_data.arn}/&{cognito-identity.amazonaws.com:sub}", "${aws_s3_bucket.user_data.arn}/&{cognito-identity.amazonaws.com:sub}/*" ] },{ - "sid": "4", - "actions": [ + sid: "4", + actions: [ "s3:ListBucket" ], - "resources": [ + resources: [ "${aws_s3_bucket.user_data.arn}", ], - "condition": [{ - "test": "StringLike", - "variable": "s3:prefix", + condition: [{ + test: "StringLike", + variable: "s3:prefix", - "values": [ + values: [ "&{cognito-identity.amazonaws.com:sub}/", "&{cognito-identity.amazonaws.com:sub}/*" ] }] },{ - "sid": "5", - "actions": [ + sid: "5", + actions: [ "dynamodb:GetItem", "dynamodb:BatchGetItem", "dynamodb:Query" ], - "resources": [ + resources: [ "${aws_dynamodb_table.campaigns.arn}", "${aws_dynamodb_table.settings.arn}" ], - "condition": [{ - "test": "ForAllValues:StringEquals", - "variable": "dynamodb:LeadingKeys", + condition: [{ + test: "ForAllValues:StringEquals", + variable: "dynamodb:LeadingKeys", - "values": [ + values: [ "&{cognito-identity.amazonaws.com:sub}", "admin" ] }] },{ - "sid": "settings", - "actions": [ + sid: "settings", + actions: [ "dynamodb:PutItem", ], - "resources": [ + resources: [ "${aws_dynamodb_table.campaigns.arn}", "${aws_dynamodb_table.settings.arn}" ], - "condition": [{ - "test": "ForAllValues:StringEquals", - "variable": "dynamodb:LeadingKeys", + condition: [{ + test: "ForAllValues:StringEquals", + variable: "dynamodb:LeadingKeys", - "values": [ + values: [ "&{cognito-identity.amazonaws.com:sub}" ] }, { - "test": "ForAllValues:StringEquals", - "variable": "dynamodb:Attributes", + test: "ForAllValues:StringEquals", + variable: "dynamodb:Attributes", - "values": [ + values: [ "userid", "keyid", "value" ] }] },{ - "sid": "6", - "actions": [ + sid: "6", + actions: [ "s3:ListBucket" ], - "resources": [ + resources: [ "${var.dictionary-" + regionKeys[i] + "}" for i in std.range(0, std.length(regionKeys) - 1) ] },{ - "sid": "7", - "actions": [ + sid: "7", + actions: [ "s3:GetObject" ], - "resources": [ + resources: [ "${var.dictionary-" + regionKeys[i] + "}/*" for i in std.range(0, std.length(regionKeys) - 1) ] },{ - "sid": "8", - "actions": [ + sid: "8", + actions: [ "execute-api:Invoke" ], - "resources": [ + resources: [ "${aws_api_gateway_deployment.npk.execution_arn}/*/userproxy/*" ] }] }, - "cognito_unauthenticated": { - "statement": [{ - "sid": "logs", - "actions": [ + cognito_unauthenticated: { + statement: [{ + sid: "logs", + actions: [ "cognito-identity:*", "mobileanalytics:PutEvents", "cognito-sync:*" ], - "resources": [ + resources: [ "*" ] }] diff --git a/terraform/jsonnet/lambda.libsonnet b/terraform/jsonnet/lambda.libsonnet index 335ab2b..9ffb8b1 100644 --- a/terraform/jsonnet/lambda.libsonnet +++ b/terraform/jsonnet/lambda.libsonnet @@ -5,7 +5,7 @@ local lambda_function(name, config, role_policy) = { function_name: name, filename: "./lambda_functions/zip_files/" + name + ".zip", source_code_hash: "${data.archive_file." + name + ".output_base64sha256}", - runtime: "nodejs12.x", + runtime: "nodejs14.x", role: "${aws_iam_role.lambda-" + name + ".arn}", depends_on: ["data.archive_file." 
+ name, "aws_iam_role_policy.lambda-" + name], } diff --git a/terraform/jsonnet/routetable.libsonnet b/terraform/jsonnet/routetable.libsonnet index 03fc1d9..6ca0f37 100644 --- a/terraform/jsonnet/routetable.libsonnet +++ b/terraform/jsonnet/routetable.libsonnet @@ -11,7 +11,10 @@ local routetable(region) = { nat_gateway_id: "", network_interface_id: "", transit_gateway_id: "", - vpc_peering_connection_id: "" + vpc_peering_connection_id: "", + carrier_gateway_id: "", + destination_prefix_list_id: "", + vpc_endpoint_id: "" }] }; diff --git a/terraform/lambda_functions/create_campaign/main.js b/terraform/lambda_functions/create_campaign/main.js new file mode 100644 index 0000000..bc73728 --- /dev/null +++ b/terraform/lambda_functions/create_campaign/main.js @@ -0,0 +1,555 @@ +const aws = require('aws-sdk'); +const uuid = require('uuid/v4'); + +const ddb = new aws.DynamoDB({ region: "us-west-2" }); +const s3 = new aws.S3({ region: "us-west-2" }); + +var cognito = new aws.CognitoIdentityServiceProvider({region: "us-west-2", apiVersion: "2016-04-18"}); + +let cb = ""; +let variables = {}; + +var allowed_regions = [ + "us-west-1", + "us-west-2", + "us-east-1", + "us-east-2" +]; + +var allowed_instances = [ + // "g3s.xlarge", + "g3.4xlarge", + "g3.8xlarge", + "g3.16xlarge", + + "p2.xlarge", + "p2.8xlarge", + "p2.16xlarge", + + "p3.2xlarge", + "p3.8xlarge", + "p3.16xlarge" +]; + +var vcpus = { + "g3.4xlarge": 16, + "g3.8xlarge": 32, + "g3.16xlarge": 64, + + "p2.xlarge": 4, + "p2.8xlarge": 32, + "p2.16xlarge": 64, + + "p3.2xlarge": 8, + "p3.8xlarge": 32, + "p3.16xlarge": 64 +} + +exports.main = async function(event, context, callback) { + + console.log(JSON.stringify(event)); + + // Hand off the callback function for later. + cb = callback; + + // Get the available envvars into a usable format. + variables = JSON.parse(JSON.stringify(process.env)); + variables.availabilityZones = JSON.parse(variables.availabilityZones); + variables.dictionaryBuckets = JSON.parse(variables.dictionaryBuckets); + + let entity, campaign, UserPoolId, Username; + + try { + + console.log("Received event: " + JSON.stringify(event)); + + // Hand off the origin, too. Fix for weird case + origin = event?.headers?.origin ?? event?.headers?.Origin; + + var allowed_characters = /^[a-zA-Z0-9'"%\.\[\]\{\}\(\)\-\:\\\/\;\=\?\#\_+\s,!@#\$\^\*&]+$/; + if (!allowed_characters.test(JSON.stringify(event))) { + console.log("Request contains illegal characters"); + return respond(400, {}, "Request contains illegal characters", false); + } + + if (event?.requestContext?.identity?.cognitoAuthenticationType != "authenticated") { + console.log(`cognitoAuthenticationType ${event?.requestContext?.identity?.cognitoAuthenticationType} != "authenticated"`) + return respond(401, {}, "Authentication Required", false); + } + + entity = event.requestContext.identity.cognitoIdentityId; + + let body = {}; + // Unencode the body if necessary + if (!!event?.body) { + body = (event.requestContext.isBase64Encoded) ? atob(event.body) : event.body; + + // Body will always be a JSON object. + try { + body = JSON.parse(body); + } catch (e) { + return respond(400, {}, "Body must be JSON object", false); + } + } + + campaign = body; + + // Associate the user identity. 
+ [ UserPoolId,, Username ] = event?.requestContext?.identity?.cognitoAuthenticationProvider?.split('/')[2]?.split(':'); + + if (!UserPoolId || !Username) { + console.log(`UserPoolId or Username is missing from ${event?.requestContext?.identity?.cognitoAuthenticationProvider}`); + respond(401, {}, "Authorization Required", false); + } + + } catch (e) { + console.log("Failed to process request.", e); + return respond(500, {}, "Failed to process request.", false); + } + + let user, email; + + try { + user = await cognito.adminGetUser({ UserPoolId, Username }).promise(); + + // Restructure UserAttributes as an k:v + user.UserAttributes = user.UserAttributes.reduce((attrs, entry) => { + attrs[entry.Name] = entry.Value; + + return attrs; + }, {}); + + if (!user?.UserAttributes?.email) { + return respond(401, {}, "Unable to obtain user properties.", false); + } + + email = user.UserAttributes.email; + + } catch (e) { + console.log("Unable to retrieve user context.", e); + return respond(500, {}, "Unable to retrieve user context.", false); + } + + console.log(event.pathParameters) + + // Verify that required elements are present: + const missingElements = [ + "region", + "availabilityZone", + "instanceType", + "hashFile", + "instanceCount", + "instanceDuration", + "priceTarget" + ].reduce((missing, entry) => { + if (!campaign[entry]) { + missing.push[entry]; + } + + return missing; + }, []); + + if (missingElements.length > 0) { + return respond(400, {}, `Campaign missing required elements [${missingElements.join(', ')}]`); + } + + const verifiedManifest = { + rulesFiles: [], + cognitoIdentityId: entity + }; + + if (parseInt(campaign.hashType) < 0 || parseInt(campaign.hashType > 100000)) { + return respond(400, {}, "hashType " + campaign.hashType + " is invalid", false); + } + + verifiedManifest.hashType = campaign.hashType; + + if (parseInt(campaign.instanceCount) < 1 || parseInt(campaign.instanceCount) > 6) { + return respond(400, {}, "instanceCount must be greater than 1", false); + } + + verifiedManifest.instanceCount = campaign.instanceCount; + + if (parseInt(campaign.instanceDuration) < 1 || parseInt(campaign.instanceDuration) > 24) { + return respond(400, {}, "instanceDuration must be between 1 and 24", false); + } + + verifiedManifest.instanceDuration = campaign.instanceDuration; + + if (allowed_regions.indexOf(campaign.region) < 0) { + return respond(400, {}, campaign.region + " is not a valid or allowed region", false); + } + + verifiedManifest.region = campaign.region; + + if (Object.keys(vcpus).indexOf(campaign.instanceType) < 0) { + return respond(400, {}, campaign.instanceType + " is not a valid or allowed instance type.", false); + } + + let quota = 0; + switch (campaign.instanceType.split("")[0]) { + case 'g': + quota = variables.gQuota; + break; + + case 'p': + quota = variables.pQuota; + break; + + default: + return respond(400, {}, "Unable to determine applicable quota for " + campaign.instanceType, false); + break; + } + + const neededVCPUs = vcpus[campaign.instanceType] * parseInt(campaign.instanceCount); + if (quota < neededVCPUs) { + return respond(400, {}, "Order exceeds account quota limits. 
Needs " + neededVCPUs + " but account is limited to " + quota, false); + } + + verifiedManifest.instanceType = campaign.instanceType; + + if (parseFloat(campaign.priceTarget) < 0 || parseFloat(campaign.priceTarget) != campaign.priceTarget) { + return respond(400, {}, "Invalid priceTarget; must be integer greater than 0.", false); + } + + verifiedManifest.priceTarget = campaign.priceTarget; + + let expires; + + try { + expires = /Expires=([\d]+)&/.exec(campaign.hashFileUrl)[1]; + } catch (e) { + return respond(400, {}, "Invalid hashFileUrl.", false); + } + + const duration = expires - (new Date().getTime() / 1000); + if (duration < 900) { + return respond(400, {}, "hashFileUrl must be valid for at least 900 seconds, got " + Math.floor(duration), false); + } + + verifiedManifest.hashFileUrl = campaign.hashFileUrl; + + if (campaign.manualArguments) { + verifiedManifest.manualArguments = campaign.manualArguments; + } + + if (campaign.manualMask) { + if (campaign.mask || campaign.rulesFiles || campaign.dictionaryFile) { + return respond(400, {}, "Manual masks cannot be combined with any other attack type.", false); + } + + verifiedManifest.manualMask = campaign.manualMask; + } + + // Optional values might be present, but nulled. + const promises = []; + const knownMetadata = {}; + + let hashfilelines = 0; + let dictionaryKeyspace = 0; + let dictionarySize = 0; + let rulesKeyspace = 0; + let rulesSize = 0; + let lineCount = 0; + + try { + + // Verify hashfile metadata. + promises.push(new Promise((success, failure) => { + s3.headObject({ + Bucket: variables.userdata_bucket, + Key: entity + '/' + campaign.hashFile + }, function(err, data) { + if (err) { + return failure(respond(400, {}, "Invalid hash file: " + err, false)); + } + + if (data.ContentType != "text/plain") { + return failure(respond(400, {}, "Content Type " + data.ContentType + " not permitted. Use text/plain.", false)); + } + + knownMetadata[bucket + ":" + campaign.dictionaryFile] = data.Metadata; + + return success(); + }); + })); + + // Verify hashfile contents. + promises.push(new Promise((success, failure) => { + s3.getObject({ + Bucket: variables.userdata_bucket, + Key: entity + '/' + campaign.hashFile + }, function(err, data) { + if (err) { + return failure(respond(400, {}, "Invalid hash file contents: " + err, false)); + } + + var body = data.Body.toString('ascii'); + var lines = body.split("\n"); + + lineCount = lines.length; + + verifiedManifest.hashFile = campaign.hashFile; + + return success(); + }); + })); + + if (typeof campaign.rulesFiles != "undefined" && campaign.rulesFiles != null) { + console.log("Debug: Rules are enabled. 
Verifiying files."); + + // Verify that required elements are present: + let missingElements = [ + "rulesFile", + "dictionaryFile" + ].reduce((missing, entry) => { + if (!campaign[entry]) { + missing.push[entry]; + } + + return missing; + }, []); + + if (missingElements.length > 0) { + return respond(400, {}, `Rule-based campaign missing required elements [${missingElements.join(', ')}]`); + } + + s3dict = new aws.S3({region: campaign.region}); + var bucket = variables.dictionaryBuckets[campaign.region]; + + console.log(variables.dictionaryBuckets); + + // Verify dictionary + promises.push(new Promise((success, failure) => { + s3dict.headObject({ + Bucket: bucket, + Key: campaign.dictionaryFile + }, function(err, data) { + if (err) { + return failure(respond(400, {}, "Invalid dictionary file: " + err, false)); + } + + knownMetadata[bucket + ":" + campaign.dictionaryFile] = data.Metadata; + dictionaryKeyspace += data.Metadata.lines; + dictionarySize += parseInt(data.Metadata.size) + parseInt(data.ContentLength); + + verifiedManifest.dictionaryFile = campaign.dictionaryFile; + + console.log("Debug: Dictionary file verified"); + return success(); + }); + })); + + // Verify rule files + campaign.rulesFiles.forEach(function(e) { + promises.push(new Promise((success, failure) => { + s3dict.headObject({ + Bucket: bucket, + Key: e, + }, function(err, data) { + if (err) { + return failure(respond(400, {}, "Invalid rule file: " + err, false)); + } + + knownMetadata[bucket + ":" + e] = data.Metadata; + rulesKeyspace += data.Metadata.lines; + rulesSize += parseInt(data.Metadata.size) + parseInt(data.ContentLength); + + verifiedManifest.rulesFiles.push(e); + + console.log("Debug: Rules files verified"); + return success(); + }); + })); + }); + } + + var maskKeyspace = 1; + if (typeof campaign.mask != "undefined" && campaign.mask != null) { + console.log("Debug: Mask is enabled. Verifying mask."); + campaign.mask.split('?').slice(1).forEach(function(e) { + switch (e) { + case "l": + maskKeyspace *= 26; + break; + + case "u": + maskKeyspace *= 26; + break; + + case "d": + maskKeyspace *= 10; + break; + + case "s": + maskKeyspace *= 33; + break; + + case "a": + maskKeyspace *= 95; + break; + + case "b": + maskKeyspace *= 256; + break; + + default: + return respond(400, {}, "Invalid mask provided", false); + break; + } + }); + + verifiedManifest.mask = campaign.mask; + } + + await Promise.all(promises).then((data) => { + console.log("Debug: All promises returned."); + + // Compare the manifest with the verifiedManifest, and return any values that weren't processed. + + Object.keys(verifiedManifest).forEach(function(e) { + delete campaign[e]; + }); + + console.log("Debug: Processing complete. The following parameters from the campaign were not used."); + console.log(campaign); + + var wordlistKeyspace = ((dictionaryKeyspace > 0) ? dictionaryKeyspace : 1) * ((rulesKeyspace > 0) ? 
rulesKeyspace : 1); + + console.log("d: " + dictionarySize); + console.log("r: " + rulesSize); + var wordlistSize = dictionarySize + rulesSize; + var totalKeyspace = wordlistKeyspace * maskKeyspace; + + verifiedManifest.wordlistSize = wordlistSize; + + if (typeof verifiedManifest.dictionaryFile != "undefined") { + if (typeof verifiedManifest.mask != "undefined") { + verifiedManifest.attackType = 6; + } else { + verifiedManifest.attackType = 0; + } + } else { + if (typeof verifiedManifest.mask == "undefined" && typeof verifiedManifest.manualMask == "undefined") { + return respond(400, {}, "Must have either dictionary or mask defined", false); + } + + verifiedManifest.attackType = 3; + } + + if (typeof verifiedManifest.rulesFiles != "undefined" && verifiedManifest.rulesFiles.length > 0 && typeof verifiedManifest.manualMask == "undefined") { + verifiedManifest.attackType = 0; + } + + if (typeof verifiedManifest.attackType == "undefined") { + return respond(500, {}, "Hit an impossible combination of attack types. Exiting.", false); + } + }); + } catch (e) { + console.log("Campaign creation failed after validation.", e); + return respond(500, {}, "Campaign creation failed after validation.", false) + } + + const campaignId = uuid(); + let putManifest, editCampaign; + + try { + + putManifest = await s3.putObject({ + Body: JSON.stringify(verifiedManifest), + Bucket: variables.userdata_bucket, + Key: entity + '/campaigns/' + campaignId + '/manifest.json', + ContentType: 'text/plain' + }).promise(); + + } catch (e) { + console.log("Failed to place manifest file.", e); + return respond(500, {}, "Failed to place manifest file.", false) + } + + try { + const updateParams = aws.DynamoDB.Converter.marshall({ + instanceType: verifiedManifest.instanceType, + status: "AVAILABLE", + active: false, + durationSeconds: verifiedManifest.instanceDuration * 3600, + hashType: verifiedManifest.hashType, + hashes: lineCount, + instanceCount: verifiedManifest.instanceCount, + price: 0, + targetPrice: verifiedManifest.priceTarget, + region: verifiedManifest.region, + startTime: Math.floor(new Date().getTime() / 1000), + spotFleetRequestId: "", + cognitoUserEmail: email, + deleted: false + }); + + const updateCampaign = await ddb.updateItem({ + Key: { + userid: {S: entity}, + keyid: {S: `campaigns:${campaign}`} + }, + TableName: "Campaigns", + AttributeUpdates: Object.keys(updateParams).reduce((attrs, entry) => { + attrs[entry] = { + Action: "PUT", + Value: updateParams[entry] + }; + + return attrs; + }, {}) + }).promise(); + } catch (e) { + console.log("Failed to update campaign record.", e); + return respond(500, {}, "Failed to update campaign record.", false) + } + + console.log("=================================================================================================="); + console.log("====== Campaign " + campaignId + " created. Preparing to execute! ======"); + console.log("=================================================================================================="); + + return respond(201, {}, {campaignId: campaignId}, true); +} + +function respond(statusCode, headers, body, success) { + + // Include terraform dns names as allowed origins, as well as localhost. + const allowed_origins = [variables.www_dns_names]; + allowed_origins.push("https://localhost"); + + headers['Content-Type'] = 'text/plain'; + + if (allowed_origins.indexOf(origin) !== false) { + // Echo the origin back. 
I guess this is the best way to support multiple origins + headers['Access-Control-Allow-Origin'] = origin; + } else { + console.log("Invalid origin received.", origin); + } + + switch (typeof body) { + case "string": + body = { msg: body, success: success }; + break; + + case "object": + body.success = success; + break; + } + + const response = { + statusCode: statusCode, + headers: headers, + body: JSON.stringify(body), + } + + console.log(JSON.stringify(response)); + + cb(null, response); + + if (success == true) { + return Promise.resolve(body.msg); + } else { + return Promise.reject(body.msg); + } +} \ No newline at end of file diff --git a/terraform/lambda_functions/get_campaign/package-lock.json b/terraform/lambda_functions/create_campaign/package-lock.json similarity index 100% rename from terraform/lambda_functions/get_campaign/package-lock.json rename to terraform/lambda_functions/create_campaign/package-lock.json diff --git a/terraform/lambda_functions/list_campaigns/package.json b/terraform/lambda_functions/create_campaign/package.json similarity index 87% rename from terraform/lambda_functions/list_campaigns/package.json rename to terraform/lambda_functions/create_campaign/package.json index bd0b67b..aa0a890 100644 --- a/terraform/lambda_functions/list_campaigns/package.json +++ b/terraform/lambda_functions/create_campaign/package.json @@ -4,7 +4,8 @@ "description": "NPK execute_campaign Lambda", "main": "main.js", "dependencies": { - "aws-sdk": "^2.599.0" + "aws-sdk": "^2.599.0", + "uuid": "^3.3.2" }, "devDependencies": {}, "scripts": { diff --git a/terraform/lambda_functions/delete_campaign/main.js b/terraform/lambda_functions/delete_campaign/main.js index 4951c00..3a91d7c 100644 --- a/terraform/lambda_functions/delete_campaign/main.js +++ b/terraform/lambda_functions/delete_campaign/main.js @@ -7,6 +7,8 @@ let variables = {}; exports.main = async function(event, context, callback) { + console.log(JSON.stringify(event)); + // Hand off the callback function for later. cb = callback; @@ -31,6 +33,8 @@ exports.main = async function(event, context, callback) { return respond(401, {}, "Authentication Required", false); } + const entity = event.requestContext.identity.cognitoIdentityId; + var body = {}; // Unencode the body if necessary if (!!event?.body) { diff --git a/terraform/lambda_functions/execute_campaign/main.js b/terraform/lambda_functions/execute_campaign/main.js index 4951c00..3a91d7c 100644 --- a/terraform/lambda_functions/execute_campaign/main.js +++ b/terraform/lambda_functions/execute_campaign/main.js @@ -7,6 +7,8 @@ let variables = {}; exports.main = async function(event, context, callback) { + console.log(JSON.stringify(event)); + // Hand off the callback function for later. 
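	// (respond() below finishes the invocation through this module-scoped cb reference.)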
cb = callback; @@ -31,6 +33,8 @@ exports.main = async function(event, context, callback) { return respond(401, {}, "Authentication Required", false); } + const entity = event.requestContext.identity.cognitoIdentityId; + var body = {}; // Unencode the body if necessary if (!!event?.body) { diff --git a/terraform/lambda_functions/get_campaign/main.js b/terraform/lambda_functions/get_campaign/main.js deleted file mode 100644 index 4951c00..0000000 --- a/terraform/lambda_functions/get_campaign/main.js +++ /dev/null @@ -1,323 +0,0 @@ -const aws = require('aws-sdk'); -const ddb = new aws.DynamoDB({ region: "us-west-2" }); -const s3 = new aws.S3({ region: "us-west-2" }); - -let cb = ""; -let variables = {}; - -exports.main = async function(event, context, callback) { - - // Hand off the callback function for later. - cb = callback; - - // Get the available envvars into a usable format. - variables = JSON.parse(JSON.stringify(process.env)); - - try { - - console.log("Received event: " + JSON.stringify(event)); - - // Hand off the origin, too. Fix for weird case - origin = event?.headers?.origin ?? event?.headers?.Origin; - - var allowed_characters = /^[a-zA-Z0-9'"%\.\[\]\{\}\(\)\-\:\\\/\;\=\?\#\_+\s,!@#\$\^\*&]+$/; - if (!allowed_characters.test(JSON.stringify(event))) { - console.log("Request contains illegal characters"); - return respond(400, {}, "Request contains illegal characters", false); - } - - if (event?.requestContext?.identity?.cognitoAuthenticationType != "authenticated") { - console.log(`cognitoAuthenticationType ${event?.requestContext?.identity?.cognitoAuthenticationType} != "authenticated"`) - return respond(401, {}, "Authentication Required", false); - } - - var body = {}; - // Unencode the body if necessary - if (!!event?.body) { - body = (event.requestContext.isBase64Encoded) ? atob(event.body) : event.body; - - // Body will always be a JSON object. - try { - body = JSON.parse(body); - } catch (e) { - return respond(400, "Body must be JSON object", false); - } - } - - // Associate the user identity. - const [ UserPoolId,, Username ] = event?.requestContext?.identity?.cognitoAuthenticationProvider?.split('/')[2]?.split(':'); - - if (!UserPoolId || !Username) { - console.log(`UserPoolId or Username is missing from ${event?.requestContext?.identity?.cognitoAuthenticationProvider}`); - respond(401, "Authorization Required"); - } - - } catch (e) { - console.log("Failed to process request.", e); - return respond(500, {}, "Failed to process request.", false); - } - - try { - const user = await cognito.adminGetUser({ UserPoolId, Username }).promise(); - - // Restructure UserAttributes as an k:v - user.UserAttributes = user.UserAttributes.reduce((attrs, entry) => { - attrs[entry.Name] = entry.Value - }, {}); - - if (!user?.UserAttributes?.email) { - return respond(401, {}, "Unable to obtain user properties.", false); - } - - } catch (e) { - console.log("Unable to retrieve user context.", e); - return respond(500, {}, "Unable to retrieve user context.", false); - } - - console.log(event.pathParameters) - - const campaignId = event?.pathParameters?.campaign; - - // Get the campaign entry from DynamoDB, and manifest from S3. - // * In parallel, to save, like, some milliseconds. 
- - try { - const [campaign, manifestObject] = await Promise.all([ - ddb.query({ - ExpressionAttributeValues: { - ':id': {S: entity}, - ':keyid': {S: `campaigns:${campaignId}`} - }, - KeyConditionExpression: 'userid = :id and keyid = :keyid', - TableName: "Campaigns" - }).promise(), - - s3.getObject({ - Bucket: variables.userdata_bucket, - Key: `${entity}/campaigns/${campaign}/manifest.json` - }).promise() - ]); - - const manifest = JSON.parse(manifestObject.Body.toString('ascii')); - } catch (e) { - console.log("Failed to retrieve campaign details.", e); - return respond(500, {}, "Failed to retrieve campaign details."); - } - - if (campaign.Items?.[0]?.status?.S != "AVAILABLE") { - return respond(404, {}, "Campaign doesn't exist or is not in 'AVAILABLE' status."); - } - - console.log(campaign, manifest); - - // Test whether the provided presigned URL is expired. - - try { - var expires = /Expires=([\d]+)&/.exec(manifest.hashFileUrl)[1]; - } catch (e) { - return respond(400, "Invalid hashFileUrl; missing expiration"); - } - - var duration = expires - (new Date().getTime() / 1000); - if (duration < 900) { - return respond(400, {} `hashFileUrl must be valid for at least 900 seconds, got ${Math.floor(duration)}`); - } - - // Campaign is valid. Get AZ pricing and Image AMI - // * Again in parallel, to save, like, some more milliseconds. - - try { - const ec2 = new aws.EC2({region: manifest.region}); - const [pricing, image] = await Promise.all([ - ec2.describeSpotPriceHistory({ - EndTime: Math.round(Date.now() / 1000), - ProductDescriptions: [ "Linux/UNIX (Amazon VPC)" ], - InstanceTypes: [ manifest.instanceType ], - StartTime: Math.round(Date.now() / 1000) - }), - - ec2.describeImages({ - Filters: [{ - Name: "virtualization-type", - Values: ["hvm"] - },{ - Name: "name", - Values: ["amzn2-ami-graphics-hvm-2*"] - },{ - Name: "root-device-type", - Values: ["ebs"] - },{ - Name: "owner-id", - Values: ["679593333241"] - }] - }) - ]); - } catch (e) { - console.log("Failed to retrieve price and image details.", e); - return respond(500, {}, "Failed to retrieve price and image details."); - } - - try { - - // Calculate the necessary volume size - - const volumeSize = Math.ceil(manifest.wordlistSize / 1073741824) + 1; - console.log(`Wordlist is ${manifest.wordlistSize / 1073741824}GiB. Allocating ${volumeSize}GiB`); - - // Build a launchSpecification for each AZ in the target region. - - const instance_userdata = new Buffer(fs.readFileSync(__dirname + '/userdata.sh', 'utf-8') - .replace("{{APIGATEWAY}}", process.env.apigateway)) - .toString('base64'); - - const launchSpecificationTemplate = { - IamInstanceProfile: { - Arn: variables.instanceProfile - }, - ImageId: image.ImageId, - KeyName: "npk-key", - InstanceType: manifest.instanceType, - BlockDeviceMappings: [{ - DeviceName: '/dev/xvdb', - Ebs: { - DeleteOnTermination: true, - Encrypted: false, - VolumeSize: volumeSize, - VolumeType: "gp2" - } - }], - NetworkInterfaces: [{ - AssociatePublicIpAddress: true, - DeviceIndex: 0, - // SubnetId: Gets populated below. - }], - Placement: { - // AvailabilityZone: Gets populated below. - }, - TagSpecifications: [{ - ResourceType: "instance", - Tags: [{ - Key: "MaxCost", - Value: ((manifest.priceTarget < variables.campaign_max_price) ? 
manifest.priceTarget : variables.campaign_max_price).toString() - }, { - Key: "ManifestPath", - Value: `${entity}/campaigns/${campaignId}` - }] - }], - UserData: instance_userdata - }; - - // Create a copy of the launchSpecificationTemplate for each AvailabilityZone in the campaign's region. - - const launchSpecifications = Object.keys(variables.availabilityZones[manifest.region]).reduce((specs, entry) => { - const az = JSON.parse(JSON.stringify(launchSpecificationTemplate)); // Have to deep-copy to avoid referential overrides. - - az.Placement.AvailabilityZone = entry; - az.NetworkInterfaces[0].SubnetId = variables.availabilityZones[manifest.region][entry]; - - return specs.concat(az); - }, []); - - // Get the average spot price across all AZs in the region. - const spotPrice = pricing.reduce((average, entry) => average + (entry / pricing.length), 0); - const maxDuration = (Number(manifest.instanceDuration) < variables.campaign_max_price / spotPrice) ? Number(manifest.instanceDuration) : variables.campaign_max_price / spotPrice; - - console.log(spotPrice, maxDuration, variables.campaign_max_price); - - const spotFleetParams = { - SpotFleetRequestConfig: { - AllocationStrategy: "lowestPrice", - IamFleetRole: variables.iamFleetRole, - InstanceInterruptionBehavior: "terminate", - LaunchSpecifications: launchSpecifications, - SpotPrice: (manifest.priceTarget / (manifest.instanceCount * manifest.instanceDuration) * 2).toString(), - TargetCapacity: manifest.instanceCount, - ReplaceUnhealthyInstances: false, - TerminateInstancesWithExpiration: true, - Type: "request", - ValidFrom: (new Date().getTime() / 1000), - ValidUntil: (new Date().getTime() / 1000) + (maxDuration * 3600) - } - }; - - console.log(JSON.stringify(spotFleetParams)); - } catch (e) { - console.log("Failed to generate launch specifications.", e); - return respond(500, {}, "Failed to generate launch specifications."); - } - - try { - const spotFleetRequest = await ec2.requestSpotFleet(spotFleetParams).promise(); - } catch (e) { - console.log("Failed to request spot fleet.", e); - return respond(500, {}, "Failed to request spot fleet."); - } - - // Campaign created successfully. - - console.log(`Successfully requested spot fleet ${spotFleetRequest.SpotFleetRequestId}`); - - try { - const updateCampaign = await ddb.updateItem({ - Key: { - userid: {S: entity}, - keyid: {S: `campaigns:${campaign}`} - }, - TableName: "Campaigns", - AttributeUpdates: { - active: { Action: "PUT", Value: { BOOL: true }}, - status: { Action: "PUT", Value: { S: "STARTING" }}, - spotFleetRequestId: { Action: "PUT", Value: { S: data.SpotFleetRequestId }}, - startTime: { Action: "PUT", Value: { N: Math.floor(new Date().getTime() / 1000) }}, - eventType: { Action: "PUT", Value: { S: "CampaignStarted" }} - } - }).promise(); - } catch (e) { - console.log("Spot fleet submitted, but failed to mark Campaign as 'STARTING'. This is a catastrophic error.", e); - return respond(500, {}, "Spot fleet submitted, but failed to mark Campaign as 'STARTING'. This is a catastrophic error.", false); - } - - return respond(200, {}, { msg: "Campaign started successfully", campaignId: campaignId, spotFleetRequestId: spotFleetRequest.SpotFleetRequestId }, true); -} - -function respond(statusCode, headers, body, success) { - - // Include terraform dns names as allowed origins, as well as localhost. 
- const allowed_origins = JSON.parse(variables.www_dns_names); - allowed_origins.push("https://localhost"); - - headers['Content-Type'] = 'text/plain'; - - if (allowed_origins.indexOf(origin) !== false) { - // Echo the origin back. I guess this is the best way to support multiple origins - headers['Access-Control-Allow-Origin'] = origin; - } else { - console.log("Invalid origin received.", origin); - } - - switch (typeof body) { - case "string": - body = { msg: body, success: success }; - break; - - case "object": - body.success = success; - break; - } - - const response = { - statusCode: statusCode, - headers: headers, - body: JSON.stringify(body), - } - - console.log(JSON.stringify(response)); - - cb(null, response); - - if (success == true) { - return Promise.resolve(body.msg); - } else { - return Promise.reject(body.msg); - } -} \ No newline at end of file diff --git a/terraform/lambda_functions/get_campaign/package.json b/terraform/lambda_functions/get_campaign/package.json deleted file mode 100644 index bd0b67b..0000000 --- a/terraform/lambda_functions/get_campaign/package.json +++ /dev/null @@ -1,15 +0,0 @@ -{ - "name": "execute_campaign", - "version": "1.0.0", - "description": "NPK execute_campaign Lambda", - "main": "main.js", - "dependencies": { - "aws-sdk": "^2.599.0" - }, - "devDependencies": {}, - "scripts": { - "test": "echo \"Error: no test specified\" && exit 1" - }, - "author": "Brad Woodward (brad@bradwoodward.io)", - "license": "MIT" -} diff --git a/terraform/lambda_functions/list_campaigns/main.js b/terraform/lambda_functions/list_campaigns/main.js deleted file mode 100644 index 4951c00..0000000 --- a/terraform/lambda_functions/list_campaigns/main.js +++ /dev/null @@ -1,323 +0,0 @@ -const aws = require('aws-sdk'); -const ddb = new aws.DynamoDB({ region: "us-west-2" }); -const s3 = new aws.S3({ region: "us-west-2" }); - -let cb = ""; -let variables = {}; - -exports.main = async function(event, context, callback) { - - // Hand off the callback function for later. - cb = callback; - - // Get the available envvars into a usable format. - variables = JSON.parse(JSON.stringify(process.env)); - - try { - - console.log("Received event: " + JSON.stringify(event)); - - // Hand off the origin, too. Fix for weird case - origin = event?.headers?.origin ?? event?.headers?.Origin; - - var allowed_characters = /^[a-zA-Z0-9'"%\.\[\]\{\}\(\)\-\:\\\/\;\=\?\#\_+\s,!@#\$\^\*&]+$/; - if (!allowed_characters.test(JSON.stringify(event))) { - console.log("Request contains illegal characters"); - return respond(400, {}, "Request contains illegal characters", false); - } - - if (event?.requestContext?.identity?.cognitoAuthenticationType != "authenticated") { - console.log(`cognitoAuthenticationType ${event?.requestContext?.identity?.cognitoAuthenticationType} != "authenticated"`) - return respond(401, {}, "Authentication Required", false); - } - - var body = {}; - // Unencode the body if necessary - if (!!event?.body) { - body = (event.requestContext.isBase64Encoded) ? atob(event.body) : event.body; - - // Body will always be a JSON object. - try { - body = JSON.parse(body); - } catch (e) { - return respond(400, "Body must be JSON object", false); - } - } - - // Associate the user identity. 
- const [ UserPoolId,, Username ] = event?.requestContext?.identity?.cognitoAuthenticationProvider?.split('/')[2]?.split(':'); - - if (!UserPoolId || !Username) { - console.log(`UserPoolId or Username is missing from ${event?.requestContext?.identity?.cognitoAuthenticationProvider}`); - respond(401, "Authorization Required"); - } - - } catch (e) { - console.log("Failed to process request.", e); - return respond(500, {}, "Failed to process request.", false); - } - - try { - const user = await cognito.adminGetUser({ UserPoolId, Username }).promise(); - - // Restructure UserAttributes as an k:v - user.UserAttributes = user.UserAttributes.reduce((attrs, entry) => { - attrs[entry.Name] = entry.Value - }, {}); - - if (!user?.UserAttributes?.email) { - return respond(401, {}, "Unable to obtain user properties.", false); - } - - } catch (e) { - console.log("Unable to retrieve user context.", e); - return respond(500, {}, "Unable to retrieve user context.", false); - } - - console.log(event.pathParameters) - - const campaignId = event?.pathParameters?.campaign; - - // Get the campaign entry from DynamoDB, and manifest from S3. - // * In parallel, to save, like, some milliseconds. - - try { - const [campaign, manifestObject] = await Promise.all([ - ddb.query({ - ExpressionAttributeValues: { - ':id': {S: entity}, - ':keyid': {S: `campaigns:${campaignId}`} - }, - KeyConditionExpression: 'userid = :id and keyid = :keyid', - TableName: "Campaigns" - }).promise(), - - s3.getObject({ - Bucket: variables.userdata_bucket, - Key: `${entity}/campaigns/${campaign}/manifest.json` - }).promise() - ]); - - const manifest = JSON.parse(manifestObject.Body.toString('ascii')); - } catch (e) { - console.log("Failed to retrieve campaign details.", e); - return respond(500, {}, "Failed to retrieve campaign details."); - } - - if (campaign.Items?.[0]?.status?.S != "AVAILABLE") { - return respond(404, {}, "Campaign doesn't exist or is not in 'AVAILABLE' status."); - } - - console.log(campaign, manifest); - - // Test whether the provided presigned URL is expired. - - try { - var expires = /Expires=([\d]+)&/.exec(manifest.hashFileUrl)[1]; - } catch (e) { - return respond(400, "Invalid hashFileUrl; missing expiration"); - } - - var duration = expires - (new Date().getTime() / 1000); - if (duration < 900) { - return respond(400, {} `hashFileUrl must be valid for at least 900 seconds, got ${Math.floor(duration)}`); - } - - // Campaign is valid. Get AZ pricing and Image AMI - // * Again in parallel, to save, like, some more milliseconds. - - try { - const ec2 = new aws.EC2({region: manifest.region}); - const [pricing, image] = await Promise.all([ - ec2.describeSpotPriceHistory({ - EndTime: Math.round(Date.now() / 1000), - ProductDescriptions: [ "Linux/UNIX (Amazon VPC)" ], - InstanceTypes: [ manifest.instanceType ], - StartTime: Math.round(Date.now() / 1000) - }), - - ec2.describeImages({ - Filters: [{ - Name: "virtualization-type", - Values: ["hvm"] - },{ - Name: "name", - Values: ["amzn2-ami-graphics-hvm-2*"] - },{ - Name: "root-device-type", - Values: ["ebs"] - },{ - Name: "owner-id", - Values: ["679593333241"] - }] - }) - ]); - } catch (e) { - console.log("Failed to retrieve price and image details.", e); - return respond(500, {}, "Failed to retrieve price and image details."); - } - - try { - - // Calculate the necessary volume size - - const volumeSize = Math.ceil(manifest.wordlistSize / 1073741824) + 1; - console.log(`Wordlist is ${manifest.wordlistSize / 1073741824}GiB. 
Allocating ${volumeSize}GiB`); - - // Build a launchSpecification for each AZ in the target region. - - const instance_userdata = new Buffer(fs.readFileSync(__dirname + '/userdata.sh', 'utf-8') - .replace("{{APIGATEWAY}}", process.env.apigateway)) - .toString('base64'); - - const launchSpecificationTemplate = { - IamInstanceProfile: { - Arn: variables.instanceProfile - }, - ImageId: image.ImageId, - KeyName: "npk-key", - InstanceType: manifest.instanceType, - BlockDeviceMappings: [{ - DeviceName: '/dev/xvdb', - Ebs: { - DeleteOnTermination: true, - Encrypted: false, - VolumeSize: volumeSize, - VolumeType: "gp2" - } - }], - NetworkInterfaces: [{ - AssociatePublicIpAddress: true, - DeviceIndex: 0, - // SubnetId: Gets populated below. - }], - Placement: { - // AvailabilityZone: Gets populated below. - }, - TagSpecifications: [{ - ResourceType: "instance", - Tags: [{ - Key: "MaxCost", - Value: ((manifest.priceTarget < variables.campaign_max_price) ? manifest.priceTarget : variables.campaign_max_price).toString() - }, { - Key: "ManifestPath", - Value: `${entity}/campaigns/${campaignId}` - }] - }], - UserData: instance_userdata - }; - - // Create a copy of the launchSpecificationTemplate for each AvailabilityZone in the campaign's region. - - const launchSpecifications = Object.keys(variables.availabilityZones[manifest.region]).reduce((specs, entry) => { - const az = JSON.parse(JSON.stringify(launchSpecificationTemplate)); // Have to deep-copy to avoid referential overrides. - - az.Placement.AvailabilityZone = entry; - az.NetworkInterfaces[0].SubnetId = variables.availabilityZones[manifest.region][entry]; - - return specs.concat(az); - }, []); - - // Get the average spot price across all AZs in the region. - const spotPrice = pricing.reduce((average, entry) => average + (entry / pricing.length), 0); - const maxDuration = (Number(manifest.instanceDuration) < variables.campaign_max_price / spotPrice) ? Number(manifest.instanceDuration) : variables.campaign_max_price / spotPrice; - - console.log(spotPrice, maxDuration, variables.campaign_max_price); - - const spotFleetParams = { - SpotFleetRequestConfig: { - AllocationStrategy: "lowestPrice", - IamFleetRole: variables.iamFleetRole, - InstanceInterruptionBehavior: "terminate", - LaunchSpecifications: launchSpecifications, - SpotPrice: (manifest.priceTarget / (manifest.instanceCount * manifest.instanceDuration) * 2).toString(), - TargetCapacity: manifest.instanceCount, - ReplaceUnhealthyInstances: false, - TerminateInstancesWithExpiration: true, - Type: "request", - ValidFrom: (new Date().getTime() / 1000), - ValidUntil: (new Date().getTime() / 1000) + (maxDuration * 3600) - } - }; - - console.log(JSON.stringify(spotFleetParams)); - } catch (e) { - console.log("Failed to generate launch specifications.", e); - return respond(500, {}, "Failed to generate launch specifications."); - } - - try { - const spotFleetRequest = await ec2.requestSpotFleet(spotFleetParams).promise(); - } catch (e) { - console.log("Failed to request spot fleet.", e); - return respond(500, {}, "Failed to request spot fleet."); - } - - // Campaign created successfully. 
- - console.log(`Successfully requested spot fleet ${spotFleetRequest.SpotFleetRequestId}`); - - try { - const updateCampaign = await ddb.updateItem({ - Key: { - userid: {S: entity}, - keyid: {S: `campaigns:${campaign}`} - }, - TableName: "Campaigns", - AttributeUpdates: { - active: { Action: "PUT", Value: { BOOL: true }}, - status: { Action: "PUT", Value: { S: "STARTING" }}, - spotFleetRequestId: { Action: "PUT", Value: { S: data.SpotFleetRequestId }}, - startTime: { Action: "PUT", Value: { N: Math.floor(new Date().getTime() / 1000) }}, - eventType: { Action: "PUT", Value: { S: "CampaignStarted" }} - } - }).promise(); - } catch (e) { - console.log("Spot fleet submitted, but failed to mark Campaign as 'STARTING'. This is a catastrophic error.", e); - return respond(500, {}, "Spot fleet submitted, but failed to mark Campaign as 'STARTING'. This is a catastrophic error.", false); - } - - return respond(200, {}, { msg: "Campaign started successfully", campaignId: campaignId, spotFleetRequestId: spotFleetRequest.SpotFleetRequestId }, true); -} - -function respond(statusCode, headers, body, success) { - - // Include terraform dns names as allowed origins, as well as localhost. - const allowed_origins = JSON.parse(variables.www_dns_names); - allowed_origins.push("https://localhost"); - - headers['Content-Type'] = 'text/plain'; - - if (allowed_origins.indexOf(origin) !== false) { - // Echo the origin back. I guess this is the best way to support multiple origins - headers['Access-Control-Allow-Origin'] = origin; - } else { - console.log("Invalid origin received.", origin); - } - - switch (typeof body) { - case "string": - body = { msg: body, success: success }; - break; - - case "object": - body.success = success; - break; - } - - const response = { - statusCode: statusCode, - headers: headers, - body: JSON.stringify(body), - } - - console.log(JSON.stringify(response)); - - cb(null, response); - - if (success == true) { - return Promise.resolve(body.msg); - } else { - return Promise.reject(body.msg); - } -} \ No newline at end of file diff --git a/terraform/lambda_functions/list_campaigns/package-lock.json b/terraform/lambda_functions/list_campaigns/package-lock.json deleted file mode 100644 index 7c8e64a..0000000 --- a/terraform/lambda_functions/list_campaigns/package-lock.json +++ /dev/null @@ -1,102 +0,0 @@ -{ - "name": "execute_campaign", - "version": "1.0.0", - "lockfileVersion": 1, - "requires": true, - "dependencies": { - "aws-sdk": { - "version": "2.982.0", - "resolved": "https://registry.npmjs.org/aws-sdk/-/aws-sdk-2.982.0.tgz", - "integrity": "sha512-5w+m8Ia35NqB4TOZHEKts5zSV+FTdc7hTYbN4N4lZ4YU3cLTMt496ojh5UI3Deo8IIlqgTf3UVuq6Y6cPpVxkg==", - "requires": { - "buffer": "4.9.2", - "events": "1.1.1", - "ieee754": "1.1.13", - "jmespath": "0.15.0", - "querystring": "0.2.0", - "sax": "1.2.1", - "url": "0.10.3", - "uuid": "3.3.2", - "xml2js": "0.4.19" - } - }, - "base64-js": { - "version": "1.5.1", - "resolved": "https://registry.npmjs.org/base64-js/-/base64-js-1.5.1.tgz", - "integrity": "sha512-AKpaYlHn8t4SVbOHCy+b5+KKgvR4vrsD8vbvrbiQJps7fKDTkjkDry6ji0rUJjC0kzbNePLwzxq8iypo41qeWA==" - }, - "buffer": { - "version": "4.9.2", - "resolved": "https://registry.npmjs.org/buffer/-/buffer-4.9.2.tgz", - "integrity": "sha512-xq+q3SRMOxGivLhBNaUdC64hDTQwejJ+H0T/NB1XMtTVEwNTrfFF3gAxiyW0Bu/xWEGhjVKgUcMhCrUy2+uCWg==", - "requires": { - "base64-js": "^1.0.2", - "ieee754": "^1.1.4", - "isarray": "^1.0.0" - } - }, - "events": { - "version": "1.1.1", - "resolved": 
"https://registry.npmjs.org/events/-/events-1.1.1.tgz", - "integrity": "sha1-nr23Y1rQmccNzEwqH1AEKI6L2SQ=" - }, - "ieee754": { - "version": "1.1.13", - "resolved": "https://registry.npmjs.org/ieee754/-/ieee754-1.1.13.tgz", - "integrity": "sha512-4vf7I2LYV/HaWerSo3XmlMkp5eZ83i+/CDluXi/IGTs/O1sejBNhTtnxzmRZfvOUqj7lZjqHkeTvpgSFDlWZTg==" - }, - "isarray": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/isarray/-/isarray-1.0.0.tgz", - "integrity": "sha1-u5NdSFgsuhaMBoNJV6VKPgcSTxE=" - }, - "jmespath": { - "version": "0.15.0", - "resolved": "https://registry.npmjs.org/jmespath/-/jmespath-0.15.0.tgz", - "integrity": "sha1-o/Iiqarp+Wb10nx5ZRDigJF2Qhc=" - }, - "punycode": { - "version": "1.3.2", - "resolved": "https://registry.npmjs.org/punycode/-/punycode-1.3.2.tgz", - "integrity": "sha1-llOgNvt8HuQjQvIyXM7v6jkmxI0=" - }, - "querystring": { - "version": "0.2.0", - "resolved": "https://registry.npmjs.org/querystring/-/querystring-0.2.0.tgz", - "integrity": "sha1-sgmEkgO7Jd+CDadW50cAWHhSFiA=" - }, - "sax": { - "version": "1.2.1", - "resolved": "https://registry.npmjs.org/sax/-/sax-1.2.1.tgz", - "integrity": "sha1-e45lYZCyKOgaZq6nSEgNgozS03o=" - }, - "url": { - "version": "0.10.3", - "resolved": "https://registry.npmjs.org/url/-/url-0.10.3.tgz", - "integrity": "sha1-Ah5NnHcF8hu/N9A861h2dAJ3TGQ=", - "requires": { - "punycode": "1.3.2", - "querystring": "0.2.0" - } - }, - "uuid": { - "version": "3.3.2", - "resolved": "https://registry.npmjs.org/uuid/-/uuid-3.3.2.tgz", - "integrity": "sha512-yXJmeNaw3DnnKAOKJE51sL/ZaYfWJRl1pK9dr19YFCu0ObS231AB1/LbqTKRAQ5kw8A90rA6fr4riOUpTZvQZA==" - }, - "xml2js": { - "version": "0.4.19", - "resolved": "https://registry.npmjs.org/xml2js/-/xml2js-0.4.19.tgz", - "integrity": "sha512-esZnJZJOiJR9wWKMyuvSE1y6Dq5LCuJanqhxslH2bxM6duahNZ+HMpCLhBQGZkbX6xRf8x1Y2eJlgt2q3qo49Q==", - "requires": { - "sax": ">=0.6.0", - "xmlbuilder": "~9.0.1" - } - }, - "xmlbuilder": { - "version": "9.0.7", - "resolved": "https://registry.npmjs.org/xmlbuilder/-/xmlbuilder-9.0.7.tgz", - "integrity": "sha1-Ey7mPS7FVlxVfiD0wi35rKaGsQ0=" - } - } -} diff --git a/terraform/quota_warning_accepted b/terraform/quota_warning_accepted new file mode 100644 index 0000000..e69de29 diff --git a/terraform/terraform.jsonnet b/terraform/terraform.jsonnet index e6eeea2..a44812c 100644 --- a/terraform/terraform.jsonnet +++ b/terraform/terraform.jsonnet @@ -57,7 +57,7 @@ local regionKeys = std.objectFields(settings.regions); }, root: { children: [{ - pathPart: "api", + pathPart: "userproxy", methods: { OPTIONS: { optionsIntegration: true, @@ -72,14 +72,8 @@ local regionKeys = std.objectFields(settings.regions); children: [{ pathPart: "campaign", methods: { - GET: { - lambdaIntegration: "list_campaigns", - parameters: { - authorization: "AWS_IAM" - } - }, - PUT: { - lambdaIntegration: "execute_campaign", + POST: { + lambdaIntegration: "create_campaign", parameters: { authorization: "AWS_IAM" } @@ -106,13 +100,10 @@ local regionKeys = std.objectFields(settings.regions); } } }, - GET: { - lambdaIntegration: "get_campaign", + PUT: { + lambdaIntegration: "execute_campaign", parameters: { - authorization: "AWS_IAM", - request_parameters: { - "method.request.path.campaign": true - } + authorization: "AWS_IAM" } }, OPTIONS: { @@ -196,7 +187,7 @@ local regionKeys = std.objectFields(settings.regions); npk: { certificate_arn: "${aws_acm_certificate.main.arn}", domain_name: "api.%s" % [settings.dnsBaseName], - depends_on: ["aws_acm_certificate.main"] + depends_on: ["aws_acm_certificate_validation.main"] } } } @@ 
-213,11 +204,7 @@ local regionKeys = std.objectFields(settings.regions); cloudwatch_invoke_spot_monitor: { statement: { actions: ["lambda:Invoke"], - resources: ["${aws_lambda_function.spot_monitor.arn}"], - principals: { - type: "AWS", - identifiers: ["${aws_cloudfront_origin_access_identity.npk.iam_arn}"] - } + resources: ["${aws_lambda_function.spot_monitor.arn}"] } } } @@ -314,6 +301,84 @@ local regionKeys = std.objectFields(settings.regions); } } }, + 'lambda-create_campaign.tf.json': lambda.lambda_function("create_campaign", { + handler: "main.main", + timeout: 20, + memory_size: 512, + + environment: { + variables: { + + www_dns_names: std.toString(settings.wwwEndpoint), + campaign_max_price: "${var.campaign_max_price}", + gQuota: settings.quotas.gquota, + pQuota: settings.quotas.pquota, + userdata_bucket: "${aws_s3_bucket.user_data.id}", + instanceProfile: "${aws_iam_instance_profile.npk_node.arn}", + iamFleetRole: "${aws_iam_role.npk_fleet_role.arn}", + availabilityZones: std.strReplace(std.manifestJsonEx({ + [regionKeys[i]]: { + [settings.regions[regionKeys[i]][azi]]: "${aws_subnet." + settings.regions[regionKeys[i]][azi] + ".id}" + for azi in std.range(0, std.length(settings.regions[regionKeys[i]]) - 1) + } + for i in std.range(0, std.length(regionKeys) - 1) + }, ""), "\n", ""), + dictionaryBuckets: std.strReplace(std.manifestJsonEx({ + [regionKeys[i]]: "${var.dictionary-" + regionKeys[i] + "-id}" + for i in std.range(0, std.length(regionKeys) - 1) + }, ""), "\n", ""), + apigateway: if settings.useCustomDNS then + settings.apiEndpoint + else + "${aws_api_gateway_rest_api.npk.id}.execute-api." + settings.defaultRegion + ".amazonaws.com" + } + }, + }, { + statement: [{ + sid: "s3Put", + actions: [ + "s3:PutObject" + ], + resources: [ + "${aws_s3_bucket.user_data.arn}/*/campaigns/*/manifest.json", + "${aws_s3_bucket.logs.arn}/api_gateway_proxy/*", + ] + },{ + sid: "s3GetUserFile", + actions: [ + "s3:GetObject" + ], + resources: [ + "${aws_s3_bucket.user_data.arn}/*" + ] + },{ + sid: "s3GetDictionaryFile", + actions: [ + "s3:GetObject" + ], + resources: [ + "${var.dictionary-" + regionKeys[i] + "}/*" + for i in std.range(0, std.length(regionKeys) - 1) + ] + },{ + sid: "ddb", + actions: [ + "dynamodb:Query", + "dynamodb:UpdateItem" + ], + resources: [ + "${aws_dynamodb_table.campaigns.arn}" + ] + },{ + sid: "adminGetUser", + actions: [ + "cognito-idp:AdminGetUser" + ], + resources: [ + "${aws_cognito_user_pool.npk.arn}" + ] + }] + }), 'lambda-delete_campaign.tf.json': lambda.lambda_function("delete_campaign", { handler: "main.main", timeout: 20, @@ -322,6 +387,17 @@ local regionKeys = std.objectFields(settings.regions); environment: { variables: { www_dns_names: std.toString([settings.wwwEndpoint]), + campaign_max_price: "${var.campaign_max_price}", + userdata_bucket: "${aws_s3_bucket.user_data.id}", + instanceProfile: "${aws_iam_instance_profile.npk_node.arn}", + iamFleetRole: "${aws_iam_role.npk_fleet_role.arn}", + availabilityZones: std.strReplace(std.manifestJsonEx({ + [regionKeys[i]]: { + [settings.regions[regionKeys[i]][azi]]: "${aws_subnet." 
+ settings.regions[regionKeys[i]][azi] + ".id}" + for azi in std.range(0, std.length(settings.regions[regionKeys[i]]) - 1) + } + for i in std.range(0, std.length(regionKeys) - 1) + }, ""), "\n", "") } } }, { @@ -425,66 +501,6 @@ local regionKeys = std.objectFields(settings.regions); ] }] }), - 'lambda-get_campaign.tf.json': lambda.lambda_function("get_campaign", { - handler: "main.main", - timeout: 20, - memory_size: 512, - - environment: { - variables: { - www_dns_names: std.toString([settings.wwwEndpoint]), - } - } - }, { - statement: [{ - sid: "ddb", - actions: [ - "dynamodb:Query", - "dynamodb:UpdateItem" - ], - resources: [ - "${aws_dynamodb_table.campaigns.arn}" - ] - },{ - sid: "adminGetUser", - actions: [ - "cognito-idp:AdminGetUser" - ], - resources: [ - "${aws_cognito_user_pool.npk.arn}" - ] - }] - }), - 'lambda-list_campaigns.tf.json': lambda.lambda_function("list_campaigns", { - handler: "main.main", - timeout: 20, - memory_size: 512, - - environment: { - variables: { - www_dns_names: std.toString([settings.wwwEndpoint]), - } - } - }, { - statement: [{ - sid: "ddb", - actions: [ - "dynamodb:Query", - "dynamodb:UpdateItem" - ], - resources: [ - "${aws_dynamodb_table.campaigns.arn}" - ] - },{ - sid: "adminGetUser", - actions: [ - "cognito-idp:AdminGetUser" - ], - resources: [ - "${aws_cognito_user_pool.npk.arn}" - ] - }] - }), 'lambda-spot_monitor.tf.json': lambda.lambda_function("spot_monitor", { handler: "main.main", timeout: 10, From cbbfe93f6766469517512a603c2e40d2666c92a6 Mon Sep 17 00:00:00 2001 From: Brad Woodward Date: Sun, 5 Sep 2021 17:43:19 -0600 Subject: [PATCH 008/128] Uncommit touch file --- terraform/quota_warning_accepted | 0 1 file changed, 0 insertions(+), 0 deletions(-) delete mode 100644 terraform/quota_warning_accepted diff --git a/terraform/quota_warning_accepted b/terraform/quota_warning_accepted deleted file mode 100644 index e69de29..0000000 From 4f679d8f730c2cba1c3e86a8571e9f963c4d89ad Mon Sep 17 00:00:00 2001 From: Brad Woodward Date: Mon, 6 Sep 2021 17:30:56 -0600 Subject: [PATCH 009/128] rewrite create, execute, delete --- .gitignore | 1 + .../angular/controllers/npkMainCtrl.js | 2 + terraform/jsonnet/lambda.libsonnet | 4 +- terraform/jsonnet/templates.libsonnet | 2 +- .../lambda_functions/create_campaign/main.js | 8 +- .../lambda_functions/delete_campaign/main.js | 319 +++++++----------- .../delete_campaign/package.json | 4 +- .../lambda_functions/execute_campaign/main.js | 135 ++++---- terraform/terraform.jsonnet | 35 +- 9 files changed, 233 insertions(+), 277 deletions(-) diff --git a/.gitignore b/.gitignore index 5310fe7..99ffc38 100644 --- a/.gitignore +++ b/.gitignore @@ -21,6 +21,7 @@ terraform/lambda_functions/proxy_api_handler/api_handler_variables.js terraform/lambda_functions/status_reporter/npk_settings.js terraform/lambda_functions/spot_monitor/npk_settings.js terraform/lambda_functions/proxy_api_handler/userdata.sh +terraform/lambda_functions/execute_campaign/userdata.sh terraform/templates/api-handler-variables.tpl terraform/templates/npk_settings.tpl terraform-selfhost/components/ diff --git a/site-content/angular/controllers/npkMainCtrl.js b/site-content/angular/controllers/npkMainCtrl.js index 6954cda..bb75dcf 100755 --- a/site-content/angular/controllers/npkMainCtrl.js +++ b/site-content/angular/controllers/npkMainCtrl.js @@ -628,6 +628,8 @@ angular $scope.$digest(); $('#messageModal').modal('show'); + $('img#action-' + campaign_id).hide(); + $('a#start-' + campaign_id).show(); }); }; diff --git a/terraform/jsonnet/lambda.libsonnet 
b/terraform/jsonnet/lambda.libsonnet index 9ffb8b1..31a78ca 100644 --- a/terraform/jsonnet/lambda.libsonnet +++ b/terraform/jsonnet/lambda.libsonnet @@ -48,7 +48,7 @@ local lambda_function(name, config, role_policy) = { local_file: { ["lambda-" + name + "_envvars"]: { content: std.join("\n", [ - "export %s=%s" % [key, config.environment.variables[key]] + "declare %s='%s'\nexport %s" % [key, config.environment.variables[key], key] for key in std.objectFields(config.environment.variables) ]), filename: "${path.module}/lambda_functions/" + name + "/ENVVARS", @@ -61,7 +61,7 @@ local lambda_function(name, config, role_policy) = { [name]: { depends_on: [ "null_resource.npm_install-" + name - ], + ] + if std.objectHas(config, 'depends_on') then config.depends_on else [], type: "zip", source_dir: "${path.module}/lambda_functions/" + name + "/", output_path: "${path.module}/lambda_functions/zip_files/" + name + ".zip", diff --git a/terraform/jsonnet/templates.libsonnet b/terraform/jsonnet/templates.libsonnet index a24fa80..a9bd423 100644 --- a/terraform/jsonnet/templates.libsonnet +++ b/terraform/jsonnet/templates.libsonnet @@ -55,7 +55,7 @@ local az(region) = { }, userdata_template: { content: "${data.template_file.userdata_template.rendered}", - filename: "${path.module}/lambda_functions/proxy_api_handler/userdata.sh", + filename: "${path.module}/lambda_functions/execute_campaign/userdata.sh", } } }, diff --git a/terraform/lambda_functions/create_campaign/main.js b/terraform/lambda_functions/create_campaign/main.js index bc73728..97f9099 100644 --- a/terraform/lambda_functions/create_campaign/main.js +++ b/terraform/lambda_functions/create_campaign/main.js @@ -482,13 +482,14 @@ exports.main = async function(event, context, callback) { startTime: Math.floor(new Date().getTime() / 1000), spotFleetRequestId: "", cognitoUserEmail: email, - deleted: false + deleted: false, + lastuntil: Math.floor(new Date().getTime() / 1000) + 2700, }); const updateCampaign = await ddb.updateItem({ Key: { userid: {S: entity}, - keyid: {S: `campaigns:${campaign}`} + keyid: {S: `campaigns:${campaignId}`} }, TableName: "Campaigns", AttributeUpdates: Object.keys(updateParams).reduce((attrs, entry) => { @@ -515,8 +516,7 @@ exports.main = async function(event, context, callback) { function respond(statusCode, headers, body, success) { // Include terraform dns names as allowed origins, as well as localhost. - const allowed_origins = [variables.www_dns_names]; - allowed_origins.push("https://localhost"); + const allowed_origins = [variables.www_dns_names, "https://localhost"]; headers['Content-Type'] = 'text/plain'; diff --git a/terraform/lambda_functions/delete_campaign/main.js b/terraform/lambda_functions/delete_campaign/main.js index 3a91d7c..e92397b 100644 --- a/terraform/lambda_functions/delete_campaign/main.js +++ b/terraform/lambda_functions/delete_campaign/main.js @@ -5,6 +5,8 @@ const s3 = new aws.S3({ region: "us-west-2" }); let cb = ""; let variables = {}; +var cognito = new aws.CognitoIdentityServiceProvider({region: "us-west-2", apiVersion: "2016-04-18"}); + exports.main = async function(event, context, callback) { console.log(JSON.stringify(event)); @@ -15,6 +17,8 @@ exports.main = async function(event, context, callback) { // Get the available envvars into a usable format. 
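	// (The JSON round-trip copies process.env into a plain, mutable object.)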
variables = JSON.parse(JSON.stringify(process.env)); + let entity, UserPoolId, Username; + try { console.log("Received event: " + JSON.stringify(event)); @@ -33,27 +37,14 @@ exports.main = async function(event, context, callback) { return respond(401, {}, "Authentication Required", false); } - const entity = event.requestContext.identity.cognitoIdentityId; - - var body = {}; - // Unencode the body if necessary - if (!!event?.body) { - body = (event.requestContext.isBase64Encoded) ? atob(event.body) : event.body; - - // Body will always be a JSON object. - try { - body = JSON.parse(body); - } catch (e) { - return respond(400, "Body must be JSON object", false); - } - } + entity = event.requestContext.identity.cognitoIdentityId; // Associate the user identity. - const [ UserPoolId,, Username ] = event?.requestContext?.identity?.cognitoAuthenticationProvider?.split('/')[2]?.split(':'); + [ UserPoolId,, Username ] = event?.requestContext?.identity?.cognitoAuthenticationProvider?.split('/')[2]?.split(':'); if (!UserPoolId || !Username) { console.log(`UserPoolId or Username is missing from ${event?.requestContext?.identity?.cognitoAuthenticationProvider}`); - respond(401, "Authorization Required"); + respond(401, {}, "Authorization Required", false); } } catch (e) { @@ -61,17 +52,23 @@ exports.main = async function(event, context, callback) { return respond(500, {}, "Failed to process request.", false); } + let user, email; + try { - const user = await cognito.adminGetUser({ UserPoolId, Username }).promise(); + user = await cognito.adminGetUser({ UserPoolId, Username }).promise(); // Restructure UserAttributes as an k:v user.UserAttributes = user.UserAttributes.reduce((attrs, entry) => { attrs[entry.Name] = entry.Value + + return attrs; }, {}); if (!user?.UserAttributes?.email) { return respond(401, {}, "Unable to obtain user properties.", false); } + + email = user.UserAttributes.email; } catch (e) { console.log("Unable to retrieve user context.", e); @@ -85,210 +82,150 @@ exports.main = async function(event, context, callback) { // Get the campaign entry from DynamoDB, and manifest from S3. // * In parallel, to save, like, some milliseconds. + let campaign; + try { - const [campaign, manifestObject] = await Promise.all([ - ddb.query({ - ExpressionAttributeValues: { - ':id': {S: entity}, - ':keyid': {S: `campaigns:${campaignId}`} - }, - KeyConditionExpression: 'userid = :id and keyid = :keyid', - TableName: "Campaigns" - }).promise(), - - s3.getObject({ - Bucket: variables.userdata_bucket, - Key: `${entity}/campaigns/${campaign}/manifest.json` - }).promise() - ]); - - const manifest = JSON.parse(manifestObject.Body.toString('ascii')); + campaign = await ddb.query({ + ExpressionAttributeValues: { + ':id': {S: entity}, + ':keyid': {S: `campaigns:${campaignId}`} + }, + KeyConditionExpression: 'userid = :id and keyid = :keyid', + TableName: "Campaigns" + }).promise(); + } catch (e) { console.log("Failed to retrieve campaign details.", e); return respond(500, {}, "Failed to retrieve campaign details."); } - if (campaign.Items?.[0]?.status?.S != "AVAILABLE") { - return respond(404, {}, "Campaign doesn't exist or is not in 'AVAILABLE' status."); + if (!campaign.Items?.[0]?.status?.S) { + return respond(404, {}, "Specified campaign does not exist.", false); } - console.log(campaign, manifest); + var ec2 = new aws.EC2({region: campaign.Items[0].region.S}); - // Test whether the provided presigned URL is expired. 
+ switch (campaign.Items[0].status.S) { + case "STARTING": + case "RUNNING": - try { - var expires = /Expires=([\d]+)&/.exec(manifest.hashFileUrl)[1]; - } catch (e) { - return respond(400, "Invalid hashFileUrl; missing expiration"); - } + let sfr; - var duration = expires - (new Date().getTime() / 1000); - if (duration < 900) { - return respond(400, {} `hashFileUrl must be valid for at least 900 seconds, got ${Math.floor(duration)}`); - } + try { + sfr = await ec2.describeSpotFleetRequests({ + SpotFleetRequestIds: [campaign.spotFleetRequestId] + }).promise(); + } catch(e) { + console.log("Failed to retrieve spot fleet request.", e); + return respond(500, {}, "Failed to retrieve spot fleet request.", false); + } - // Campaign is valid. Get AZ pricing and Image AMI - // * Again in parallel, to save, like, some more milliseconds. + if (!sfr.SpotFleetRequestConfigs?.[0]?.SpotFleetRequestId) { + return respond(404, "Error retrieving spot fleet data: not found.", false); + } - try { - const ec2 = new aws.EC2({region: manifest.region}); - const [pricing, image] = await Promise.all([ - ec2.describeSpotPriceHistory({ - EndTime: Math.round(Date.now() / 1000), - ProductDescriptions: [ "Linux/UNIX (Amazon VPC)" ], - InstanceTypes: [ manifest.instanceType ], - StartTime: Math.round(Date.now() / 1000) - }), - - ec2.describeImages({ - Filters: [{ - Name: "virtualization-type", - Values: ["hvm"] - },{ - Name: "name", - Values: ["amzn2-ami-graphics-hvm-2*"] - },{ - Name: "root-device-type", - Values: ["ebs"] - },{ - Name: "owner-id", - Values: ["679593333241"] - }] - }) - ]); - } catch (e) { - console.log("Failed to retrieve price and image details.", e); - return respond(500, {}, "Failed to retrieve price and image details."); - } + if (sfr.SpotFleetRequestConfigs[0].SpotFleetRequestState == "active") { + let cancellation; + + try { + cancellation = await ec2.cancelSpotFleetRequests({ + SpotFleetRequestIds: [sfr.SpotFleetRequestConfigs[0].SpotFleetRequestId], + TerminateInstances: true + }).promise(); + } catch(e) { + console.log("Failed to request cancellation of spot fleet request.", e); + return respond(500, {}, "Failed to request cancellation of spot fleet request.", false); + } - try { + if (cancellation?.SuccessfulFleetRequests?.[0].CurrentSpotFleetRequestState?.indexOf('cancelled') < 0) { + return respond(400, "Error cancelling spot fleet. Current state: " + cancallation.SuccessfulFleetRequests[0].CurrentSpotFleetRequestState, false); + } + } - // Calculate the necessary volume size + try { + let update = await ddb.updateItem({ + Key: { + userid: {S: entity}, + keyid: {S: `campaigns:${campaignId}`} + }, + TableName: "Campaigns", + AttributeUpdates: { + active: { Action: 'PUT', Value: { BOOL: false }}, + status: { Action: 'PUT', Value: { S: "CANCELLED" }} + } + }).promise(); + } catch(e) { + console.log("Failed to deactivate campaign.", e); + return respond(500, {}, "Failed to deactivate campaign.", false); + } - const volumeSize = Math.ceil(manifest.wordlistSize / 1073741824) + 1; - console.log(`Wordlist is ${manifest.wordlistSize / 1073741824}GiB. Allocating ${volumeSize}GiB`); + return respond(200, {}, `Campaign ${campaignId} stopped.`, true); - // Build a launchSpecification for each AZ in the target region. 
+ break; - const instance_userdata = new Buffer(fs.readFileSync(__dirname + '/userdata.sh', 'utf-8') - .replace("{{APIGATEWAY}}", process.env.apigateway)) - .toString('base64'); + default: - const launchSpecificationTemplate = { - IamInstanceProfile: { - Arn: variables.instanceProfile - }, - ImageId: image.ImageId, - KeyName: "npk-key", - InstanceType: manifest.instanceType, - BlockDeviceMappings: [{ - DeviceName: '/dev/xvdb', - Ebs: { - DeleteOnTermination: true, - Encrypted: false, - VolumeSize: volumeSize, - VolumeType: "gp2" - } - }], - NetworkInterfaces: [{ - AssociatePublicIpAddress: true, - DeviceIndex: 0, - // SubnetId: Gets populated below. - }], - Placement: { - // AvailabilityZone: Gets populated below. - }, - TagSpecifications: [{ - ResourceType: "instance", - Tags: [{ - Key: "MaxCost", - Value: ((manifest.priceTarget < variables.campaign_max_price) ? manifest.priceTarget : variables.campaign_max_price).toString() - }, { - Key: "ManifestPath", - Value: `${entity}/campaigns/${campaignId}` - }] - }], - UserData: instance_userdata - }; - - // Create a copy of the launchSpecificationTemplate for each AvailabilityZone in the campaign's region. - - const launchSpecifications = Object.keys(variables.availabilityZones[manifest.region]).reduce((specs, entry) => { - const az = JSON.parse(JSON.stringify(launchSpecificationTemplate)); // Have to deep-copy to avoid referential overrides. - - az.Placement.AvailabilityZone = entry; - az.NetworkInterfaces[0].SubnetId = variables.availabilityZones[manifest.region][entry]; - - return specs.concat(az); - }, []); - - // Get the average spot price across all AZs in the region. - const spotPrice = pricing.reduce((average, entry) => average + (entry / pricing.length), 0); - const maxDuration = (Number(manifest.instanceDuration) < variables.campaign_max_price / spotPrice) ? Number(manifest.instanceDuration) : variables.campaign_max_price / spotPrice; - - console.log(spotPrice, maxDuration, variables.campaign_max_price); - - const spotFleetParams = { - SpotFleetRequestConfig: { - AllocationStrategy: "lowestPrice", - IamFleetRole: variables.iamFleetRole, - InstanceInterruptionBehavior: "terminate", - LaunchSpecifications: launchSpecifications, - SpotPrice: (manifest.priceTarget / (manifest.instanceCount * manifest.instanceDuration) * 2).toString(), - TargetCapacity: manifest.instanceCount, - ReplaceUnhealthyInstances: false, - TerminateInstancesWithExpiration: true, - Type: "request", - ValidFrom: (new Date().getTime() / 1000), - ValidUntil: (new Date().getTime() / 1000) + (maxDuration * 3600) + try { + let entries = await ddb.query({ + ExpressionAttributeValues: { + ':id': {S: entity}, + ':keyid': {S: `${campaignId}:`} + }, + KeyConditionExpression: 'userid = :id and begins_with(keyid, :keyid)', + TableName: "Campaigns" + }).promise(); + } catch (e) { + console.log("Failed to retrieve events for campaign.", e); + return respond(500, {}, "Failed to retrieve events for campaign.", false); } - }; - console.log(JSON.stringify(spotFleetParams)); - } catch (e) { - console.log("Failed to generate launch specifications.", e); - return respond(500, {}, "Failed to generate launch specifications."); - } + try { - try { - const spotFleetRequest = await ec2.requestSpotFleet(spotFleetParams).promise(); - } catch (e) { - console.log("Failed to request spot fleet.", e); - return respond(500, {}, "Failed to request spot fleet."); - } + // Delete event entries for the campaign. 
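+					// Event rows share the campaign's userid partition and use keyid values prefixed
+					// with "<campaignId>:", so the begins_with() query above returns each one for removal.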
+ const promises = entries.Items.map((entry) => { + entry = aws.DynamoDB.Converter.unmarshall(entry); + + return ddb.deleteItem({ + Key: { + userid: {S: entity}, + keyid: {S: entry.keyid} + }, + TableName: "Campaigns" + }).promise(); + }); + + promises.push(ddb.updateItem({ + Key: { + userid: {S: entity}, + keyid: {S: `campaigns:${campaignId}`} + }, + TableName: "Campaigns", + AttributeUpdates: { + deleted: { Action: 'PUT', Value: { BOOL: true }} + } + }).promise()); + + let finished = await Promise.all(promises); - // Campaign created successfully. + } catch (e) { + console.log("Failed to delete campaign", e); + return respond(500, {}, "Failed to delete campaign", false); + } - console.log(`Successfully requested spot fleet ${spotFleetRequest.SpotFleetRequestId}`); + return respond(200, {}, `Campaign ${campaignId} deleted.`, true); - try { - const updateCampaign = await ddb.updateItem({ - Key: { - userid: {S: entity}, - keyid: {S: `campaigns:${campaign}`} - }, - TableName: "Campaigns", - AttributeUpdates: { - active: { Action: "PUT", Value: { BOOL: true }}, - status: { Action: "PUT", Value: { S: "STARTING" }}, - spotFleetRequestId: { Action: "PUT", Value: { S: data.SpotFleetRequestId }}, - startTime: { Action: "PUT", Value: { N: Math.floor(new Date().getTime() / 1000) }}, - eventType: { Action: "PUT", Value: { S: "CampaignStarted" }} - } - }).promise(); - } catch (e) { - console.log("Spot fleet submitted, but failed to mark Campaign as 'STARTING'. This is a catastrophic error.", e); - return respond(500, {}, "Spot fleet submitted, but failed to mark Campaign as 'STARTING'. This is a catastrophic error.", false); + break; } + + return respond(200, {}, { msg: "Campaign started successfully", campaignId: campaignId, spotFleetRequestId: spotFleetRequest.SpotFleetRequestId }, true); } function respond(statusCode, headers, body, success) { // Include terraform dns names as allowed origins, as well as localhost. 
- const allowed_origins = JSON.parse(variables.www_dns_names); - allowed_origins.push("https://localhost"); + const allowed_origins = [variables.www_dns_names, "https://localhost"]; headers['Content-Type'] = 'text/plain'; @@ -319,9 +256,5 @@ function respond(statusCode, headers, body, success) { cb(null, response); - if (success == true) { - return Promise.resolve(body.msg); - } else { - return Promise.reject(body.msg); - } + return Promise.resolve(body.msg); } \ No newline at end of file diff --git a/terraform/lambda_functions/delete_campaign/package.json b/terraform/lambda_functions/delete_campaign/package.json index bd0b67b..ccd8e8e 100644 --- a/terraform/lambda_functions/delete_campaign/package.json +++ b/terraform/lambda_functions/delete_campaign/package.json @@ -1,7 +1,7 @@ { - "name": "execute_campaign", + "name": "delete_campaign", "version": "1.0.0", - "description": "NPK execute_campaign Lambda", + "description": "NPK delete_campaign Lambda", "main": "main.js", "dependencies": { "aws-sdk": "^2.599.0" diff --git a/terraform/lambda_functions/execute_campaign/main.js b/terraform/lambda_functions/execute_campaign/main.js index 3a91d7c..2d905fc 100644 --- a/terraform/lambda_functions/execute_campaign/main.js +++ b/terraform/lambda_functions/execute_campaign/main.js @@ -1,10 +1,13 @@ const aws = require('aws-sdk'); const ddb = new aws.DynamoDB({ region: "us-west-2" }); const s3 = new aws.S3({ region: "us-west-2" }); +const fs = require('fs'); let cb = ""; let variables = {}; +const cognito = new aws.CognitoIdentityServiceProvider({region: "us-west-2", apiVersion: "2016-04-18"}); + exports.main = async function(event, context, callback) { console.log(JSON.stringify(event)); @@ -14,6 +17,10 @@ exports.main = async function(event, context, callback) { // Get the available envvars into a usable format. variables = JSON.parse(JSON.stringify(process.env)); + variables.availabilityZones = JSON.parse(variables.availabilityZones); + variables.dictionaryBuckets = JSON.parse(variables.dictionaryBuckets); + + let entity, UserPoolId, Username; try { @@ -33,27 +40,14 @@ exports.main = async function(event, context, callback) { return respond(401, {}, "Authentication Required", false); } - const entity = event.requestContext.identity.cognitoIdentityId; - - var body = {}; - // Unencode the body if necessary - if (!!event?.body) { - body = (event.requestContext.isBase64Encoded) ? atob(event.body) : event.body; - - // Body will always be a JSON object. - try { - body = JSON.parse(body); - } catch (e) { - return respond(400, "Body must be JSON object", false); - } - } + entity = event.requestContext.identity.cognitoIdentityId; // Associate the user identity. 
-	const [ UserPoolId,, Username ] = event?.requestContext?.identity?.cognitoAuthenticationProvider?.split('/')[2]?.split(':');
+		[ UserPoolId,, Username ] = event?.requestContext?.identity?.cognitoAuthenticationProvider?.split('/')[2]?.split(':');
 
 		if (!UserPoolId || !Username) {
 			console.log(`UserPoolId or Username is missing from ${event?.requestContext?.identity?.cognitoAuthenticationProvider}`);
-			respond(401, "Authorization Required");
+			respond(401, {}, "Authorization Required", false);
 		}
 
 	} catch (e) {
@@ -61,17 +55,23 @@ exports.main = async function(event, context, callback) {
 		return respond(500, {}, "Failed to process request.", false);
 	}
 
+	let user, email;
+
 	try {
 		const user = await cognito.adminGetUser({ UserPoolId, Username }).promise();
 
 		// Restructure UserAttributes as a k:v object
 		user.UserAttributes = user.UserAttributes.reduce((attrs, entry) => {
 			attrs[entry.Name] = entry.Value
+
+			return attrs;
 		}, {});
 
 		if (!user?.UserAttributes?.email) {
 			return respond(401, {}, "Unable to obtain user properties.", false);
 		}
+
+		email = user.UserAttributes.email;
 
 	} catch (e) {
 		console.log("Unable to retrieve user context.", e);
@@ -85,8 +85,10 @@ exports.main = async function(event, context, callback) {
 	// Get the campaign entry from DynamoDB, and manifest from S3.
 	// * In parallel, to save, like, some milliseconds.
 
+	let campaign, manifestObject, manifest;
+
 	try {
-		const [campaign, manifestObject] = await Promise.all([
+		[campaign, manifestObject] = await Promise.all([
 			ddb.query({
 				ExpressionAttributeValues: {
 					':id': {S: entity},
@@ -98,47 +100,50 @@ exports.main = async function(event, context, callback) {
 
 			s3.getObject({
 				Bucket: variables.userdata_bucket,
-				Key: `${entity}/campaigns/${campaign}/manifest.json`
+				Key: `${entity}/campaigns/${campaignId}/manifest.json`
 			}).promise()
 		]);
 
-		const manifest = JSON.parse(manifestObject.Body.toString('ascii'));
+		manifest = JSON.parse(manifestObject.Body.toString('ascii'));
 
 	} catch (e) {
 		console.log("Failed to retrieve campaign details.", e);
-		return respond(500, {}, "Failed to retrieve campaign details.");
+		return respond(500, {}, "Failed to retrieve campaign details.", false);
 	}
 
 	if (campaign.Items?.[0]?.status?.S != "AVAILABLE") {
-		return respond(404, {}, "Campaign doesn't exist or is not in 'AVAILABLE' status.");
+		return respond(404, {}, "Campaign doesn't exist or is not in 'AVAILABLE' status.", false);
 	}
 
-	console.log(campaign, manifest);
-
 	// Test whether the provided presigned URL is expired.
+	let expires, duration;
+
 	try {
-		var expires = /Expires=([\d]+)&/.exec(manifest.hashFileUrl)[1];
-	} catch (e) {
-		return respond(400, "Invalid hashFileUrl; missing expiration");
-	}
+		expires = /Expires=([\d]+)&/.exec(manifest.hashFileUrl)[1];
 
-	var duration = expires - (new Date().getTime() / 1000);
-	if (duration < 900) {
-		return respond(400, {} `hashFileUrl must be valid for at least 900 seconds, got ${Math.floor(duration)}`);
+		duration = expires - (new Date().getTime() / 1000);
+
+		if (duration < 900) {
+			return respond(400, {}, `hashFileUrl must be valid for at least 900 seconds, got ${Math.floor(duration)}`, false);
+		}
+	} catch (e) {
+		return respond(400, {}, "Invalid hashFileUrl; missing expiration", false);
 	}
 
 	// Campaign is valid. Get AZ pricing and Image AMI
 	// * Again in parallel, to save, like, some more milliseconds.
+ const ec2 = new aws.EC2({region: manifest.region}); + let pricing, image; + try { - const ec2 = new aws.EC2({region: manifest.region}); - const [pricing, image] = await Promise.all([ + [pricing, image] = await Promise.all([ ec2.describeSpotPriceHistory({ EndTime: Math.round(Date.now() / 1000), ProductDescriptions: [ "Linux/UNIX (Amazon VPC)" ], InstanceTypes: [ manifest.instanceType ], StartTime: Math.round(Date.now() / 1000) - }), + }).promise(), ec2.describeImages({ Filters: [{ @@ -154,13 +159,19 @@ exports.main = async function(event, context, callback) { Name: "owner-id", Values: ["679593333241"] }] - }) + }).promise() ]); } catch (e) { console.log("Failed to retrieve price and image details.", e); - return respond(500, {}, "Failed to retrieve price and image details."); + return respond(500, {}, "Failed to retrieve price and image details.", false); } + image = image.Images.reduce((newest, entry) => + entry.CreationDate > newest.CreationDate ? entry : newest + , { CreationDate: '1980-01-01T00:00:00.000Z' }); + + let spotFleetParams; + try { // Calculate the necessary volume size @@ -170,7 +181,7 @@ exports.main = async function(event, context, callback) { // Build a launchSpecification for each AZ in the target region. - const instance_userdata = new Buffer(fs.readFileSync(__dirname + '/userdata.sh', 'utf-8') + const instance_userdata = new Buffer.from(fs.readFileSync(__dirname + '/userdata.sh', 'utf-8') .replace("{{APIGATEWAY}}", process.env.apigateway)) .toString('base64'); @@ -223,12 +234,12 @@ exports.main = async function(event, context, callback) { }, []); // Get the average spot price across all AZs in the region. - const spotPrice = pricing.reduce((average, entry) => average + (entry / pricing.length), 0); + const spotPrice = pricing.SpotPriceHistory.reduce((average, entry) => average + (entry.SpotPrice / pricing.SpotPriceHistory.length), 0); const maxDuration = (Number(manifest.instanceDuration) < variables.campaign_max_price / spotPrice) ? Number(manifest.instanceDuration) : variables.campaign_max_price / spotPrice; - console.log(spotPrice, maxDuration, variables.campaign_max_price); + console.log(`Setting Duration to ${maxDuration} (Spot average $${spotPrice} with limit of $${variables.campaign_max_price})`); - const spotFleetParams = { + spotFleetParams = { SpotFleetRequestConfig: { AllocationStrategy: "lowestPrice", IamFleetRole: variables.iamFleetRole, @@ -244,17 +255,19 @@ exports.main = async function(event, context, callback) { } }; - console.log(JSON.stringify(spotFleetParams)); + // console.log(JSON.stringify(spotFleetParams)); } catch (e) { console.log("Failed to generate launch specifications.", e); - return respond(500, {}, "Failed to generate launch specifications."); + return respond(500, {}, "Failed to generate launch specifications.", false); } + let spotFleetRequest; + try { - const spotFleetRequest = await ec2.requestSpotFleet(spotFleetParams).promise(); + spotFleetRequest = await ec2.requestSpotFleet(spotFleetParams).promise(); } catch (e) { console.log("Failed to request spot fleet.", e); - return respond(500, {}, "Failed to request spot fleet."); + return respond(500, {}, "Failed to request spot fleet.", false); } // Campaign created successfully. 
@@ -262,19 +275,30 @@ exports.main = async function(event, context, callback) { console.log(`Successfully requested spot fleet ${spotFleetRequest.SpotFleetRequestId}`); try { + const updateParams = aws.DynamoDB.Converter.marshall({ + active: true, + status: "STARTING", + spotFleetRequestId: spotFleetRequest.SpotFleetRequestId, + startTime: Math.floor(new Date().getTime() / 1000), + eventType: "CampaignStarted", + lastuntil: 0, + }); + const updateCampaign = await ddb.updateItem({ Key: { userid: {S: entity}, - keyid: {S: `campaigns:${campaign}`} + keyid: {S: `campaigns:${campaignId}`} }, TableName: "Campaigns", - AttributeUpdates: { - active: { Action: "PUT", Value: { BOOL: true }}, - status: { Action: "PUT", Value: { S: "STARTING" }}, - spotFleetRequestId: { Action: "PUT", Value: { S: data.SpotFleetRequestId }}, - startTime: { Action: "PUT", Value: { N: Math.floor(new Date().getTime() / 1000) }}, - eventType: { Action: "PUT", Value: { S: "CampaignStarted" }} - } + AttributeUpdates: Object.keys(updateParams).reduce((attrs, entry) => { + attrs[entry] = { + Action: "PUT", + Value: updateParams[entry] + }; + + return attrs; + }, {}) + }).promise(); } catch (e) { console.log("Spot fleet submitted, but failed to mark Campaign as 'STARTING'. This is a catastrophic error.", e); @@ -287,8 +311,7 @@ exports.main = async function(event, context, callback) { function respond(statusCode, headers, body, success) { // Include terraform dns names as allowed origins, as well as localhost. - const allowed_origins = JSON.parse(variables.www_dns_names); - allowed_origins.push("https://localhost"); + const allowed_origins = [variables.www_dns_names, "https://localhost"]; headers['Content-Type'] = 'text/plain'; @@ -319,9 +342,5 @@ function respond(statusCode, headers, body, success) { cb(null, response); - if (success == true) { - return Promise.resolve(body.msg); - } else { - return Promise.reject(body.msg); - } + return Promise.resolve(body.msg); } \ No newline at end of file diff --git a/terraform/terraform.jsonnet b/terraform/terraform.jsonnet index a44812c..eee0e5f 100644 --- a/terraform/terraform.jsonnet +++ b/terraform/terraform.jsonnet @@ -386,18 +386,7 @@ local regionKeys = std.objectFields(settings.regions); environment: { variables: { - www_dns_names: std.toString([settings.wwwEndpoint]), - campaign_max_price: "${var.campaign_max_price}", - userdata_bucket: "${aws_s3_bucket.user_data.id}", - instanceProfile: "${aws_iam_instance_profile.npk_node.arn}", - iamFleetRole: "${aws_iam_role.npk_fleet_role.arn}", - availabilityZones: std.strReplace(std.manifestJsonEx({ - [regionKeys[i]]: { - [settings.regions[regionKeys[i]][azi]]: "${aws_subnet." 
+ settings.regions[regionKeys[i]][azi] + ".id}" - for azi in std.range(0, std.length(settings.regions[regionKeys[i]]) - 1) - } - for i in std.range(0, std.length(regionKeys) - 1) - }, ""), "\n", "") + www_dns_names: std.toString([settings.wwwEndpoint]) } } }, { @@ -438,23 +427,35 @@ local regionKeys = std.objectFields(settings.regions); environment: { variables: { - www_dns_names: std.toString([settings.wwwEndpoint]), + www_dns_names: std.toString(settings.wwwEndpoint), campaign_max_price: "${var.campaign_max_price}", + gQuota: settings.quotas.gquota, + pQuota: settings.quotas.pquota, userdata_bucket: "${aws_s3_bucket.user_data.id}", instanceProfile: "${aws_iam_instance_profile.npk_node.arn}", iamFleetRole: "${aws_iam_role.npk_fleet_role.arn}", - availabilityZones: std.manifestJsonEx({ + availabilityZones: std.strReplace(std.manifestJsonEx({ [regionKeys[i]]: { [settings.regions[regionKeys[i]][azi]]: "${aws_subnet." + settings.regions[regionKeys[i]][azi] + ".id}" for azi in std.range(0, std.length(settings.regions[regionKeys[i]]) - 1) } for i in std.range(0, std.length(regionKeys) - 1) - }, "") + }, ""), "\n", ""), + dictionaryBuckets: std.strReplace(std.manifestJsonEx({ + [regionKeys[i]]: "${var.dictionary-" + regionKeys[i] + "-id}" + for i in std.range(0, std.length(regionKeys) - 1) + }, ""), "\n", ""), + apigateway: if settings.useCustomDNS then + settings.apiEndpoint + else + "${aws_api_gateway_rest_api.npk.id}.execute-api." + settings.defaultRegion + ".amazonaws.com" } - } + }, + + depends_on: ["local_file.userdata_template"] }, { statement: [{ - sid: "s3", + sid: "s3GetUserFile", actions: [ "s3:GetObject" ], From 5a3664b009dead50d2895284c83e6c86710f8a23 Mon Sep 17 00:00:00 2001 From: Brad Woodward Date: Mon, 6 Sep 2021 21:37:39 -0600 Subject: [PATCH 010/128] Fix terraform signing and ACM dependency --- Dockerfile | 4 ++-- terraform/terraform.jsonnet | 4 +++- 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/Dockerfile b/Dockerfile index 9afcbde..d718782 100644 --- a/Dockerfile +++ b/Dockerfile @@ -22,8 +22,8 @@ RUN apk -v --no-cache add python py-pip groff less mailcap && \ # Installing terraform RUN mkdir /build/terraform && \ cd /build/terraform && \ - wget https://releases.hashicorp.com/terraform/0.11.15-oci/terraform_0.11.15-oci_linux_amd64.zip && \ - unzip terraform_0.11.15-oci_linux_amd64.zip && \ + wget https://releases.hashicorp.com/terraform/0.11.15/terraform_0.11.15_linux_amd64.zip && \ + unzip terraform_0.11.15_linux_amd64.zip && \ ln -s /build/terraform/terraform /usr/bin/terraform # Installing a text editor diff --git a/terraform/terraform.jsonnet b/terraform/terraform.jsonnet index 4f18529..1929282 100644 --- a/terraform/terraform.jsonnet +++ b/terraform/terraform.jsonnet @@ -113,7 +113,9 @@ local regionKeys = std.objectFields(settings.regions); ["api-url-" + i]: api_gateway.domain_name( settings.dnsNames.api[i], "${aws_acm_certificate.api-" + i + ".arn}" - ) for i in std.range(0, std.length(settings.dnsNames.api) - 1) + ) + { + depends_on: ["aws_acm_certificate_validation.api-" + i] + } for i in std.range(0, std.length(settings.dnsNames.api) - 1) }, "aws_api_gateway_base_path_mapping": { ["api-url-" + i]: api_gateway.base_path("${aws_api_gateway_domain_name.api-url-" + i + ".domain_name}") From c711d70c2e2561ce7d0b0597335c2ed03ead287b Mon Sep 17 00:00:00 2001 From: Brad Woodward Date: Thu, 9 Sep 2021 16:04:53 -0600 Subject: [PATCH 011/128] Updated sample settings --- terraform/npk-settings.json.sample | 11 ++--------- 1 file changed, 2 insertions(+), 9 
deletions(-) diff --git a/terraform/npk-settings.json.sample b/terraform/npk-settings.json.sample index 8d4298d..4317324 100644 --- a/terraform/npk-settings.json.sample +++ b/terraform/npk-settings.json.sample @@ -4,17 +4,10 @@ "campaign_max_price": 50, "georestrictions": ["US", "CA", "GB", "DE"], - "useCustomDNS": false, "route53Zone": "E10SDSEFH1102DF", - "dnsName": "dev.npkproject.io", + "dnsBaseName": "dev.npkproject.io", "awsProfile": "npk", "criticalEventsSMS": "+13035551234", - "adminEmail": "demo@npkproject.io", - "debug_lambda": false, - - "useSAML": false, - "sAMLMetadataFile": "/home/self/Documents/CorpSAMLMetadata.xml" - - OR - - "sAMLMetadataUrl": "https://myfederatedprovider.com/saml/metadata.xml" + "adminEmail": "demo@npkproject.io" } From f5805fbb2a2bf8b1a7040143794f143d51509191 Mon Sep 17 00:00:00 2001 From: Brad Woodward Date: Thu, 9 Sep 2021 16:20:46 -0600 Subject: [PATCH 012/128] Fix attempt for #6 --- terraform/lambda_functions/proxy_api_handler/main.js | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/terraform/lambda_functions/proxy_api_handler/main.js b/terraform/lambda_functions/proxy_api_handler/main.js index 2dc4db3..0a0a2aa 100644 --- a/terraform/lambda_functions/proxy_api_handler/main.js +++ b/terraform/lambda_functions/proxy_api_handler/main.js @@ -532,10 +532,10 @@ function createCampaign(entity, email, campaign) { if (typeof campaign.rulesFiles != "undefined" && campaign.rulesFiles != null) { console.log("Debug: Rules are enabled. Verifiying files."); - campaign.require({ + /*campaign.require({ "rulesFiles": 0, "dictionaryFile": 0 - }); + });*/ s3dict = new aws.S3({region: campaign.region}); var bucket = variables.dictionaryBuckets[campaign.region]; From f54ac85e27e348a6a53bf30c37f64d8356d52d9c Mon Sep 17 00:00:00 2001 From: Brad Woodward Date: Sat, 11 Sep 2021 17:29:44 -0600 Subject: [PATCH 013/128] Updated dockerfile and backend --- Dockerfile | 58 ++++-- terraform-selfhost/deploy-selfhost.sh | 10 +- terraform-selfhost/terraform-selfhost.jsonnet | 2 +- terraform/cloudwatch-policy.tf.bak | 22 --- terraform/cognito_iam_roles.tf.bak | 178 ------------------ 5 files changed, 44 insertions(+), 226 deletions(-) delete mode 100644 terraform/cloudwatch-policy.tf.bak delete mode 100644 terraform/cognito_iam_roles.tf.bak diff --git a/Dockerfile b/Dockerfile index 9afcbde..8381cf4 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,34 +1,52 @@ -# Inspired in https://andrewlock.net/packaging-cli-programs-into-docker-images-to-avoid-dependency-hell/ -FROM alpine:3.10 +FROM ubuntu:20.04 + +SHELL ["/bin/bash", "--login", "-c"] + +RUN echo 'debconf debconf/frontend select Noninteractive' | debconf-set-selections + +# Install base dependencies +RUN apt-get update && apt-get install -y -q --no-install-recommends \ + apt-transport-https \ + build-essential \ + ca-certificates \ + curl \ + git \ + libssl-dev \ + wget \ + nano \ + jq \ + npm \ + pwgen \ + unzip \ + && rm -rf /var/lib/apt/lists/* + +RUN curl -o- https://raw.githubusercontent.com/nvm-sh/nvm/v0.35.3/install.sh | bash + +RUN . 
~/.nvm/nvm.sh && \ + nvm install 14.15.4 && \ + nvm use 14.15.4 # Building Jsonnet -RUN mkdir /build && \ +RUN mkdir build && \ cd build && \ - apk -v --no-cache add git make gcc g++ && \ - git clone https://github.com/google/jsonnet.git && \ - cd jsonnet && \ - make && \ - apk -v --purge del git gcc && \ - ln -s /build/jsonnet/jsonnet /usr/bin/jsonnet && \ - ln -s /build/jsonnet/jsonnetfmt /usr/bin/jsonnetfmt + wget https://github.com/google/jsonnet/releases/download/v0.17.0/jsonnet-bin-v0.17.0-linux.tar.gz && \ + tar -xf jsonnet-bin-v0.17.0-linux.tar.gz && \ + mv ./jsonnet /usr/bin/jsonnet && \ + mv ./jsonnetfmt /usr/bin/jsonnetfmt ## Installing AWS CLI -RUN apk -v --no-cache add python py-pip groff less mailcap && \ - pip install --upgrade awscli s3cmd python-magic && \ - apk -v --purge del py-pip && \ - ## Installing other dependencies - apk -v --no-cache add jq npm pwgen bash +RUN cd build && \ + curl "https://awscli.amazonaws.com/awscli-exe-linux-x86_64-2.1.4.zip" -o "awscliv2.zip" && \ + unzip awscliv2.zip && \ + ./aws/install # Installing terraform RUN mkdir /build/terraform && \ cd /build/terraform && \ - wget https://releases.hashicorp.com/terraform/0.11.15-oci/terraform_0.11.15-oci_linux_amd64.zip && \ - unzip terraform_0.11.15-oci_linux_amd64.zip && \ + wget https://releases.hashicorp.com/terraform/0.15.5/terraform_0.15.5_linux_amd64.zip && \ + unzip terraform_0.15.5_linux_amd64.zip && \ ln -s /build/terraform/terraform /usr/bin/terraform -# Installing a text editor -RUN apk -v --no-cache add nano - RUN mkdir /npk VOLUME /npk diff --git a/terraform-selfhost/deploy-selfhost.sh b/terraform-selfhost/deploy-selfhost.sh index 654a73e..12b3bfb 100755 --- a/terraform-selfhost/deploy-selfhost.sh +++ b/terraform-selfhost/deploy-selfhost.sh @@ -24,7 +24,7 @@ fi if [[ $(aws --version | grep -c "aws-cli/2") != 1 ]]; then ERR=1; - echo "Error: Wrong version of Terraform is installed. NPK requires AWSCLI version 2."; + echo "Error: Wrong version of AWSCLI is installed. NPK requires AWSCLI version 2."; fi if [[ ! -f $(which npm) ]]; then @@ -37,18 +37,18 @@ if [[ ! -f $(which terraform) ]]; then echo "Error: Must have Terraform installed."; fi -if [[ $($TERBIN -v | grep -c "Terraform v0.11") != 1 ]]; then +if [[ $($TERBIN -v | grep -c "Terraform v0.15") != 1 ]]; then ERR=1; - echo "Error: Wrong version of Terraform is installed. NPK requires Terraform v0.11."; + echo "Error: Wrong version of Terraform is installed. NPK requires Terraform v0.15."; echo "-> Note: A non-default binary can be specified as a positional script parameter:" - echo "-> e.g: ./deploy-selfhost.sh " + echo "-> e.g: ./deploy-selfhost.sh " echo "" fi if [[ -f $(which snap) ]]; then if [[ $(snap list | grep $TERBIN | wc -l) -ne 0 ]]; then ERR=1; - echo "Error: Terraform cannot be installed via snap. Download the v0.11 binary manually and place it in your path." + echo "Error: Terraform cannot be installed via snap. Download the v0.15 binary manually and place it in your path." 
fi if [[ $(snap list | grep jsonnet | wc -l) -ne 0 ]]; then diff --git a/terraform-selfhost/terraform-selfhost.jsonnet b/terraform-selfhost/terraform-selfhost.jsonnet index 74c3535..053db53 100644 --- a/terraform-selfhost/terraform-selfhost.jsonnet +++ b/terraform-selfhost/terraform-selfhost.jsonnet @@ -20,7 +20,7 @@ local regionKeys = std.objectFields(settings.regions); backend: { s3: { bucket: settings.backend_bucket, - key: "c6fc.io/npk/terraform-selfhost.tfstate", + key: "c6fc.io/npkv2.5/terraform-selfhost.tfstate", profile: settings.awsProfile, region: settings.defaultRegion } diff --git a/terraform/cloudwatch-policy.tf.bak b/terraform/cloudwatch-policy.tf.bak deleted file mode 100644 index 894f5aa..0000000 --- a/terraform/cloudwatch-policy.tf.bak +++ /dev/null @@ -1,22 +0,0 @@ -data "aws_iam_policy_document" "cloudwatch_invoke_spot_monitor" { - statement { - sid = "1" - - actions = [ - "lambda:InvokeFunction", - ] - - # TF-UPGRADE-TODO: In Terraform v0.10 and earlier, it was sometimes necessary to - # force an interpolation expression to be interpreted as a list by wrapping it - # in an extra set of list brackets. That form was supported for compatibility in - # v0.11, but is no longer supported in Terraform v0.12. - # - # If the expression in the following list itself returns a list, remove the - # brackets to avoid interpretation as a list of lists. If the expression - # returns a single list item then leave it as-is and remove this TODO comment. - resources = [ - aws_lambda_function.spot_monitor.arn, - ] - } -} - diff --git a/terraform/cognito_iam_roles.tf.bak b/terraform/cognito_iam_roles.tf.bak deleted file mode 100644 index e3fc64c..0000000 --- a/terraform/cognito_iam_roles.tf.bak +++ /dev/null @@ -1,178 +0,0 @@ -data "aws_iam_policy_document" "cognito_authenticated" { - - /* TODO: This might require revision, but it's the default for Cognito-assumable roles */ - statement { - sid = "1" - - actions = [ - "cognito-identity:*", - "mobileanalytics:PutEvents", - "cognito-sync:*", - "ec2:describeSpotPriceHistory", - "pricing:*" - ] - - resources = [ - "*" - ] - } - - statement { - sid = "2" - - actions = [ - "s3:PutObject" - ] - - resources = [ - "${aws_s3_bucket.user_data.arn}/&{cognito-identity.amazonaws.com:sub}/uploads/*" - ] - } - - // TODO: This statement doesn't restrict file, mime, or content types - // IAM doesn't support such restrictions. - // Write an S3 upload hook to boot not-ok files. 
- statement { - sid = "3" - - actions = [ - "s3:GetObject", - "s3:ListObjectVersions", - "s3:DeleteObject" - ] - - resources = [ - "${aws_s3_bucket.user_data.arn}/&{cognito-identity.amazonaws.com:sub}", - "${aws_s3_bucket.user_data.arn}/&{cognito-identity.amazonaws.com:sub}/*" - ] - } - - statement { - sid = "4" - - actions = [ - "s3:ListBucket" - ] - - resources = [ - "${aws_s3_bucket.user_data.arn}", - ] - - condition { - test = "StringLike" - variable = "s3:prefix" - - values = [ - "&{cognito-identity.amazonaws.com:sub}/", - "&{cognito-identity.amazonaws.com:sub}/*", - ] - } - } - - statement { - sid = "5" - - actions = [ - "s3:ListBucket" - ] - - resources = [ - "${aws_s3_bucket.user_data.arn}", - ] - - condition { - test = "StringLike" - variable = "s3:prefix" - - values = [ - "&{cognito-identity.amazonaws.com:sub}/", - "&{cognito-identity.amazonaws.com:sub}/*", - ] - } - } - - statement { - sid = "6" - - actions = [ - "dynamodb:GetItem", - "dynamodb:BatchGetItem", - "dynamodb:Query" - ] - - resources = [ - "${aws_dynamodb_table.campaigns.arn}", - "${aws_dynamodb_table.settings.arn}" - ] - - condition { - test = "ForAllValues:StringEquals" - variable = "dynamodb:LeadingKeys" - - values = [ - "&{cognito-identity.amazonaws.com:sub}", - "admin" - ] - } - } - - statement { - sid = "7" - - actions = [ - "s3:ListBucket" - ] - - resources = [ - "${var.dictionary-east-1}", - "${var.dictionary-east-2}", - "${var.dictionary-west-1}", - "${var.dictionary-west-2}" - ] - } - - statement { - sid = "8" - - actions = [ - "s3:GetObject" - ] - - resources = [ - "${var.dictionary-east-1}/*", - "${var.dictionary-east-2}/*", - "${var.dictionary-west-1}/*", - "${var.dictionary-west-2}/*" - ] - } - - statement { - sid = "9" - - actions = [ - "execute-api:Invoke" - ] - - resources = [ - "${aws_api_gateway_deployment.npk.execution_arn}/*/userproxy/*" - ] - } - - /* TODO: Add DynamoDB and Lambda policy items */ -} - -data "aws_iam_policy_document" "cognito_unauthenticated" { - statement { - sid = "1" - - actions = [ - "cognito-identity:*", - "mobileanalytics:PutEvents", - "cognito-sync:*" - ] - - resources = [ - "*" - ] - } -} From 025ba700f8f1abc3deb2455a96b521102a5c5ec4 Mon Sep 17 00:00:00 2001 From: Brad Woodward Date: Mon, 13 Sep 2021 00:11:08 -0600 Subject: [PATCH 014/128] Rewrite spot_monitor, fix events & SSO --- .gitignore | 4 +- README.md | 46 +- build-docker-image.sh | 2 +- .../angular/controllers/npkMainCtrl.js | 9 +- .../angular/providers/cognitoProvider.js | 5 + site-content/angular/services/npkDB.js | 21 + site-content/views/events.html | 18 +- site-content/views/sidebar.html | 1 + terraform-selfhost/deploy-selfhost.sh | 8 +- terraform-selfhost/terraform-selfhost.jsonnet | 34 +- terraform/deploy.sh | 49 +- terraform/jsonnet/cloudfront.libsonnet | 2 +- terraform/jsonnet/cognito.libsonnet | 21 +- terraform/jsonnet/templates.libsonnet | 2 +- .../lambda_functions/create_campaign/main.js | 22 +- .../lambda_functions/delete_campaign/main.js | 24 +- .../delete_campaign/package-lock.json | 2 +- .../lambda_functions/execute_campaign/main.js | 20 +- .../lambda_functions/spot_monitor/main.js | 987 ++++++------------ .../lambda_functions/status_reporter/main.js | 6 +- terraform/quickdeploy.sh | 25 +- terraform/s3_policies.tf.bak | 27 - terraform/terraform.jsonnet | 50 +- 23 files changed, 589 insertions(+), 796 deletions(-) delete mode 100644 terraform/s3_policies.tf.bak diff --git a/.gitignore b/.gitignore index 99ffc38..b321cc3 100644 --- a/.gitignore +++ b/.gitignore @@ -13,6 +13,7 @@ 
terraform/quotas.json terraform/npk-settings.json terraform/generated-settings.jsonnet terraform/regions.json +terraform/hostedZone.json terraform/*.tf.json terraform-selfhost/*.tf.json terraform/lambda_functions/zip_files/* @@ -30,5 +31,4 @@ tools/wordlists/* terraform-selfhost/upload_npkfile.sh terraform-selfhost/upload_npkcomponents.sh terraform-selfhost/sync_npkcomponents.sh -quickdeployed -quotas \ No newline at end of file +quickdeployed \ No newline at end of file diff --git a/README.md b/README.md index f7735f8..96c8f94 100644 --- a/README.md +++ b/README.md @@ -6,6 +6,23 @@ NPK is a distributed hash-cracking platform built entirely of serverless compone 'NPK' is an initialism for the three primary atomic elements in fertilizer (Nitrogen, Phosphorus, and Potassium). Add it to your hashes to increase your cred yield! +## Upgrading from v2 + +NPK v2.5 is designed to be more resilient, easier to use, and more flexible. + +NPK v2.5 now uses Terraform 0.15, which is a significant jump from 0.11 without any direct upgrade paths. As a result, NPK v2.5 is not compatible with previous versions. In order to upgrade to v2.5 you must completely destroy your previous installation before deploying v2.5. Note that this will remove all campaigns, hash files, results, users, settings, and everything else you have in NPK. *This must be done BEFORE you switch to the v2.5 branch.* + +```sh +npk/terraform$ terraform destroy + +# If you're configured to selfhost, you'll need to remove that as well. +npk/terraform-selfhost$ terraform destroy + +npk/terraform$ git checkout api_rewrite +npk/terraform$ vim npk-settings.json # Edit the settings to conform with the new format. +npk/terraform$ ./deploy.sh +``` + ## How it works Lets face it - even the beastliest cracking rig spends a lot of time at idle. You sink a ton of money up front on hardware, then have the electricity bill to deal with. NPK lets you leverage extremely powerful hash cracking with the 'pay-as-you-go' benefits of AWS. For example, you can crank out as much as 1.2TH/s of NTLM for a mere $14.70/hr. NPK was also designed to fit easily within the free tier while you're not using it! Without the free tier, it'll still cost less than 25 CENTS per MONTH to have online! @@ -59,7 +76,7 @@ The quickdeploy wizard will ask for a few basic things, then kick off the instal NPK requires that you have the following installed: * **awscli** (v2) -* **terraform** (v0.11) +* **terraform** (v0.15) * **jq** * **jsonnet** * **npm** @@ -89,41 +106,24 @@ Edit `npk-settings.json` to taste: * `backend_bucket`: Is the bucket to store the terraform state in. If it doesn't exist, NPK will create it for you. Replace '' with random characters to make it unique, or specify another bucket you own. * `campaign_data_ttl`: This is the number of seconds that uploaded files and cracked hashes will last before they are automatically deleted. Default is 7 days. * `campaign_max_price`: The maximum number of dollars allowed to be spent on a single campaign. -* `georestrictions`: An array of https://en.wikipedia.org/wiki/ISO_3166-1_alpha-2 country codes that access should be WHITELISTED for. Traffic originating from other countries will not be permitted. -* `useCustomDNS`: A boolean value for whether to use custom domain names for your NPK installation. if set to `true`, you must configure `route53Zone` and `dnsNames` below. -* `route53Zone`: The Route53 Zone ID for the domain or subdomain you're going to host NPK with. 
You must configure this zone yourself in the same account before installing NPK.
-* `dnsNames`: This is where you configure the DNS names for the console and api endpoints for your NPK installation. Both domains must be at the same depth as one another; e.g. {www,api}.npk.yourdomain.com
+* `georestrictions`: An array of https://en.wikipedia.org/wiki/ISO_3166-1_alpha-2 country codes that access should be WHITELISTED for. Traffic originating from other countries will not be permitted. Remove the entry entirely if you don't wish to use it.
+* `route53Zone`: The Route53 Zone ID for the domain or subdomain you're going to host NPK with. You must configure this zone yourself in the same account before installing NPK. *The NPK console will be hosted at the root of this zone* with the API endpoint being created as a subdomain.
 * `awsProfile`: The profile name in `~/.aws/credentials` that you want to piggyback on for the installation.
 * `criticalEventsSMS`: The cellphone number of a destination to receive critical events to. Only catastrophic errors are reported here, so use a real one.
 * `adminEmail`: The email address of the administrator and first user of NPK. Once the installation is complete, this is where you'll receive your credentials.
-* `useSAML`: Set to `true` if you want to enable SAML-based federated authentication.
-* `sAMLMetadataFile` or `sAMLMetadataUrl`: Only one can be configured, and it's required if `useSAML` is `true`.
+* `sAMLMetadataFile` or `sAMLMetadataUrl`: Only one can be configured. Leave them out entirely if you're not using SAML.
 
-Here's an example of a completed config file with custom DNS and no SAML:
+Here's an example of a completed config file with custom DNS, no GeoRestrictions, and no SAML:
 
 ```json
 {
 	"backend_bucket": "backend-terraform-npkdev",
 	"campaign_data_ttl": 604800,
 	"campaign_max_price": 50,
-	"georestrictions": [],
-	"useCustomDNS": true,
 	"route53Zone": "Z05471496OWNC3E2EHCI",
-	"dnsNames": {
-		"www": [
-			"www.npk.yourdomain.com"
-		],
-		"api": [
-			"api.npk.yourdomain.com"
-		]
-	},
 	"awsProfile": "npkdev",
 	"criticalEventsSMS": "+12085551234",
-	"adminEmail": "you@yourdomain.com",
-	"debug_lambda": true,
-
-	"useSAML": false,
-	"sAMLMetadataFile": ""
+	"adminEmail": "you@yourdomain.com"
 }
 ```
 After that, run the deploy!
@@ -161,6 +161,6 @@ npk/terraform$ terraform destroy
 
 # Official Discord Channel
 
-Come hang out on Discord!
+Have questions, need help, want to contribute or brag about a win? Come hang out on Discord!
 
 [![Porchetta Industries](https://discordapp.com/api/guilds/736724457258745996/widget.png?style=banner3)](https://discord.gg/k5PQnqSNDF)
\ No newline at end of file
diff --git a/build-docker-image.sh b/build-docker-image.sh
index 4ed8bf2..ccd759b 100755
--- a/build-docker-image.sh
+++ b/build-docker-image.sh
@@ -1,4 +1,4 @@
-#!/bin/sh
+#!/bin/bash
 
 docker build -t c6fc/npk:latest .
docker run -it -v `pwd`:/npk -v ~/.aws/:/root/.aws c6fc/npk:latest \ No newline at end of file diff --git a/site-content/angular/controllers/npkMainCtrl.js b/site-content/angular/controllers/npkMainCtrl.js index bb75dcf..adf15ae 100755 --- a/site-content/angular/controllers/npkMainCtrl.js +++ b/site-content/angular/controllers/npkMainCtrl.js @@ -252,7 +252,7 @@ angular $scope.confirmpassword; $scope.verificationcode; - $scope.useSamlSSO = (SAMLSSO.useSamlSSO == "1"); + $scope.useSamlSSO = (SAMLSSO.useSamlSSO == "true"); if ($scope.useSamlSSO == true) { $scope.samlSSOURL = "https://" + SAMLSSO.SAMLDomain + "/oauth2/authorize?identity_provider=" + SAMLSSO.SAMLIdp + "&redirect_uri=" + SAMLSSO.SAMLRedirectUrl + "&response_type=CODE&client_id=" + COGNITO_CONFIG.ClientId + "&scope=email%20openid" } @@ -580,6 +580,9 @@ angular $('a#start-' + campaign_id).hide(); $('img#action-' + campaign_id).show(); + $scope.modalMessages.error = []; + $scope.modalMessages.success = []; + params = { method: 'PUT', url: 'https://' + APIGATEWAY_URL + '/v1/userproxy/campaign/' + campaign_id, @@ -1566,6 +1569,10 @@ angular $scope.orderResponse = {success: false}; $scope.submitOrder = function() { + $scope.orderErrors = []; + $scope.orderWarnings = []; + $scope.submittingOrder = true; + $('#orderModal').modal('hide'); $('#orderResponseModal').modal('show'); diff --git a/site-content/angular/providers/cognitoProvider.js b/site-content/angular/providers/cognitoProvider.js index 6f9ef3d..2589ae5 100644 --- a/site-content/angular/providers/cognitoProvider.js +++ b/site-content/angular/providers/cognitoProvider.js @@ -29,6 +29,11 @@ angular return false; } + // If the identity type is SAML, the 'payload' object is merged into the parent 'idToken'. + if (!this.cognitoUserSession.idToken.hasOwnProperty('payload')) { + return this.cognitoUserSession.idToken['cognito:groups'].indexOf('npk-admins') > -1; + } + return this.cognitoUserSession.idToken.payload['cognito:groups'].indexOf('npk-admins') > -1; }, diff --git a/site-content/angular/services/npkDB.js b/site-content/angular/services/npkDB.js index bdb7960..bc10038 100644 --- a/site-content/angular/services/npkDB.js +++ b/site-content/angular/services/npkDB.js @@ -287,6 +287,7 @@ angular cancelCampaign: function(campaign_id) { $('a#cancel-' + campaign_id).hide(); + $('a#delete-' + campaign_id).hide(); $('img#action-' + campaign_id).show(); params = { @@ -307,6 +308,26 @@ angular } location.href = location.href.split('#')[0]; + }).fail(function(xhr) { + + data = xhr.responseText; + + try { + data = JSON.parse(data); + } catch (e) { + data = {msg: "Error parsing response JSON.", success: false}; + } + + if (data.success == false) { + $scope.modalMessages.error = [data.msg]; + } else { + $scope.modalMessages.success = [data.msg]; + } + + $scope.$digest(); + + $('#messageModal').modal('show'); + $('img#action-' + campaign_id).hide(); }); }, diff --git a/site-content/views/events.html b/site-content/views/events.html index 4444214..6e486a2 100644 --- a/site-content/views/events.html +++ b/site-content/views/events.html @@ -52,7 +52,7 @@ - {{ event.startTime | momentfn:'format' }} + {{ event.startTime | momentns:'format' }} @@ -72,7 +72,21 @@
Order was comprised of {{ event.instanceCount }}x {{ event.instanceType }} for {{ event.durationSeconds / 3600 }} hours.
- Campaign spent {{ event.price | currency:'$' }} of {{ event.targetPrice }} allotted. + Campaign spent {{ event.price | currency:'$' }} of {{ event.targetPrice | currency:'$' }} allotted. + + + + + Instance {{ event.keyid.split(':')[2] }} (from campaign {{ event.keyid.split(':')[0] }}) finished with an error. +
+ See the File Management section for the full log details. + + + + + Instance {{ event.keyid.split(':')[2] }} (from campaign {{ event.keyid.split(':')[0] }}) finished successfully. +
+ Recovered {{ event.recoveredHashes }} plaintext values. diff --git a/site-content/views/sidebar.html b/site-content/views/sidebar.html index ffe9622..6a221c8 100644 --- a/site-content/views/sidebar.html +++ b/site-content/views/sidebar.html @@ -4,6 +4,7 @@ logo icon
N-P-K
+ Version 2.5