diff --git a/CHANGELOG.md b/CHANGELOG.md new file mode 100644 index 0000000..9c8b64c --- /dev/null +++ b/CHANGELOG.md @@ -0,0 +1,20 @@ +# Changelog + +## v0.2.0 (2019-??-??) + +- **major (incompatible)**, Refactor: With [#70](https://github.com/MatthiasScholz/cos/issues/70) the cos module was upgraded to be compatible with terraform 0.12.0. + - Furthermore the depending modules were upgraded as well: + - terraform-aws-consul from [v0.3.1](https://github.com/hashicorp/terraform-aws-consul/tree/v0.3.1) to [v0.7.0](https://github.com/hashicorp/terraform-aws-consul/tree/v0.7.0) + - terraform-aws-nomad from [v0.4.5](https://github.com/hashicorp/terraform-aws-nomad/tree/v0.4.5) to [v0.5.0](https://github.com/hashicorp/terraform-aws-nomad/tree/v0.5.0) +- License: With [9156e49](https://github.com/MatthiasScholz/cos/commit/9156e49f0eabbfc50100aeb778e6a776ba376b96) the license model was changed from GPL to LGPL, a more relaxed one. +- Test: With PR [#68](https://github.com/MatthiasScholz/cos/pull/68) tests (terratest) were added to ensure functionality of the COS. 
+ +## v0.1.1 (2019-07-20) + +## v0.1.0 (2019-06-14) + +## v0.0.3 (2019-01-01) + +## v0.0.2 (2018-12-13) + +## v0.0.1 (2018-04-30) diff --git a/examples/bastion/main.tf b/examples/bastion/main.tf index b3ea528..c2491b6 100644 --- a/examples/bastion/main.tf +++ b/examples/bastion/main.tf @@ -6,8 +6,8 @@ locals { } provider "aws" { - profile = "${var.deploy_profile}" - region = "${local.aws_region}" + profile = var.deploy_profile + region = local.aws_region } resource "random_pet" "unicorn" { @@ -21,22 +21,22 @@ data "aws_vpc" "default" { } data "aws_subnet_ids" "all" { - vpc_id = "${data.aws_vpc.default.id}" + vpc_id = data.aws_vpc.default.id } module "bastion" { source = "../../modules/bastion" ## required parameters - vpc_id = "${data.aws_vpc.default.id}" - subnet_id = "${element(data.aws_subnet_ids.all.ids,0)}" - ami_id = "${local.ami_id}" + vpc_id = data.aws_vpc.default.id + subnet_id = element(tolist(data.aws_subnet_ids.all.ids), 0) + ami_id = local.ami_id ssh_key_name = "${var.ssh_key_name}" ## optional parameters - aws_region = "${local.aws_region}" - env_name = "${local.env_name}" - stack_name = "${local.stack_name}" + aws_region = local.aws_region + env_name = local.env_name + stack_name = local.stack_name allowed_ssh_cidr_blocks = { "all" = "0.0.0.0/0" diff --git a/examples/bastion/outputs.tf b/examples/bastion/outputs.tf index 46c2017..5d42a88 100644 --- a/examples/bastion/outputs.tf +++ b/examples/bastion/outputs.tf @@ -1,5 +1,5 @@ output "bastion_ip" { - value = "${module.bastion.bastion_ip}" + value = module.bastion.bastion_ip } output "ssh_login" { diff --git a/examples/bastion/versions.tf b/examples/bastion/versions.tf new file mode 100644 index 0000000..ac97c6a --- /dev/null +++ b/examples/bastion/versions.tf @@ -0,0 +1,4 @@ + +terraform { + required_version = ">= 0.12" +} diff --git a/examples/consul/main.tf b/examples/consul/main.tf index b4bc89c..d015ea2 100644 --- a/examples/consul/main.tf +++ b/examples/consul/main.tf @@ -4,8 +4,8 @@ locals { } 
provider "aws" { - profile = "${var.deploy_profile}" - region = "${var.aws_region}" + profile = var.deploy_profile + region = var.aws_region } ### obtaining default vpc, security group and subnet of the env @@ -14,21 +14,21 @@ data "aws_vpc" "default" { } data "aws_subnet_ids" "all" { - vpc_id = "${data.aws_vpc.default.id}" + vpc_id = data.aws_vpc.default.id } module "consul" { source = "../../modules/consul" ## required parameters - vpc_id = "${data.aws_vpc.default.id}" - subnet_ids = "${data.aws_subnet_ids.all.ids}" - ami_id = "${var.ami_id}" + vpc_id = data.aws_vpc.default.id + subnet_ids = data.aws_subnet_ids.all.ids + ami_id = var.ami_id ## optional parameters - aws_region = "${var.aws_region}" - env_name = "${local.env_name}" - stack_name = "${local.stack_name}" + aws_region = var.aws_region + env_name = local.env_name + stack_name = local.stack_name cluster_tag_key = "consul-servers" cluster_tag_value = "${local.stack_name}-${local.env_name}-consul-srv" allowed_ssh_cidr_blocks = ["0.0.0.0/0"] diff --git a/examples/consul/outputs.tf b/examples/consul/outputs.tf index 05a1e49..23f704d 100644 --- a/examples/consul/outputs.tf +++ b/examples/consul/outputs.tf @@ -1,15 +1,15 @@ output "asg_name_consul_servers" { - value = "${module.consul.asg_name_consul_servers}" + value = module.consul.asg_name_consul_servers } output "security_group_id_consul_servers" { - value = "${module.consul.security_group_id_consul_servers}" + value = module.consul.security_group_id_consul_servers } output "consul_servers_cluster_tag_key" { - value = "${module.consul.consul_servers_cluster_tag_key}" + value = module.consul.consul_servers_cluster_tag_key } output "consul_servers_cluster_tag_value" { - value = "${module.consul.consul_servers_cluster_tag_value}" + value = module.consul.consul_servers_cluster_tag_value } diff --git a/examples/consul/versions.tf b/examples/consul/versions.tf new file mode 100644 index 0000000..ac97c6a --- /dev/null +++ b/examples/consul/versions.tf @@ -0,0 
+1,4 @@ + +terraform { + required_version = ">= 0.12" +} diff --git a/examples/networking/main.tf b/examples/networking/main.tf index 77cd204..edf2f4a 100644 --- a/examples/networking/main.tf +++ b/examples/networking/main.tf @@ -1,9 +1,9 @@ provider "aws" { - profile = "${var.deploy_profile}" - region = "${var.aws_region}" + profile = var.deploy_profile + region = var.aws_region } module "networking" { source = "../../modules/networking" - region = "${var.aws_region}" + region = var.aws_region } diff --git a/examples/networking/versions.tf b/examples/networking/versions.tf new file mode 100644 index 0000000..ac97c6a --- /dev/null +++ b/examples/networking/versions.tf @@ -0,0 +1,4 @@ + +terraform { + required_version = ">= 0.12" +} diff --git a/examples/nomad-datacenter/main.tf b/examples/nomad-datacenter/main.tf index d9d10e8..c30e58e 100644 --- a/examples/nomad-datacenter/main.tf +++ b/examples/nomad-datacenter/main.tf @@ -4,8 +4,8 @@ locals { } provider "aws" { - profile = "${var.deploy_profile}" - region = "${var.aws_region}" + profile = var.deploy_profile + region = var.aws_region } resource "random_pet" "unicorn" { @@ -19,11 +19,11 @@ data "aws_vpc" "default" { } data "aws_subnet_ids" "all" { - vpc_id = "${data.aws_vpc.default.id}" + vpc_id = data.aws_vpc.default.id } resource "aws_security_group" "sg_nomad_server" { - vpc_id = "${data.aws_vpc.default.id}" + vpc_id = data.aws_vpc.default.id name_prefix = "sg_nomad_server" description = "Sample nomad server sg." 
} @@ -32,17 +32,17 @@ module "nomad-datacenter" { source = "../../modules/nomad-datacenter" ## required parameters - vpc_id = "${data.aws_vpc.default.id}" - subnet_ids = "${data.aws_subnet_ids.all.ids}" - ami_id = "${var.ami_id}" + vpc_id = data.aws_vpc.default.id + subnet_ids = data.aws_subnet_ids.all.ids + ami_id = var.ami_id consul_cluster_tag_key = "consul-servers" consul_cluster_tag_value = "${local.stack_name}-${local.env_name}-consul-srv" - server_sg_id = "${aws_security_group.sg_nomad_server.id}" + server_sg_id = aws_security_group.sg_nomad_server.id ## optional parameters - aws_region = "${var.aws_region}" - env_name = "${local.env_name}" - stack_name = "${local.stack_name}" + aws_region = var.aws_region + env_name = local.env_name + stack_name = local.stack_name allowed_ssh_cidr_blocks = ["0.0.0.0/0"] ssh_key_name = "${var.ssh_key_name}" datacenter_name = "public-services" @@ -64,8 +64,11 @@ module "nomad-datacenter" { "desired_capacity" = 1 } - ebs_block_devices = [{ - "device_name" = "/dev/xvdf" - "volume_size" = "50" - }] + ebs_block_devices = [ + { + "device_name" = "/dev/xvdf" + "volume_size" = "50" + }, + ] } + diff --git a/examples/nomad-datacenter/vars.tf b/examples/nomad-datacenter/vars.tf index 838785c..4f39c5b 100644 --- a/examples/nomad-datacenter/vars.tf +++ b/examples/nomad-datacenter/vars.tf @@ -4,7 +4,7 @@ variable "deploy_profile" { variable "ami_id" { description = "Id of the AMI for the nomad and consul nodes." 
- default = "ami-a23feadf" + default = "ami-09118e4b58586b75d" } variable "aws_region" { diff --git a/examples/nomad-datacenter/versions.tf b/examples/nomad-datacenter/versions.tf new file mode 100644 index 0000000..ac97c6a --- /dev/null +++ b/examples/nomad-datacenter/versions.tf @@ -0,0 +1,4 @@ + +terraform { + required_version = ">= 0.12" +} diff --git a/examples/nomad/main.tf b/examples/nomad/main.tf index bc6e4f2..2cca5d3 100644 --- a/examples/nomad/main.tf +++ b/examples/nomad/main.tf @@ -1,15 +1,15 @@ locals { stack_name = "COS" env_name = "playground" - consul_ami_id = "${var.ami_id}" - nomad_ami_id = "${var.ami_id}" + consul_ami_id = var.ami_id + nomad_ami_id = var.ami_id consul_cluster_tag_key = "consul-servers" consul_cluster_tag_value = "${local.stack_name}-SDCFG-consul-${random_pet.unicorn.id}" } provider "aws" { - profile = "${var.deploy_profile}" - region = "${var.aws_region}" + profile = var.deploy_profile + region = var.aws_region } resource "random_pet" "unicorn" { @@ -23,23 +23,23 @@ data "aws_vpc" "default" { } data "aws_subnet_ids" "all" { - vpc_id = "${data.aws_vpc.default.id}" + vpc_id = data.aws_vpc.default.id } module "consul" { source = "../../modules/consul" ## required parameters - vpc_id = "${data.aws_vpc.default.id}" - subnet_ids = "${data.aws_subnet_ids.all.ids}" - ami_id = "${local.consul_ami_id}" + vpc_id = data.aws_vpc.default.id + subnet_ids = data.aws_subnet_ids.all.ids + ami_id = local.consul_ami_id ## optional parameters - aws_region = "${var.aws_region}" - env_name = "${local.env_name}" - stack_name = "${local.stack_name}" - cluster_tag_key = "${local.consul_cluster_tag_key}" - cluster_tag_value = "${local.consul_cluster_tag_value}" + aws_region = var.aws_region + env_name = local.env_name + stack_name = local.stack_name + cluster_tag_key = local.consul_cluster_tag_key + cluster_tag_value = local.consul_cluster_tag_value allowed_ssh_cidr_blocks = ["0.0.0.0/0"] ssh_key_name = "${var.ssh_key_name}" } @@ -48,17 +48,16 @@ module 
"nomad" { source = "../../modules/nomad" ## required parameters - vpc_id = "${data.aws_vpc.default.id}" - subnet_ids = "${data.aws_subnet_ids.all.ids}" - ami_id = "${local.nomad_ami_id}" - consul_cluster_tag_key = "${local.consul_cluster_tag_key}" - consul_cluster_tag_value = "${local.consul_cluster_tag_value}" - consul_cluster_security_group_id = "${module.consul.security_group_id_consul_servers}" + vpc_id = data.aws_vpc.default.id + subnet_ids = data.aws_subnet_ids.all.ids + ami_id = local.nomad_ami_id + consul_cluster_tag_key = local.consul_cluster_tag_key + consul_cluster_tag_value = local.consul_cluster_tag_value ## optional parameters - aws_region = "${var.aws_region}" - env_name = "${local.env_name}" - stack_name = "${local.stack_name}" + aws_region = var.aws_region + env_name = local.env_name + stack_name = local.stack_name allowed_ssh_cidr_blocks = ["0.0.0.0/0"] ssh_key_name = "${var.ssh_key_name}" instance_type = "t2.micro" @@ -71,3 +70,4 @@ module "nomad" { "desired_capacity" = 3 } } + diff --git a/examples/nomad/outputs.tf b/examples/nomad/outputs.tf index e392fbe..a7a7e48 100644 --- a/examples/nomad/outputs.tf +++ b/examples/nomad/outputs.tf @@ -1,15 +1,16 @@ output "aws_region" { - value = "${var.aws_region}" + value = var.aws_region } output "nomad_servers_cluster_tag_key" { - value = "${module.nomad.nomad_servers_cluster_tag_key}" + value = module.nomad.nomad_servers_cluster_tag_key } output "nomad_servers_cluster_tag_value" { - value = "${module.nomad.nomad_servers_cluster_tag_value}" + value = module.nomad.nomad_servers_cluster_tag_value } output "num_nomad_servers" { - value = "${module.nomad.num_nomad_servers}" + value = module.nomad.num_nomad_servers } + diff --git a/examples/nomad/sg_consul_nomad.tf b/examples/nomad/sg_consul_nomad.tf new file mode 100644 index 0000000..9f396ce --- /dev/null +++ b/examples/nomad/sg_consul_nomad.tf @@ -0,0 +1,26 @@ +locals { + # The rule_map contains the spec for the security group rules that should be applied. 
+ # An entry is of the form "" = ["",,] + rule_map = { + "Grants access from nomad (rcp, serf: lan, wan - tcp)" = ["tcp",8300,8302], + "Grants access from nomad (rcp, serf: lan, wan - udp)" = ["udp",8301,8302], + "Grants access from nomad (http)" = ["tcp",8500,8500], + "Grants access from nomad (dns tcp)" = ["tcp",8600,8600], + "Grants access from nomad (dns udp)" = ["udp",8600,8600], + } +} + +# rule granting access from nomad to consul on ports defined in rule_map +# [nomad>consul] +resource "aws_security_group_rule" "sgr_nomad_to_consul" { + + for_each = local.rule_map + + type = "ingress" + description = each.key + protocol = element(each.value,0) + from_port = element(each.value,1) + to_port = element(each.value,2) + source_security_group_id = module.nomad.security_group_id_nomad_servers + security_group_id = module.consul.security_group_id_consul_servers +} \ No newline at end of file diff --git a/examples/nomad/sg_nomad_consul.tf b/examples/nomad/sg_nomad_consul.tf new file mode 100644 index 0000000..1cb951f --- /dev/null +++ b/examples/nomad/sg_nomad_consul.tf @@ -0,0 +1,18 @@ +locals { + protocols = ["tcp","udp"] +} + +# rule granting access from consul to nomad server on ports +# 8300...8302 tcp and udp +# [consul>nomad] RCP, Serf LAN and WAN, TCP + UDP +resource "aws_security_group_rule" "sgr_consul_to_nomad_server" { + count = length(local.protocols) + + type = "ingress" + description = "Grants access from consul server (rcp, serf: lan, wan - ${element(local.protocols,count.index)})" + from_port = 8300 + to_port = 8302 + protocol = element(local.protocols,count.index) + source_security_group_id = module.consul.security_group_id_consul_servers + security_group_id = module.nomad.security_group_id_nomad_servers +} \ No newline at end of file diff --git a/examples/nomad/vars.tf b/examples/nomad/vars.tf index 23f1cb7..f6f4a02 100644 --- a/examples/nomad/vars.tf +++ b/examples/nomad/vars.tf @@ -4,7 +4,7 @@ variable "deploy_profile" { variable "ami_id" { 
description = "Id of the AMI for the nomad and consul nodes." - default = "ami-a23feadf" + default = "ami-09118e4b58586b75d" } variable "ssh_key_name" { diff --git a/examples/nomad/versions.tf b/examples/nomad/versions.tf new file mode 100644 index 0000000..ac97c6a --- /dev/null +++ b/examples/nomad/versions.tf @@ -0,0 +1,4 @@ + +terraform { + required_version = ">= 0.12" +} diff --git a/examples/root-example/README.md b/examples/root-example/README.md index 0a33be7..c9cbf89 100644 --- a/examples/root-example/README.md +++ b/examples/root-example/README.md @@ -24,7 +24,6 @@ source ./bootstrap.sh Or you follow the preceding instructions. - ## Setup helper scripts ```bash @@ -139,7 +138,52 @@ sshuttle_login.sh ### Datacenter Configuration -* [ ** TODO: Describe to configuration of the different nomad datacenters. +- [ ] TODO: Describe to configuration of the different nomad datacenters. + +## Troubleshooting + +### No images found for AMI + +If you see the following error, then you don't have the AMI which is referenced available in your account. + +```bash +module.nomad-infra.module.dc-backoffice.module.data_center.aws_launch_configuration.launch_configuration: 1 error occurred: +aws_launch_configuration.launch_configuration: No images found for AMI ami-02d24827dece83bef +``` + +To solve this issue you have to build it and to reference the newly built AMI in the example. + +#### Build the AMI + +How to do this see paragraph `Build the AMI using Packer` in [modules/ami2/README.md](../../modules/ami2/README.md). + +![output of ami creation](create_ami_output.png) + +#### Reference the AMI in root-example + +Open the file `vars.tf` and there replace the value of the field `default` for variables `nomad_ami_id_clients` and `nomad_ami_id_servers` with the id of the ami that was just created with packer. 
+ +![reference the ami](ref_ami.png) + +### MalformedCertificate: Certificate is no longer valid + +If the used certificate is not valid any more you will receive the following (or similar) error. + +```bash +aws_iam_server_certificate.certificate_alb: 1 error occurred: +aws_iam_server_certificate.certificate_alb: Error uploading server certificate, error: MalformedCertificate: Certificate is no longer valid. The 'Not After' date restriction on the certificate has passed. +``` + +To solve this issue a new certificate has to be created. + +#### Create the self signed Certificate + +```bash +openssl req -x509 -newkey rsa:4096 -keyout key.pem -out cert.pem -days 365 -nodes +``` + +Then copy the content of `cert.pem` into the field `certificate_body` of the file `alb_cert.tf`. +And copy the content of `key.pem` into the field `private_key` of the file `alb_cert.tf`. ## Remarks diff --git a/examples/root-example/create_ami_output.png b/examples/root-example/create_ami_output.png new file mode 100644 index 0000000..b084d2f Binary files /dev/null and b/examples/root-example/create_ami_output.png differ diff --git a/examples/root-example/main.tf b/examples/root-example/main.tf index accaa4c..2e14509 100644 --- a/examples/root-example/main.tf +++ b/examples/root-example/main.tf @@ -9,8 +9,8 @@ locals { } provider "aws" { - profile = "${var.deploy_profile}" - region = "${var.aws_region}" + profile = var.deploy_profile + region = var.aws_region } resource "random_pet" "unicorn" { @@ -20,8 +20,8 @@ resource "random_pet" "unicorn" { module "networking" { source = "../../modules/networking" - region = "${var.aws_region}" - env_name = "${var.env_name}" + region = var.aws_region + env_name = var.env_name unique_postfix = "-${random_pet.unicorn.id}" az_postfixes = ["a", "b"] } @@ -30,16 +30,16 @@ module "bastion" { source = "../../modules/bastion" ## required parameters - vpc_id = "${module.networking.vpc_id}" - subnet_id = "${element(module.networking.public_subnet_ids,0)}" - 
ami_id = "${local.ami_id_bastion}" - ssh_key_name = "${var.ssh_key_name}" + vpc_id = module.networking.vpc_id + subnet_id = element(module.networking.public_subnet_ids, 0) + ami_id = local.ami_id_bastion + ssh_key_name = var.ssh_key_name ## optional parameters - aws_region = "${var.aws_region}" - env_name = "${var.env_name}" - stack_name = "${var.stack_name}" - allowed_ssh_cidr_blocks = "${local.allowed_cidr_blocks}" + aws_region = var.aws_region + env_name = var.env_name + stack_name = var.stack_name + allowed_ssh_cidr_blocks = local.allowed_cidr_blocks instance_type = "t2.micro" unique_postfix = "-${random_pet.unicorn.id}" } @@ -48,39 +48,39 @@ module "nomad-infra" { source = "../../" # [General] Required variables - aws_region = "${var.aws_region}" - vpc_id = "${module.networking.vpc_id}" - alb_subnet_ids = "${module.networking.public_subnet_ids}" + aws_region = var.aws_region + vpc_id = module.networking.vpc_id + alb_subnet_ids = module.networking.public_subnet_ids # HACK: Use an http listener here to avoid the need to create a certificate. # In a production environmant you should pass in a https listener instead. 
- alb_ingress_https_listener_arn = "${module.networking.alb_ingress_http_listener_arn}" + alb_ingress_https_listener_arn = module.networking.alb_ingress_http_listener_arn - alb_backoffice_https_listener_arn = "${module.networking.alb_backoffice_https_listener_arn}" + alb_backoffice_https_listener_arn = module.networking.alb_backoffice_https_listener_arn attach_backoffice_alb_listener = true # [Nomad] Required variables nomad_ami_id_servers = "${var.ami_id}" nomad_ami_id_clients = "${var.ami_id}" - nomad_server_subnet_ids = "${module.networking.backoffice_subnet_ids}" - nomad_clients_public_services_subnet_ids = "${module.networking.services_subnet_ids}" - nomad_clients_private_services_subnet_ids = "${module.networking.services_subnet_ids}" - nomad_clients_content_connector_subnet_ids = "${module.networking.content_connector_subnet_ids}" - nomad_clients_backoffice_subnet_ids = "${module.networking.backoffice_subnet_ids}" + nomad_server_subnet_ids = module.networking.backoffice_subnet_ids + nomad_clients_public_services_subnet_ids = module.networking.services_subnet_ids + nomad_clients_private_services_subnet_ids = module.networking.services_subnet_ids + nomad_clients_content_connector_subnet_ids = module.networking.content_connector_subnet_ids + nomad_clients_backoffice_subnet_ids = module.networking.backoffice_subnet_ids # [Consul] Required variables + consul_server_subnet_ids = module.networking.backoffice_subnet_ids consul_ami_id = "${var.ami_id}" - consul_server_subnet_ids = "${module.networking.backoffice_subnet_ids}" # [General] Optional variables - stack_name = "${var.stack_name}" - env_name = "${var.env_name}" + stack_name = var.stack_name + env_name = var.env_name unique_postfix = "-${random_pet.unicorn.id}" instance_type_server = "t2.micro" - ssh_key_name = "${var.ssh_key_name}" - allowed_ssh_cidr_blocks = ["${values(local.allowed_cidr_blocks)}"] + ssh_key_name = var.ssh_key_name + allowed_ssh_cidr_blocks = values(local.allowed_cidr_blocks) - 
allowed_cidr_blocks_for_ui_alb = "${local.allowed_cidr_blocks}" + allowed_cidr_blocks_for_ui_alb = local.allowed_cidr_blocks # INFO: uncomment the following two lines if you want to deploy the cluster having https endpoints # for the ui-albs (nomad-ui, consul-ui and fabio-ui). @@ -91,24 +91,26 @@ module "nomad-infra" { #ui_alb_use_https_listener = true # [Nomad] Optional variables - nomad_server_scaling_cfg = "${var.server_scaling_cfg}" - nomad_private_services_dc_node_cfg = "${var.nomad_dc_node_cfg}" - nomad_public_services_dc_node_cfg = "${var.nomad_dc_node_cfg}" - nomad_content_connector_dc_node_cfg = "${var.nomad_dc_node_cfg}" - nomad_backoffice_dc_node_cfg = "${var.nomad_dc_node_cfg}" - ebs_block_devices_private_services_dc = "${var.ebs_block_devices_sample}" - ebs_block_devices_public_services_dc = "${var.ebs_block_devices_sample}" - ebs_block_devices_backoffice_dc = "${var.ebs_block_devices_sample}" - ebs_block_devices_content_connector_dc = "${var.ebs_block_devices_sample}" - device_to_mount_target_map_public_services_dc = "${var.device_to_mount_target_map_sample}" - device_to_mount_target_map_private_services_dc = "${var.device_to_mount_target_map_sample}" - device_to_mount_target_map_backoffice_dc = "${var.device_to_mount_target_map_sample}" - device_to_mount_target_map_content_connector_dc = "${var.device_to_mount_target_map_sample}" - additional_instance_tags_public_services_dc = "${var.additional_instance_tags_sample}" - additional_instance_tags_private_services_dc = "${var.additional_instance_tags_sample}" - additional_instance_tags_backoffice_dc = "${var.additional_instance_tags_sample}" - additional_instance_tags_content_connector_dc = "${var.additional_instance_tags_sample}" + nomad_server_scaling_cfg = var.server_scaling_cfg + nomad_private_services_dc_node_cfg = var.nomad_dc_node_cfg + nomad_public_services_dc_node_cfg = var.nomad_dc_node_cfg + nomad_content_connector_dc_node_cfg = var.nomad_dc_node_cfg + nomad_backoffice_dc_node_cfg = 
var.nomad_dc_node_cfg + ebs_block_devices_private_services_dc = var.ebs_block_devices_sample + ebs_block_devices_public_services_dc = var.ebs_block_devices_sample + ebs_block_devices_backoffice_dc = var.ebs_block_devices_sample + ebs_block_devices_content_connector_dc = var.ebs_block_devices_sample + device_to_mount_target_map_public_services_dc = var.device_to_mount_target_map_sample + device_to_mount_target_map_private_services_dc = var.device_to_mount_target_map_sample + device_to_mount_target_map_backoffice_dc = var.device_to_mount_target_map_sample + device_to_mount_target_map_content_connector_dc = var.device_to_mount_target_map_sample + additional_instance_tags_public_services_dc = var.additional_instance_tags_sample + additional_instance_tags_private_services_dc = var.additional_instance_tags_sample + additional_instance_tags_backoffice_dc = var.additional_instance_tags_sample + additional_instance_tags_content_connector_dc = var.additional_instance_tags_sample + # [Consul] Optional variables consul_num_servers = 3 consul_instance_type = "t2.micro" } + diff --git a/examples/root-example/outputs.tf b/examples/root-example/outputs.tf index 09539f2..a89fd0d 100644 --- a/examples/root-example/outputs.tf +++ b/examples/root-example/outputs.tf @@ -1,25 +1,25 @@ output "aws_region" { - value = "${var.aws_region}" + value = var.aws_region } output "nomad_servers_cluster_tag_key" { - value = "${module.nomad-infra.nomad_servers_cluster_tag_key}" + value = module.nomad-infra.nomad_servers_cluster_tag_key } output "nomad_servers_cluster_tag_value" { - value = "${module.nomad-infra.nomad_servers_cluster_tag_value}" + value = module.nomad-infra.nomad_servers_cluster_tag_value } output "num_nomad_servers" { - value = "${module.nomad-infra.num_nomad_servers}" + value = module.nomad-infra.num_nomad_servers } output "nomad_clients_public_services_cluster_tag_value" { - value = "${module.nomad-infra.nomad_clients_public_services_cluster_tag_value}" + value = 
module.nomad-infra.nomad_clients_public_services_cluster_tag_value } output "nomad_ui_alb_dns" { - value = "${module.nomad-infra.nomad_ui_alb_dns_name}" + value = module.nomad-infra.nomad_ui_alb_dns_name } output "curl_nomad_ui" { @@ -31,7 +31,7 @@ output "export_nomad_cmd" { } output "consul_ui_alb_dns" { - value = "${module.nomad-infra.consul_ui_alb_dns_name}" + value = module.nomad-infra.consul_ui_alb_dns_name } output "curl_consul_ui" { @@ -39,7 +39,7 @@ output "curl_consul_ui" { } output "fabio_ui_alb_dns" { - value = "${module.nomad-infra.fabio_ui_alb_dns_name}" + value = module.nomad-infra.fabio_ui_alb_dns_name } output "curl_fabio_ui" { @@ -51,11 +51,11 @@ output "curl_ping_service" { } output "ingress_alb_dns" { - value = "${module.networking.alb_public_services_dns}" + value = module.networking.alb_public_services_dns } output "bastion_ip" { - value = "${module.bastion.bastion_ip}" + value = module.bastion.bastion_ip } output "ssh_login" { @@ -63,73 +63,74 @@ output "ssh_login" { } output "ssh_key_name" { - value = "${module.nomad-infra.ssh_key_name}" + value = module.nomad-infra.ssh_key_name } output "vpc_id" { - value = "${module.nomad-infra.vpc_id}" + value = module.nomad-infra.vpc_id } output "vpc_cidr_block" { - value = "${module.networking.vpc_cidr_block}" + value = module.networking.vpc_cidr_block } output "cluster_prefix" { - value = "${module.nomad-infra.cluster_prefix}" + value = module.nomad-infra.cluster_prefix } output "consul_servers_cluster_tag_key" { - value = "${module.nomad-infra.consul_servers_cluster_tag_key}" + value = module.nomad-infra.consul_servers_cluster_tag_key } output "consul_servers_cluster_tag_value" { - value = "${module.nomad-infra.consul_servers_cluster_tag_value}" + value = module.nomad-infra.consul_servers_cluster_tag_value } output "nomad_ui_alb_dns_name" { - value = "${module.nomad-infra.nomad_ui_alb_dns_name}" + value = module.nomad-infra.nomad_ui_alb_dns_name } output "nomad_ui_alb_https_targetgroup_arn" { - value 
= "${module.nomad-infra.nomad_ui_alb_https_targetgroup_arn}" + value = module.nomad-infra.nomad_ui_alb_https_targetgroup_arn } output "nomad_ui_alb_https_listener_arn" { - value = "${module.nomad-infra.nomad_ui_alb_https_listener_arn}" + value = module.nomad-infra.nomad_ui_alb_https_listener_arn } output "consul_ui_alb_dns_name" { - value = "${module.nomad-infra.consul_ui_alb_dns_name}" + value = module.nomad-infra.consul_ui_alb_dns_name } output "consul_ui_alb_https_targetgroup_arn" { - value = "${module.nomad-infra.consul_ui_alb_https_targetgroup_arn}" + value = module.nomad-infra.consul_ui_alb_https_targetgroup_arn } output "consul_ui_alb_https_listener_arn" { - value = "${module.nomad-infra.consul_ui_alb_https_listener_arn}" + value = module.nomad-infra.consul_ui_alb_https_listener_arn } output "fabio_ui_alb_dns_name" { - value = "${module.nomad-infra.fabio_ui_alb_dns_name}" + value = module.nomad-infra.fabio_ui_alb_dns_name } output "fabio_ui_alb_https_targetgroup_arn" { - value = "${module.nomad-infra.fabio_ui_alb_https_targetgroup_arn}" + value = module.nomad-infra.fabio_ui_alb_https_targetgroup_arn } output "fabio_ui_alb_https_listener_arn" { - value = "${module.nomad-infra.fabio_ui_alb_https_listener_arn}" + value = module.nomad-infra.fabio_ui_alb_https_listener_arn } output "dc-public-services_alb_https_targetgroup_arn" { - value = "${module.nomad-infra.dc-public-services_alb_https_targetgroup_arn}" + value = module.nomad-infra.dc-public-services_alb_https_targetgroup_arn } output "dc-private-services_alb_https_targetgroup_arn" { - value = "${module.nomad-infra.dc-private-services_alb_https_targetgroup_arn}" + value = module.nomad-infra.dc-private-services_alb_https_targetgroup_arn } output "dc-backoffice_alb_https_targetgroup_arn" { - value = "${module.nomad-infra.dc-backoffice_alb_https_targetgroup_arn}" + value = module.nomad-infra.dc-backoffice_alb_https_targetgroup_arn } + diff --git a/examples/root-example/ref_ami.png 
b/examples/root-example/ref_ami.png new file mode 100644 index 0000000..5cfd5fd Binary files /dev/null and b/examples/root-example/ref_ami.png differ diff --git a/examples/root-example/vars.tf b/examples/root-example/vars.tf index c728806..d34d612 100644 --- a/examples/root-example/vars.tf +++ b/examples/root-example/vars.tf @@ -30,7 +30,7 @@ variable "stack_name" { variable "server_scaling_cfg" { description = "Number of nomad server" - type = "map" + type = map(string) default = { "min" = 3 @@ -41,7 +41,7 @@ variable "server_scaling_cfg" { variable "nomad_dc_node_cfg" { description = "Configuration for the private data-center nodes" - type = "map" + type = map(string) default = { "min" = 1 @@ -52,12 +52,13 @@ variable "nomad_dc_node_cfg" { } variable "ebs_block_devices_sample" { - type = "list" + type = any - default = [{ - "device_name" = "/dev/xvde" - "volume_size" = "50" - }, + default = [ + { + "device_name" = "/dev/xvde" + "volume_size" = "50" + }, { "device_name" = "/dev/xvdf" "volume_size" = "80" @@ -66,13 +67,17 @@ variable "ebs_block_devices_sample" { } variable "device_to_mount_target_map_sample" { - type = "list" + type = list(string) default = ["/dev/xvde:/mnt/map1", "/dev/xvdf:/mnt/map2"] } variable "additional_instance_tags_sample" { - type = "list" + type = list(object({ + key = string + value = string + propagate_at_launch = bool + })) default = [ { @@ -82,3 +87,4 @@ variable "additional_instance_tags_sample" { }, ] } + diff --git a/examples/root-example/versions.tf b/examples/root-example/versions.tf new file mode 100644 index 0000000..ac97c6a --- /dev/null +++ b/examples/root-example/versions.tf @@ -0,0 +1,4 @@ + +terraform { + required_version = ">= 0.12" +} diff --git a/examples/ui-access/main.tf b/examples/ui-access/main.tf index 9c91b38..d4c6392 100644 --- a/examples/ui-access/main.tf +++ b/examples/ui-access/main.tf @@ -4,8 +4,8 @@ locals { } provider "aws" { - profile = "${var.deploy_profile}" - region = "${var.aws_region}" + profile = 
var.deploy_profile + region = var.aws_region } ### obtaining default vpc, security group and subnet of the env @@ -14,12 +14,12 @@ data "aws_vpc" "default" { } data "aws_subnet_ids" "all" { - vpc_id = "${data.aws_vpc.default.id}" + vpc_id = data.aws_vpc.default.id } resource "aws_security_group" "sg_sample" { name_prefix = "sg_sample_" - vpc_id = "${data.aws_vpc.default.id}" + vpc_id = data.aws_vpc.default.id lifecycle { create_before_destroy = true @@ -33,7 +33,7 @@ resource "aws_security_group_rule" "allow_all_inbound" { protocol = "-1" cidr_blocks = ["0.0.0.0/0"] - security_group_id = "${aws_security_group.sg_sample.id}" + security_group_id = aws_security_group.sg_sample.id } resource "aws_security_group_rule" "allow_all_outbound" { @@ -43,25 +43,25 @@ resource "aws_security_group_rule" "allow_all_outbound" { protocol = "-1" cidr_blocks = ["0.0.0.0/0"] - security_group_id = "${aws_security_group.sg_sample.id}" + security_group_id = aws_security_group.sg_sample.id } resource "aws_autoscaling_group" "asg_sample" { - launch_configuration = "${aws_launch_configuration.lc_sample.name}" + launch_configuration = aws_launch_configuration.lc_sample.name name_prefix = "asg-sample-" - vpc_zone_identifier = ["${data.aws_subnet_ids.all.ids}"] + vpc_zone_identifier = data.aws_subnet_ids.all.ids min_size = "1" max_size = "1" } resource "aws_launch_configuration" "lc_sample" { name_prefix = "lc-sample-" - image_id = "ami-43a15f3e" # Ubuntu Server 16.04 LTS (HVM) + image_id = "ami-43a15f3e" # Ubuntu Server 16.04 LTS (HVM) instance_type = "t2.micro" - user_data = "${data.template_file.user_data.rendered}" + user_data = data.template_file.user_data.rendered key_name = "${var.ssh_key_name}" associate_public_ip_address = true - security_groups = ["${aws_security_group.sg_sample.id}"] + security_groups = [aws_security_group.sg_sample.id] lifecycle { create_before_destroy = true @@ -69,9 +69,9 @@ resource "aws_launch_configuration" "lc_sample" { } data "template_file" "user_data" { - 
template = "${file("${path.module}/user-data.sh")}" + template = file("${path.module}/user-data.sh") - vars { + vars = { nomad_ui_port = 4646 consul_ui_port = 8500 fabio_ui_port = 9998 @@ -82,23 +82,25 @@ module "ui-access" { source = "../../modules/ui-access" ## required parameters - vpc_id = "${data.aws_vpc.default.id}" - subnet_ids = "${data.aws_subnet_ids.all.ids}" - nomad_server_asg_name = "${aws_autoscaling_group.asg_sample.name}" - consul_server_asg_name = "${aws_autoscaling_group.asg_sample.name}" - fabio_server_asg_name = "${aws_autoscaling_group.asg_sample.name}" + vpc_id = data.aws_vpc.default.id + subnet_ids = data.aws_subnet_ids.all.ids + nomad_server_asg_name = aws_autoscaling_group.asg_sample.name + consul_server_asg_name = aws_autoscaling_group.asg_sample.name + fabio_server_asg_name = aws_autoscaling_group.asg_sample.name ## optional parameters - aws_region = "${var.aws_region}" - env_name = "${local.env_name}" - stack_name = "${local.stack_name}" + aws_region = var.aws_region + env_name = local.env_name + stack_name = local.stack_name nomad_ui_port = 4646 consul_ui_port = 8500 fabio_ui_port = 9998 allowed_cidr_blocks_for_ui_alb = { - "all" = "0.0.0.0/0" + "all" = "0.0.0.0/0", + "another" = "10.10.0.0/16" } unique_postfix = "" } + diff --git a/examples/ui-access/outputs.tf b/examples/ui-access/outputs.tf index 7ca6948..ae2934a 100644 --- a/examples/ui-access/outputs.tf +++ b/examples/ui-access/outputs.tf @@ -1,5 +1,5 @@ output "nomad_ui_alb_dns" { - value = "${module.ui-access.nomad_ui_alb_dns_name}" + value = module.ui-access.nomad_ui_alb_dns_name } output "curl_nomad_ui" { @@ -11,7 +11,7 @@ output "url_nomad_ui" { } output "consul_ui_alb_dns" { - value = "${module.ui-access.consul_ui_alb_dns_name}" + value = module.ui-access.consul_ui_alb_dns_name } output "curl_consul_ui" { @@ -23,7 +23,7 @@ output "url_consul_ui" { } output "fabio_ui_alb_dns" { - value = "${module.ui-access.fabio_ui_alb_dns_name}" + value = 
module.ui-access.fabio_ui_alb_dns_name } output "curl_fabio_ui" { diff --git a/examples/ui-access/versions.tf b/examples/ui-access/versions.tf new file mode 100644 index 0000000..ac97c6a --- /dev/null +++ b/examples/ui-access/versions.tf @@ -0,0 +1,4 @@ + +terraform { + required_version = ">= 0.12" +} diff --git a/main.tf b/main.tf index 2cd9305..efed6e0 100644 --- a/main.tf +++ b/main.tf @@ -4,195 +4,207 @@ locals { } module "ui-access" { - source = "modules/ui-access" + source = "./modules/ui-access" ## required parameters - vpc_id = "${var.vpc_id}" - subnet_ids = "${var.alb_subnet_ids}" - consul_server_asg_name = "${module.consul.asg_name_consul_servers}" - nomad_server_asg_name = "${module.nomad.asg_name_nomad_servers}" - fabio_server_asg_name = "${module.dc-public-services.asg_name}" - ui_alb_https_listener_cert_arn = "${var.ui_alb_https_listener_cert_arn}" - ui_alb_use_https_listener = "${var.ui_alb_use_https_listener}" + vpc_id = var.vpc_id + subnet_ids = var.alb_subnet_ids + consul_server_asg_name = module.consul.asg_name_consul_servers + nomad_server_asg_name = module.nomad.asg_name_nomad_servers + fabio_server_asg_name = module.dc-public-services.asg_name + ui_alb_https_listener_cert_arn = var.ui_alb_https_listener_cert_arn + ui_alb_use_https_listener = var.ui_alb_use_https_listener ## optional parameters - aws_region = "${var.aws_region}" - env_name = "${var.env_name}" - stack_name = "${var.stack_name}" - unique_postfix = "${var.unique_postfix}" - allowed_cidr_blocks_for_ui_alb = "${var.allowed_cidr_blocks_for_ui_alb}" + aws_region = var.aws_region + env_name = var.env_name + stack_name = var.stack_name + unique_postfix = var.unique_postfix + allowed_cidr_blocks_for_ui_alb = var.allowed_cidr_blocks_for_ui_alb } module "consul" { - source = "modules/consul" + source = "./modules/consul" ## required parameters - ami_id = "${var.consul_ami_id}" - vpc_id = "${var.vpc_id}" - subnet_ids = "${var.consul_server_subnet_ids}" + ami_id = var.consul_ami_id + vpc_id 
= var.vpc_id + subnet_ids = var.consul_server_subnet_ids ## optional parameters - env_name = "${var.env_name}" - aws_region = "${var.aws_region}" - stack_name = "${var.stack_name}" - cluster_tag_key = "${local.consul_cluster_tag_key}" - cluster_tag_value = "${local.consul_cluster_tag_value}" - allowed_ssh_cidr_blocks = "${var.allowed_ssh_cidr_blocks}" - num_servers = "${var.consul_num_servers}" - instance_type = "${var.consul_instance_type}" - ssh_key_name = "${var.ssh_key_name}" + env_name = var.env_name + aws_region = var.aws_region + stack_name = var.stack_name + cluster_tag_key = local.consul_cluster_tag_key + cluster_tag_value = local.consul_cluster_tag_value + allowed_ssh_cidr_blocks = var.allowed_ssh_cidr_blocks + num_servers = var.consul_num_servers + instance_type = var.consul_instance_type + ssh_key_name = var.ssh_key_name } #### DC: PUBLIC-SERVICES ################################################### module "dc-public-services" { - source = "modules/nomad-datacenter" + source = "./modules/nomad-datacenter" ## required parameters - vpc_id = "${var.vpc_id}" - subnet_ids = "${var.nomad_clients_public_services_subnet_ids}" - ami_id = "${var.nomad_ami_id_clients}" - consul_cluster_tag_key = "${local.consul_cluster_tag_key}" - consul_cluster_tag_value = "${local.consul_cluster_tag_value}" - server_sg_id = "${module.nomad.security_group_id_nomad_servers}" + vpc_id = var.vpc_id + subnet_ids = var.nomad_clients_public_services_subnet_ids + ami_id = var.nomad_ami_id_clients + consul_cluster_tag_key = local.consul_cluster_tag_key + consul_cluster_tag_value = local.consul_cluster_tag_value + server_sg_id = module.nomad.security_group_id_nomad_servers ## optional parameters - env_name = "${var.env_name}" - stack_name = "${var.stack_name}" - aws_region = "${var.aws_region}" - instance_type = "${lookup(var.nomad_public_services_dc_node_cfg,"instance_type","INVALID")}" - allowed_ssh_cidr_blocks = "${var.allowed_ssh_cidr_blocks}" - ssh_key_name = "${var.ssh_key_name}" + 
env_name = var.env_name + stack_name = var.stack_name + aws_region = var.aws_region + instance_type = lookup( + var.nomad_public_services_dc_node_cfg, + "instance_type", + "INVALID", + ) + allowed_ssh_cidr_blocks = var.allowed_ssh_cidr_blocks + ssh_key_name = var.ssh_key_name datacenter_name = "public-services" - unique_postfix = "${var.unique_postfix}" - alb_ingress_https_listener_arn = "${var.alb_ingress_https_listener_arn}" + unique_postfix = var.unique_postfix + alb_ingress_https_listener_arn = var.alb_ingress_https_listener_arn attach_ingress_alb_listener = true - node_scaling_cfg = "${var.nomad_public_services_dc_node_cfg}" - ebs_block_devices = "${var.ebs_block_devices_public_services_dc}" - device_to_mount_target_map = "${var.device_to_mount_target_map_public_services_dc}" - additional_instance_tags = "${var.additional_instance_tags_public_services_dc}" + node_scaling_cfg = var.nomad_public_services_dc_node_cfg + ebs_block_devices = var.ebs_block_devices_public_services_dc + device_to_mount_target_map = var.device_to_mount_target_map_public_services_dc + additional_instance_tags = var.additional_instance_tags_public_services_dc } #### DC: PRIVATE-SERVICES ################################################### module "dc-private-services" { - source = "modules/nomad-datacenter" + source = "./modules/nomad-datacenter" ## required parameters - vpc_id = "${var.vpc_id}" - subnet_ids = "${var.nomad_clients_private_services_subnet_ids}" - ami_id = "${var.nomad_ami_id_clients}" - consul_cluster_tag_key = "${local.consul_cluster_tag_key}" - consul_cluster_tag_value = "${local.consul_cluster_tag_value}" - server_sg_id = "${module.nomad.security_group_id_nomad_servers}" + vpc_id = var.vpc_id + subnet_ids = var.nomad_clients_private_services_subnet_ids + ami_id = var.nomad_ami_id_clients + consul_cluster_tag_key = local.consul_cluster_tag_key + consul_cluster_tag_value = local.consul_cluster_tag_value + server_sg_id = module.nomad.security_group_id_nomad_servers ## 
optional parameters - env_name = "${var.env_name}" - stack_name = "${var.stack_name}" - aws_region = "${var.aws_region}" - instance_type = "${lookup(var.nomad_private_services_dc_node_cfg,"instance_type","INVALID")}" - allowed_ssh_cidr_blocks = "${var.allowed_ssh_cidr_blocks}" - ssh_key_name = "${var.ssh_key_name}" + env_name = var.env_name + stack_name = var.stack_name + aws_region = var.aws_region + instance_type = lookup( + var.nomad_private_services_dc_node_cfg, + "instance_type", + "INVALID", + ) + allowed_ssh_cidr_blocks = var.allowed_ssh_cidr_blocks + ssh_key_name = var.ssh_key_name datacenter_name = "private-services" - unique_postfix = "${var.unique_postfix}" - node_scaling_cfg = "${var.nomad_private_services_dc_node_cfg}" - efs_dns_name = "${var.efs_dns_name}" - map_bucket_name = "${var.map_bucket_name}" - ebs_block_devices = "${var.ebs_block_devices_private_services_dc}" - device_to_mount_target_map = "${var.device_to_mount_target_map_private_services_dc}" - additional_instance_tags = "${var.additional_instance_tags_private_services_dc}" + unique_postfix = var.unique_postfix + node_scaling_cfg = var.nomad_private_services_dc_node_cfg + efs_dns_name = var.efs_dns_name + map_bucket_name = var.map_bucket_name + ebs_block_devices = var.ebs_block_devices_private_services_dc + device_to_mount_target_map = var.device_to_mount_target_map_private_services_dc + additional_instance_tags = var.additional_instance_tags_private_services_dc } #### DC: BACKOFFICE ################################################### module "dc-backoffice" { - source = "modules/nomad-datacenter" + source = "./modules/nomad-datacenter" ## required parameters - vpc_id = "${var.vpc_id}" - subnet_ids = "${var.nomad_clients_backoffice_subnet_ids}" - ami_id = "${var.nomad_ami_id_clients}" - consul_cluster_tag_key = "${local.consul_cluster_tag_key}" - consul_cluster_tag_value = "${local.consul_cluster_tag_value}" - server_sg_id = "${module.nomad.security_group_id_nomad_servers}" + vpc_id = 
var.vpc_id + subnet_ids = var.nomad_clients_backoffice_subnet_ids + ami_id = var.nomad_ami_id_clients + consul_cluster_tag_key = local.consul_cluster_tag_key + consul_cluster_tag_value = local.consul_cluster_tag_value + server_sg_id = module.nomad.security_group_id_nomad_servers ## optional parameters - env_name = "${var.env_name}" - stack_name = "${var.stack_name}" - aws_region = "${var.aws_region}" - instance_type = "${lookup(var.nomad_backoffice_dc_node_cfg,"instance_type","INVALID")}" - allowed_ssh_cidr_blocks = "${var.allowed_ssh_cidr_blocks}" - ssh_key_name = "${var.ssh_key_name}" + env_name = var.env_name + stack_name = var.stack_name + aws_region = var.aws_region + instance_type = lookup(var.nomad_backoffice_dc_node_cfg, "instance_type", "INVALID") + allowed_ssh_cidr_blocks = var.allowed_ssh_cidr_blocks + ssh_key_name = var.ssh_key_name datacenter_name = "backoffice" - unique_postfix = "${var.unique_postfix}" - alb_ingress_https_listener_arn = "${var.alb_backoffice_https_listener_arn}" - attach_ingress_alb_listener = "${var.attach_backoffice_alb_listener}" - node_scaling_cfg = "${var.nomad_backoffice_dc_node_cfg}" - ebs_block_devices = "${var.ebs_block_devices_backoffice_dc}" - device_to_mount_target_map = "${var.device_to_mount_target_map_backoffice_dc}" - additional_instance_tags = "${var.additional_instance_tags_backoffice_dc}" + unique_postfix = var.unique_postfix + alb_ingress_https_listener_arn = var.alb_backoffice_https_listener_arn + attach_ingress_alb_listener = var.attach_backoffice_alb_listener + node_scaling_cfg = var.nomad_backoffice_dc_node_cfg + ebs_block_devices = var.ebs_block_devices_backoffice_dc + device_to_mount_target_map = var.device_to_mount_target_map_backoffice_dc + additional_instance_tags = var.additional_instance_tags_backoffice_dc } #### DC: CONTENT-CONNECTOR ################################################### module "dc-content-connector" { - source = "modules/nomad-datacenter" + source = "./modules/nomad-datacenter" ## 
required parameters - vpc_id = "${var.vpc_id}" - subnet_ids = "${var.nomad_clients_content_connector_subnet_ids}" - ami_id = "${var.nomad_ami_id_clients}" - consul_cluster_tag_key = "${local.consul_cluster_tag_key}" - consul_cluster_tag_value = "${local.consul_cluster_tag_value}" - server_sg_id = "${module.nomad.security_group_id_nomad_servers}" + vpc_id = var.vpc_id + subnet_ids = var.nomad_clients_content_connector_subnet_ids + ami_id = var.nomad_ami_id_clients + consul_cluster_tag_key = local.consul_cluster_tag_key + consul_cluster_tag_value = local.consul_cluster_tag_value + server_sg_id = module.nomad.security_group_id_nomad_servers ## optional parameters - env_name = "${var.env_name}" - stack_name = "${var.stack_name}" - aws_region = "${var.aws_region}" - instance_type = "${lookup(var.nomad_content_connector_dc_node_cfg,"instance_type","INVALID")}" - allowed_ssh_cidr_blocks = "${var.allowed_ssh_cidr_blocks}" - ssh_key_name = "${var.ssh_key_name}" + env_name = var.env_name + stack_name = var.stack_name + aws_region = var.aws_region + instance_type = lookup( + var.nomad_content_connector_dc_node_cfg, + "instance_type", + "INVALID", + ) + allowed_ssh_cidr_blocks = var.allowed_ssh_cidr_blocks + ssh_key_name = var.ssh_key_name datacenter_name = "content-connector" - unique_postfix = "${var.unique_postfix}" - node_scaling_cfg = "${var.nomad_content_connector_dc_node_cfg}" - ebs_block_devices = "${var.ebs_block_devices_content_connector_dc}" - device_to_mount_target_map = "${var.device_to_mount_target_map_content_connector_dc}" - additional_instance_tags = "${var.additional_instance_tags_content_connector_dc}" + unique_postfix = var.unique_postfix + node_scaling_cfg = var.nomad_content_connector_dc_node_cfg + ebs_block_devices = var.ebs_block_devices_content_connector_dc + device_to_mount_target_map = var.device_to_mount_target_map_content_connector_dc + additional_instance_tags = var.additional_instance_tags_content_connector_dc } module "nomad" { - source = 
"modules/nomad" + source = "./modules/nomad" ## required parameters - vpc_id = "${var.vpc_id}" - subnet_ids = "${var.nomad_server_subnet_ids}" - ami_id = "${var.nomad_ami_id_servers}" - consul_cluster_tag_key = "${local.consul_cluster_tag_key}" - consul_cluster_tag_value = "${local.consul_cluster_tag_value}" - consul_cluster_security_group_id = "${module.consul.security_group_id_consul_servers}" + vpc_id = var.vpc_id + subnet_ids = var.nomad_server_subnet_ids + ami_id = var.nomad_ami_id_servers + consul_cluster_tag_key = local.consul_cluster_tag_key + consul_cluster_tag_value = local.consul_cluster_tag_value ## optional parameters - env_name = "${var.env_name}" - stack_name = "${var.stack_name}" - aws_region = "${var.aws_region}" - instance_type = "${var.instance_type_server}" - allowed_ssh_cidr_blocks = "${var.allowed_ssh_cidr_blocks}" - ssh_key_name = "${var.ssh_key_name}" - node_scaling_cfg = "${var.nomad_server_scaling_cfg}" - unique_postfix = "${var.unique_postfix}" + env_name = var.env_name + stack_name = var.stack_name + aws_region = var.aws_region + instance_type = var.instance_type_server + allowed_ssh_cidr_blocks = var.allowed_ssh_cidr_blocks + ssh_key_name = var.ssh_key_name + node_scaling_cfg = var.nomad_server_scaling_cfg + unique_postfix = var.unique_postfix } module "sgrules" { - source = "modules/sgrules" - sg_id_public_services_dc = "${module.dc-public-services.sg_datacenter_id}" - sg_id_private_services_dc = "${module.dc-private-services.sg_datacenter_id}" - sg_id_content_connector_dc = "${module.dc-content-connector.sg_datacenter_id}" - sg_id_backoffice_dc = "${module.dc-backoffice.sg_datacenter_id}" - sg_id_consul = "${module.consul.security_group_id_consul_servers}" - sg_id_nomad_server = "${module.nomad.security_group_id_nomad_servers}" - sg_id_ui_alb_nomad = "${module.ui-access.nomad_ui_alb_sg_id}" - sg_id_ui_alb_consul = "${module.ui-access.consul_ui_alb_sg_id}" + source = "./modules/sgrules" + sg_id_public_services_dc = 
module.dc-public-services.sg_datacenter_id + sg_id_private_services_dc = module.dc-private-services.sg_datacenter_id + sg_id_content_connector_dc = module.dc-content-connector.sg_datacenter_id + sg_id_backoffice_dc = module.dc-backoffice.sg_datacenter_id + sg_id_consul = module.consul.security_group_id_consul_servers + sg_id_nomad_server = module.nomad.security_group_id_nomad_servers + sg_id_ui_alb_nomad = module.ui-access.nomad_ui_alb_sg_id + sg_id_ui_alb_consul = module.ui-access.consul_ui_alb_sg_id } module "ecr" { - source = "modules/ecr" + source = "./modules/ecr" - ecr_repositories = "${var.ecr_repositories}" + ecr_repositories = var.ecr_repositories } + diff --git a/modules/bastion/main.tf b/modules/bastion/main.tf index 3b14cf0..7ec7d92 100644 --- a/modules/bastion/main.tf +++ b/modules/bastion/main.tf @@ -1,22 +1,22 @@ resource "aws_instance" "ec2_bastion" { - ami = "${var.ami_id}" - instance_type = "${var.instance_type}" - key_name = "${var.ssh_key_name}" - subnet_id = "${var.subnet_id}" + ami = var.ami_id + instance_type = var.instance_type + key_name = var.ssh_key_name + subnet_id = var.subnet_id - vpc_security_group_ids = ["${aws_security_group.sg_bastion.id}"] + vpc_security_group_ids = [aws_security_group.sg_bastion.id] - tags { + tags = { Name = "${var.stack_name}-EC2-bastion${var.unique_postfix}" } } resource "aws_security_group" "sg_bastion" { - vpc_id = "${var.vpc_id}" + vpc_id = var.vpc_id name = "${var.stack_name}-SG-bastion${var.unique_postfix}" description = "Security Group for basition server" - tags { + tags = { Name = "${var.stack_name}-SG-bastion${var.unique_postfix}" } @@ -34,30 +34,35 @@ resource "aws_security_group_rule" "sgr_bastion_egAll" { description = "egress all tcp" cidr_blocks = ["0.0.0.0/0"] - security_group_id = "${aws_security_group.sg_bastion.id}" + security_group_id = aws_security_group.sg_bastion.id } locals { - keys = "${keys(var.allowed_ssh_cidr_blocks)}" + keys = keys(var.allowed_ssh_cidr_blocks) } resource 
"aws_security_group_rule" "sgr_bastion_ig_ssh" { - count = "${length(local.keys)}" - description = "${element(local.keys,count.index)}: igress ssh" - type = "ingress" - from_port = 22 - to_port = 22 - protocol = "tcp" - cidr_blocks = ["${lookup(var.allowed_ssh_cidr_blocks,element(local.keys,count.index),"0.0.0.0/32")}"] - security_group_id = "${aws_security_group.sg_bastion.id}" + count = length(local.keys) + description = "${element(local.keys, count.index)}: igress ssh" + type = "ingress" + from_port = 22 + to_port = 22 + protocol = "tcp" + + cidr_blocks = [lookup( + var.allowed_ssh_cidr_blocks, + element(local.keys, count.index), + "0.0.0.0/32", + )] + security_group_id = aws_security_group.sg_bastion.id } # elastic ips needed for the bastion resource "aws_eip" "eip_bastion" { - instance = "${aws_instance.ec2_bastion.id}" + instance = aws_instance.ec2_bastion.id vpc = true - tags { + tags = { Name = "${var.stack_name}-EIP-bastion${var.unique_postfix}" } -} +} \ No newline at end of file diff --git a/modules/bastion/outputs.tf b/modules/bastion/outputs.tf index 0db7f28..a050f53 100644 --- a/modules/bastion/outputs.tf +++ b/modules/bastion/outputs.tf @@ -1,7 +1,7 @@ output "bastion_ip" { - value = "${aws_eip.eip_bastion.public_ip}" + value = aws_eip.eip_bastion.public_ip } output "ssh_key_name" { - value = "${var.ssh_key_name}" + value = var.ssh_key_name } diff --git a/modules/bastion/vars.tf b/modules/bastion/vars.tf index b4abe11..d168ecf 100644 --- a/modules/bastion/vars.tf +++ b/modules/bastion/vars.tf @@ -43,7 +43,7 @@ variable "unique_postfix" { variable "allowed_ssh_cidr_blocks" { description = "Map for cidr blocks that should get access to the bastion. The format is name:cidr-block. I.e. 
'my_cidr'='90.250.75.79/32'" - type = "map" + type = map(string) default = { "all" = "0.0.0.0/0" diff --git a/modules/bastion/versions.tf b/modules/bastion/versions.tf new file mode 100644 index 0000000..8007815 --- /dev/null +++ b/modules/bastion/versions.tf @@ -0,0 +1,4 @@ +terraform { + required_version = ">= 0.12" +} + diff --git a/modules/consul/main.tf b/modules/consul/main.tf index f09ba0a..fe9ef8d 100644 --- a/modules/consul/main.tf +++ b/modules/consul/main.tf @@ -1,34 +1,28 @@ -# Terraform 0.9.5 suffered from https://github.com/hashicorp/terraform/issues/14399, which causes this template the -# conditionals in this template to fail. -terraform { - required_version = ">= 0.9.3, != 0.9.5" -} - # --------------------------------------------------------------------------------------------------------------------- # DEPLOY THE CONSUL SERVER NODES # --------------------------------------------------------------------------------------------------------------------- module "consul_servers" { - source = "git::https://github.com/hashicorp/terraform-aws-consul.git//modules/consul-cluster?ref=v0.4.4" + source = "git::https://github.com/hashicorp/terraform-aws-consul.git//modules/consul-cluster?ref=v0.7.0" - cluster_name = "${var.cluster_tag_value}" - cluster_size = "${var.num_servers}" - instance_type = "${var.instance_type}" + cluster_name = var.cluster_tag_value + cluster_size = var.num_servers + instance_type = var.instance_type # The EC2 Instances will use these tags to automatically discover each other and form a cluster - cluster_tag_key = "${var.cluster_tag_key}" - cluster_tag_value = "${var.cluster_tag_value}" + cluster_tag_key = var.cluster_tag_key + cluster_tag_value = var.cluster_tag_value - ami_id = "${var.ami_id}" - user_data = "${data.template_file.user_data_consul_server.rendered}" + ami_id = var.ami_id + user_data = data.template_file.user_data_consul_server.rendered - vpc_id = "${var.vpc_id}" - subnet_ids = "${var.subnet_ids}" + vpc_id = var.vpc_id 
+ subnet_ids = var.subnet_ids - allowed_ssh_cidr_blocks = "${var.allowed_ssh_cidr_blocks}" + allowed_ssh_cidr_blocks = var.allowed_ssh_cidr_blocks allowed_inbound_cidr_blocks = [] allowed_inbound_security_group_count = 0 - ssh_key_name = "${var.ssh_key_name}" + ssh_key_name = var.ssh_key_name } # --------------------------------------------------------------------------------------------------------------------- @@ -38,8 +32,8 @@ module "consul_servers" { data "template_file" "user_data_consul_server" { template = "${file("${path.module}/user-data-consul-server.sh")}" - vars { - cluster_tag_key = "${var.cluster_tag_key}" - cluster_tag_value = "${var.cluster_tag_value}" + vars = { + cluster_tag_key = var.cluster_tag_key + cluster_tag_value = var.cluster_tag_value } } diff --git a/modules/consul/outputs.tf b/modules/consul/outputs.tf index 4793080..a846542 100644 --- a/modules/consul/outputs.tf +++ b/modules/consul/outputs.tf @@ -1,15 +1,15 @@ output "asg_name_consul_servers" { - value = "${module.consul_servers.asg_name}" + value = module.consul_servers.asg_name } output "security_group_id_consul_servers" { - value = "${module.consul_servers.security_group_id}" + value = module.consul_servers.security_group_id } output "consul_servers_cluster_tag_key" { - value = "${module.consul_servers.cluster_tag_key}" + value = module.consul_servers.cluster_tag_key } output "consul_servers_cluster_tag_value" { - value = "${module.consul_servers.cluster_tag_value}" + value = module.consul_servers.cluster_tag_value } diff --git a/modules/consul/sg.tf b/modules/consul/sg.tf index 33b66d7..c41c8bd 100644 --- a/modules/consul/sg.tf +++ b/modules/consul/sg.tf @@ -1,6 +1,6 @@ # obtain consul sg in order to add rules needed data "aws_security_group" "consul_sg" { - id = "${module.consul_servers.security_group_id}" + id = module.consul_servers.security_group_id } # Consul ports see: https://www.consul.io/docs/agent/options.html in section 'Ports Used' diff --git 
a/modules/consul/versions.tf b/modules/consul/versions.tf new file mode 100644 index 0000000..ac97c6a --- /dev/null +++ b/modules/consul/versions.tf @@ -0,0 +1,4 @@ + +terraform { + required_version = ">= 0.12" +} diff --git a/modules/networking/backoffice_alb.tf b/modules/networking/backoffice_alb.tf index 66a70a0..9b0df7f 100644 --- a/modules/networking/backoffice_alb.tf +++ b/modules/networking/backoffice_alb.tf @@ -2,10 +2,10 @@ resource "aws_alb" "alb_backoffice" { name = "${var.stack_name}-backoffice${var.unique_postfix}" internal = false - subnets = ["${aws_subnet.subn_public.*.id}"] - security_groups = ["${aws_security_group.sg_backoffice_alb.id}"] + subnets = aws_subnet.subn_public.*.id + security_groups = [aws_security_group.sg_backoffice_alb.id] - tags { + tags = { Name = "${var.stack_name}-backoffice${var.unique_postfix}" internal = false } @@ -16,23 +16,23 @@ resource "aws_alb_target_group" "tgr_dummy_backoffice" { name = "${var.stack_name}-backoffice-dummy${var.unique_postfix}" port = 5000 protocol = "HTTP" - vpc_id = "${aws_vpc.vpc_main.id}" + vpc_id = aws_vpc.vpc_main.id - tags { + tags = { Name = "${var.stack_name}-backoffice-dummy${var.unique_postfix}" } } # listener for https with one default action to a dummy target group resource "aws_alb_listener" "alb_backoffice_https" { - load_balancer_arn = "${aws_alb.alb_backoffice.arn}" + load_balancer_arn = aws_alb.alb_backoffice.arn # HACK: currently protocol is https although this is the https listener. 
protocol = "HTTP" port = "443" default_action { - target_group_arn = "${aws_alb_target_group.tgr_dummy_backoffice.arn}" + target_group_arn = aws_alb_target_group.tgr_dummy_backoffice.arn type = "forward" } } diff --git a/modules/networking/backoffice_alb_sg.tf b/modules/networking/backoffice_alb_sg.tf index 2d2bc48..20cd742 100644 --- a/modules/networking/backoffice_alb_sg.tf +++ b/modules/networking/backoffice_alb_sg.tf @@ -1,9 +1,9 @@ resource "aws_security_group" "sg_backoffice_alb" { - vpc_id = "${aws_vpc.vpc_main.id}" + vpc_id = aws_vpc.vpc_main.id name = "${var.stack_name}-backoffice-alb${var.unique_postfix}" description = "security group that allows ingress access to everyone." - tags { + tags = { Name = "${var.stack_name}-backoffice${var.unique_postfix}" } @@ -19,7 +19,7 @@ resource "aws_security_group_rule" "sgr_alb_backoffice_ig_https" { to_port = 443 protocol = "tcp" cidr_blocks = ["0.0.0.0/0"] - security_group_id = "${aws_security_group.sg_backoffice_alb.id}" + security_group_id = aws_security_group.sg_backoffice_alb.id } # grants access for all tcp but only to the services subnet @@ -31,5 +31,6 @@ resource "aws_security_group_rule" "sgr_alb_backoffice_egAll_server" { protocol = "tcp" cidr_blocks = ["0.0.0.0/0"] - security_group_id = "${aws_security_group.sg_backoffice_alb.id}" + security_group_id = aws_security_group.sg_backoffice_alb.id } + diff --git a/modules/networking/outputs.tf b/modules/networking/outputs.tf index 805f8c3..8e46379 100644 --- a/modules/networking/outputs.tf +++ b/modules/networking/outputs.tf @@ -1,39 +1,40 @@ output "services_subnet_ids" { - value = "${aws_subnet.subn_services.*.id}" + value = aws_subnet.subn_services.*.id } output "public_subnet_ids" { - value = "${aws_subnet.subn_public.*.id}" + value = aws_subnet.subn_public.*.id } output "backoffice_subnet_ids" { - value = "${aws_subnet.subn_backoffice.*.id}" + value = aws_subnet.subn_backoffice.*.id } output "content_connector_subnet_ids" { - value = 
"${aws_subnet.subn_contentconnector.*.id}" + value = aws_subnet.subn_contentconnector.*.id } output "vpc_id" { - value = "${aws_vpc.vpc_main.id}" + value = aws_vpc.vpc_main.id } output "alb_ingress_http_listener_arn" { - value = "${aws_alb_listener.alb_ingress_http.arn}" + value = aws_alb_listener.alb_ingress_http.arn } output "alb_ingress_https_listener_arn" { - value = "${aws_alb_listener.alb_ingress_https.arn}" + value = aws_alb_listener.alb_ingress_https.arn } output "alb_backoffice_https_listener_arn" { - value = "${aws_alb_listener.alb_backoffice_https.arn}" + value = aws_alb_listener.alb_backoffice_https.arn } output "alb_public_services_dns" { - value = "${aws_alb.alb_public_services.dns_name}" + value = aws_alb.alb_public_services.dns_name } output "vpc_cidr_block" { - value = "${aws_vpc.vpc_main.cidr_block}" + value = aws_vpc.vpc_main.cidr_block } + diff --git a/modules/networking/public_services_alb.tf b/modules/networking/public_services_alb.tf index b6d61d7..c7a1e6a 100644 --- a/modules/networking/public_services_alb.tf +++ b/modules/networking/public_services_alb.tf @@ -6,10 +6,10 @@ locals { resource "aws_alb" "alb_public_services" { name = "${var.stack_name}-ingress${var.unique_postfix}" internal = false - subnets = ["${aws_subnet.subn_public.*.id}"] - security_groups = ["${aws_security_group.sg_public_services_alb.id}"] + subnets = aws_subnet.subn_public.*.id + security_groups = [aws_security_group.sg_public_services_alb.id] - tags { + tags = { Name = "${var.stack_name}-ingress${var.unique_postfix}" internal = false } @@ -18,18 +18,18 @@ resource "aws_alb" "alb_public_services" { # Listener with empty dummy target group resource "aws_alb_target_group" "tgr_dummy_public_services" { name = "${var.stack_name}-igress-dummy${var.unique_postfix}" - port = "${local.dummy_port}" + port = local.dummy_port protocol = "HTTP" - vpc_id = "${aws_vpc.vpc_main.id}" + vpc_id = aws_vpc.vpc_main.id - tags { + tags = { Name = 
"${var.stack_name}-igress-dummy${var.unique_postfix}" } } # listener for https with one default action to a dummy target group resource "aws_alb_listener" "alb_ingress_https" { - load_balancer_arn = "${aws_alb.alb_public_services.arn}" + load_balancer_arn = aws_alb.alb_public_services.arn # HACK: currently protocol is https although this is the https listener. protocol = "HTTP" @@ -40,19 +40,20 @@ resource "aws_alb_listener" "alb_ingress_https" { #certificate_arn = "${var.dummy_listener_certificate_arn}" default_action { - target_group_arn = "${aws_alb_target_group.tgr_dummy_public_services.arn}" + target_group_arn = aws_alb_target_group.tgr_dummy_public_services.arn type = "forward" } } # listener for http with one default action to a dummy target group resource "aws_alb_listener" "alb_ingress_http" { - load_balancer_arn = "${aws_alb.alb_public_services.arn}" + load_balancer_arn = aws_alb.alb_public_services.arn protocol = "HTTP" port = "80" default_action { - target_group_arn = "${aws_alb_target_group.tgr_dummy_public_services.arn}" + target_group_arn = aws_alb_target_group.tgr_dummy_public_services.arn type = "forward" } } + diff --git a/modules/networking/public_services_alb_sg.tf b/modules/networking/public_services_alb_sg.tf index d62a456..35420df 100644 --- a/modules/networking/public_services_alb_sg.tf +++ b/modules/networking/public_services_alb_sg.tf @@ -1,9 +1,9 @@ resource "aws_security_group" "sg_public_services_alb" { - vpc_id = "${aws_vpc.vpc_main.id}" + vpc_id = aws_vpc.vpc_main.id name = "${var.stack_name}-ingress${var.unique_postfix}" description = "security group that allows ingress access to everyone." 
- tags { + tags = { Name = "${var.stack_name}-ingress${var.unique_postfix}" } @@ -20,7 +20,7 @@ resource "aws_security_group_rule" "sgr_alb_ig_http" { to_port = 80 protocol = "tcp" cidr_blocks = ["0.0.0.0/0"] - security_group_id = "${aws_security_group.sg_public_services_alb.id}" + security_group_id = aws_security_group.sg_public_services_alb.id } resource "aws_security_group_rule" "sgr_alb_ig_https" { @@ -30,7 +30,7 @@ resource "aws_security_group_rule" "sgr_alb_ig_https" { to_port = 443 protocol = "tcp" cidr_blocks = ["0.0.0.0/0"] - security_group_id = "${aws_security_group.sg_public_services_alb.id}" + security_group_id = aws_security_group.sg_public_services_alb.id } # grants access for all tcp but only to the services subnet @@ -42,5 +42,6 @@ resource "aws_security_group_rule" "sgr_alb_egAll_server" { protocol = "tcp" cidr_blocks = ["0.0.0.0/0"] - security_group_id = "${aws_security_group.sg_public_services_alb.id}" + security_group_id = aws_security_group.sg_public_services_alb.id } + diff --git a/modules/networking/subn_backoffice.tf b/modules/networking/subn_backoffice.tf index 637510b..cf2266f 100644 --- a/modules/networking/subn_backoffice.tf +++ b/modules/networking/subn_backoffice.tf @@ -1,41 +1,42 @@ # BACKOFFICE subnets resource "aws_subnet" "subn_backoffice" { # one for each az - count = "${length(var.az_postfixes)}" - vpc_id = "${aws_vpc.vpc_main.id}" + count = length(var.az_postfixes) + vpc_id = aws_vpc.vpc_main.id cidr_block = "${var.ip_prefix}.${140 + count.index}.0/24" - availability_zone = "${var.region}${element(var.az_postfixes,count.index)}" + availability_zone = "${var.region}${element(var.az_postfixes, count.index)}" - tags { - Name = "MNG-${var.stack_name}-${var.region}${element(var.az_postfixes,count.index)}-${var.env_name}-SUBN-backoffice" + tags = { + Name = "MNG-${var.stack_name}-${var.region}${element(var.az_postfixes, count.index)}-${var.env_name}-SUBN-backoffice" } } # route-table for the backoffice subnets resource 
"aws_route_table" "rtb_backoffice" { - vpc_id = "${aws_vpc.vpc_main.id}" + vpc_id = aws_vpc.vpc_main.id # one for each az - count = "${length(var.az_postfixes)}" + count = length(var.az_postfixes) - tags { - Name = "MNG-${var.stack_name}-${var.region}${element(var.az_postfixes,count.index)}-${var.env_name}-RTB-backoffice" + tags = { + Name = "MNG-${var.stack_name}-${var.region}${element(var.az_postfixes, count.index)}-${var.env_name}-RTB-backoffice" } } # association between the backoffice subnets and the backoffice routetable resource "aws_route_table_association" "rtassoc_backoffice" { # one for each az - count = "${length(var.az_postfixes)}" - subnet_id = "${element(aws_subnet.subn_backoffice.*.id,count.index)}" - route_table_id = "${element(aws_route_table.rtb_backoffice.*.id,count.index)}" + count = length(var.az_postfixes) + subnet_id = element(aws_subnet.subn_backoffice.*.id, count.index) + route_table_id = element(aws_route_table.rtb_backoffice.*.id, count.index) } # this is the route to the egress_aws natgateway resource "aws_route" "r_backoffice_egress_aws_ngw" { # one for each az - count = "${length(var.az_postfixes)}" - route_table_id = "${element(aws_route_table.rtb_backoffice.*.id,count.index)}" + count = length(var.az_postfixes) + route_table_id = element(aws_route_table.rtb_backoffice.*.id, count.index) destination_cidr_block = "0.0.0.0/0" - nat_gateway_id = "${element(aws_nat_gateway.ngw_egress_aws.*.id,count.index)}" + nat_gateway_id = element(aws_nat_gateway.ngw_egress_aws.*.id, count.index) } + diff --git a/modules/networking/subn_contentconnector.tf b/modules/networking/subn_contentconnector.tf index ff7d8aa..8ad6d9a 100644 --- a/modules/networking/subn_contentconnector.tf +++ b/modules/networking/subn_contentconnector.tf @@ -1,41 +1,42 @@ # CONTENTCONNECTOR subnets resource "aws_subnet" "subn_contentconnector" { # one for each az - count = "${length(var.az_postfixes)}" - vpc_id = "${aws_vpc.vpc_main.id}" - cidr_block = "${var.ip_prefix}.${132 
+ (count.index*2)}.0/23" - availability_zone = "${var.region}${element(var.az_postfixes,count.index)}" + count = length(var.az_postfixes) + vpc_id = aws_vpc.vpc_main.id + cidr_block = "${var.ip_prefix}.${132 + count.index * 2}.0/23" + availability_zone = "${var.region}${element(var.az_postfixes, count.index)}" - tags { - Name = "MNG-${var.stack_name}-${var.region}${element(var.az_postfixes,count.index)}-${var.env_name}-SUBN-contentconnector" + tags = { + Name = "MNG-${var.stack_name}-${var.region}${element(var.az_postfixes, count.index)}-${var.env_name}-SUBN-contentconnector" } } # route-table for the contentconnector subnets resource "aws_route_table" "rtb_contentconnector" { - vpc_id = "${aws_vpc.vpc_main.id}" + vpc_id = aws_vpc.vpc_main.id # one for each az - count = "${length(var.az_postfixes)}" + count = length(var.az_postfixes) - tags { - Name = "MNG-${var.stack_name}-${var.region}${element(var.az_postfixes,count.index)}-${var.env_name}-RTB-contentconnector" + tags = { + Name = "MNG-${var.stack_name}-${var.region}${element(var.az_postfixes, count.index)}-${var.env_name}-RTB-contentconnector" } } # association between the contentconnector subnets and the contentconnector routetable resource "aws_route_table_association" "rtassoc_contentconnector" { # one for each az - count = "${length(var.az_postfixes)}" - subnet_id = "${element(aws_subnet.subn_contentconnector.*.id,count.index)}" - route_table_id = "${element(aws_route_table.rtb_contentconnector.*.id,count.index)}" + count = length(var.az_postfixes) + subnet_id = element(aws_subnet.subn_contentconnector.*.id, count.index) + route_table_id = element(aws_route_table.rtb_contentconnector.*.id, count.index) } # this is the route to the egress_public natgateway resource "aws_route" "r_egress_public_ngw" { # one for each az - count = "${length(var.az_postfixes)}" - route_table_id = "${element(aws_route_table.rtb_contentconnector.*.id,count.index)}" + count = length(var.az_postfixes) + route_table_id = 
element(aws_route_table.rtb_contentconnector.*.id, count.index) destination_cidr_block = "0.0.0.0/0" - nat_gateway_id = "${element(aws_nat_gateway.ngw_egress_public.*.id,count.index)}" + nat_gateway_id = element(aws_nat_gateway.ngw_egress_public.*.id, count.index) } + diff --git a/modules/networking/subn_egress_aws.tf b/modules/networking/subn_egress_aws.tf index 37e1115..d1bacc3 100644 --- a/modules/networking/subn_egress_aws.tf +++ b/modules/networking/subn_egress_aws.tf @@ -1,56 +1,81 @@ # EGRESS AWS subnets resource "aws_subnet" "subn_egress_aws" { # one for each az - count = "${length(var.az_postfixes)}" - vpc_id = "${aws_vpc.vpc_main.id}" + count = length(var.az_postfixes) + vpc_id = aws_vpc.vpc_main.id cidr_block = "${var.ip_prefix}.${144 + count.index}.0/24" - availability_zone = "${var.region}${element(var.az_postfixes,count.index)}" + availability_zone = "${var.region}${element(var.az_postfixes, count.index)}" - tags { - Name = "MNG-${var.stack_name}-${var.region}${element(var.az_postfixes,count.index)}-${var.env_name}-SUBN-egress_aws" + tags = { + Name = "MNG-${var.stack_name}-${var.region}${element(var.az_postfixes, count.index)}-${var.env_name}-SUBN-egress_aws" } } +locals { + # This variable contains the cidr's that should be allowed to be routed to per route-table (per availability zone) + # This variable is list of lists. Each element of the list contains two elements. First the id of the route-table the route should be added to, + # second the destination cidr for that route. 
+ # Example: (one rtb-id, three cidr's: setproduct(["rtb-00415f0251d6bfa34"],["72.0.0.0/8","75.0.0.0/8","174.0.0.0/8"]) ) + # [ + # [ + # "rtb-00415f0251d6bfa34", + # "72.0.0.0/8" + # ], + # [ + # "rtb-00415f0251d6bfa34", + # "75.0.0.0/8" + # ], + # [ + # "rtb-00415f0251d6bfa34", + # "174.0.0.0/8" + # ] + # ] + cidr_per_route_table_id = setproduct(aws_route_table.rtb_egress_aws.*.id, var.aws_ip_address_ranges) +} + + # route-table for the egress_aws subnets resource "aws_route_table" "rtb_egress_aws" { - vpc_id = "${aws_vpc.vpc_main.id}" + vpc_id = aws_vpc.vpc_main.id # one for each az - count = "${length(var.az_postfixes)}" + count = length(var.az_postfixes) - tags { - Name = "MNG-${var.stack_name}-${var.region}${element(var.az_postfixes,count.index)}-${var.env_name}-RTB-egress_aws" + tags = { + Name = "MNG-${var.stack_name}-${var.region}${element(var.az_postfixes, count.index)}-${var.env_name}-RTB-egress_aws" } } # association between the egress_aws subnets and the egress_aws routetable resource "aws_route_table_association" "rtassoc_egress_aws" { # one for each az - count = "${length(var.az_postfixes)}" - subnet_id = "${element(aws_subnet.subn_egress_aws.*.id,count.index)}" - route_table_id = "${element(aws_route_table.rtb_egress_aws.*.id,count.index)}" + count = length(var.az_postfixes) + subnet_id = element(aws_subnet.subn_egress_aws.*.id, count.index) + route_table_id = element(aws_route_table.rtb_egress_aws.*.id, count.index) } # this is the route to the aws service ip-ranges resource "aws_route" "r_egress_aws_ips" { # one for each az - count = "${length(var.aws_ip_address_ranges) * length(var.az_postfixes)}" - route_table_id = "${element(aws_route_table.rtb_egress_aws.*.id, (count.index / length(var.aws_ip_address_ranges)) % length(var.az_postfixes))}" - destination_cidr_block = "${element(var.aws_ip_address_ranges,count.index % length(var.aws_ip_address_ranges))}" - gateway_id = "${aws_internet_gateway.igw_main.id}" + count = 
length(local.cidr_per_route_table_id) + + route_table_id = element(element(local.cidr_per_route_table_id, count.index), 0) + destination_cidr_block = element(element(local.cidr_per_route_table_id, count.index), 1) + gateway_id = aws_internet_gateway.igw_main.id } # elastic ips needed for the egress_aws natgateways resource "aws_eip" "eip_egress_aws" { # one for each az - count = "${length(var.az_postfixes)}" + count = length(var.az_postfixes) vpc = true } # the natgateways for egress aws access resource "aws_nat_gateway" "ngw_egress_aws" { # one for each az - count = "${length(var.az_postfixes)}" - allocation_id = "${element(aws_eip.eip_egress_aws.*.id,count.index)}" - subnet_id = "${element(aws_subnet.subn_egress_aws.*.id,count.index)}" + count = length(var.az_postfixes) + allocation_id = element(aws_eip.eip_egress_aws.*.id, count.index) + subnet_id = element(aws_subnet.subn_egress_aws.*.id, count.index) } + diff --git a/modules/networking/subn_egress_public.tf b/modules/networking/subn_egress_public.tf index c897ede..80b6efd 100644 --- a/modules/networking/subn_egress_public.tf +++ b/modules/networking/subn_egress_public.tf @@ -1,56 +1,57 @@ # EGRESS PUBLIC subnets resource "aws_subnet" "subn_egress_public" { # one for each az - count = "${length(var.az_postfixes)}" - vpc_id = "${aws_vpc.vpc_main.id}" + count = length(var.az_postfixes) + vpc_id = aws_vpc.vpc_main.id cidr_block = "${var.ip_prefix}.${148 + count.index}.0/24" - availability_zone = "${var.region}${element(var.az_postfixes,count.index)}" + availability_zone = "${var.region}${element(var.az_postfixes, count.index)}" - tags { - Name = "MNG-${var.stack_name}-${var.region}${element(var.az_postfixes,count.index)}-${var.env_name}-SUBN-egress_public" + tags = { + Name = "MNG-${var.stack_name}-${var.region}${element(var.az_postfixes, count.index)}-${var.env_name}-SUBN-egress_public" } } # route-table for the egress_public subnets resource "aws_route_table" "rtb_egress_public" { - vpc_id = 
"${aws_vpc.vpc_main.id}" + vpc_id = aws_vpc.vpc_main.id # one for each az - count = "${length(var.az_postfixes)}" + count = length(var.az_postfixes) - tags { - Name = "MNG-${var.stack_name}-${var.region}${element(var.az_postfixes,count.index)}-${var.env_name}-RTB-egress_public" + tags = { + Name = "MNG-${var.stack_name}-${var.region}${element(var.az_postfixes, count.index)}-${var.env_name}-RTB-egress_public" } } # association between the egress_public subnets and the egress_public routetable resource "aws_route_table_association" "rtassoc_egress_public" { # one for each az - count = "${length(var.az_postfixes)}" - subnet_id = "${element(aws_subnet.subn_egress_public.*.id,count.index)}" - route_table_id = "${element(aws_route_table.rtb_egress_public.*.id,count.index)}" + count = length(var.az_postfixes) + subnet_id = element(aws_subnet.subn_egress_public.*.id, count.index) + route_table_id = element(aws_route_table.rtb_egress_public.*.id, count.index) } # this is the route to the internet resource "aws_route" "r_egress_public_inet" { # one for each az - count = "${length(var.az_postfixes)}" - route_table_id = "${element(aws_route_table.rtb_egress_public.*.id,count.index)}" + count = length(var.az_postfixes) + route_table_id = element(aws_route_table.rtb_egress_public.*.id, count.index) destination_cidr_block = "0.0.0.0/0" - gateway_id = "${aws_internet_gateway.igw_main.id}" + gateway_id = aws_internet_gateway.igw_main.id } # elastic ips needed for the natgateways resource "aws_eip" "eip_egress_public" { # one for each az - count = "${length(var.az_postfixes)}" + count = length(var.az_postfixes) vpc = true } # the natgateways for egress public access resource "aws_nat_gateway" "ngw_egress_public" { # one for each az - count = "${length(var.az_postfixes)}" - allocation_id = "${element(aws_eip.eip_egress_public.*.id,count.index)}" - subnet_id = "${element(aws_subnet.subn_egress_public.*.id,count.index)}" + count = length(var.az_postfixes) + allocation_id = 
element(aws_eip.eip_egress_public.*.id, count.index) + subnet_id = element(aws_subnet.subn_egress_public.*.id, count.index) } + diff --git a/modules/networking/subn_public.tf b/modules/networking/subn_public.tf index 8d6a54d..68a59de 100644 --- a/modules/networking/subn_public.tf +++ b/modules/networking/subn_public.tf @@ -1,41 +1,42 @@ # PUBLIC subnets resource "aws_subnet" "subn_public" { # one for each az - count = "${length(var.az_postfixes)}" - vpc_id = "${aws_vpc.vpc_main.id}" + count = length(var.az_postfixes) + vpc_id = aws_vpc.vpc_main.id cidr_block = "${var.ip_prefix}.${128 + count.index}.0/24" - availability_zone = "${var.region}${element(var.az_postfixes,count.index)}" + availability_zone = "${var.region}${element(var.az_postfixes, count.index)}" - tags { - Name = "MNG-${var.stack_name}-${var.region}${element(var.az_postfixes,count.index)}-SUBN-public" + tags = { + Name = "MNG-${var.stack_name}-${var.region}${element(var.az_postfixes, count.index)}-SUBN-public" } } # route-table for the public subnet resource "aws_route_table" "rtb_public" { - vpc_id = "${aws_vpc.vpc_main.id}" + vpc_id = aws_vpc.vpc_main.id # one for each az - count = "${length(var.az_postfixes)}" + count = length(var.az_postfixes) - tags { - Name = "MNG-${var.stack_name}-${var.region}${element(var.az_postfixes,count.index)}-RTB-public" + tags = { + Name = "MNG-${var.stack_name}-${var.region}${element(var.az_postfixes, count.index)}-RTB-public" } } # association between the public subnets and the public routetable resource "aws_route_table_association" "rtassoc_public" { # one for each az - count = "${length(var.az_postfixes)}" - subnet_id = "${element(aws_subnet.subn_public.*.id,count.index)}" - route_table_id = "${element(aws_route_table.rtb_public.*.id,count.index)}" + count = length(var.az_postfixes) + subnet_id = element(aws_subnet.subn_public.*.id, count.index) + route_table_id = element(aws_route_table.rtb_public.*.id, count.index) } # this is the route to the internet resource 
"aws_route" "r_public_inet" { # one for each az - count = "${length(var.az_postfixes)}" - route_table_id = "${element(aws_route_table.rtb_public.*.id,count.index)}" + count = length(var.az_postfixes) + route_table_id = element(aws_route_table.rtb_public.*.id, count.index) destination_cidr_block = "0.0.0.0/0" - gateway_id = "${aws_internet_gateway.igw_main.id}" + gateway_id = aws_internet_gateway.igw_main.id } + diff --git a/modules/networking/subn_services.tf b/modules/networking/subn_services.tf index 8372413..ae43c28 100644 --- a/modules/networking/subn_services.tf +++ b/modules/networking/subn_services.tf @@ -1,41 +1,42 @@ # SERVICES subnets resource "aws_subnet" "subn_services" { # one for each az - count = "${length(var.az_postfixes)}" - vpc_id = "${aws_vpc.vpc_main.id}" + count = length(var.az_postfixes) + vpc_id = aws_vpc.vpc_main.id cidr_block = "${var.ip_prefix}.${0 + count.index * 32}.0/19" - availability_zone = "${var.region}${element(var.az_postfixes,count.index)}" + availability_zone = "${var.region}${element(var.az_postfixes, count.index)}" - tags { - Name = "${var.stack_name}-${var.region}${element(var.az_postfixes,count.index)}-SUBN-services" + tags = { + Name = "${var.stack_name}-${var.region}${element(var.az_postfixes, count.index)}-SUBN-services" } } # route-table for the services subnets resource "aws_route_table" "rtb_services" { - vpc_id = "${aws_vpc.vpc_main.id}" + vpc_id = aws_vpc.vpc_main.id # one for each az - count = "${length(var.az_postfixes)}" + count = length(var.az_postfixes) - tags { - Name = "${var.stack_name}-${var.region}${element(var.az_postfixes,count.index)}-RTB-services" + tags = { + Name = "${var.stack_name}-${var.region}${element(var.az_postfixes, count.index)}-RTB-services" } } # association between the services subnets and the services routetable resource "aws_route_table_association" "rtassoc_services" { # one for each az - count = "${length(var.az_postfixes)}" - subnet_id = 
"${element(aws_subnet.subn_services.*.id,count.index)}" - route_table_id = "${element(aws_route_table.rtb_services.*.id,count.index)}" + count = length(var.az_postfixes) + subnet_id = element(aws_subnet.subn_services.*.id, count.index) + route_table_id = element(aws_route_table.rtb_services.*.id, count.index) } # this is the route to the egress_aws natgateway resource "aws_route" "r_services_egress_aws_ngw" { # one for each az - count = "${length(var.az_postfixes)}" - route_table_id = "${element(aws_route_table.rtb_services.*.id,count.index)}" + count = length(var.az_postfixes) + route_table_id = element(aws_route_table.rtb_services.*.id, count.index) destination_cidr_block = "0.0.0.0/0" - nat_gateway_id = "${element(aws_nat_gateway.ngw_egress_aws.*.id,count.index)}" + nat_gateway_id = element(aws_nat_gateway.ngw_egress_aws.*.id, count.index) } + diff --git a/modules/networking/vars.tf b/modules/networking/vars.tf index bd81c85..11cbbe1 100644 --- a/modules/networking/vars.tf +++ b/modules/networking/vars.tf @@ -10,7 +10,7 @@ variable "stack_name" { variable "az_postfixes" { description = "list of AZ postfixes" - type = "list" + type = list(string) default = ["a", "b", "c"] } @@ -30,9 +30,29 @@ variable "unique_postfix" { variable "aws_ip_address_ranges" { description = "List of ip-ranges for accessing aws services (S3, EC2, ElastiCache, ..) 
in us-east-1 see: http://docs.aws.amazon.com/general/latest/gr/aws-ip-ranges.html" - type = "list" + type = list(string) - default = ["18.0.0.0/8", "23.0.0.0/8", "34.0.0.0/8", "35.0.0.0/8", "50.0.0.0/8", "52.0.0.0/8", "54.0.0.0/8", "67.0.0.0/8", "72.0.0.0/8", "75.0.0.0/8", "107.0.0.0/8", "174.0.0.0/8", "184.0.0.0/8", "204.0.0.0/8", "216.0.0.0/8", "172.0.0.0/8", "176.0.0.0/8", "205.0.0.0/8", "207.0.0.0/8", + default = [ + "18.0.0.0/8", + "23.0.0.0/8", + "34.0.0.0/8", + "35.0.0.0/8", + "50.0.0.0/8", + "52.0.0.0/8", + "54.0.0.0/8", + "67.0.0.0/8", + "72.0.0.0/8", + "75.0.0.0/8", + "107.0.0.0/8", + "174.0.0.0/8", + "184.0.0.0/8", + "204.0.0.0/8", + "216.0.0.0/8", + "172.0.0.0/8", + "176.0.0.0/8", + "205.0.0.0/8", + "207.0.0.0/8", "192.30.253.0/24", ] # HACK: "192.30.253.0/24" is for github } + diff --git a/modules/networking/versions.tf b/modules/networking/versions.tf new file mode 100644 index 0000000..ac97c6a --- /dev/null +++ b/modules/networking/versions.tf @@ -0,0 +1,4 @@ + +terraform { + required_version = ">= 0.12" +} diff --git a/modules/networking/vpc.tf b/modules/networking/vpc.tf index c73db24..60d780b 100644 --- a/modules/networking/vpc.tf +++ b/modules/networking/vpc.tf @@ -5,7 +5,7 @@ resource "aws_vpc" "vpc_main" { enable_dns_hostnames = "true" enable_dns_support = "true" - tags { + tags = { Name = "${var.stack_name}-VPC-main" } } @@ -17,23 +17,24 @@ locals { #dhcp options resource "aws_vpc_dhcp_options" "vpc_main_dns" { domain_name = "nomad-${var.region}" - domain_name_servers = ["${local.dns_ip}", "AmazonProvidedDNS"] + domain_name_servers = [local.dns_ip, "AmazonProvidedDNS"] - tags { + tags = { Name = "${var.stack_name}-DOPT-vpc" } } resource "aws_vpc_dhcp_options_association" "vpc_main_dns_resolver" { - vpc_id = "${aws_vpc.vpc_main.id}" - dhcp_options_id = "${aws_vpc_dhcp_options.vpc_main_dns.id}" + vpc_id = aws_vpc.vpc_main.id + dhcp_options_id = aws_vpc_dhcp_options.vpc_main_dns.id } # the internet gateway resource "aws_internet_gateway" 
"igw_main" { - vpc_id = "${aws_vpc.vpc_main.id}" + vpc_id = aws_vpc.vpc_main.id - tags { + tags = { Name = "${var.stack_name}-IGW-main" } } + diff --git a/modules/nomad-datacenter/access_full.tf b/modules/nomad-datacenter/access_full.tf index f41ee79..22a61b6 100644 --- a/modules/nomad-datacenter/access_full.tf +++ b/modules/nomad-datacenter/access_full.tf @@ -1,11 +1,12 @@ # Add policy to the already created iam role of the nomad clients in the nomad cluster module. # Policy-attachment that grants full access to all AWS services for nomad clients resource "aws_iam_role_policy_attachment" "irpa_full_access" { - role = "${module.data_center.iam_role_id}" - policy_arn = "${aws_iam_policy.ip_full_access.arn}" + role = module.data_center.iam_role_id + policy_arn = aws_iam_policy.ip_full_access.arn } resource "aws_iam_policy" "ip_full_access" { name = "${var.stack_name}-${var.datacenter_name}${var.unique_postfix}-full" - policy = "${file("${path.module}/access_full.json")}" + policy = file("${path.module}/access_full.json") } + diff --git a/modules/nomad-datacenter/alb_ingress_attachment.tf b/modules/nomad-datacenter/alb_ingress_attachment.tf index 88e5ff7..35150a4 100644 --- a/modules/nomad-datacenter/alb_ingress_attachment.tf +++ b/modules/nomad-datacenter/alb_ingress_attachment.tf @@ -5,18 +5,18 @@ # Define autoscaling attachments to connect the ingress-controller target group with the autoscaling group having the ingress-contoller instances. resource "aws_autoscaling_attachment" "asga_ingress_controller" { - count = "${var.attach_ingress_alb_listener}" - autoscaling_group_name = "${module.data_center.asg_name}" - alb_target_group_arn = "${aws_alb_target_group.tgr_ingress_controller.arn}" + count = var.attach_ingress_alb_listener ? 1 : 0 + autoscaling_group_name = module.data_center.asg_name + alb_target_group_arn = aws_alb_target_group.tgr_ingress_controller[0].arn } # Targetgroup that points to the ingress-controller (i.e. 
fabio) port resource "aws_alb_target_group" "tgr_ingress_controller" { - count = "${var.attach_ingress_alb_listener}" + count = var.attach_ingress_alb_listener ? 1 : 0 name = "${var.datacenter_name}-inctl${var.unique_postfix}" - port = "${var.ingress_controller_port}" + port = var.ingress_controller_port protocol = "HTTP" - vpc_id = "${var.vpc_id}" + vpc_id = var.vpc_id health_check { interval = 15 @@ -28,19 +28,19 @@ resource "aws_alb_target_group" "tgr_ingress_controller" { unhealthy_threshold = 2 } - tags { + tags = { Name = "${var.stack_name}-${var.datacenter_name}-ingress-controller${var.unique_postfix}" } } # listener rule for HTTPS resource "aws_alb_listener_rule" "alr_ingress_https" { - count = "${var.attach_ingress_alb_listener}" - listener_arn = "${var.alb_ingress_https_listener_arn}" + count = var.attach_ingress_alb_listener ? 1 : 0 + listener_arn = var.alb_ingress_https_listener_arn action { type = "forward" - target_group_arn = "${aws_alb_target_group.tgr_ingress_controller.arn}" + target_group_arn = aws_alb_target_group.tgr_ingress_controller[0].arn } condition { @@ -48,3 +48,4 @@ resource "aws_alb_listener_rule" "alr_ingress_https" { values = ["/*"] } } + diff --git a/modules/nomad-datacenter/datacenter.tf b/modules/nomad-datacenter/datacenter.tf index 7a9786d..4511921 100644 --- a/modules/nomad-datacenter/datacenter.tf +++ b/modules/nomad-datacenter/datacenter.tf @@ -1,54 +1,58 @@ # reading values from the node_scaling_cfg locals { - min = "${lookup(var.node_scaling_cfg,"min","INVALID")}" - max = "${lookup(var.node_scaling_cfg,"max","INVALID")}" - desired_capacity = "${lookup(var.node_scaling_cfg,"desired_capacity","INVALID")}" + min = lookup(var.node_scaling_cfg, "min", "INVALID") + max = lookup(var.node_scaling_cfg, "max", "INVALID") + desired_capacity = lookup(var.node_scaling_cfg, "desired_capacity", "INVALID") cluster_name = "${local.base_cluster_name}${var.unique_postfix}" + + default_tags = [ + { + "key" = "datacenter" + "value" = 
var.datacenter_name + "propagate_at_launch" = "true" + }, + { + "key" = "node-type" + "value" = "client" + "propagate_at_launch" = "true" + }, + ] } module "data_center" { - source = "git::https://github.com/hashicorp/terraform-aws-nomad.git//modules/nomad-cluster?ref=v0.4.5" + source = "git::https://github.com/hashicorp/terraform-aws-nomad.git//modules/nomad-cluster?ref=v0.5.0" - cluster_name = "${local.cluster_name}" - cluster_tag_value = "${local.cluster_name}" - instance_type = "${var.instance_type}" - ami_id = "${var.ami_id}" - vpc_id = "${var.vpc_id}" - subnet_ids = "${var.subnet_ids}" - allowed_ssh_cidr_blocks = "${var.allowed_ssh_cidr_blocks}" - user_data = "${data.template_file.user_data_data_center.rendered}" - ssh_key_name = "${var.ssh_key_name}" + cluster_name = local.cluster_name + cluster_tag_value = local.cluster_name + instance_type = var.instance_type + ami_id = var.ami_id + vpc_id = var.vpc_id + subnet_ids = var.subnet_ids + allowed_ssh_cidr_blocks = var.allowed_ssh_cidr_blocks + user_data = data.template_file.user_data_data_center.rendered + ssh_key_name = var.ssh_key_name associate_public_ip_address = false # To keep the example simple, we are using a fixed-size cluster. In real-world usage, you could use auto scaling # policies to dynamically resize the cluster in response to load. - min_size = "${local.min}" - max_size = "${local.max}" - desired_capacity = "${local.desired_capacity}" + min_size = local.min + max_size = local.max + desired_capacity = local.desired_capacity security_groups = [ - "${aws_security_group.sg_datacenter.id}", + aws_security_group.sg_datacenter.id, ] + # Access over cidr blocks is disabled here. # The need access for the nomad-server is granted over the # aws_security_group.sg_nomad_server_access.id. 
allowed_inbound_cidr_blocks = ["0.0.0.0/32"] + # propagate tags to the instances - tags = [ - { - "key" = "datacenter" - "value" = "${var.datacenter_name}" - "propagate_at_launch" = "true" - }, - { - "key" = "node-type" - "value" = "client" - "propagate_at_launch" = "true" - }, - "${var.additional_instance_tags}", - ] + tags = concat(local.default_tags,var.additional_instance_tags) + # Configuration for additional ebs_block devices - ebs_block_devices = "${var.ebs_block_devices}" + ebs_block_devices = var.ebs_block_devices } # --------------------------------------------------------------------------------------------------------------------- @@ -57,26 +61,28 @@ module "data_center" { # the Consul AWS Module's consul-iam-policies module. # --------------------------------------------------------------------------------------------------------------------- module "consul_iam_policies_datacenter" { - source = "git::https://github.com/hashicorp/terraform-aws-consul.git//modules/consul-iam-policies?ref=v0.3.1" + source = "git::https://github.com/hashicorp/terraform-aws-consul.git//modules/consul-iam-policies?ref=v0.7.0" - iam_role_id = "${module.data_center.iam_role_id}" + iam_role_id = module.data_center.iam_role_id } -data "aws_caller_identity" "aws_account_id" {} +data "aws_caller_identity" "aws_account_id" { +} # This script will configure and start Consul and Nomad data "template_file" "user_data_data_center" { - template = "${file("${path.module}/user-data-nomad-client.sh")}" + template = file("${path.module}/user-data-nomad-client.sh") - vars { - cluster_tag_key = "${var.consul_cluster_tag_key}" - cluster_tag_value = "${var.consul_cluster_tag_value}" - datacenter = "${var.datacenter_name}" - efs_dns_name = "${var.efs_dns_name}" - map_bucket_name = "${var.map_bucket_name}" - device_to_mount_target_map = "${join(" ", var.device_to_mount_target_map)}" - fs_type = "${var.fs_type}" - aws_account_id = "${data.aws_caller_identity.aws_account_id.account_id}" - 
aws_region = "${var.aws_region}" + vars = { + cluster_tag_key = var.consul_cluster_tag_key + cluster_tag_value = var.consul_cluster_tag_value + datacenter = var.datacenter_name + efs_dns_name = var.efs_dns_name + map_bucket_name = var.map_bucket_name + device_to_mount_target_map = join(" ", var.device_to_mount_target_map) + fs_type = var.fs_type + aws_account_id = data.aws_caller_identity.aws_account_id.account_id + aws_region = var.aws_region } } + diff --git a/modules/nomad-datacenter/datacenter_sg.tf b/modules/nomad-datacenter/datacenter_sg.tf index 8e759b3..522fe18 100644 --- a/modules/nomad-datacenter/datacenter_sg.tf +++ b/modules/nomad-datacenter/datacenter_sg.tf @@ -6,11 +6,11 @@ # Every client can get requests for the automatically assigned ports for each nomad job, # for example fabio will use consul to figure out the port mapping and direct requests directly to this ports. resource "aws_security_group" "sg_datacenter" { - vpc_id = "${var.vpc_id}" + vpc_id = var.vpc_id name = "${var.stack_name}-${var.datacenter_name}${var.unique_postfix}" description = "Security group that allows ingress access for the nomad service handling and docker ports." - tags { + tags = { Name = "${var.stack_name}-${var.datacenter_name}${var.unique_postfix}" } @@ -28,5 +28,6 @@ resource "aws_security_group_rule" "sgr_datacenter_eg_all" { to_port = 0 protocol = "-1" cidr_blocks = ["0.0.0.0/0"] - security_group_id = "${aws_security_group.sg_datacenter.id}" + security_group_id = aws_security_group.sg_datacenter.id } + diff --git a/modules/nomad-datacenter/ecr.tf b/modules/nomad-datacenter/ecr.tf index 64430df..b2542e1 100644 --- a/modules/nomad-datacenter/ecr.tf +++ b/modules/nomad-datacenter/ecr.tf @@ -2,13 +2,13 @@ # Policy-attachment that grants read access to AWS ECR for nomad clients resource "aws_iam_role_policy_attachment" "irpa_ecr_read_access" { # FIXME: Because of this constellation it is not possible to provide the ECR access configuration as module. 
- role = "${module.data_center.iam_role_id}" - policy_arn = "${aws_iam_policy.ip_ecr_read_access.arn}" + role = module.data_center.iam_role_id + policy_arn = aws_iam_policy.ip_ecr_read_access.arn } resource "aws_iam_policy" "ip_ecr_read_access" { name = "${var.stack_name}-${var.datacenter_name}${var.unique_postfix}" - policy = "${data.aws_iam_policy_document.ipd_ecr_read_access.json}" + policy = data.aws_iam_policy_document.ipd_ecr_read_access.json } data "aws_iam_policy_document" "ipd_ecr_read_access" { @@ -23,3 +23,4 @@ data "aws_iam_policy_document" "ipd_ecr_read_access" { resources = ["*"] } } + diff --git a/modules/nomad-datacenter/main.tf b/modules/nomad-datacenter/main.tf index 402b629..a5729ce 100644 --- a/modules/nomad-datacenter/main.tf +++ b/modules/nomad-datacenter/main.tf @@ -1,11 +1,6 @@ -# Terraform 0.9.5 suffered from https://github.com/hashicorp/terraform/issues/14399, which causes this template the -# conditionals in this template to fail. -terraform { - required_version = ">= 0.9.3, != 0.9.5" -} - locals { - short_dc_name = "${format("%.10s",var.datacenter_name)}" + short_dc_name = format("%.10s", var.datacenter_name) cluster_prefix = "${var.stack_name}-NMC" base_cluster_name = "${local.cluster_prefix}-${local.short_dc_name}" } + diff --git a/modules/nomad-datacenter/outputs.tf b/modules/nomad-datacenter/outputs.tf index 17b8b25..953fe19 100644 --- a/modules/nomad-datacenter/outputs.tf +++ b/modules/nomad-datacenter/outputs.tf @@ -1,27 +1,28 @@ output "num_nodes" { - value = "${module.data_center.cluster_size}" + value = module.data_center.cluster_size } output "asg_name" { - value = "${module.data_center.asg_name}" + value = module.data_center.asg_name } output "aws_region" { - value = "${var.aws_region}" + value = var.aws_region } output "cluster_tag_value" { - value = "${module.data_center.cluster_tag_value}" + value = module.data_center.cluster_tag_value } output "cluster_prefix" { - value = "${local.cluster_prefix}" + value = 
local.cluster_prefix } output "sg_datacenter_id" { - value = "${aws_security_group.sg_datacenter.id}" + value = aws_security_group.sg_datacenter.id } output "alb_https_targetgroup_arn" { - value = "${aws_alb_target_group.tgr_ingress_controller.*.arn}" + value = aws_alb_target_group.tgr_ingress_controller.*.arn } + diff --git a/modules/nomad-datacenter/vars.tf b/modules/nomad-datacenter/vars.tf index 9958f02..c9a4db9 100644 --- a/modules/nomad-datacenter/vars.tf +++ b/modules/nomad-datacenter/vars.tf @@ -21,7 +21,7 @@ variable "consul_cluster_tag_value" { variable "subnet_ids" { description = "Subnet id's for nomad client nodes providing this data-center." - type = "list" + type = list(string) } #### Optional Variables ############################################ @@ -62,7 +62,7 @@ variable "datacenter_name" { variable "node_scaling_cfg" { description = "Scaling configuration for the nomad nodes to deploy for this datacenter. You can deploy as many as you need to run your jobs." - type = "map" + type = map(string) default = { "min" = 1 @@ -83,7 +83,7 @@ variable "ssh_key_name" { variable "allowed_ssh_cidr_blocks" { description = "A list of cidr block from which inbound ssh traffic should be allowed for this datacenter." - type = "list" + type = list(string) default = [] } @@ -113,8 +113,7 @@ variable "map_bucket_name" { # }] variable "ebs_block_devices" { description = "List of ebs volume definitions for those ebs_volumes that should be added to the instances created with the EC2 launch-configurationd. Each element in the list is a map containing keys defined for ebs_block_device (see: https://www.terraform.io/docs/providers/aws/r/launch_configuration.html#ebs_block_device." - type = "list" - + type = any default = [] } @@ -125,7 +124,7 @@ variable "ebs_block_devices" { # Example: ["/dev/xvde:/mnt/map1","/dev/xvdf:/mnt/map2"] variable "device_to_mount_target_map" { description = "List of device to mount target entries." 
- type = "list" + type = list(string) default = [] } @@ -152,6 +151,11 @@ variable "fs_type" { # }] variable "additional_instance_tags" { description = "List of tags to add to the datacenter instances. The entries of the list are maps consiting of key, value and propagate at launch." - type = "list" + type = list(object({ + key = string + value = string + propagate_at_launch = bool + })) default = [] } + diff --git a/modules/nomad-datacenter/versions.tf b/modules/nomad-datacenter/versions.tf new file mode 100644 index 0000000..ac97c6a --- /dev/null +++ b/modules/nomad-datacenter/versions.tf @@ -0,0 +1,4 @@ + +terraform { + required_version = ">= 0.12" +} diff --git a/modules/nomad/main.tf b/modules/nomad/main.tf index 0335d23..df6d9bd 100644 --- a/modules/nomad/main.tf +++ b/modules/nomad/main.tf @@ -1,10 +1,4 @@ -# Terraform 0.9.5 suffered from https://github.com/hashicorp/terraform/issues/14399, which causes this template the -# conditionals in this template to fail. -terraform { - required_version = ">= 0.9.3, != 0.9.5" -} - locals { - short_dc_name = "${format("%.10s",var.datacenter_name)}" + short_dc_name = format("%.10s", var.datacenter_name) base_cluster_name = "${var.stack_name}-NMS-${local.short_dc_name}" -} +} \ No newline at end of file diff --git a/modules/nomad/outputs.tf b/modules/nomad/outputs.tf index ef2d1bb..45bfefa 100644 --- a/modules/nomad/outputs.tf +++ b/modules/nomad/outputs.tf @@ -1,35 +1,36 @@ output "num_nomad_servers" { - value = "${module.nomad_servers.cluster_size}" + value = module.nomad_servers.cluster_size } output "asg_name_nomad_servers" { - value = "${module.nomad_servers.asg_name}" + value = module.nomad_servers.asg_name } output "launch_config_name_nomad_servers" { - value = "${module.nomad_servers.launch_config_name}" + value = module.nomad_servers.launch_config_name } output "iam_role_arn_nomad_servers" { - value = "${module.nomad_servers.iam_role_arn}" + value = module.nomad_servers.iam_role_arn } output 
"iam_role_id_nomad_servers" { - value = "${module.nomad_servers.iam_role_id}" + value = module.nomad_servers.iam_role_id } output "security_group_id_nomad_servers" { - value = "${aws_security_group.sg_server.id}" + value = aws_security_group.sg_server.id } output "aws_region" { - value = "${var.aws_region}" + value = var.aws_region } output "nomad_servers_cluster_tag_key" { - value = "${module.nomad_servers.cluster_tag_key}" + value = module.nomad_servers.cluster_tag_key } output "nomad_servers_cluster_tag_value" { - value = "${module.nomad_servers.cluster_tag_value}" + value = module.nomad_servers.cluster_tag_value } + diff --git a/modules/nomad/server_sg.tf b/modules/nomad/server_sg.tf index b558da6..2c9e888 100644 --- a/modules/nomad/server_sg.tf +++ b/modules/nomad/server_sg.tf @@ -1,9 +1,9 @@ resource "aws_security_group" "sg_server" { - vpc_id = "${var.vpc_id}" + vpc_id = var.vpc_id name = "${local.base_cluster_name}-SG${var.unique_postfix}" description = "Security group for the nomad-server." 
- tags { + tags = { Name = "${local.base_cluster_name}-SG${var.unique_postfix}" } @@ -21,7 +21,7 @@ resource "aws_security_group_rule" "sgr_server_eg_all" { to_port = 0 protocol = "-1" cidr_blocks = ["0.0.0.0/0"] - security_group_id = "${aws_security_group.sg_server.id}" + security_group_id = aws_security_group.sg_server.id } # Inject rule into sg of nomad server to get access over ports 4646..4648 @@ -32,8 +32,8 @@ resource "aws_security_group_rule" "sgr_server_to_server_ig4646_4648" { to_port = 4648 protocol = "tcp" - source_security_group_id = "${aws_security_group.sg_server.id}" - security_group_id = "${aws_security_group.sg_server.id}" + source_security_group_id = aws_security_group.sg_server.id + security_group_id = aws_security_group.sg_server.id } # Inject rule into sg of nomad server to get access over ports 4648 @@ -44,6 +44,7 @@ resource "aws_security_group_rule" "sgr_serversto_server_ig4648" { to_port = 4648 protocol = "udp" - source_security_group_id = "${aws_security_group.sg_server.id}" - security_group_id = "${aws_security_group.sg_server.id}" + source_security_group_id = aws_security_group.sg_server.id + security_group_id = aws_security_group.sg_server.id } + diff --git a/modules/nomad/servers.tf b/modules/nomad/servers.tf index 9640017..64155e0 100644 --- a/modules/nomad/servers.tf +++ b/modules/nomad/servers.tf @@ -1,8 +1,8 @@ # reading values from the node_scaling_cfg locals { - min = "${lookup(var.node_scaling_cfg,"min","INVALID")}" - max = "${lookup(var.node_scaling_cfg,"max","INVALID")}" - desired_capacity = "${lookup(var.node_scaling_cfg,"desired_capacity","INVALID")}" + min = lookup(var.node_scaling_cfg, "min", "INVALID") + max = lookup(var.node_scaling_cfg, "max", "INVALID") + desired_capacity = lookup(var.node_scaling_cfg, "desired_capacity", "INVALID") cluster_name = "${local.base_cluster_name}${var.unique_postfix}" } @@ -10,25 +10,25 @@ locals { # DEPLOY THE NOMAD SERVER NODES # 
--------------------------------------------------------------------------------------------------------------------- module "nomad_servers" { - source = "git::https://github.com/hashicorp/terraform-aws-nomad.git//modules/nomad-cluster?ref=v0.4.5" + source = "git::https://github.com/hashicorp/terraform-aws-nomad.git//modules/nomad-cluster?ref=v0.5.0" - cluster_name = "${local.cluster_name}" - cluster_tag_value = "${local.cluster_name}" - instance_type = "${var.instance_type}" - ami_id = "${var.ami_id}" - vpc_id = "${var.vpc_id}" - subnet_ids = "${var.subnet_ids}" - allowed_ssh_cidr_blocks = "${var.allowed_ssh_cidr_blocks}" - user_data = "${data.template_file.user_data_server.rendered}" - ssh_key_name = "${var.ssh_key_name}" + cluster_name = local.cluster_name + cluster_tag_value = local.cluster_name + instance_type = var.instance_type + ami_id = var.ami_id + vpc_id = var.vpc_id + subnet_ids = var.subnet_ids + allowed_ssh_cidr_blocks = var.allowed_ssh_cidr_blocks + user_data = data.template_file.user_data_server.rendered + ssh_key_name = var.ssh_key_name associate_public_ip_address = false # You should typically use a fixed size of 3 or 5 for your Nomad server cluster - min_size = "${local.min}" - max_size = "${local.max}" - desired_capacity = "${local.desired_capacity}" + min_size = local.min + max_size = local.max + desired_capacity = local.desired_capacity - security_groups = ["${aws_security_group.sg_server.id}"] + security_groups = [aws_security_group.sg_server.id] # Access over cidr blocks is disabled here. # The need access for the nomad-server is granted over the @@ -39,7 +39,7 @@ module "nomad_servers" { tags = [ { "key" = "datacenter" - "value" = "${var.datacenter_name}" + "value" = var.datacenter_name "propagate_at_launch" = "true" }, { @@ -56,22 +56,24 @@ module "nomad_servers" { # the Consul AWS Module's consul-iam-policies module. 
# --------------------------------------------------------------------------------------------------------------------- module "consul_iam_policies_servers" { - source = "git::https://github.com/hashicorp/terraform-aws-consul.git//modules/consul-iam-policies?ref=v0.3.1" - iam_role_id = "${module.nomad_servers.iam_role_id}" + source = "git::https://github.com/hashicorp/terraform-aws-consul.git//modules/consul-iam-policies?ref=v0.7.0" + iam_role_id = module.nomad_servers.iam_role_id } -data "aws_caller_identity" "aws_account_id" {} +data "aws_caller_identity" "aws_account_id" { +} # This script will configure and start Consul and Nomad data "template_file" "user_data_server" { - template = "${file("${path.module}/user-data-nomad-server.sh")}" + template = file("${path.module}/user-data-nomad-server.sh") - vars { - num_servers = "${local.desired_capacity}" - cluster_tag_key = "${var.consul_cluster_tag_key}" - cluster_tag_value = "${var.consul_cluster_tag_value}" - datacenter = "${var.datacenter_name}" - aws_account_id = "${data.aws_caller_identity.aws_account_id.account_id}" - aws_region = "${var.aws_region}" + vars = { + num_servers = local.desired_capacity + cluster_tag_key = var.consul_cluster_tag_key + cluster_tag_value = var.consul_cluster_tag_value + datacenter = var.datacenter_name + aws_account_id = data.aws_caller_identity.aws_account_id.account_id + aws_region = var.aws_region } } + diff --git a/modules/nomad/vars.tf b/modules/nomad/vars.tf index c5294fb..c9c2108 100644 --- a/modules/nomad/vars.tf +++ b/modules/nomad/vars.tf @@ -9,7 +9,7 @@ variable "vpc_id" { variable "subnet_ids" { description = "Ids of the subnets to deploy the nomad servers into." - type = "list" + type = list(string) } variable "consul_cluster_tag_key" { @@ -20,10 +20,6 @@ variable "consul_cluster_tag_value" { description = "This variable defines the value of the tag defined by consul_cluster_tag_key. This is used to find the consul servers (see: consul_cluster_tag_key)." 
} -variable "consul_cluster_security_group_id" { - description = "Id of the security-group of the consul server." -} - #### Optional Variables ############################################ variable "env_name" { description = "name of the environment (i.e. prod)" @@ -57,7 +53,7 @@ variable "unique_postfix" { variable "allowed_ssh_cidr_blocks" { description = "A list of cidr block from which inbound ssh traffic should be allowed." - type = "list" + type = list(string) default = [] } @@ -68,7 +64,7 @@ variable "datacenter_name" { variable "node_scaling_cfg" { description = "Scaling configuration for the nomad servers." - type = "map" + type = map(string) default = { "min" = 3 diff --git a/modules/nomad/versions.tf b/modules/nomad/versions.tf new file mode 100644 index 0000000..ac97c6a --- /dev/null +++ b/modules/nomad/versions.tf @@ -0,0 +1,4 @@ + +terraform { + required_version = ">= 0.12" +} diff --git a/modules/sgrules/sg_consul_nomad_clients.tf b/modules/sgrules/sg_consul_nomad_clients.tf index 050c949..ae6f171 100644 --- a/modules/sgrules/sg_consul_nomad_clients.tf +++ b/modules/sgrules/sg_consul_nomad_clients.tf @@ -16,8 +16,8 @@ resource "aws_security_group_rule" "sgr_public_services_to_consul_tcp" { from_port = 8300 to_port = 8302 protocol = "tcp" - source_security_group_id = "${var.sg_id_public_services_dc}" - security_group_id = "${var.sg_id_consul}" + source_security_group_id = var.sg_id_public_services_dc + security_group_id = var.sg_id_consul } # rule granting access from private-services data-center consul on ports @@ -29,8 +29,8 @@ resource "aws_security_group_rule" "sgr_private_services_to_consul_tcp" { from_port = 8300 to_port = 8302 protocol = "tcp" - source_security_group_id = "${var.sg_id_private_services_dc}" - security_group_id = "${var.sg_id_consul}" + source_security_group_id = var.sg_id_private_services_dc + security_group_id = var.sg_id_consul } # rule granting access from content-connector data-center consul on ports @@ -42,8 +42,8 @@ 
resource "aws_security_group_rule" "sgr_content_connector_to_consul_tcp" { from_port = 8300 to_port = 8302 protocol = "tcp" - source_security_group_id = "${var.sg_id_content_connector_dc}" - security_group_id = "${var.sg_id_consul}" + source_security_group_id = var.sg_id_content_connector_dc + security_group_id = var.sg_id_consul } # rule granting access from backoffice data-center consul on ports @@ -55,8 +55,8 @@ resource "aws_security_group_rule" "sgr_backoffice_to_consul_tcp" { from_port = 8300 to_port = 8302 protocol = "tcp" - source_security_group_id = "${var.sg_id_backoffice_dc}" - security_group_id = "${var.sg_id_consul}" + source_security_group_id = var.sg_id_backoffice_dc + security_group_id = var.sg_id_consul } # rule granting access from public-services data-center consul on ports @@ -68,8 +68,8 @@ resource "aws_security_group_rule" "sgr_public_services_to_consul_udp" { from_port = 8301 to_port = 8302 protocol = "udp" - source_security_group_id = "${var.sg_id_public_services_dc}" - security_group_id = "${var.sg_id_consul}" + source_security_group_id = var.sg_id_public_services_dc + security_group_id = var.sg_id_consul } # rule granting access from private-services data-center consul on ports @@ -81,8 +81,8 @@ resource "aws_security_group_rule" "sgr_private_services_to_consul_udp" { from_port = 8301 to_port = 8302 protocol = "udp" - source_security_group_id = "${var.sg_id_private_services_dc}" - security_group_id = "${var.sg_id_consul}" + source_security_group_id = var.sg_id_private_services_dc + security_group_id = var.sg_id_consul } # rule granting access from content-connector data-center consul on ports @@ -94,8 +94,8 @@ resource "aws_security_group_rule" "sgr_content_connector_to_consul_udp" { from_port = 8301 to_port = 8302 protocol = "udp" - source_security_group_id = "${var.sg_id_content_connector_dc}" - security_group_id = "${var.sg_id_consul}" + source_security_group_id = var.sg_id_content_connector_dc + security_group_id = var.sg_id_consul } # 
rule granting access from backoffice data-center consul on ports @@ -107,8 +107,8 @@ resource "aws_security_group_rule" "sgr_backoffice_to_consul_udp" { from_port = 8301 to_port = 8302 protocol = "udp" - source_security_group_id = "${var.sg_id_backoffice_dc}" - security_group_id = "${var.sg_id_consul}" + source_security_group_id = var.sg_id_backoffice_dc + security_group_id = var.sg_id_consul } # rule granting access from public-services data-center consul on ports @@ -120,8 +120,8 @@ resource "aws_security_group_rule" "sgr_public_services_to_consul_http" { from_port = 8500 to_port = 8500 protocol = "tcp" - source_security_group_id = "${var.sg_id_public_services_dc}" - security_group_id = "${var.sg_id_consul}" + source_security_group_id = var.sg_id_public_services_dc + security_group_id = var.sg_id_consul } # rule granting access from private-services data-center consul on ports @@ -133,8 +133,8 @@ resource "aws_security_group_rule" "sgr_private_services_to_consul_http" { from_port = 8500 to_port = 8500 protocol = "tcp" - source_security_group_id = "${var.sg_id_private_services_dc}" - security_group_id = "${var.sg_id_consul}" + source_security_group_id = var.sg_id_private_services_dc + security_group_id = var.sg_id_consul } # rule granting access from content-connector data-center consul on ports @@ -146,8 +146,8 @@ resource "aws_security_group_rule" "sgr_content_connector_to_consul_http" { from_port = 8500 to_port = 8500 protocol = "tcp" - source_security_group_id = "${var.sg_id_content_connector_dc}" - security_group_id = "${var.sg_id_consul}" + source_security_group_id = var.sg_id_content_connector_dc + security_group_id = var.sg_id_consul } # rule granting access from backoffice data-center consul on ports @@ -159,8 +159,8 @@ resource "aws_security_group_rule" "sgr_backoffice_to_consul_http" { from_port = 8500 to_port = 8500 protocol = "tcp" - source_security_group_id = "${var.sg_id_backoffice_dc}" - security_group_id = "${var.sg_id_consul}" + 
source_security_group_id = var.sg_id_backoffice_dc + security_group_id = var.sg_id_consul } # rule granting access from public-services data-center consul on ports @@ -172,8 +172,8 @@ resource "aws_security_group_rule" "sgr_public_services_to_consul_dns_tcp" { from_port = 8600 to_port = 8600 protocol = "tcp" - source_security_group_id = "${var.sg_id_public_services_dc}" - security_group_id = "${var.sg_id_consul}" + source_security_group_id = var.sg_id_public_services_dc + security_group_id = var.sg_id_consul } # rule granting access from private-services data-center consul on ports @@ -185,8 +185,8 @@ resource "aws_security_group_rule" "sgr_private_services_to_consul_dns_tcp" { from_port = 8600 to_port = 8600 protocol = "tcp" - source_security_group_id = "${var.sg_id_private_services_dc}" - security_group_id = "${var.sg_id_consul}" + source_security_group_id = var.sg_id_private_services_dc + security_group_id = var.sg_id_consul } # rule granting access from content-connector data-center consul on ports @@ -198,8 +198,8 @@ resource "aws_security_group_rule" "sgr_content_connector_to_consul_dns_tcp" { from_port = 8600 to_port = 8600 protocol = "tcp" - source_security_group_id = "${var.sg_id_content_connector_dc}" - security_group_id = "${var.sg_id_consul}" + source_security_group_id = var.sg_id_content_connector_dc + security_group_id = var.sg_id_consul } # rule granting access from backoffice data-center consul on ports @@ -211,8 +211,8 @@ resource "aws_security_group_rule" "sgr_backoffice_to_consul_dns_tcp" { from_port = 8600 to_port = 8600 protocol = "tcp" - source_security_group_id = "${var.sg_id_backoffice_dc}" - security_group_id = "${var.sg_id_consul}" + source_security_group_id = var.sg_id_backoffice_dc + security_group_id = var.sg_id_consul } # rule granting access from public-services data-center consul on ports @@ -224,8 +224,8 @@ resource "aws_security_group_rule" "sgr_public_services_to_consul_dns_udp" { from_port = 8600 to_port = 8600 protocol = "udp" - 
source_security_group_id = "${var.sg_id_public_services_dc}" - security_group_id = "${var.sg_id_consul}" + source_security_group_id = var.sg_id_public_services_dc + security_group_id = var.sg_id_consul } # rule granting access from private-services data-center consul on ports @@ -237,8 +237,8 @@ resource "aws_security_group_rule" "sgr_private_services_to_consul_dns_udp" { from_port = 8600 to_port = 8600 protocol = "udp" - source_security_group_id = "${var.sg_id_private_services_dc}" - security_group_id = "${var.sg_id_consul}" + source_security_group_id = var.sg_id_private_services_dc + security_group_id = var.sg_id_consul } # rule granting access from content-connector data-center consul on ports @@ -250,8 +250,8 @@ resource "aws_security_group_rule" "sgr_content_connector_to_consul_dns_udp" { from_port = 8600 to_port = 8600 protocol = "udp" - source_security_group_id = "${var.sg_id_content_connector_dc}" - security_group_id = "${var.sg_id_consul}" + source_security_group_id = var.sg_id_content_connector_dc + security_group_id = var.sg_id_consul } # rule granting access from backoffice data-center consul on ports @@ -263,6 +263,6 @@ resource "aws_security_group_rule" "sgr_backoffice_to_consul_dns_udp" { from_port = 8600 to_port = 8600 protocol = "udp" - source_security_group_id = "${var.sg_id_backoffice_dc}" - security_group_id = "${var.sg_id_consul}" -} + source_security_group_id = var.sg_id_backoffice_dc + security_group_id = var.sg_id_consul +} \ No newline at end of file diff --git a/modules/sgrules/sg_consul_nomad_server.tf b/modules/sgrules/sg_consul_nomad_server.tf index fc08ddb..724180d 100644 --- a/modules/sgrules/sg_consul_nomad_server.tf +++ b/modules/sgrules/sg_consul_nomad_server.tf @@ -16,8 +16,8 @@ resource "aws_security_group_rule" "sgr_nomad_server_to_consul_tcp" { from_port = 8300 to_port = 8302 protocol = "tcp" - source_security_group_id = "${var.sg_id_nomad_server}" - security_group_id = "${var.sg_id_consul}" + source_security_group_id = 
var.sg_id_nomad_server + security_group_id = var.sg_id_consul } # rule granting access from nomad server to consul on ports @@ -29,8 +29,8 @@ resource "aws_security_group_rule" "sgr_nomad_server_to_consul_udp" { from_port = 8301 to_port = 8302 protocol = "udp" - source_security_group_id = "${var.sg_id_nomad_server}" - security_group_id = "${var.sg_id_consul}" + source_security_group_id = var.sg_id_nomad_server + security_group_id = var.sg_id_consul } # rule granting access from nomad server to consul on ports @@ -42,8 +42,8 @@ resource "aws_security_group_rule" "sgr_nomad_server_to_consul_http" { from_port = 8500 to_port = 8500 protocol = "tcp" - source_security_group_id = "${var.sg_id_nomad_server}" - security_group_id = "${var.sg_id_consul}" + source_security_group_id = var.sg_id_nomad_server + security_group_id = var.sg_id_consul } # rule granting access from nomad server to consul on ports @@ -55,8 +55,8 @@ resource "aws_security_group_rule" "sgr_nomad_server_to_consul_dns_tcp" { from_port = 8600 to_port = 8600 protocol = "tcp" - source_security_group_id = "${var.sg_id_nomad_server}" - security_group_id = "${var.sg_id_consul}" + source_security_group_id = var.sg_id_nomad_server + security_group_id = var.sg_id_consul } # rule granting access from nomad server to consul on ports @@ -68,6 +68,7 @@ resource "aws_security_group_rule" "sgr_nomad_server_to_consul_dns_udp" { from_port = 8600 to_port = 8600 protocol = "udp" - source_security_group_id = "${var.sg_id_nomad_server}" - security_group_id = "${var.sg_id_consul}" + source_security_group_id = var.sg_id_nomad_server + security_group_id = var.sg_id_consul } + diff --git a/modules/sgrules/sg_consul_ui_alb.tf b/modules/sgrules/sg_consul_ui_alb.tf index 76a168b..a27b3ee 100644 --- a/modules/sgrules/sg_consul_ui_alb.tf +++ b/modules/sgrules/sg_consul_ui_alb.tf @@ -6,6 +6,7 @@ resource "aws_security_group_rule" "sgr_ui_alb_to_consul_tcp" { from_port = 8500 to_port = 8500 protocol = "tcp" - source_security_group_id = 
"${var.sg_id_ui_alb_nomad}" - security_group_id = "${var.sg_id_consul}" + source_security_group_id = var.sg_id_ui_alb_nomad + security_group_id = var.sg_id_consul } + diff --git a/modules/sgrules/sg_nomad_clients.tf b/modules/sgrules/sg_nomad_clients.tf index e54ea80..ef1c274 100644 --- a/modules/sgrules/sg_nomad_clients.tf +++ b/modules/sgrules/sg_nomad_clients.tf @@ -9,8 +9,8 @@ resource "aws_security_group_rule" "sgr_private_to_public_services_http_rcp" { from_port = 4646 to_port = 4647 protocol = "tcp" - source_security_group_id = "${var.sg_id_private_services_dc}" - security_group_id = "${var.sg_id_public_services_dc}" + source_security_group_id = var.sg_id_private_services_dc + security_group_id = var.sg_id_public_services_dc } # rule granting access from content-connector to public services data-center on ports @@ -22,8 +22,8 @@ resource "aws_security_group_rule" "sgr_content_connector_to_public_services_htt from_port = 4646 to_port = 4647 protocol = "tcp" - source_security_group_id = "${var.sg_id_content_connector_dc}" - security_group_id = "${var.sg_id_public_services_dc}" + source_security_group_id = var.sg_id_content_connector_dc + security_group_id = var.sg_id_public_services_dc } # rule granting access from backoffice to public services data-center on ports @@ -35,8 +35,8 @@ resource "aws_security_group_rule" "sgr_backoffice_to_public_services_http_rcp" from_port = 4646 to_port = 4647 protocol = "tcp" - source_security_group_id = "${var.sg_id_backoffice_dc}" - security_group_id = "${var.sg_id_public_services_dc}" + source_security_group_id = var.sg_id_backoffice_dc + security_group_id = var.sg_id_public_services_dc } # rule granting access from public to private services data-center on ports @@ -48,8 +48,8 @@ resource "aws_security_group_rule" "sgr_public_to_private_services_http_rcp" { from_port = 4646 to_port = 4647 protocol = "tcp" - source_security_group_id = "${var.sg_id_public_services_dc}" - security_group_id = "${var.sg_id_private_services_dc}" 
+ source_security_group_id = var.sg_id_public_services_dc + security_group_id = var.sg_id_private_services_dc } # rule granting access from content-connector to private services data-center on ports @@ -61,8 +61,8 @@ resource "aws_security_group_rule" "sgr_content_connector_to_private_services_ht from_port = 4646 to_port = 4647 protocol = "tcp" - source_security_group_id = "${var.sg_id_content_connector_dc}" - security_group_id = "${var.sg_id_private_services_dc}" + source_security_group_id = var.sg_id_content_connector_dc + security_group_id = var.sg_id_private_services_dc } # rule granting access from backoffice to private services data-center on ports @@ -74,8 +74,8 @@ resource "aws_security_group_rule" "sgr_backoffice_to_private_services_http_rcp" from_port = 4646 to_port = 4647 protocol = "tcp" - source_security_group_id = "${var.sg_id_backoffice_dc}" - security_group_id = "${var.sg_id_private_services_dc}" + source_security_group_id = var.sg_id_backoffice_dc + security_group_id = var.sg_id_private_services_dc } # rule granting access from public to content-connector data-center on ports @@ -87,8 +87,8 @@ resource "aws_security_group_rule" "sgr_public_to_content_connector_http_rcp" { from_port = 4646 to_port = 4647 protocol = "tcp" - source_security_group_id = "${var.sg_id_public_services_dc}" - security_group_id = "${var.sg_id_content_connector_dc}" + source_security_group_id = var.sg_id_public_services_dc + security_group_id = var.sg_id_content_connector_dc } # rule granting access from private-services to content-connector data-center on ports @@ -100,8 +100,8 @@ resource "aws_security_group_rule" "sgr_private_to_content_connector_http_rcp" { from_port = 4646 to_port = 4647 protocol = "tcp" - source_security_group_id = "${var.sg_id_private_services_dc}" - security_group_id = "${var.sg_id_content_connector_dc}" + source_security_group_id = var.sg_id_private_services_dc + security_group_id = var.sg_id_content_connector_dc } # rule granting access from 
backoffice to content-connector data-center on ports @@ -113,8 +113,8 @@ resource "aws_security_group_rule" "sgr_backoffice_to_content_connector_http_rcp from_port = 4646 to_port = 4647 protocol = "tcp" - source_security_group_id = "${var.sg_id_backoffice_dc}" - security_group_id = "${var.sg_id_content_connector_dc}" + source_security_group_id = var.sg_id_backoffice_dc + security_group_id = var.sg_id_content_connector_dc } # rule granting access from public to backoffice data-center on ports @@ -126,8 +126,8 @@ resource "aws_security_group_rule" "sgr_public_to_backoffice_http_rcp" { from_port = 4646 to_port = 4647 protocol = "tcp" - source_security_group_id = "${var.sg_id_public_services_dc}" - security_group_id = "${var.sg_id_backoffice_dc}" + source_security_group_id = var.sg_id_public_services_dc + security_group_id = var.sg_id_backoffice_dc } # rule granting access from content-connector to backoffice data-center on ports @@ -139,8 +139,8 @@ resource "aws_security_group_rule" "sgr_content_connector_to_backoffice_http_rcp from_port = 4646 to_port = 4647 protocol = "tcp" - source_security_group_id = "${var.sg_id_content_connector_dc}" - security_group_id = "${var.sg_id_backoffice_dc}" + source_security_group_id = var.sg_id_content_connector_dc + security_group_id = var.sg_id_backoffice_dc } # rule granting access from private to backoffice data-center on ports @@ -152,8 +152,8 @@ resource "aws_security_group_rule" "sgr_private_to_backoffice_http_rcp" { from_port = 4646 to_port = 4647 protocol = "tcp" - source_security_group_id = "${var.sg_id_private_services_dc}" - security_group_id = "${var.sg_id_backoffice_dc}" + source_security_group_id = var.sg_id_private_services_dc + security_group_id = var.sg_id_backoffice_dc } # rule granting self-access to backoffice data-center on ports @@ -165,8 +165,8 @@ resource "aws_security_group_rule" "sgr_backoffice_to_backoffice_http_rcp" { from_port = 4646 to_port = 4647 protocol = "tcp" - source_security_group_id = 
"${var.sg_id_backoffice_dc}" - security_group_id = "${var.sg_id_backoffice_dc}" + source_security_group_id = var.sg_id_backoffice_dc + security_group_id = var.sg_id_backoffice_dc } # rule granting self-access to private_services data-center on ports @@ -178,8 +178,8 @@ resource "aws_security_group_rule" "sgr_private_to_private_http_rcp" { from_port = 4646 to_port = 4647 protocol = "tcp" - source_security_group_id = "${var.sg_id_private_services_dc}" - security_group_id = "${var.sg_id_private_services_dc}" + source_security_group_id = var.sg_id_private_services_dc + security_group_id = var.sg_id_private_services_dc } # rule granting self-access to public_services data-center on ports @@ -191,8 +191,8 @@ resource "aws_security_group_rule" "sgr_public_to_public_http_rcp" { from_port = 4646 to_port = 4647 protocol = "tcp" - source_security_group_id = "${var.sg_id_public_services_dc}" - security_group_id = "${var.sg_id_public_services_dc}" + source_security_group_id = var.sg_id_public_services_dc + security_group_id = var.sg_id_public_services_dc } # rule granting self-access to content-connector data-center on ports @@ -204,6 +204,7 @@ resource "aws_security_group_rule" "sgr_content_connector_to_content_connector_h from_port = 4646 to_port = 4647 protocol = "tcp" - source_security_group_id = "${var.sg_id_content_connector_dc}" - security_group_id = "${var.sg_id_content_connector_dc}" -} \ No newline at end of file + source_security_group_id = var.sg_id_content_connector_dc + security_group_id = var.sg_id_content_connector_dc +} + diff --git a/modules/sgrules/sg_nomad_clients_consul.tf b/modules/sgrules/sg_nomad_clients_consul.tf index 7234ac1..063a994 100644 --- a/modules/sgrules/sg_nomad_clients_consul.tf +++ b/modules/sgrules/sg_nomad_clients_consul.tf @@ -16,8 +16,8 @@ resource "aws_security_group_rule" "sgr_consul_to_public_services_tcp" { from_port = 8301 to_port = 8302 protocol = "tcp" - source_security_group_id = "${var.sg_id_consul}" - security_group_id = 
"${var.sg_id_public_services_dc}" + source_security_group_id = var.sg_id_consul + security_group_id = var.sg_id_public_services_dc } # rule granting access from consul to private-services data-center on ports @@ -29,8 +29,8 @@ resource "aws_security_group_rule" "sgr_consul_to_private_services_tcp" { from_port = 8301 to_port = 8302 protocol = "tcp" - source_security_group_id = "${var.sg_id_consul}" - security_group_id = "${var.sg_id_private_services_dc}" + source_security_group_id = var.sg_id_consul + security_group_id = var.sg_id_private_services_dc } # rule granting access from consul to content-connector data-center on ports @@ -42,8 +42,8 @@ resource "aws_security_group_rule" "sgr_consul_to_content_connector_tcp" { from_port = 8301 to_port = 8302 protocol = "tcp" - source_security_group_id = "${var.sg_id_consul}" - security_group_id = "${var.sg_id_content_connector_dc}" + source_security_group_id = var.sg_id_consul + security_group_id = var.sg_id_content_connector_dc } # rule granting access from consul to backoffice data-center on ports @@ -55,8 +55,8 @@ resource "aws_security_group_rule" "sgr_consul_to_backoffice_tcp" { from_port = 8301 to_port = 8302 protocol = "tcp" - source_security_group_id = "${var.sg_id_consul}" - security_group_id = "${var.sg_id_backoffice_dc}" + source_security_group_id = var.sg_id_consul + security_group_id = var.sg_id_backoffice_dc } # rule granting access from consul to public-services data-center on ports @@ -68,8 +68,8 @@ resource "aws_security_group_rule" "sgr_consul_to_public_services_udp" { from_port = 8301 to_port = 8302 protocol = "udp" - source_security_group_id = "${var.sg_id_consul}" - security_group_id = "${var.sg_id_public_services_dc}" + source_security_group_id = var.sg_id_consul + security_group_id = var.sg_id_public_services_dc } # rule granting access from consul to private-services data-center on ports @@ -81,8 +81,8 @@ resource "aws_security_group_rule" "sgr_consul_to_private_services_udp" { from_port = 8301 
to_port = 8302 protocol = "udp" - source_security_group_id = "${var.sg_id_consul}" - security_group_id = "${var.sg_id_private_services_dc}" + source_security_group_id = var.sg_id_consul + security_group_id = var.sg_id_private_services_dc } # rule granting access from consul to content-connector data-center on ports @@ -94,8 +94,8 @@ resource "aws_security_group_rule" "sgr_consul_to_content_connector_udp" { from_port = 8301 to_port = 8302 protocol = "udp" - source_security_group_id = "${var.sg_id_consul}" - security_group_id = "${var.sg_id_content_connector_dc}" + source_security_group_id = var.sg_id_consul + security_group_id = var.sg_id_content_connector_dc } # rule granting access from consul to backoffice data-center on ports @@ -107,6 +107,7 @@ resource "aws_security_group_rule" "sgr_consul_to_backoffice_udp" { from_port = 8301 to_port = 8302 protocol = "udp" - source_security_group_id = "${var.sg_id_consul}" - security_group_id = "${var.sg_id_backoffice_dc}" + source_security_group_id = var.sg_id_consul + security_group_id = var.sg_id_backoffice_dc } + diff --git a/modules/sgrules/sg_nomad_clients_docker.tf b/modules/sgrules/sg_nomad_clients_docker.tf index c982bfc..334cd14 100644 --- a/modules/sgrules/sg_nomad_clients_docker.tf +++ b/modules/sgrules/sg_nomad_clients_docker.tf @@ -9,7 +9,7 @@ resource "aws_security_group_rule" "sgr_to_public_services_docker" { to_port = 32000 protocol = "tcp" cidr_blocks = ["0.0.0.0/0"] - security_group_id = "${var.sg_id_public_services_dc}" + security_group_id = var.sg_id_public_services_dc } # rule granting access to private services data-center for docker ports @@ -21,7 +21,7 @@ resource "aws_security_group_rule" "sgr_to_private_services_docker" { to_port = 32000 protocol = "tcp" cidr_blocks = ["0.0.0.0/0"] - security_group_id = "${var.sg_id_private_services_dc}" + security_group_id = var.sg_id_private_services_dc } # rule granting access to content-conncetor data-center for docker ports @@ -33,7 +33,7 @@ resource 
"aws_security_group_rule" "sgr_to_content_connector_docker" { to_port = 32000 protocol = "tcp" cidr_blocks = ["0.0.0.0/0"] - security_group_id = "${var.sg_id_content_connector_dc}" + security_group_id = var.sg_id_content_connector_dc } # rule granting access to backoffice data-center for docker ports @@ -45,7 +45,7 @@ resource "aws_security_group_rule" "sgr_to_backoffice_docker" { to_port = 32000 protocol = "tcp" cidr_blocks = ["0.0.0.0/0"] - security_group_id = "${var.sg_id_backoffice_dc}" + security_group_id = var.sg_id_backoffice_dc } # rule granting access to public services data-center for docker ports @@ -57,7 +57,7 @@ resource "aws_security_group_rule" "sgr_to_public_services_docker_udp" { to_port = 32000 protocol = "udp" cidr_blocks = ["0.0.0.0/0"] - security_group_id = "${var.sg_id_public_services_dc}" + security_group_id = var.sg_id_public_services_dc } # rule granting access to private services data-center for docker ports @@ -69,7 +69,7 @@ resource "aws_security_group_rule" "sgr_to_private_services_docker_udp" { to_port = 32000 protocol = "udp" cidr_blocks = ["0.0.0.0/0"] - security_group_id = "${var.sg_id_private_services_dc}" + security_group_id = var.sg_id_private_services_dc } # rule granting access to content-conncetor data-center for docker ports @@ -81,7 +81,7 @@ resource "aws_security_group_rule" "sgr_to_content_connector_docker_udp" { to_port = 32000 protocol = "udp" cidr_blocks = ["0.0.0.0/0"] - security_group_id = "${var.sg_id_content_connector_dc}" + security_group_id = var.sg_id_content_connector_dc } # rule granting access to backoffice data-center for docker ports @@ -93,5 +93,6 @@ resource "aws_security_group_rule" "sgr_to_backoffice_docker_udp" { to_port = 32000 protocol = "udp" cidr_blocks = ["0.0.0.0/0"] - security_group_id = "${var.sg_id_backoffice_dc}" + security_group_id = var.sg_id_backoffice_dc } + diff --git a/modules/sgrules/sg_nomad_clients_igress.tf b/modules/sgrules/sg_nomad_clients_igress.tf index 7f5e64d..75f4260 100644 
--- a/modules/sgrules/sg_nomad_clients_igress.tf +++ b/modules/sgrules/sg_nomad_clients_igress.tf @@ -9,7 +9,7 @@ resource "aws_security_group_rule" "sgr_public_services_ig_999x" { to_port = 9999 protocol = "tcp" cidr_blocks = ["0.0.0.0/0"] - security_group_id = "${var.sg_id_public_services_dc}" + security_group_id = var.sg_id_public_services_dc } # rule granting access on igress-ports to private services data-center on ports @@ -21,10 +21,9 @@ resource "aws_security_group_rule" "sgr_private_services_ig_999x" { to_port = 9999 protocol = "tcp" cidr_blocks = ["0.0.0.0/0"] - security_group_id = "${var.sg_id_private_services_dc}" + security_group_id = var.sg_id_private_services_dc } - # rule granting access on igress-ports to content-connector data-center on ports # 9998 ... 9999 resource "aws_security_group_rule" "sgr_content_connector_ig_999x" { @@ -34,10 +33,9 @@ resource "aws_security_group_rule" "sgr_content_connector_ig_999x" { to_port = 9999 protocol = "tcp" cidr_blocks = ["0.0.0.0/0"] - security_group_id = "${var.sg_id_content_connector_dc}" + security_group_id = var.sg_id_content_connector_dc } - # rule granting access on igress-ports to backoffice services data-center on ports # 9998 ... 
9999 resource "aws_security_group_rule" "sgr_backoffice_ig_999x" { @@ -47,5 +45,6 @@ resource "aws_security_group_rule" "sgr_backoffice_ig_999x" { to_port = 9999 protocol = "tcp" cidr_blocks = ["0.0.0.0/0"] - security_group_id = "${var.sg_id_backoffice_dc}" + security_group_id = var.sg_id_backoffice_dc } + diff --git a/modules/sgrules/sg_nomad_clients_server_access.tf b/modules/sgrules/sg_nomad_clients_server_access.tf index c392e1b..78e5a08 100644 --- a/modules/sgrules/sg_nomad_clients_server_access.tf +++ b/modules/sgrules/sg_nomad_clients_server_access.tf @@ -10,8 +10,8 @@ resource "aws_security_group_rule" "sgr_server_to_public_services_tcp" { from_port = 4646 to_port = 4648 protocol = "tcp" - source_security_group_id = "${var.sg_id_nomad_server}" - security_group_id = "${var.sg_id_public_services_dc}" + source_security_group_id = var.sg_id_nomad_server + security_group_id = var.sg_id_public_services_dc } # rule that grants TCP ingress access from nomad-server to private-services data-center on ports @@ -24,8 +24,8 @@ resource "aws_security_group_rule" "sgr_server_to_private_services_tcp" { from_port = 4646 to_port = 4648 protocol = "tcp" - source_security_group_id = "${var.sg_id_nomad_server}" - security_group_id = "${var.sg_id_private_services_dc}" + source_security_group_id = var.sg_id_nomad_server + security_group_id = var.sg_id_private_services_dc } # rule that grants TCP ingress access from nomad-server to content-connector data-center on ports @@ -38,8 +38,8 @@ resource "aws_security_group_rule" "sgr_server_to_content_connector_tcp" { from_port = 4646 to_port = 4648 protocol = "tcp" - source_security_group_id = "${var.sg_id_nomad_server}" - security_group_id = "${var.sg_id_content_connector_dc}" + source_security_group_id = var.sg_id_nomad_server + security_group_id = var.sg_id_content_connector_dc } # rule that grants TCP ingress access from nomad-server to backoffice data-center on ports @@ -52,8 +52,8 @@ resource "aws_security_group_rule" 
"sgr_server_to_backoffice_tcp" { from_port = 4646 to_port = 4648 protocol = "tcp" - source_security_group_id = "${var.sg_id_nomad_server}" - security_group_id = "${var.sg_id_backoffice_dc}" + source_security_group_id = var.sg_id_nomad_server + security_group_id = var.sg_id_backoffice_dc } # rule that grants UDP ingress access from nomad-server to public-services data-center on ports @@ -64,8 +64,8 @@ resource "aws_security_group_rule" "sgr_server_to_public_services_udp" { from_port = 4648 to_port = 4648 protocol = "udp" - source_security_group_id = "${var.sg_id_nomad_server}" - security_group_id = "${var.sg_id_public_services_dc}" + source_security_group_id = var.sg_id_nomad_server + security_group_id = var.sg_id_public_services_dc } # rule that grants UDP ingress access from nomad-server to private-services data-center on ports @@ -76,8 +76,8 @@ resource "aws_security_group_rule" "sgr_server_to_private_services_udp" { from_port = 4648 to_port = 4648 protocol = "udp" - source_security_group_id = "${var.sg_id_nomad_server}" - security_group_id = "${var.sg_id_private_services_dc}" + source_security_group_id = var.sg_id_nomad_server + security_group_id = var.sg_id_private_services_dc } # rule that grants UDP ingress access from nomad-server to content-connector data-center on ports @@ -88,8 +88,8 @@ resource "aws_security_group_rule" "sgr_server_to_content_connector_udp" { from_port = 4648 to_port = 4648 protocol = "udp" - source_security_group_id = "${var.sg_id_nomad_server}" - security_group_id = "${var.sg_id_content_connector_dc}" + source_security_group_id = var.sg_id_nomad_server + security_group_id = var.sg_id_content_connector_dc } # rule that grants UDP ingress access from nomad-server to backoffice data-center on ports @@ -100,6 +100,7 @@ resource "aws_security_group_rule" "sgr_server_to_backoffice_udp" { from_port = 4648 to_port = 4648 protocol = "udp" - source_security_group_id = "${var.sg_id_nomad_server}" - security_group_id = "${var.sg_id_backoffice_dc}" 
+ source_security_group_id = var.sg_id_nomad_server + security_group_id = var.sg_id_backoffice_dc } + diff --git a/modules/sgrules/sg_nomad_server_clients_access.tf b/modules/sgrules/sg_nomad_server_clients_access.tf index c15cce7..f579658 100644 --- a/modules/sgrules/sg_nomad_server_clients_access.tf +++ b/modules/sgrules/sg_nomad_server_clients_access.tf @@ -10,8 +10,8 @@ resource "aws_security_group_rule" "sgr_public_services_to_server_tcp" { from_port = 4646 to_port = 4648 protocol = "tcp" - source_security_group_id = "${var.sg_id_public_services_dc}" - security_group_id = "${var.sg_id_nomad_server}" + source_security_group_id = var.sg_id_public_services_dc + security_group_id = var.sg_id_nomad_server } # rule that grants TCP ingress access from private-services data-center to nomad-server to on ports @@ -24,8 +24,8 @@ resource "aws_security_group_rule" "sgr_private_services_to_server_tcp" { from_port = 4646 to_port = 4648 protocol = "tcp" - source_security_group_id = "${var.sg_id_private_services_dc}" - security_group_id = "${var.sg_id_nomad_server}" + source_security_group_id = var.sg_id_private_services_dc + security_group_id = var.sg_id_nomad_server } # rule that grants TCP ingress access from content-connector data-center to nomad-server to on ports @@ -38,8 +38,8 @@ resource "aws_security_group_rule" "sgr_content_connector_to_server_tcp" { from_port = 4646 to_port = 4648 protocol = "tcp" - source_security_group_id = "${var.sg_id_content_connector_dc}" - security_group_id = "${var.sg_id_nomad_server}" + source_security_group_id = var.sg_id_content_connector_dc + security_group_id = var.sg_id_nomad_server } # rule that grants TCP ingress access from backoffice data-center to nomad-server to on ports @@ -52,8 +52,8 @@ resource "aws_security_group_rule" "sgr_backoffice_to_server_tcp" { from_port = 4646 to_port = 4648 protocol = "tcp" - source_security_group_id = "${var.sg_id_backoffice_dc}" - security_group_id = "${var.sg_id_nomad_server}" + 
source_security_group_id = var.sg_id_backoffice_dc + security_group_id = var.sg_id_nomad_server } # rule that grants UDP ingress access from public-services data-center to nomad-server to on ports @@ -64,8 +64,8 @@ resource "aws_security_group_rule" "sgr_public_services_to_server_udp" { from_port = 4648 to_port = 4648 protocol = "udp" - source_security_group_id = "${var.sg_id_public_services_dc}" - security_group_id = "${var.sg_id_nomad_server}" + source_security_group_id = var.sg_id_public_services_dc + security_group_id = var.sg_id_nomad_server } # rule that grants UDP ingress access from private-services data-center to nomad-server to on ports @@ -76,8 +76,8 @@ resource "aws_security_group_rule" "sgr_private_services_to_server_udp" { from_port = 4648 to_port = 4648 protocol = "udp" - source_security_group_id = "${var.sg_id_private_services_dc}" - security_group_id = "${var.sg_id_nomad_server}" + source_security_group_id = var.sg_id_private_services_dc + security_group_id = var.sg_id_nomad_server } # rule that grants UDP ingress access from content-connector data-center to nomad-server to on ports @@ -88,8 +88,8 @@ resource "aws_security_group_rule" "sgr_content_connector_to_server_udp" { from_port = 4648 to_port = 4648 protocol = "udp" - source_security_group_id = "${var.sg_id_content_connector_dc}" - security_group_id = "${var.sg_id_nomad_server}" + source_security_group_id = var.sg_id_content_connector_dc + security_group_id = var.sg_id_nomad_server } # rule that grants UDP ingress access from backoffice data-center to nomad-server to on ports @@ -100,6 +100,7 @@ resource "aws_security_group_rule" "sgr_backoffice_to_server_udp" { from_port = 4648 to_port = 4648 protocol = "udp" - source_security_group_id = "${var.sg_id_backoffice_dc}" - security_group_id = "${var.sg_id_nomad_server}" + source_security_group_id = var.sg_id_backoffice_dc + security_group_id = var.sg_id_nomad_server } + diff --git a/modules/sgrules/sg_nomad_server_consul.tf 
b/modules/sgrules/sg_nomad_server_consul.tf index 3ffa1dd..f94ca60 100644 --- a/modules/sgrules/sg_nomad_server_consul.tf +++ b/modules/sgrules/sg_nomad_server_consul.tf @@ -16,8 +16,8 @@ resource "aws_security_group_rule" "sgr_consul_to_nomad_server_tcp" { from_port = 8300 to_port = 8302 protocol = "tcp" - source_security_group_id = "${var.sg_id_consul}" - security_group_id = "${var.sg_id_nomad_server}" + source_security_group_id = var.sg_id_consul + security_group_id = var.sg_id_nomad_server } # rule granting access from consul to nomad server on ports @@ -29,6 +29,7 @@ resource "aws_security_group_rule" "sgr_consul_to_nomad_server_udp" { from_port = 8301 to_port = 8302 protocol = "udp" - source_security_group_id = "${var.sg_id_consul}" - security_group_id = "${var.sg_id_nomad_server}" + source_security_group_id = var.sg_id_consul + security_group_id = var.sg_id_nomad_server } + diff --git a/modules/sgrules/sg_nomad_server_ui_alb.tf b/modules/sgrules/sg_nomad_server_ui_alb.tf index c2af1ef..9e8b3bc 100644 --- a/modules/sgrules/sg_nomad_server_ui_alb.tf +++ b/modules/sgrules/sg_nomad_server_ui_alb.tf @@ -6,6 +6,7 @@ resource "aws_security_group_rule" "sgr_ui_alb_to_nomad_server_tcp" { from_port = 4646 to_port = 4646 protocol = "tcp" - source_security_group_id = "${var.sg_id_ui_alb_nomad}" - security_group_id = "${var.sg_id_nomad_server}" + source_security_group_id = var.sg_id_ui_alb_nomad + security_group_id = var.sg_id_nomad_server } + diff --git a/modules/sgrules/versions.tf b/modules/sgrules/versions.tf new file mode 100644 index 0000000..ac97c6a --- /dev/null +++ b/modules/sgrules/versions.tf @@ -0,0 +1,4 @@ + +terraform { + required_version = ">= 0.12" +} diff --git a/modules/ui-access/consul_ui_alb.tf b/modules/ui-access/consul_ui_alb.tf index ddee59e..9aa3bb1 100644 --- a/modules/ui-access/consul_ui_alb.tf +++ b/modules/ui-access/consul_ui_alb.tf @@ -3,64 +3,65 @@ resource "aws_alb" "alb_consul_ui" { name = "${var.stack_name}-consul-ui${var.unique_postfix}" 
internal = false - subnets = ["${var.subnet_ids}"] - security_groups = ["${aws_security_group.sg_ui_alb.id}"] + subnets = var.subnet_ids + security_groups = [aws_security_group.sg_ui_alb.id] - tags { + tags = { Name = "${var.stack_name}-consul-ui${var.unique_postfix}" } } resource "aws_autoscaling_attachment" "asg_attachment_consul_ui" { - autoscaling_group_name = "${var.consul_server_asg_name}" - alb_target_group_arn = "${aws_alb_target_group.tgr_consul_ui.arn}" + autoscaling_group_name = var.consul_server_asg_name + alb_target_group_arn = aws_alb_target_group.tgr_consul_ui.arn } resource "aws_alb_target_group" "tgr_consul_ui" { name_prefix = "consul" - port = "${var.consul_ui_port}" + port = var.consul_ui_port protocol = "HTTP" - vpc_id = "${var.vpc_id}" + vpc_id = var.vpc_id health_check { interval = 15 path = "/v1/status/leader" - port = "${var.consul_ui_port}" + port = var.consul_ui_port protocol = "HTTP" timeout = 3 healthy_threshold = 2 unhealthy_threshold = 2 } - tags { + tags = { Name = "${var.stack_name}-consul-ui${var.unique_postfix}" } } # HTTP listener, used when no https certificate is provided. resource "aws_alb_listener" "albl_http_consul_ui" { - count = "${var.ui_alb_use_https_listener? 0 : 1}" - load_balancer_arn = "${aws_alb.alb_consul_ui.arn}" + count = var.ui_alb_use_https_listener ? 0 : 1 + load_balancer_arn = aws_alb.alb_consul_ui.arn protocol = "HTTP" - port = "${local.listener_port}" + port = local.listener_port default_action { - target_group_arn = "${aws_alb_target_group.tgr_consul_ui.arn}" + target_group_arn = aws_alb_target_group.tgr_consul_ui.arn type = "forward" } } # HTTPS listener, used when a https certificate is provided. resource "aws_alb_listener" "albl_https_consul_ui" { - count = "${var.ui_alb_use_https_listener}" - load_balancer_arn = "${aws_alb.alb_consul_ui.arn}" + count = var.ui_alb_use_https_listener ? 
1 : 0 + load_balancer_arn = aws_alb.alb_consul_ui.arn protocol = "HTTPS" - port = "${local.listener_port}" - certificate_arn = "${var.ui_alb_https_listener_cert_arn}" - ssl_policy = "${local.ssl_policy}" + port = local.listener_port + certificate_arn = var.ui_alb_https_listener_cert_arn + ssl_policy = local.ssl_policy default_action { - target_group_arn = "${aws_alb_target_group.tgr_consul_ui.arn}" + target_group_arn = aws_alb_target_group.tgr_consul_ui.arn type = "forward" } } + diff --git a/modules/ui-access/fabio_ui_alb.tf b/modules/ui-access/fabio_ui_alb.tf index 196b5dd..592a9f4 100644 --- a/modules/ui-access/fabio_ui_alb.tf +++ b/modules/ui-access/fabio_ui_alb.tf @@ -3,64 +3,65 @@ resource "aws_alb" "alb_fabio_ui" { name = "${var.stack_name}-fabio-ui${var.unique_postfix}" internal = false - subnets = ["${var.subnet_ids}"] - security_groups = ["${aws_security_group.sg_ui_alb.id}"] + subnets = var.subnet_ids + security_groups = [aws_security_group.sg_ui_alb.id] - tags { + tags = { Name = "${var.stack_name}-fabio-ui${var.unique_postfix}" } } resource "aws_autoscaling_attachment" "asg_attachment_fabio_ui" { - autoscaling_group_name = "${var.fabio_server_asg_name}" - alb_target_group_arn = "${aws_alb_target_group.tgr_fabio_ui.arn}" + autoscaling_group_name = var.fabio_server_asg_name + alb_target_group_arn = aws_alb_target_group.tgr_fabio_ui.arn } resource "aws_alb_target_group" "tgr_fabio_ui" { name_prefix = "fabio" - port = "${var.fabio_ui_port}" + port = var.fabio_ui_port protocol = "HTTP" - vpc_id = "${var.vpc_id}" + vpc_id = var.vpc_id health_check { interval = 15 path = "/health" - port = "${var.fabio_ui_port}" + port = var.fabio_ui_port protocol = "HTTP" timeout = 3 healthy_threshold = 2 unhealthy_threshold = 2 } - tags { + tags = { Name = "${var.stack_name}-fabio-ui${var.unique_postfix}" } } # HTTP listener, used when no https certificate is provided. resource "aws_alb_listener" "albl_http_fabio_ui" { - count = "${var.ui_alb_use_https_listener? 
0 : 1}" - load_balancer_arn = "${aws_alb.alb_fabio_ui.arn}" + count = var.ui_alb_use_https_listener ? 0 : 1 + load_balancer_arn = aws_alb.alb_fabio_ui.arn protocol = "HTTP" - port = "${local.listener_port}" + port = local.listener_port default_action { - target_group_arn = "${aws_alb_target_group.tgr_fabio_ui.arn}" + target_group_arn = aws_alb_target_group.tgr_fabio_ui.arn type = "forward" } } # HTTPS listener, used when a https certificate is provided. resource "aws_alb_listener" "albl_https_fabio_ui" { - count = "${var.ui_alb_use_https_listener}" - load_balancer_arn = "${aws_alb.alb_fabio_ui.arn}" + count = var.ui_alb_use_https_listener ? 1 : 0 + load_balancer_arn = aws_alb.alb_fabio_ui.arn protocol = "HTTPS" - port = "${local.listener_port}" - certificate_arn = "${var.ui_alb_https_listener_cert_arn}" - ssl_policy = "${local.ssl_policy}" + port = local.listener_port + certificate_arn = var.ui_alb_https_listener_cert_arn + ssl_policy = local.ssl_policy default_action { - target_group_arn = "${aws_alb_target_group.tgr_fabio_ui.arn}" + target_group_arn = aws_alb_target_group.tgr_fabio_ui.arn type = "forward" } } + diff --git a/modules/ui-access/main.tf b/modules/ui-access/main.tf index 7d6b642..4f15414 100644 --- a/modules/ui-access/main.tf +++ b/modules/ui-access/main.tf @@ -1,5 +1,5 @@ locals { - listener_protocol = "${var.ui_alb_use_https_listener?"HTTPS":"HTTP"}" - listener_port = "${var.ui_alb_use_https_listener?"443":"80"}" + listener_protocol = var.ui_alb_use_https_listener ? "HTTPS" : "HTTP" + listener_port = var.ui_alb_use_https_listener ? 
"443" : "80" ssl_policy = "ELBSecurityPolicy-TLS-1-2-Ext-2018-06" -} +} \ No newline at end of file diff --git a/modules/ui-access/nomad_ui_alb.tf b/modules/ui-access/nomad_ui_alb.tf index b221381..010e4cd 100644 --- a/modules/ui-access/nomad_ui_alb.tf +++ b/modules/ui-access/nomad_ui_alb.tf @@ -3,64 +3,65 @@ resource "aws_alb" "alb_nomad_ui" { name = "${var.stack_name}-nomad-ui${var.unique_postfix}" internal = false - subnets = ["${var.subnet_ids}"] - security_groups = ["${aws_security_group.sg_ui_alb.id}"] + subnets = var.subnet_ids + security_groups = [aws_security_group.sg_ui_alb.id] - tags { + tags = { Name = "${var.stack_name}-nomad-ui${var.unique_postfix}" } } resource "aws_autoscaling_attachment" "asg_attachment_nomad_ui" { - autoscaling_group_name = "${var.nomad_server_asg_name}" - alb_target_group_arn = "${aws_alb_target_group.tgr_nomad_ui.arn}" + autoscaling_group_name = var.nomad_server_asg_name + alb_target_group_arn = aws_alb_target_group.tgr_nomad_ui.arn } resource "aws_alb_target_group" "tgr_nomad_ui" { name_prefix = "nomad" - port = "${var.nomad_ui_port}" + port = var.nomad_ui_port protocol = "HTTP" - vpc_id = "${var.vpc_id}" + vpc_id = var.vpc_id health_check { interval = 15 path = "/ui/jobs" - port = "${var.nomad_ui_port}" + port = var.nomad_ui_port protocol = "HTTP" timeout = 3 healthy_threshold = 2 unhealthy_threshold = 2 } - tags { + tags = { Name = "${var.stack_name}-nomad-ui${var.unique_postfix}" } } # HTTP listener, used when no https certificate is provided. resource "aws_alb_listener" "albl_http_nomad_ui" { - count = "${var.ui_alb_use_https_listener? 0 : 1}" - load_balancer_arn = "${aws_alb.alb_nomad_ui.arn}" + count = var.ui_alb_use_https_listener ? 
0 : 1 + load_balancer_arn = aws_alb.alb_nomad_ui.arn protocol = "HTTP" - port = "${local.listener_port}" + port = local.listener_port default_action { - target_group_arn = "${aws_alb_target_group.tgr_nomad_ui.arn}" + target_group_arn = aws_alb_target_group.tgr_nomad_ui.arn type = "forward" } } # HTTPS listener, used when a https certificate is provided. resource "aws_alb_listener" "albl_https_nomad_ui" { - count = "${var.ui_alb_use_https_listener}" - load_balancer_arn = "${aws_alb.alb_nomad_ui.arn}" + count = var.ui_alb_use_https_listener ? 1 : 0 + load_balancer_arn = aws_alb.alb_nomad_ui.arn protocol = "HTTPS" - port = "${local.listener_port}" - certificate_arn = "${var.ui_alb_https_listener_cert_arn}" - ssl_policy = "${local.ssl_policy}" + port = local.listener_port + certificate_arn = var.ui_alb_https_listener_cert_arn + ssl_policy = local.ssl_policy default_action { - target_group_arn = "${aws_alb_target_group.tgr_nomad_ui.arn}" + target_group_arn = aws_alb_target_group.tgr_nomad_ui.arn type = "forward" } } + diff --git a/modules/ui-access/outputs.tf b/modules/ui-access/outputs.tf index c4b6797..d51664c 100644 --- a/modules/ui-access/outputs.tf +++ b/modules/ui-access/outputs.tf @@ -1,59 +1,60 @@ output "nomad_ui_alb_zone_id" { - value = "${aws_alb.alb_nomad_ui.zone_id}" + value = aws_alb.alb_nomad_ui.zone_id } output "nomad_ui_alb_sg_id" { - value = "${aws_security_group.sg_ui_alb.id}" + value = aws_security_group.sg_ui_alb.id } output "nomad_ui_alb_dns_name" { - value = "${aws_alb.alb_nomad_ui.dns_name}" + value = aws_alb.alb_nomad_ui.dns_name } output "nomad_ui_alb_https_targetgroup_arn" { - value = "${aws_alb_target_group.tgr_nomad_ui.arn}" + value = aws_alb_target_group.tgr_nomad_ui.arn } output "nomad_ui_alb_https_listener_arn" { - value = "${aws_alb_listener.albl_https_nomad_ui.*.arn}" + value = aws_alb_listener.albl_https_nomad_ui.*.arn } output "consul_ui_alb_zone_id" { - value = "${aws_alb.alb_consul_ui.zone_id}" + value = 
aws_alb.alb_consul_ui.zone_id } output "consul_ui_alb_sg_id" { - value = "${aws_security_group.sg_ui_alb.id}" + value = aws_security_group.sg_ui_alb.id } output "consul_ui_alb_dns_name" { - value = "${aws_alb.alb_consul_ui.dns_name}" + value = aws_alb.alb_consul_ui.dns_name } output "consul_ui_alb_https_targetgroup_arn" { - value = "${aws_alb_target_group.tgr_consul_ui.arn}" + value = aws_alb_target_group.tgr_consul_ui.arn } output "consul_ui_alb_https_listener_arn" { - value = "${aws_alb_listener.albl_https_consul_ui.*.arn}" + value = aws_alb_listener.albl_https_consul_ui.*.arn } output "fabio_ui_alb_zone_id" { - value = "${aws_alb.alb_fabio_ui.zone_id}" + value = aws_alb.alb_fabio_ui.zone_id } output "fabio_ui_alb_sg_id" { - value = "${aws_security_group.sg_ui_alb.id}" + value = aws_security_group.sg_ui_alb.id } output "fabio_ui_alb_dns_name" { - value = "${aws_alb.alb_fabio_ui.dns_name}" + value = aws_alb.alb_fabio_ui.dns_name } output "fabio_ui_alb_https_targetgroup_arn" { - value = "${aws_alb_target_group.tgr_fabio_ui.arn}" + value = aws_alb_target_group.tgr_fabio_ui.arn } output "fabio_ui_alb_https_listener_arn" { - value = "${aws_alb_listener.albl_https_fabio_ui.*.arn}" + value = aws_alb_listener.albl_https_fabio_ui.*.arn } + diff --git a/modules/ui-access/ui_alb_sg.tf b/modules/ui-access/ui_alb_sg.tf index b2fd677..8439f14 100644 --- a/modules/ui-access/ui_alb_sg.tf +++ b/modules/ui-access/ui_alb_sg.tf @@ -1,9 +1,9 @@ resource "aws_security_group" "sg_ui_alb" { - vpc_id = "${var.vpc_id}" + vpc_id = var.vpc_id name = "${var.stack_name}-ui-alb${var.unique_postfix}" description = "Security group that allows ingress access to uis in backoffice." 
- tags { + tags = { Name = "${var.stack_name}-ui-alb${var.unique_postfix}" } @@ -13,19 +13,24 @@ resource "aws_security_group" "sg_ui_alb" { } locals { - keys = "${keys(var.allowed_cidr_blocks_for_ui_alb)}" + keys = keys(var.allowed_cidr_blocks_for_ui_alb) } # INGRESS UI access rules resource "aws_security_group_rule" "sgr_alb_ig_ui" { - count = "${length(local.keys)}" - description = "${element(local.keys,count.index)}: UI - igress ${local.listener_port}" - type = "ingress" - from_port = "${local.listener_port}" - to_port = "${local.listener_port}" - protocol = "tcp" - cidr_blocks = ["${lookup(var.allowed_cidr_blocks_for_ui_alb,element(local.keys,count.index),"0.0.0.0/32")}"] - security_group_id = "${aws_security_group.sg_ui_alb.id}" + count = length(local.keys) + description = "${element(local.keys, count.index)}: UI - igress ${local.listener_port}" + type = "ingress" + from_port = local.listener_port + to_port = local.listener_port + protocol = "tcp" + + cidr_blocks = [lookup( + var.allowed_cidr_blocks_for_ui_alb, + element(local.keys, count.index), + "0.0.0.0/32", + )] + security_group_id = aws_security_group.sg_ui_alb.id } # EGRESS Grants access for all tcp @@ -37,5 +42,5 @@ resource "aws_security_group_rule" "sgr_alb_egAll_ui" { protocol = "tcp" cidr_blocks = ["0.0.0.0/0"] - security_group_id = "${aws_security_group.sg_ui_alb.id}" -} + security_group_id = aws_security_group.sg_ui_alb.id +} \ No newline at end of file diff --git a/modules/ui-access/vars.tf b/modules/ui-access/vars.tf index db5e666..0a46e0d 100644 --- a/modules/ui-access/vars.tf +++ b/modules/ui-access/vars.tf @@ -5,7 +5,7 @@ variable "vpc_id" { variable "subnet_ids" { description = "Ids of the subnets to deploy the alb's into." - type = "list" + type = list(string) } variable "nomad_server_asg_name" { @@ -53,7 +53,7 @@ variable "fabio_ui_port" { variable "allowed_cidr_blocks_for_ui_alb" { description = "Map for cidr blocks that should get access over alb. The format is name:cidr-block. I.e. 
'my_cidr'='90.250.75.79/32'" - type = "map" + type = map(string) default = { "all" = "0.0.0.0/0" @@ -74,3 +74,4 @@ variable "ui_alb_use_https_listener" { description = "If true, the https endpoint for the ui-albs will be created instead of the http one. Precondition for this is that ui_alb_https_listener_cert_arn is set apropriately." default = false } + diff --git a/modules/ui-access/versions.tf b/modules/ui-access/versions.tf new file mode 100644 index 0000000..ac97c6a --- /dev/null +++ b/modules/ui-access/versions.tf @@ -0,0 +1,4 @@ + +terraform { + required_version = ">= 0.12" +} diff --git a/outputs.tf b/outputs.tf index 164b9aa..a224804 100644 --- a/outputs.tf +++ b/outputs.tf @@ -1,81 +1,81 @@ output "aws_region" { - value = "${var.aws_region}" + value = var.aws_region } output "nomad_servers_cluster_tag_key" { - value = "${module.nomad.nomad_servers_cluster_tag_key}" + value = module.nomad.nomad_servers_cluster_tag_key } output "nomad_servers_cluster_tag_value" { - value = "${module.nomad.nomad_servers_cluster_tag_value}" + value = module.nomad.nomad_servers_cluster_tag_value } output "num_nomad_servers" { - value = "${module.nomad.num_nomad_servers}" + value = module.nomad.num_nomad_servers } output "nomad_clients_public_services_cluster_tag_value" { - value = "${module.dc-public-services.cluster_tag_value}" + value = module.dc-public-services.cluster_tag_value } output "nomad_ui_alb_dns_name" { - value = "${module.ui-access.nomad_ui_alb_dns_name}" + value = module.ui-access.nomad_ui_alb_dns_name } output "nomad_ui_alb_https_targetgroup_arn" { - value = "${module.ui-access.nomad_ui_alb_https_targetgroup_arn}" + value = module.ui-access.nomad_ui_alb_https_targetgroup_arn } output "nomad_ui_alb_https_listener_arn" { - value = "${module.ui-access.nomad_ui_alb_https_listener_arn}" + value = module.ui-access.nomad_ui_alb_https_listener_arn } output "consul_ui_alb_dns_name" { - value = "${module.ui-access.consul_ui_alb_dns_name}" + value = 
module.ui-access.consul_ui_alb_dns_name } output "consul_ui_alb_https_targetgroup_arn" { - value = "${module.ui-access.consul_ui_alb_https_targetgroup_arn}" + value = module.ui-access.consul_ui_alb_https_targetgroup_arn } output "consul_ui_alb_https_listener_arn" { - value = "${module.ui-access.consul_ui_alb_https_listener_arn}" + value = module.ui-access.consul_ui_alb_https_listener_arn } output "fabio_ui_alb_dns_name" { - value = "${module.ui-access.fabio_ui_alb_dns_name}" + value = module.ui-access.fabio_ui_alb_dns_name } output "fabio_ui_alb_https_targetgroup_arn" { - value = "${module.ui-access.fabio_ui_alb_https_targetgroup_arn}" + value = module.ui-access.fabio_ui_alb_https_targetgroup_arn } output "fabio_ui_alb_https_listener_arn" { - value = "${module.ui-access.fabio_ui_alb_https_listener_arn}" + value = module.ui-access.fabio_ui_alb_https_listener_arn } output "nomad_ui_alb_zone_id" { - value = "${module.ui-access.nomad_ui_alb_zone_id}" + value = module.ui-access.nomad_ui_alb_zone_id } output "consul_ui_alb_zone_id" { - value = "${module.ui-access.consul_ui_alb_zone_id}" + value = module.ui-access.consul_ui_alb_zone_id } output "fabio_ui_alb_zone_id" { - value = "${module.ui-access.fabio_ui_alb_zone_id}" + value = module.ui-access.fabio_ui_alb_zone_id } output "vpc_id" { - value = "${var.vpc_id}" + value = var.vpc_id } output "ssh_key_name" { - value = "${var.ssh_key_name}" + value = var.ssh_key_name } output "cluster_prefix" { - value = "${module.dc-public-services.cluster_prefix}" + value = module.dc-public-services.cluster_prefix } output "dc-public-services_asg_name" { @@ -83,37 +83,38 @@ output "dc-public-services_asg_name" { } output "dc-public-services_sg_id" { - value = "${module.dc-public-services.sg_datacenter_id}" + value = module.dc-public-services.sg_datacenter_id } output "dc-public-services_alb_https_targetgroup_arn" { - value = "${module.dc-public-services.alb_https_targetgroup_arn}" + value = 
module.dc-public-services.alb_https_targetgroup_arn } output "dc-private-services_sg_id" { - value = "${module.dc-private-services.sg_datacenter_id}" + value = module.dc-private-services.sg_datacenter_id } output "dc-private-services_alb_https_targetgroup_arn" { - value = "${module.dc-private-services.alb_https_targetgroup_arn}" + value = module.dc-private-services.alb_https_targetgroup_arn } output "dc-backoffice_sg_id" { - value = "${module.dc-backoffice.sg_datacenter_id}" + value = module.dc-backoffice.sg_datacenter_id } output "dc-backoffice_alb_https_targetgroup_arn" { - value = "${module.dc-backoffice.alb_https_targetgroup_arn}" + value = module.dc-backoffice.alb_https_targetgroup_arn } output "consul_servers_sg_id" { - value = "${module.consul.security_group_id_consul_servers}" + value = module.consul.security_group_id_consul_servers } output "consul_servers_cluster_tag_key" { - value = "${module.consul.consul_servers_cluster_tag_key}" + value = module.consul.consul_servers_cluster_tag_key } output "consul_servers_cluster_tag_value" { - value = "${module.consul.consul_servers_cluster_tag_value}" + value = module.consul.consul_servers_cluster_tag_value } + diff --git a/vars.tf b/vars.tf index 2154a6c..f460a89 100644 --- a/vars.tf +++ b/vars.tf @@ -13,7 +13,7 @@ variable "vpc_id" { variable "alb_subnet_ids" { description = "Ids of the subnets to deploy the alb's into." - type = "list" + type = list(string) } #### [Nomad] Required Variables ################################################################### @@ -27,27 +27,27 @@ variable "nomad_ami_id_clients" { variable "nomad_clients_public_services_subnet_ids" { description = "Ids of the subnets to deploy the nomad client nodes providing the data-center public-services into." - type = "list" + type = list(string) } variable "nomad_clients_private_services_subnet_ids" { description = "Ids of the subnets to deploy the nomad client nodes providing the data-center private-services into." 
- type = "list" + type = list(string) } variable "nomad_clients_content_connector_subnet_ids" { description = "Ids of the subnets to deploy the nomad client nodes providing the data-center content-connector into." - type = "list" + type = list(string) } variable "nomad_clients_backoffice_subnet_ids" { description = "Ids of the subnets to deploy the nomad client nodes providing the data-center backoffice into." - type = "list" + type = list(string) } variable "nomad_server_subnet_ids" { description = "Ids of the subnets to deploy the nomad servers into." - type = "list" + type = list(string) } #### [Consul] Required Variables ################################################################## @@ -57,7 +57,7 @@ variable "consul_ami_id" { variable "consul_server_subnet_ids" { description = "Ids of the subnets to deploy the consul servers into." - type = "list" + type = list(string) } #### [General] Optional Variables ################################################################## @@ -88,13 +88,13 @@ variable "stack_name" { variable "allowed_ssh_cidr_blocks" { description = "A list of cidr block from which inbound ssh traffic should be allowed." - type = "list" + type = list(string) default = [] } variable "allowed_cidr_blocks_for_ui_alb" { description = "Map for cidr blocks that should get access over alb. The format is name:cidr-block. I.e. 'my_cidr'='90.250.75.79/32'" - type = "map" + type = map(string) default = { "all" = "0.0.0.0/0" @@ -124,7 +124,7 @@ variable "attach_backoffice_alb_listener" { #### [Nomad] Optional Variables ################################################################### variable "nomad_server_scaling_cfg" { description = "Scaling configuration for the nomad servers." - type = "map" + type = map(string) default = { "min" = 3 @@ -135,7 +135,7 @@ variable "nomad_server_scaling_cfg" { variable "nomad_private_services_dc_node_cfg" { description = "Node configuration for the nomad nodes of the private-services data center." 
- type = "map" + type = map(string) default = { "min" = 1 @@ -156,7 +156,7 @@ variable "nomad_private_services_dc_node_cfg" { # }] variable "ebs_block_devices_private_services_dc" { description = "List of ebs volume definitions for those ebs_volumes that should be added to the instances created with the EC2 launch-configurationd. Each element in the list is a map containing keys defined for ebs_block_device (see: https://www.terraform.io/docs/providers/aws/r/launch_configuration.html#ebs_block_device." - type = "list" + type = any default = [] } @@ -168,7 +168,7 @@ variable "ebs_block_devices_private_services_dc" { # Example: ["/dev/xvde:/mnt/map1","/dev/xvdf:/mnt/map2"] variable "device_to_mount_target_map_private_services_dc" { description = "List of device to mount target entries." - type = "list" + type = list(string) default = [] } @@ -184,13 +184,17 @@ variable "device_to_mount_target_map_private_services_dc" { # }] variable "additional_instance_tags_private_services_dc" { description = "List of tags to add to the private_services datacenter instances. The entries of the list are maps consiting of key, value and propagate at launch." - type = "list" - default = [] + type = list(object({ + key = string + value = string + propagate_at_launch = bool + })) + default = [] } variable "nomad_public_services_dc_node_cfg" { description = "Node configuration for the nomad nodes of the public-services data center." - type = "map" + type = map(string) default = { "min" = 1 @@ -211,7 +215,7 @@ variable "nomad_public_services_dc_node_cfg" { # }] variable "ebs_block_devices_public_services_dc" { description = "List of ebs volume definitions for those ebs_volumes that should be added to the instances of the public-services dc. Each element in the list is a map containing keys defined for ebs_block_device (see: https://www.terraform.io/docs/providers/aws/r/launch_configuration.html#ebs_block_device." 
- type = "list" + type = any default = [] } @@ -223,7 +227,7 @@ variable "ebs_block_devices_public_services_dc" { # Example: ["/dev/xvde:/mnt/map1","/dev/xvdf:/mnt/map2"] variable "device_to_mount_target_map_public_services_dc" { description = "List of device to mount target entries." - type = "list" + type = list(string) default = [] } @@ -239,13 +243,17 @@ variable "device_to_mount_target_map_public_services_dc" { # }] variable "additional_instance_tags_public_services_dc" { description = "List of tags to add to the public_services datacenter instances. The entries of the list are maps consiting of key, value and propagate at launch." - type = "list" - default = [] + type = list(object({ + key = string + value = string + propagate_at_launch = bool + })) + default = [] } variable "nomad_backoffice_dc_node_cfg" { description = "Node configuration for the nomad nodes of the backoffice data center." - type = "map" + type = map(string) default = { "min" = 1 @@ -266,7 +274,7 @@ variable "nomad_backoffice_dc_node_cfg" { # }] variable "ebs_block_devices_backoffice_dc" { description = "List of ebs volume definitions for those ebs_volumes that should be added to the instances of the backoffice dc. Each element in the list is a map containing keys defined for ebs_block_device (see: https://www.terraform.io/docs/providers/aws/r/launch_configuration.html#ebs_block_device." - type = "list" + type = any default = [] } @@ -278,7 +286,7 @@ variable "ebs_block_devices_backoffice_dc" { # Example: ["/dev/xvde:/mnt/map1","/dev/xvdf:/mnt/map2"] variable "device_to_mount_target_map_backoffice_dc" { description = "List of device to mount target entries." - type = "list" + type = list(string) default = [] } @@ -294,13 +302,17 @@ variable "device_to_mount_target_map_backoffice_dc" { # }] variable "additional_instance_tags_backoffice_dc" { description = "List of tags to add to the backoffice datacenter instances. 
The entries of the list are maps consiting of key, value and propagate at launch." - type = "list" - default = [] + type = list(object({ + key = string + value = string + propagate_at_launch = bool + })) + default = [] } variable "nomad_content_connector_dc_node_cfg" { description = "Node configuration for the nomad nodes of the content-connetor data center." - type = "map" + type = map(string) default = { "min" = 1 @@ -321,7 +333,7 @@ variable "nomad_content_connector_dc_node_cfg" { # }] variable "ebs_block_devices_content_connector_dc" { description = "List of ebs volume definitions for those ebs_volumes that should be added to the instances of the content-connector dc. Each element in the list is a map containing keys defined for ebs_block_device (see: https://www.terraform.io/docs/providers/aws/r/launch_configuration.html#ebs_block_device." - type = "list" + type = any default = [] } @@ -333,7 +345,7 @@ variable "ebs_block_devices_content_connector_dc" { # Example: ["/dev/xvde:/mnt/map1","/dev/xvdf:/mnt/map2"] variable "device_to_mount_target_map_content_connector_dc" { description = "List of device to mount target entries." - type = "list" + type = list(string) default = [] } @@ -349,8 +361,12 @@ variable "device_to_mount_target_map_content_connector_dc" { # }] variable "additional_instance_tags_content_connector_dc" { description = "List of tags to add to the content_connector datacenter instances. The entries of the list are maps consiting of key, value and propagate at launch." - type = "list" - default = [] + type = list(object({ + key = string + value = string + propagate_at_launch = bool + })) + default = [] } variable "efs_dns_name" { @@ -376,6 +392,7 @@ variable "consul_instance_type" { variable "ecr_repositories" { description = "List of names for the ECR repositories to be created. Nomad will use them to get docker images from it in the job files." 
- type = "list" + type = list(string) default = [] } + diff --git a/versions.tf b/versions.tf new file mode 100644 index 0000000..ac97c6a --- /dev/null +++ b/versions.tf @@ -0,0 +1,4 @@ + +terraform { + required_version = ">= 0.12" +}