diff --git a/CHANGELOG.md b/CHANGELOG.md new file mode 100644 index 0000000..9fed456 --- /dev/null +++ b/CHANGELOG.md @@ -0,0 +1,24 @@ +# Changelog + +## 04/08/2021 + +### Branch: `patch-rgs` + +* Remove the resource group definitions throughout modules and submodules and use separate resource group module to pass into new variables. +* Change `subnet_id` from `string` to `list(string)` to support `vnet` with multiple subnets. +* Add variable `location` to use the location of the resource_group dependency. +* Add variable `resource_group_id` + +```hcl +resource "azurerm_lb_backend_address_pool" "bepool" { + name = "${var.name}-lbe-be-pool" + loadbalancer_id = azurerm_lb.this.id + resource_group_name = var.resource_group_name + # Deprecated in future azurerm releases, ignore linting + // resource_group_name = "" +} +``` +In the `azurerm_lb_backend_address_pool` resource, `resource_group_name` is required, so it was added back in. + +* `upgrade_mode = "Manual"` (line 138 in `main.tf`) since `Automatic` was throwing errors about missing parameters not set. + diff --git a/README.md b/README.md index d7b4eff..89467fc 100644 --- a/README.md +++ b/README.md @@ -8,3 +8,11 @@ This repository is inteded to clearly demonstrate one method of deploying `rke2` We highly recommend you use the modules in this repository as stepping stones in solutions that meet the needs of your workflow and organization. If you have suggestions or areas of improvements, we would [love to hear them](https://slack.rancher.io/)! + +__WARNING:__ The leader election process is broken. To get this module to work you must select 1 server and rerun `01_rke2.sh` in `/var/lib/cloud/instances/$INSTANCE/scripts/01_rke2.sh` on subsequent server nodes to get them to join the cluster. + +The `agents` module, however, works just fine in joining the cluster once a master is present. + +## TODO: + +* Figure out missing inputs to get `upgrade_mode = "Automatic"` working. 
diff --git a/main.tf b/main.tf index 30486f5..854f3d6 100644 --- a/main.tf +++ b/main.tf @@ -7,9 +7,9 @@ locals { } } -data "azurerm_resource_group" "rg" { +/* data "azurerm_resource_group" "rg" { name = var.resource_group_name -} +} */ resource "random_string" "uid" { length = 3 @@ -28,141 +28,142 @@ module "statestore" { source = "./modules/statestore" name = local.uname - resource_group_name = data.azurerm_resource_group.rg.name - location = data.azurerm_resource_group.rg.location + resource_group_name = var.resource_group_name + location = var.location token = random_password.token.result reader_object_id = azurerm_user_assigned_identity.cluster.principal_id + subnet_ids = var.subnet_ids } # # Server Identity # -//resource "azurerm_role_definition" "server" { -// name = "${local.uname}-server" -// scope = data.azurerm_resource_group.rg.id -// -// permissions { -// actions = [ -// // Required to create, delete or update LoadBalancer for LoadBalancer service -// "Microsoft.Network/loadBalancers/delete", -// "Microsoft.Network/loadBalancers/read", -// "Microsoft.Network/loadBalancers/write", -// -// // Required to allow query, create or delete public IPs for LoadBalancer service -// "Microsoft.Network/publicIPAddresses/delete", -// "Microsoft.Network/publicIPAddresses/read", -// "Microsoft.Network/publicIPAddresses/write", -// -// // Required if public IPs from another resource group are used for LoadBalancer service -// // This is because of the linked access check when adding the public IP to LB frontendIPConfiguration -// "Microsoft.Network/publicIPAddresses/join/action", -// -// // Required to create or delete security rules for LoadBalancer service -// "Microsoft.Network/networkSecurityGroups/read", -// "Microsoft.Network/networkSecurityGroups/write", -// -// // Required to create, delete or update AzureDisks -// "Microsoft.Compute/disks/delete", -// "Microsoft.Compute/disks/read", -// "Microsoft.Compute/disks/write", -// 
"Microsoft.Compute/locations/DiskOperations/read", -// -// // Required to create, update or delete storage accounts for AzureFile or AzureDisk -// "Microsoft.Storage/storageAccounts/delete", -// "Microsoft.Storage/storageAccounts/listKeys/action", -// "Microsoft.Storage/storageAccounts/read", -// "Microsoft.Storage/storageAccounts/write", -// "Microsoft.Storage/operations/read", -// -// // Required to create, delete or update routeTables and routes for nodes -// "Microsoft.Network/routeTables/read", -// "Microsoft.Network/routeTables/routes/delete", -// "Microsoft.Network/routeTables/routes/read", -// "Microsoft.Network/routeTables/routes/write", -// "Microsoft.Network/routeTables/write", -// -// // Required to query information for VM (e.g. zones, faultdomain, size and data disks) -// "Microsoft.Compute/virtualMachines/read", -// -// // Required to attach AzureDisks to VM -// "Microsoft.Compute/virtualMachines/write", -// -// // Required to query information for vmssVM (e.g. zones, faultdomain, size and data disks) -// "Microsoft.Compute/virtualMachineScaleSets/read", -// "Microsoft.Compute/virtualMachineScaleSets/virtualMachines/read", -// "Microsoft.Compute/virtualMachineScaleSets/virtualmachines/instanceView/read", -// -// // Required to add VM to LoadBalancer backendAddressPools -// "Microsoft.Network/networkInterfaces/write", -// -// // Required to add vmss to LoadBalancer backendAddressPools -// "Microsoft.Compute/virtualMachineScaleSets/write", -// -// // Required to attach AzureDisks and add vmssVM to LB -// "Microsoft.Compute/virtualMachineScaleSets/virtualmachines/write", -// -// // Required to upgrade VMSS models to latest for all instances -// // only needed for Kubernetes 1.11.0-1.11.9, 1.12.0-1.12.8, 1.13.0-1.13.5, 1.14.0-1.14.1 -// "Microsoft.Compute/virtualMachineScaleSets/manualupgrade/action", -// -// // Required to query internal IPs and loadBalancerBackendAddressPools for VM -// "Microsoft.Network/networkInterfaces/read", -// -// // Required to 
query internal IPs and loadBalancerBackendAddressPools for vmssVM -// "Microsoft.Compute/virtualMachineScaleSets/virtualMachines/networkInterfaces/read", -// -// // Required to get public IPs for vmssVM -// "Microsoft.Compute/virtualMachineScaleSets/virtualMachines/networkInterfaces/ipconfigurations/publicipaddresses/read", -// -// // Required to check whether subnet existing for ILB in another resource group -// "Microsoft.Network/virtualNetworks/read", -// "Microsoft.Network/virtualNetworks/subnets/read", -// -// // Required to create, update or delete snapshots for AzureDisk -// "Microsoft.Compute/snapshots/delete", -// "Microsoft.Compute/snapshots/read", -// "Microsoft.Compute/snapshots/write", -// -// // Required to get vm sizes for getting AzureDisk volume limit -// "Microsoft.Compute/locations/vmSizes/read", -// "Microsoft.Compute/locations/operations/read", -// ] -// -// not_actions = [] -// } -// -// assignable_scopes = [ -// data.azurerm_resource_group.rg.id, -// ] -//} - -//resource "azurerm_role_assignment" "server" { -// scope = data.azurerm_resource_group.rg.id -// principal_id = azurerm_user_assigned_identity.server.principal_id -// role_definition_id = azurerm_role_definition.server.role_definition_id -//} -// -//resource "azurerm_user_assigned_identity" "server" { -// name = "${local.uname}-server" -// -// resource_group_name = data.azurerm_resource_group.rg.name -// location = data.azurerm_resource_group.rg.location -// -// tags = merge({}, var.tags) -//} +resource "azurerm_role_definition" "server" { + name = "${local.uname}-server" + scope = var.resource_group_id + + permissions { + actions = [ + // Required to create, delete or update LoadBalancer for LoadBalancer service + "Microsoft.Network/loadBalancers/delete", + "Microsoft.Network/loadBalancers/read", + "Microsoft.Network/loadBalancers/write", + + // Required to allow query, create or delete public IPs for LoadBalancer service + "Microsoft.Network/publicIPAddresses/delete", + 
"Microsoft.Network/publicIPAddresses/read", + "Microsoft.Network/publicIPAddresses/write", + + // Required if public IPs from another resource group are used for LoadBalancer service + // This is because of the linked access check when adding the public IP to LB frontendIPConfiguration + "Microsoft.Network/publicIPAddresses/join/action", + + // Required to create or delete security rules for LoadBalancer service + "Microsoft.Network/networkSecurityGroups/read", + "Microsoft.Network/networkSecurityGroups/write", + + // Required to create, delete or update AzureDisks + "Microsoft.Compute/disks/delete", + "Microsoft.Compute/disks/read", + "Microsoft.Compute/disks/write", + "Microsoft.Compute/locations/DiskOperations/read", + + // Required to create, update or delete storage accounts for AzureFile or AzureDisk + "Microsoft.Storage/storageAccounts/delete", + "Microsoft.Storage/storageAccounts/listKeys/action", + "Microsoft.Storage/storageAccounts/read", + "Microsoft.Storage/storageAccounts/write", + "Microsoft.Storage/operations/read", + + // Required to create, delete or update routeTables and routes for nodes + "Microsoft.Network/routeTables/read", + "Microsoft.Network/routeTables/routes/delete", + "Microsoft.Network/routeTables/routes/read", + "Microsoft.Network/routeTables/routes/write", + "Microsoft.Network/routeTables/write", + + // Required to query information for VM (e.g. zones, faultdomain, size and data disks) + "Microsoft.Compute/virtualMachines/read", + + // Required to attach AzureDisks to VM + "Microsoft.Compute/virtualMachines/write", + + // Required to query information for vmssVM (e.g. 
zones, faultdomain, size and data disks) + "Microsoft.Compute/virtualMachineScaleSets/read", + "Microsoft.Compute/virtualMachineScaleSets/virtualMachines/read", + "Microsoft.Compute/virtualMachineScaleSets/virtualmachines/instanceView/read", + + // Required to add VM to LoadBalancer backendAddressPools + "Microsoft.Network/networkInterfaces/write", + + // Required to add vmss to LoadBalancer backendAddressPools + "Microsoft.Compute/virtualMachineScaleSets/write", + + // Required to attach AzureDisks and add vmssVM to LB + "Microsoft.Compute/virtualMachineScaleSets/virtualmachines/write", + + // Required to upgrade VMSS models to latest for all instances + // only needed for Kubernetes 1.11.0-1.11.9, 1.12.0-1.12.8, 1.13.0-1.13.5, 1.14.0-1.14.1 + "Microsoft.Compute/virtualMachineScaleSets/manualupgrade/action", + + // Required to query internal IPs and loadBalancerBackendAddressPools for VM + "Microsoft.Network/networkInterfaces/read", + + // Required to query internal IPs and loadBalancerBackendAddressPools for vmssVM + "Microsoft.Compute/virtualMachineScaleSets/virtualMachines/networkInterfaces/read", + + // Required to get public IPs for vmssVM + "Microsoft.Compute/virtualMachineScaleSets/virtualMachines/networkInterfaces/ipconfigurations/publicipaddresses/read", + + // Required to check whether subnet existing for ILB in another resource group + "Microsoft.Network/virtualNetworks/read", + "Microsoft.Network/virtualNetworks/subnets/read", + + // Required to create, update or delete snapshots for AzureDisk + "Microsoft.Compute/snapshots/delete", + "Microsoft.Compute/snapshots/read", + "Microsoft.Compute/snapshots/write", + + // Required to get vm sizes for getting AzureDisk volume limit + "Microsoft.Compute/locations/vmSizes/read", + "Microsoft.Compute/locations/operations/read", + ] + + not_actions = [] + } + + assignable_scopes = [ + var.resource_group_id, + ] +} + +resource "azurerm_role_assignment" "server" { + scope = var.resource_group_id + principal_id = 
azurerm_user_assigned_identity.server.principal_id + role_definition_name = "Key Vault Secrets User" +} + +resource "azurerm_user_assigned_identity" "server" { + name = "${local.uname}-server" + + resource_group_name = var.resource_group_name + location = var.location + + tags = merge({}, var.tags) +} resource "azurerm_user_assigned_identity" "cluster" { name = "${local.uname}-cluster" - resource_group_name = data.azurerm_resource_group.rg.name - location = data.azurerm_resource_group.rg.location + resource_group_name = var.resource_group_name + location = var.location tags = merge({}, var.tags) } - -resource "azurerm_role_assignment" "cluster_vault" { - scope = data.azurerm_resource_group.rg.id +## TODO: I KNOW THIS IS BAD + resource "azurerm_role_assignment" "cluster_vault" { + scope = var.resource_group_id principal_id = azurerm_user_assigned_identity.cluster.principal_id role_definition_name = "Key Vault Secrets User" } @@ -171,7 +172,8 @@ resource "azurerm_role_assignment" "cluster_reader" { scope = module.servers.scale_set_id principal_id = azurerm_user_assigned_identity.cluster.principal_id role_definition_name = "Reader" -} +} + # # Server Network Security Group @@ -179,8 +181,8 @@ resource "azurerm_role_assignment" "cluster_reader" { resource "azurerm_network_security_group" "server" { name = "${local.uname}-rke2-server-nsg" - resource_group_name = data.azurerm_resource_group.rg.name - location = data.azurerm_resource_group.rg.location + resource_group_name = var.resource_group_name + location = var.location tags = merge({}, var.tags) } @@ -192,7 +194,7 @@ resource "azurerm_network_security_rule" "server_cp" { direction = "Inbound" priority = 101 protocol = "Tcp" - resource_group_name = data.azurerm_resource_group.rg.name + resource_group_name = var.resource_group_name source_port_range = "*" destination_port_range = "6443" @@ -207,7 +209,7 @@ resource "azurerm_network_security_rule" "server_supervisor" { direction = "Inbound" priority = 102 protocol = 
"Tcp" - resource_group_name = data.azurerm_resource_group.rg.name + resource_group_name = var.resource_group_name source_port_range = "*" destination_port_range = "9345" @@ -216,36 +218,55 @@ resource "azurerm_network_security_rule" "server_supervisor" { } # Default vnet behavior for azure, but include anyways? -//resource "azurerm_network_security_rule" "vnet" { -// name = "${local.uname}-rke2-self" -// network_security_group_name = module.servers.network_security_group_name -// access = "Allow" -// direction = "Inbound" -// priority = 1001 -// protocol = "*" -// resource_group_name = data.azurerm_resource_group.rg.name -// -// source_port_range = "*" -// destination_port_range = "*" -// source_address_prefix = "VirtualNetwork" -// destination_address_prefix = "VirtualNetwork" -//} +resource "azurerm_network_security_rule" "vnet" { + name = "${local.uname}-rke2-self" + network_security_group_name = azurerm_network_security_group.server.name + access = "Allow" + direction = "Inbound" + priority = 1001 + protocol = "*" + resource_group_name = var.resource_group_name + + source_port_range = "*" + destination_port_range = "*" + source_address_prefix = "VirtualNetwork" + destination_address_prefix = "VirtualNetwork" +} # Default outbound behavior for azure, but include anyways? 
-//resource "azurerm_network_security_rule" "server_outbound" { -// name = "${local.uname}-rke2-server-outbound" -// network_security_group_name = module.servers.network_security_group_name -// access = "Allow" -// direction = "Outbound" -// priority = 101 -// protocol = "*" -// resource_group_name = data.azurerm_resource_group.rg.name -// -// source_port_range = "*" -// destination_port_range = "*" -// source_address_prefix = "*" -// destination_address_prefix = "*" -//} +resource "azurerm_network_security_rule" "server_outbound" { + name = "${local.uname}-rke2-server-outbound" + network_security_group_name = azurerm_network_security_group.server.name + access = "Allow" + direction = "Outbound" + priority = 101 + protocol = "*" + resource_group_name = var.resource_group_name + + source_port_range = "*" + destination_port_range = "*" + source_address_prefix = "*" + destination_address_prefix = "*" +} +# +# Dev/Example settings only +# +# Open up ssh on all the nodepools +resource "azurerm_network_security_rule" "ssh" { + + name = "${local.uname}-ssh" + access = "Allow" + direction = "Inbound" + network_security_group_name = azurerm_network_security_group.server.name + priority = 201 + protocol = "Tcp" + resource_group_name = var.resource_group_name + + source_address_prefix = "*" + source_port_range = "*" + destination_address_prefix = "*" + destination_port_range = "22" +} # # Server Nodepool @@ -288,12 +309,12 @@ module "cp_lb" { source = "./modules/lb" name = local.uname - resource_group_name = data.azurerm_resource_group.rg.name - + resource_group_name = var.resource_group_name + resource_group_id = var.resource_group_id subnet_id = var.subnet_id private_ip_address = var.controlplane_loadbalancer_private_ip_address private_ip_address_allocation = var.controlplane_loadbalancer_private_ip_address_allocation - + location = var.location tags = merge({}, var.tags) } @@ -301,8 +322,8 @@ module "servers" { source = "./modules/nodepool" name = "${local.uname}-server" - 
- resource_group_name = data.azurerm_resource_group.rg.name + location = var.location + resource_group_name = var.resource_group_name virtual_network_id = var.virtual_network_id subnet_id = var.subnet_id diff --git a/modules/agents/main.tf b/modules/agents/main.tf index e0a328f..f461cf2 100644 --- a/modules/agents/main.tf +++ b/modules/agents/main.tf @@ -6,9 +6,10 @@ locals { } } -data "azurerm_resource_group" "rg" { +# Using resource group module and importing through variables +/* data "azurerm_resource_group" "rg" { name = var.resource_group_name -} +} */ # # Agent Nodepool # @@ -50,8 +51,8 @@ data "template_cloudinit_config" "init" { resource "azurerm_network_security_group" "agent" { name = "${local.name}-agent-nsg" - resource_group_name = data.azurerm_resource_group.rg.name - location = data.azurerm_resource_group.rg.location + resource_group_name = var.resource_group_name + location = var.location tags = merge({}, var.tags) } @@ -61,10 +62,10 @@ module "agents" { name = "${local.name}-agent" - resource_group_name = data.azurerm_resource_group.rg.name + resource_group_name = var.resource_group_name virtual_network_id = var.virtual_network_id subnet_id = var.subnet_id - + location = var.location admin_username = var.admin_username admin_ssh_public_key = var.admin_ssh_public_key diff --git a/modules/agents/variables.tf b/modules/agents/variables.tf index 482a6c7..48e9035 100644 --- a/modules/agents/variables.tf +++ b/modules/agents/variables.tf @@ -1,19 +1,31 @@ -variable "name" {} +variable "name" { + type = string +} -variable "resource_group_name" {} -variable "virtual_network_id" {} -variable "subnet_id" {} +variable "resource_group_name" { + type = string +} +variable "virtual_network_id" { + type = string +} +variable "subnet_id" { + type = list(string) + default = null +} variable "admin_username" { + type = string default = "rke2" } variable "admin_ssh_public_key" { default = "" + type = string } variable "assign_public_ips" { default = false + type = 
bool } variable "instances" { @@ -80,6 +92,7 @@ variable "vm_size" { variable "rke2_version" { default = "v1.19.8+rke2r1" + type = string } variable "tags" { @@ -104,6 +117,7 @@ variable "cluster_data" { variable "overprovision" { description = "(Optional) Toggle agent scaleset overprovisioning." default = true + type = bool } variable "zones" { @@ -114,26 +128,31 @@ variable "zones" { variable "zone_balance" { description = "(Optional) Toggle server balance within availability zones specified." default = null + type = bool } variable "single_placement_group" { description = "TODO: (Optional) Toggle single placement group." default = null + type = bool } variable "upgrade_mode" { description = "(Optional) Specify how upgrades should happen. Possible values are Automatic, Manual and Rolling. Defaults to Automatic." default = "Automatic" + type = string } variable "priority" { description = "(Optional) Specify the priority of the VMSS. Possible values are Regular and Spot. Defaults to Regular" default = "Regular" + type = string } variable "eviction_policy" { description = "(Optional) Specify how server instances should be evicted. Possible values are Delete and Deallocate." 
default = "Delete" + type = string } variable "dns_servers" { @@ -174,4 +193,8 @@ variable "additional_data_disks" { storage_account_type = string })) default = [] +} + +variable "location" { + type = string } \ No newline at end of file diff --git a/modules/custom_data/files/rke2-init.sh b/modules/custom_data/files/rke2-init.sh index 770ebf5..3a897e4 100644 --- a/modules/custom_data/files/rke2-init.sh +++ b/modules/custom_data/files/rke2-init.sh @@ -39,10 +39,10 @@ elect_leader() { read subscriptionId resourceGroupName virtualMachineScaleSetName < \ <(echo $(curl -s -H Metadata:true --noproxy "*" "http://169.254.169.254/metadata/instance?api-version=2020-09-01" | jq -r ".compute | .subscriptionId, .resourceGroupName, .vmScaleSetName")) - first=$(curl -s https://management.azure.com/subscriptions/$${subscriptionId}/resourceGroups/$${resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/$${virtualMachineScaleSetName}/virtualMachines?api-version=2020-12-01 \ + first=$(curl -s https://management.core.usgovcloudapi.net/subscriptions/$${subscriptionId}/resourceGroups/$${resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/$${virtualMachineScaleSetName}/virtualMachines?api-version=2020-12-01 \ -H "Authorization: Bearer $${access_token}" | jq -ej "[.value[]] | sort_by(.instanceId | tonumber) | .[0].properties.osProfile.computerName") - if [ $(hostname) = $${first} ]; then + if [[ $(hostname) = $${first} ]]; then SERVER_TYPE="leader" info "Electing as cluster leader" else @@ -85,14 +85,14 @@ cp_wait() { fetch_token() { info "Fetching rke2 join token..." 
- access_token=$(curl 'http://169.254.169.254/metadata/identity/oauth2/token?api-version=2018-02-01&resource=https%3A%2F%2Fvault.azure.net' -H Metadata:true | jq -r ".access_token") + access_token=$(curl 'http://169.254.169.254/metadata/identity/oauth2/token?api-version=2018-02-01&resource=https%3A%2F%2Fvault.usgovcloudapi.net' -H Metadata:true | jq -r ".access_token") token=$(curl '${vault_url}secrets/${token_secret}?api-version=2016-10-01' -H "Authorization: Bearer $${access_token}" | jq -r ".value") echo "token: $${token}" >> "/etc/rancher/rke2/config.yaml" } upload() { - # Wait for kubeconfig to exist, then upload to s3 bucket + # Wait for kubeconfig to exist, then upload to keyvault retries=10 while [ ! -f /etc/rancher/rke2/rke2.yaml ]; do @@ -103,7 +103,7 @@ upload() { ((retries--)) done - access_token=$(curl 'http://169.254.169.254/metadata/identity/oauth2/token?api-version=2018-02-01&resource=https%3A%2F%2Fvault.azure.net' -H Metadata:true | jq -r ".access_token") + access_token=$(curl 'http://169.254.169.254/metadata/identity/oauth2/token?api-version=2018-02-01&resource=https%3A%2F%2Fvault.usgovcloudapi.net' -H Metadata:true | jq -r ".access_token") curl -v -X PUT \ -H "Content-Type: application/json" \ @@ -134,10 +134,10 @@ post_userdata() { config fetch_token -# if [ $CCM = "true" ]; then -# append_config 'cloud-provider-name: "aws"' -# fi -# + if [ $CCM = "true" ]; then + append_config 'cloud-provider-name: "azure"' + fi + if [ $TYPE = "server" ]; then # Initialize server identify diff --git a/modules/lb/main.tf b/modules/lb/main.tf index 31511fd..3c6689f 100644 --- a/modules/lb/main.tf +++ b/modules/lb/main.tf @@ -1,6 +1,6 @@ -data "azurerm_resource_group" "rg" { +/* data "azurerm_resource_group" "rg" { name = var.resource_group_name -} +} */ resource "azurerm_public_ip" "pip" { count = var.type == "public" ? 
1 : 0 @@ -9,8 +9,8 @@ resource "azurerm_public_ip" "pip" { allocation_method = "Static" sku = "Standard" - resource_group_name = data.azurerm_resource_group.rg.name - location = data.azurerm_resource_group.rg.location + resource_group_name = var.resource_group_name + location = var.location tags = merge({}, var.tags) } @@ -18,15 +18,15 @@ resource "azurerm_public_ip" "pip" { resource "azurerm_lb" "this" { name = "${var.name}-cp" - resource_group_name = data.azurerm_resource_group.rg.name - location = data.azurerm_resource_group.rg.location + resource_group_name = var.resource_group_name + location = var.location sku = var.lb_sku frontend_ip_configuration { name = "${var.name}-lb-fe" public_ip_address_id = var.type == "public" ? azurerm_public_ip.pip[0].id : null - subnet_id = var.subnet_id + subnet_id = var.subnet_id[0] private_ip_address = var.private_ip_address private_ip_address_allocation = var.private_ip_address_allocation } @@ -41,9 +41,7 @@ resource "azurerm_lb" "this" { resource "azurerm_lb_backend_address_pool" "bepool" { name = "${var.name}-lbe-be-pool" loadbalancer_id = azurerm_lb.this.id - - # Deprecated in future azurerm releases, ignore linting - // resource_group_name = "" + resource_group_name = var.resource_group_name } # @@ -52,7 +50,7 @@ resource "azurerm_lb_backend_address_pool" "bepool" { resource "azurerm_lb_probe" "this" { name = "${var.name}-lb-cp-probe" loadbalancer_id = azurerm_lb.this.id - resource_group_name = data.azurerm_resource_group.rg.name + resource_group_name = var.resource_group_name protocol = "Tcp" interval_in_seconds = 10 @@ -64,7 +62,7 @@ resource "azurerm_lb_probe" "this" { resource "azurerm_lb_rule" "controlplane" { name = "${var.name}-cp" loadbalancer_id = azurerm_lb.this.id - resource_group_name = data.azurerm_resource_group.rg.name + resource_group_name = var.resource_group_name protocol = "Tcp" frontend_port = 6443 @@ -78,7 +76,7 @@ resource "azurerm_lb_rule" "controlplane" { resource "azurerm_lb_rule" "supervisor" { 
name = "${var.name}-supervisor" loadbalancer_id = azurerm_lb.this.id - resource_group_name = data.azurerm_resource_group.rg.name + resource_group_name = var.resource_group_name protocol = "Tcp" backend_port = 9345 @@ -92,26 +90,27 @@ resource "azurerm_lb_rule" "supervisor" { # # Load Balancer NAT Pools # -//resource "azurerm_lb_nat_pool" "controlplane" { -// name = "${var.name}-lb-nat-pool-cp" -// loadbalancer_id = azurerm_lb.this.id -// resource_group_name = data.azurerm_resource_group.rg.name -// -// frontend_ip_configuration_name = azurerm_lb.this.frontend_ip_configuration.0.name -// protocol = "Tcp" -// frontend_port_start = 6443 -// frontend_port_end = sum([6443, 1]) -// backend_port = 6443 -//} -// -//resource "azurerm_lb_nat_pool" "supervisor" { -// name = "${var.name}-lb-nat-pool-supervisor" -// loadbalancer_id = azurerm_lb.this.id -// resource_group_name = data.azurerm_resource_group.rg.name -// -// frontend_ip_configuration_name = azurerm_lb.this.frontend_ip_configuration.0.name -// protocol = "Tcp" -// backend_port = 9345 -// frontend_port_start = 9345 -// frontend_port_end = sum([9345, 1]) -//} +resource "azurerm_lb_nat_pool" "controlplane" { + name = "${var.name}-lb-nat-pool-cp" + loadbalancer_id = azurerm_lb.this.id + resource_group_name = var.resource_group_name + + frontend_ip_configuration_name = azurerm_lb.this.frontend_ip_configuration.0.name + protocol = "Tcp" + frontend_port_start = 6443 + frontend_port_end = sum([6443, 1]) + backend_port = 6443 +} + +resource "azurerm_lb_nat_pool" "supervisor" { + name = "${var.name}-lb-nat-pool-supervisor" + loadbalancer_id = azurerm_lb.this.id + resource_group_name = var.resource_group_name + + frontend_ip_configuration_name = azurerm_lb.this.frontend_ip_configuration.0.name + protocol = "Tcp" + backend_port = 9345 + frontend_port_start = 9345 + frontend_port_end = sum([9345, 1]) +} +# NAT Removed and created during vnet creation \ No newline at end of file diff --git a/modules/lb/outputs.tf 
b/modules/lb/outputs.tf index 920a3c6..120242a 100644 --- a/modules/lb/outputs.tf +++ b/modules/lb/outputs.tf @@ -6,13 +6,13 @@ output "backend_pool_id" { value = azurerm_lb_backend_address_pool.bepool.id } -//output "controlplane_nat_pool_id" { -// value = azurerm_lb_nat_pool.controlplane.id -//} -// -//output "supervisor_nat_pool_id" { -// value = azurerm_lb_nat_pool.supervisor.id -//} +output "controlplane_nat_pool_id" { + value = azurerm_lb_nat_pool.controlplane.id +} + +output "supervisor_nat_pool_id" { + value = azurerm_lb_nat_pool.supervisor.id +} output "controlplane_probe_id" { value = azurerm_lb_probe.this.id diff --git a/modules/lb/variables.tf b/modules/lb/variables.tf index 0f19839..dc371e0 100644 --- a/modules/lb/variables.tf +++ b/modules/lb/variables.tf @@ -9,7 +9,7 @@ variable "type" { } variable "subnet_id" { - type = string + type = list(string) default = null } @@ -31,4 +31,11 @@ variable "lb_sku" { variable "tags" { default = {} type = map(string) +} +variable "resource_group_id" { + type = string +} + +variable "location" { + type = string } \ No newline at end of file diff --git a/modules/nodepool/main.tf b/modules/nodepool/main.tf index fbda457..7962d78 100644 --- a/modules/nodepool/main.tf +++ b/modules/nodepool/main.tf @@ -1,14 +1,14 @@ locals {} -data "azurerm_resource_group" "rg" { +/* data "azurerm_resource_group" "rg" { name = var.resource_group_name -} +} */ resource "azurerm_linux_virtual_machine_scale_set" "this" { name = format("vm-%s", lower(replace(var.name, "/[[:^alnum:]]/", ""))) - resource_group_name = data.azurerm_resource_group.rg.name - location = data.azurerm_resource_group.rg.location + resource_group_name = var.resource_group_name + location = var.location sku = var.vm_size instances = var.instances @@ -17,6 +17,7 @@ resource "azurerm_linux_virtual_machine_scale_set" "this" { zone_balance = var.zone_balance single_placement_group = var.single_placement_group upgrade_mode = var.upgrade_mode + /* 
automatic_os_upgrade_policy = var.os_upgrade_policy */ priority = var.priority eviction_policy = var.eviction_policy health_probe_id = var.health_probe_id @@ -69,7 +70,7 @@ resource "azurerm_linux_virtual_machine_scale_set" "this" { ip_configuration { name = "ipconfig-${format("vm-%s", lower(replace(var.name, "/[[:^alnum:]]/", "")))}" primary = true - subnet_id = var.subnet_id + subnet_id = var.subnet_id[0] load_balancer_backend_address_pool_ids = var.load_balancer_backend_address_pool_ids load_balancer_inbound_nat_rules_ids = var.load_balancer_inbound_nat_rules_ids diff --git a/modules/nodepool/variables.tf b/modules/nodepool/variables.tf index fb08867..4bda228 100644 --- a/modules/nodepool/variables.tf +++ b/modules/nodepool/variables.tf @@ -1,7 +1,10 @@ variable "name" {} variable "resource_group_name" {} variable "virtual_network_id" {} -variable "subnet_id" {} +variable "subnet_id" { + type = list(string) + default = [] +} variable "admin_username" {} variable "admin_ssh_public_key" {} variable "assign_public_ips" {} @@ -92,3 +95,8 @@ variable "zone_balance" {} variable "single_placement_group" {} variable "dns_servers" {} variable "enable_accelerated_networking" {} +variable "os_upgrade_policy" { + type = bool + default = true +} +variable "location" {} \ No newline at end of file diff --git a/modules/statestore/main.tf b/modules/statestore/main.tf index 13a5d3c..94e643f 100644 --- a/modules/statestore/main.tf +++ b/modules/statestore/main.tf @@ -8,7 +8,11 @@ resource "azurerm_key_vault" "this" { sku_name = "standard" tenant_id = data.azurerm_client_config.current.tenant_id enabled_for_template_deployment = true - + network_acls { + #virtual_network_subnet_ids = var.subnet_ids + bypass = "AzureServices" + default_action = "Allow" + } tags = merge({}, var.tags) } @@ -36,10 +40,10 @@ resource "azurerm_key_vault_access_policy" "service_reader" { tenant_id = data.azurerm_client_config.current.tenant_id object_id = var.reader_object_id - key_permissions = [] - 
secret_permissions = ["Get", "Set"] - certificate_permissions = [] - storage_permissions = [] + key_permissions = ["get", "list", "create"] + secret_permissions = ["get", "list", "set"] + certificate_permissions = ["get", "list"] + storage_permissions = ["list", "set", "get"] lifecycle { create_before_destroy = true @@ -68,26 +72,4 @@ resource "azurerm_key_vault_secret" "token" { tags = merge({}, var.tags) depends_on = [azurerm_key_vault_access_policy.policy] -} - -variable "name" {} -variable "location" {} -variable "resource_group_name" {} -variable "token" {} -variable "reader_object_id" {} -variable "tags" { - type = map(string) - default = {} -} - -output "vault_url" { - value = azurerm_key_vault.this.vault_uri -} - -output "token_secret_name" { - value = azurerm_key_vault_secret.token.name -} - -output "vault_name" { - value = azurerm_key_vault.this.name } \ No newline at end of file diff --git a/modules/statestore/outputs.tf b/modules/statestore/outputs.tf new file mode 100644 index 0000000..758d4e4 --- /dev/null +++ b/modules/statestore/outputs.tf @@ -0,0 +1,11 @@ +output "vault_url" { + value = azurerm_key_vault.this.vault_uri +} + +output "token_secret_name" { + value = azurerm_key_vault_secret.token.name +} + +output "vault_name" { + value = azurerm_key_vault.this.name +} \ No newline at end of file diff --git a/modules/statestore/variables.tf b/modules/statestore/variables.tf new file mode 100644 index 0000000..4fe5c57 --- /dev/null +++ b/modules/statestore/variables.tf @@ -0,0 +1,24 @@ +variable "name" { + type = string +} +variable "location" { + type = string +} +variable "resource_group_name" { + type = string +} +variable "token" { + type = string +} +variable "reader_object_id" { + type = string +} +variable "tags" { + type = map(string) + default = {} +} + +variable "subnet_ids" { + type = list(string) + +} \ No newline at end of file diff --git a/outputs.tf b/outputs.tf index e395d62..415833e 100644 --- a/outputs.tf +++ b/outputs.tf @@ -20,4 
+20,4 @@ output "cluster_data" { token_secret = module.statestore.token_secret_name } } -} \ No newline at end of file +} diff --git a/variables.tf b/variables.tf index 1619173..27ffb07 100644 --- a/variables.tf +++ b/variables.tf @@ -2,7 +2,9 @@ variable "cluster_name" {} variable "resource_group_name" {} variable "virtual_network_id" {} -variable "subnet_id" {} +variable "subnet_id" { + type = list(string) +} variable "admin_username" { default = "rke2" @@ -187,4 +189,17 @@ variable "additional_data_disks" { storage_account_type = string })) default = [] +} + +variable "location" { + description = "resource group location" + type = string +} + +variable "resource_group_id" { + type = string +} + +variable "subnet_ids" { + type = list(string) } \ No newline at end of file