From 4d380e15e22ae33187781604f32bb2278e17ed25 Mon Sep 17 00:00:00 2001 From: lzianekhodja Date: Mon, 8 Jan 2024 14:49:31 +0100 Subject: [PATCH 01/34] Add PV for Grafana and Prometheus --- monitoring/onpremise/grafana/main.tf | 20 +++++++++++ .../onpremise/grafana/persistent-volume.tf | 35 +++++++++++++++++++ monitoring/onpremise/grafana/variables.tf | 18 ++++++++++ monitoring/onpremise/prometheus/main.tf | 20 +++++++++++ .../onpremise/prometheus/persistent-volume.tf | 35 +++++++++++++++++++ monitoring/onpremise/prometheus/variables.tf | 18 ++++++++++ 6 files changed, 146 insertions(+) create mode 100644 monitoring/onpremise/grafana/persistent-volume.tf create mode 100644 monitoring/onpremise/prometheus/persistent-volume.tf diff --git a/monitoring/onpremise/grafana/main.tf b/monitoring/onpremise/grafana/main.tf index b7b13fb85..33c829d76 100644 --- a/monitoring/onpremise/grafana/main.tf +++ b/monitoring/onpremise/grafana/main.tf @@ -50,6 +50,10 @@ resource "kubernetes_deployment" "grafana" { name = var.docker_image.image_pull_secrets } } + security_context { + run_as_user = 999 + fs_group = 999 + } container { name = "grafana" image = "${var.docker_image.image}:${var.docker_image.tag}" @@ -82,6 +86,13 @@ resource "kubernetes_deployment" "grafana" { name = "dashboards-json-configmap" mount_path = "/var/lib/grafana/dashboards/" } + dynamic "volume_mount" { + for_each = (var.persistent_volume != null && var.persistent_volume != "" ? [1] : []) + content { + name = "database" + mount_path = "/data/db" + } + } } volume { name = "datasources-configmap" @@ -111,6 +122,15 @@ resource "kubernetes_deployment" "grafana" { optional = false } } + dynamic "volume" { + for_each = (var.persistent_volume != null && var.persistent_volume != "" ? 
[1] : []) + content { + name = "database" + persistent_volume_claim { + claim_name = kubernetes_persistent_volume_claim.grafana[0].metadata[0].name + } + } + } } } } diff --git a/monitoring/onpremise/grafana/persistent-volume.tf b/monitoring/onpremise/grafana/persistent-volume.tf new file mode 100644 index 000000000..25dc13818 --- /dev/null +++ b/monitoring/onpremise/grafana/persistent-volume.tf @@ -0,0 +1,35 @@ +resource "kubernetes_storage_class" "grafana" { + count = (var.persistent_volume != null && var.persistent_volume != "" ? 1 : 0) + metadata { + name = "grafana" + labels = { + app = "grafana" + type = "storage-class" + service = "persistent-volume" + } + } + mount_options = ["tls"] + storage_provisioner = var.persistent_volume.storage_provisioner + parameters = var.persistent_volume.parameters +} + +resource "kubernetes_persistent_volume_claim" "grafana" { + count = length(kubernetes_storage_class.grafana) + metadata { + name = "grafana" + namespace = var.namespace + labels = { + app = "grafana" + type = "persistent-volume-claim" + service = "persistent-volume" + } + } + spec { + access_modes = ["ReadWriteMany"] + storage_class_name = kubernetes_storage_class.grafana[0].metadata[0].name + resources { + requests = var.persistent_volume.resources.requests + limits = var.persistent_volume.resources.limits + } + } +} diff --git a/monitoring/onpremise/grafana/variables.tf b/monitoring/onpremise/grafana/variables.tf index b7ec96f7b..74c81e3f7 100644 --- a/monitoring/onpremise/grafana/variables.tf +++ b/monitoring/onpremise/grafana/variables.tf @@ -45,3 +45,21 @@ variable "authentication" { type = bool default = false } + +# Persistent volume +variable "persistent_volume" { + description = "Persistent volume info" + type = object({ + storage_provisioner = string + parameters = map(string) + # Resources for PVC + resources = object({ + limits = object({ + storage = string + }) + requests = object({ + storage = string + }) + }) + }) +} diff --git 
a/monitoring/onpremise/prometheus/main.tf b/monitoring/onpremise/prometheus/main.tf index ebc1112ca..a9045d438 100644 --- a/monitoring/onpremise/prometheus/main.tf +++ b/monitoring/onpremise/prometheus/main.tf @@ -50,6 +50,10 @@ resource "kubernetes_deployment" "prometheus" { name = var.docker_image.image_pull_secrets } } + security_context { + run_as_user = 999 + fs_group = 999 + } container { name = "prometheus" image = "${var.docker_image.image}:${var.docker_image.tag}" @@ -68,6 +72,13 @@ resource "kubernetes_deployment" "prometheus" { mount_path = "/etc/prometheus/prometheus.yml" sub_path = "prometheus.yml" } + dynamic "volume_mount" { + for_each = (var.persistent_volume != null && var.persistent_volume != "" ? [1] : []) + content { + name = "database" + mount_path = "/data/db" + } + } } volume { name = "prometheus-configmap" @@ -76,6 +87,15 @@ resource "kubernetes_deployment" "prometheus" { optional = false } } + dynamic "volume" { + for_each = (var.persistent_volume != null && var.persistent_volume != "" ? [1] : []) + content { + name = "database" + persistent_volume_claim { + claim_name = kubernetes_persistent_volume_claim.prometheus[0].metadata[0].name + } + } + } } } } diff --git a/monitoring/onpremise/prometheus/persistent-volume.tf b/monitoring/onpremise/prometheus/persistent-volume.tf new file mode 100644 index 000000000..b6516bac7 --- /dev/null +++ b/monitoring/onpremise/prometheus/persistent-volume.tf @@ -0,0 +1,35 @@ +resource "kubernetes_storage_class" "prometheus" { + count = (var.persistent_volume != null && var.persistent_volume != "" ? 
1 : 0) + metadata { + name = "prometheus" + labels = { + app = "prometheus" + type = "storage-class" + service = "persistent-volume" + } + } + mount_options = ["tls"] + storage_provisioner = var.persistent_volume.storage_provisioner + parameters = var.persistent_volume.parameters +} + +resource "kubernetes_persistent_volume_claim" "prometheus" { + count = length(kubernetes_storage_class.prometheus) + metadata { + name = "prometheus" + namespace = var.namespace + labels = { + app = "prometheus" + type = "persistent-volume-claim" + service = "persistent-volume" + } + } + spec { + access_modes = ["ReadWriteMany"] + storage_class_name = kubernetes_storage_class.prometheus[0].metadata[0].name + resources { + requests = var.persistent_volume.resources.requests + limits = var.persistent_volume.resources.limits + } + } +} diff --git a/monitoring/onpremise/prometheus/variables.tf b/monitoring/onpremise/prometheus/variables.tf index cc0618edb..8184cbb7a 100644 --- a/monitoring/onpremise/prometheus/variables.tf +++ b/monitoring/onpremise/prometheus/variables.tf @@ -32,3 +32,21 @@ variable "metrics_exporter_url" { description = "URL of metrics exporter" type = string } + +# Persistent volume +variable "persistent_volume" { + description = "Persistent volume info" + type = object({ + storage_provisioner = string + parameters = map(string) + # Resources for PVC + resources = object({ + limits = object({ + storage = string + }) + requests = object({ + storage = string + }) + }) + }) +} From 382b71ce0526d882b3f846aa9069117234f94086 Mon Sep 17 00:00:00 2001 From: lzianekhodja Date: Tue, 9 Jan 2024 09:04:36 +0100 Subject: [PATCH 02/34] Add PVC for grafana and prometheus --- monitoring/onpremise/grafana/main.tf | 9 +++++++-- monitoring/onpremise/prometheus/main.tf | 2 +- persistent-volume/aws/efs/efs.tf | 3 ++- persistent-volume/aws/efs/outputs.tf | 3 ++- 4 files changed, 12 insertions(+), 5 deletions(-) diff --git a/monitoring/onpremise/grafana/main.tf 
b/monitoring/onpremise/grafana/main.tf index 33c829d76..65f6a5805 100644 --- a/monitoring/onpremise/grafana/main.tf +++ b/monitoring/onpremise/grafana/main.tf @@ -82,15 +82,20 @@ resource "kubernetes_deployment" "grafana" { mount_path = "/etc/grafana/grafana.ini" sub_path = "grafana.ini" } - volume_mount { + /*volume_mount { name = "dashboards-json-configmap" mount_path = "/var/lib/grafana/dashboards/" + }*/ + volume_mount { + name = "dashboards-json-configmap" + mount_path = "/var/lib/grafana/dashboards/dashboard-armonik.json" + sub_path = "dashboard-armonik.json" } dynamic "volume_mount" { for_each = (var.persistent_volume != null && var.persistent_volume != "" ? [1] : []) content { name = "database" - mount_path = "/data/db" + mount_path = "/var/lib/grafana" } } } diff --git a/monitoring/onpremise/prometheus/main.tf b/monitoring/onpremise/prometheus/main.tf index a9045d438..eb270363c 100644 --- a/monitoring/onpremise/prometheus/main.tf +++ b/monitoring/onpremise/prometheus/main.tf @@ -76,7 +76,7 @@ resource "kubernetes_deployment" "prometheus" { for_each = (var.persistent_volume != null && var.persistent_volume != "" ? 
[1] : []) content { name = "database" - mount_path = "/data/db" + mount_path = "/prometheus/data" } } } diff --git a/persistent-volume/aws/efs/efs.tf b/persistent-volume/aws/efs/efs.tf index ad8b3b3b0..7c66f89a0 100644 --- a/persistent-volume/aws/efs/efs.tf +++ b/persistent-volume/aws/efs/efs.tf @@ -1,7 +1,8 @@ # AWS EFS -module "efs" { +/*module "efs" { source = "../../../storage/aws/efs" tags = local.tags vpc = var.vpc efs = var.efs } +*/ diff --git a/persistent-volume/aws/efs/outputs.tf b/persistent-volume/aws/efs/outputs.tf index 3dd4508f0..1889a8651 100644 --- a/persistent-volume/aws/efs/outputs.tf +++ b/persistent-volume/aws/efs/outputs.tf @@ -1,4 +1,5 @@ -output "efs_id" { +/*output "efs_id" { description = "EFS id for the persistent volume" value = module.efs.id } +*/ From 0240f79f338f8bc8ee22b29040860ec9c6375d25 Mon Sep 17 00:00:00 2001 From: lzianekhodja Date: Tue, 9 Jan 2024 09:33:34 +0100 Subject: [PATCH 03/34] Update PVC for EFS --- kubernetes/aws/eks/efs-csi.tf | 160 +++++++++++++++++++++++++++ kubernetes/aws/eks/locals.tf | 6 + kubernetes/aws/eks/variables.tf | 26 +++++ persistent-volume/aws/efs/efs.tf | 4 +- persistent-volume/aws/efs/outputs.tf | 4 +- 5 files changed, 196 insertions(+), 4 deletions(-) create mode 100644 kubernetes/aws/eks/efs-csi.tf diff --git a/kubernetes/aws/eks/efs-csi.tf b/kubernetes/aws/eks/efs-csi.tf new file mode 100644 index 000000000..a074685e9 --- /dev/null +++ b/kubernetes/aws/eks/efs-csi.tf @@ -0,0 +1,160 @@ +# Allow EKS and the driver to interact with EFS +data "aws_iam_policy_document" "efs_csi_driver" { + statement { + sid = "Describe" + actions = [ + "elasticfilesystem:DescribeAccessPoints", + "elasticfilesystem:DescribeFileSystems", + "elasticfilesystem:DescribeMountTargets", + "ec2:DescribeAvailabilityZones" + ] + effect = "Allow" + resources = ["*"] + } + statement { + sid = "Create" + actions = [ + "elasticfilesystem:CreateAccessPoint" + ] + effect = "Allow" + resources = ["*"] + condition { + test = "StringLike" 
+ values = [true] + variable = "aws:RequestTag/efs.csi.aws.com/cluster" + } + } + statement { + sid = "Delete" + actions = [ + "elasticfilesystem:DeleteAccessPoint" + ] + effect = "Allow" + resources = ["*"] + condition { + test = "StringEquals" + values = [true] + variable = "aws:ResourceTag/efs.csi.aws.com/cluster" + } + } +} + +resource "aws_iam_policy" "efs_csi_driver" { + name_prefix = local.efs_csi_name + description = "Policy to allow EKS and the driver to interact with EFS" + policy = data.aws_iam_policy_document.efs_csi_driver.json + tags = local.tags +} + +data "aws_iam_openid_connect_provider" "eks_oidc" { + url = module.eks.cluster_oidc_issuer_url +} + +resource "aws_iam_role" "efs_csi_driver" { + name = local.efs_csi_name + assume_role_policy = jsonencode({ + Version = "2012-10-17" + Statement = [ + { + Effect = "Allow" + Principal = { + Federated = local.oidc_arn + } + Action = "sts:AssumeRoleWithWebIdentity" + Condition = { + StringEquals = { + #"${local.oidc_url}:aud" = "sts.amazonaws.com" + "${local.oidc_url}:sub" = "system:serviceaccount:${local.efs_csi_namespace}:efs-csi-controller-sa" + } + } + } + ] + }) + tags = local.tags +} + +resource "aws_iam_role_policy_attachment" "efs_csi_driver" { + policy_arn = aws_iam_policy.efs_csi_driver.arn + role = aws_iam_role.efs_csi_driver.name +} + +resource "kubernetes_service_account" "efs_csi_driver" { + metadata { + name = "efs-csi-controller-sa" + annotations = { + "eks.amazonaws.com/role-arn" = aws_iam_role.efs_csi_driver.arn + } + namespace = local.efs_csi_namespace + } +} + +resource "helm_release" "efs_csi" { + name = "efs-csi" + namespace = kubernetes_service_account.efs_csi_driver.metadata[0].namespace + chart = "aws-efs-csi-driver" + repository = var.eks.efs_csi.repository + version = var.eks.efs_csi.version + + set { + name = "image.repository" + value = var.eks.efs_csi.docker_images.efs_csi.image + } + set { + name = "image.tag" + value = var.eks.efs_csi.docker_images.efs_csi.tag + } + set { + 
name = "sidecars.livenessProbe.image.repository" + value = var.eks.efs_csi.docker_images.livenessprobe.image + } + set { + name = "sidecars.livenessProbe.image.tag" + value = var.eks.efs_csi.docker_images.livenessprobe.tag + } + set { + name = "sidecars.nodeDriverRegistrar.image.repository" + value = var.eks.efs_csi.docker_images.node_driver_registrar.image + } + set { + name = "sidecars.nodeDriverRegistrar.image.tag" + value = var.eks.efs_csi.docker_images.node_driver_registrar.tag + } + set { + name = "sidecars.csiProvisioner.image.repository" + value = var.eks.efs_csi.docker_images.external_provisioner.image + } + set { + name = "sidecars.csiProvisioner.image.tag" + value = var.eks.efs_csi.docker_images.external_provisioner.tag + } + set { + name = "imagePullSecrets" + value = var.eks.efs_csi.image_pull_secrets + } + + values = [ + yamlencode({ + controller = { + create = true + logLevel = 2 + extraCreateMetadata = true + tags = {} + deleteAccessPointRootDir = false + volMetricsOptIn = false + podAnnotations = {} + resources = {} + nodeSelector = var.eks.efs_csi.node_selector + tolerations = local.tolerations + affinity = {} + serviceAccount = { + create = false + name = kubernetes_service_account.efs_csi_driver.metadata[0].name + annotations = {} + } + healthPort = 9909 + regionalStsEndpoints = false + } + }) + ] +} + diff --git a/kubernetes/aws/eks/locals.tf b/kubernetes/aws/eks/locals.tf index af074df8b..b65507527 100644 --- a/kubernetes/aws/eks/locals.tf +++ b/kubernetes/aws/eks/locals.tf @@ -24,6 +24,12 @@ locals { aws_node_termination_handler_spot_name = "${var.name}-spot-termination" kubeconfig_output_path = coalesce(var.kubeconfig_file, "${path.root}/generated/kubeconfig") + # EFS CSI + efs_csi_name = try(var.eks.efs_csi.name, "efs-csi-driver") + oidc_arn = data.aws_iam_openid_connect_provider.eks_oidc.arn + oidc_url = trimprefix(module.eks.cluster_oidc_issuer_url, "https://") + efs_csi_namespace = try(var.eks.efs_csi.namespace, "kube-system") + # Custom 
ENI subnets = { subnets = [ diff --git a/kubernetes/aws/eks/variables.tf b/kubernetes/aws/eks/variables.tf index 02aba9530..c3777382e 100644 --- a/kubernetes/aws/eks/variables.tf +++ b/kubernetes/aws/eks/variables.tf @@ -112,6 +112,32 @@ variable "eks" { repository = string version = string }) + efs_csi = object({ + name = string + namespace = string + image_pull_secrets = string + node_selector = any + repository = string + version = string + docker_images = object({ + efs_csi = object({ + image = string + tag = string + }) + livenessprobe = object({ + image = string + tag = string + }) + node_driver_registrar = object({ + image = string + tag = string + }) + external_provisioner = object({ + image = string + tag = string + }) + }) + }) encryption_keys = object({ cluster_log_kms_key_id = string cluster_encryption_config = string diff --git a/persistent-volume/aws/efs/efs.tf b/persistent-volume/aws/efs/efs.tf index 7c66f89a0..01f411dd8 100644 --- a/persistent-volume/aws/efs/efs.tf +++ b/persistent-volume/aws/efs/efs.tf @@ -1,8 +1,8 @@ # AWS EFS -/*module "efs" { +module "efs" { source = "../../../storage/aws/efs" tags = local.tags vpc = var.vpc efs = var.efs } -*/ + diff --git a/persistent-volume/aws/efs/outputs.tf b/persistent-volume/aws/efs/outputs.tf index 1889a8651..2e796bdcb 100644 --- a/persistent-volume/aws/efs/outputs.tf +++ b/persistent-volume/aws/efs/outputs.tf @@ -1,5 +1,5 @@ -/*output "efs_id" { +output "efs_id" { description = "EFS id for the persistent volume" value = module.efs.id } -*/ + From df293ab840ba0bc2c0e40439ccd9b464eea8f861 Mon Sep 17 00:00:00 2001 From: lzianekhodja Date: Tue, 9 Jan 2024 11:08:20 +0100 Subject: [PATCH 04/34] Update the retrieving of oidc arn of EKS for EFS CSI --- kubernetes/aws/eks/efs-csi.tf | 4 ++-- kubernetes/aws/eks/locals.tf | 3 ++- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/kubernetes/aws/eks/efs-csi.tf b/kubernetes/aws/eks/efs-csi.tf index a074685e9..7a7a9729b 100644 --- 
a/kubernetes/aws/eks/efs-csi.tf +++ b/kubernetes/aws/eks/efs-csi.tf @@ -46,9 +46,9 @@ resource "aws_iam_policy" "efs_csi_driver" { tags = local.tags } -data "aws_iam_openid_connect_provider" "eks_oidc" { +/*data "aws_iam_openid_connect_provider" "eks_oidc" { url = module.eks.cluster_oidc_issuer_url -} +}*/ resource "aws_iam_role" "efs_csi_driver" { name = local.efs_csi_name diff --git a/kubernetes/aws/eks/locals.tf b/kubernetes/aws/eks/locals.tf index b65507527..4b86b4d8f 100644 --- a/kubernetes/aws/eks/locals.tf +++ b/kubernetes/aws/eks/locals.tf @@ -26,7 +26,8 @@ locals { # EFS CSI efs_csi_name = try(var.eks.efs_csi.name, "efs-csi-driver") - oidc_arn = data.aws_iam_openid_connect_provider.eks_oidc.arn + #oidc_arn = data.aws_iam_openid_connect_provider.eks_oidc.arn + oidc_arn = module.eks.oidc_provider_arn oidc_url = trimprefix(module.eks.cluster_oidc_issuer_url, "https://") efs_csi_namespace = try(var.eks.efs_csi.namespace, "kube-system") From 520ffaa752e1516d83f106836f813a360ff918b3 Mon Sep 17 00:00:00 2001 From: lzianekhodja Date: Tue, 9 Jan 2024 11:12:59 +0100 Subject: [PATCH 05/34] Add dependency between EFS CSI deployment and kubernetes service account used by the csi --- kubernetes/aws/eks/efs-csi.tf | 28 ++-------------------------- kubernetes/aws/eks/locals.tf | 23 ++++++++++++++++++++++- 2 files changed, 24 insertions(+), 27 deletions(-) diff --git a/kubernetes/aws/eks/efs-csi.tf b/kubernetes/aws/eks/efs-csi.tf index 7a7a9729b..b17f95db6 100644 --- a/kubernetes/aws/eks/efs-csi.tf +++ b/kubernetes/aws/eks/efs-csi.tf @@ -46,10 +46,6 @@ resource "aws_iam_policy" "efs_csi_driver" { tags = local.tags } -/*data "aws_iam_openid_connect_provider" "eks_oidc" { - url = module.eks.cluster_oidc_issuer_url -}*/ - resource "aws_iam_role" "efs_csi_driver" { name = local.efs_csi_name assume_role_policy = jsonencode({ @@ -133,28 +129,8 @@ resource "helm_release" "efs_csi" { } values = [ - yamlencode({ - controller = { - create = true - logLevel = 2 - extraCreateMetadata 
= true - tags = {} - deleteAccessPointRootDir = false - volMetricsOptIn = false - podAnnotations = {} - resources = {} - nodeSelector = var.eks.efs_csi.node_selector - tolerations = local.tolerations - affinity = {} - serviceAccount = { - create = false - name = kubernetes_service_account.efs_csi_driver.metadata[0].name - annotations = {} - } - healthPort = 9909 - regionalStsEndpoints = false - } - }) + yamlencode(local.controller) ] + depends_on = [kubernetes_service_account.efs_csi_driver] } diff --git a/kubernetes/aws/eks/locals.tf b/kubernetes/aws/eks/locals.tf index 4b86b4d8f..1cea9c931 100644 --- a/kubernetes/aws/eks/locals.tf +++ b/kubernetes/aws/eks/locals.tf @@ -26,10 +26,31 @@ locals { # EFS CSI efs_csi_name = try(var.eks.efs_csi.name, "efs-csi-driver") - #oidc_arn = data.aws_iam_openid_connect_provider.eks_oidc.arn oidc_arn = module.eks.oidc_provider_arn oidc_url = trimprefix(module.eks.cluster_oidc_issuer_url, "https://") efs_csi_namespace = try(var.eks.efs_csi.namespace, "kube-system") + controller = { + controller = { + create = true + logLevel = 2 + extraCreateMetadata = true + tags = {} + deleteAccessPointRootDir = false + volMetricsOptIn = false + podAnnotations = {} + resources = {} + nodeSelector = var.eks.efs_csi.node_selector + tolerations = local.tolerations + affinity = {} + serviceAccount = { + create = false + name = kubernetes_service_account.efs_csi_driver.metadata[0].name + annotations = {} + } + healthPort = 9909 + regionalStsEndpoints = false + } + } # Custom ENI subnets = { From 8c172cdf7c207af89fff1ef48888f49cca1872dd Mon Sep 17 00:00:00 2001 From: lzianekhodja Date: Tue, 9 Jan 2024 11:24:51 +0100 Subject: [PATCH 06/34] Reformat --- kubernetes/aws/eks/efs-csi.tf | 1 - kubernetes/aws/eks/locals.tf | 8 ++--- kubernetes/aws/eks/variables.tf | 46 ++++++++++++++-------------- kubernetes/aws/eks/versions.tf | 4 +++ persistent-volume/aws/efs/efs.tf | 1 - persistent-volume/aws/efs/outputs.tf | 1 - 6 files changed, 31 insertions(+), 30 
deletions(-) diff --git a/kubernetes/aws/eks/efs-csi.tf b/kubernetes/aws/eks/efs-csi.tf index b17f95db6..31a8a958d 100644 --- a/kubernetes/aws/eks/efs-csi.tf +++ b/kubernetes/aws/eks/efs-csi.tf @@ -133,4 +133,3 @@ resource "helm_release" "efs_csi" { ] depends_on = [kubernetes_service_account.efs_csi_driver] } - diff --git a/kubernetes/aws/eks/locals.tf b/kubernetes/aws/eks/locals.tf index 1cea9c931..58f31d3b4 100644 --- a/kubernetes/aws/eks/locals.tf +++ b/kubernetes/aws/eks/locals.tf @@ -25,10 +25,10 @@ locals { kubeconfig_output_path = coalesce(var.kubeconfig_file, "${path.root}/generated/kubeconfig") # EFS CSI - efs_csi_name = try(var.eks.efs_csi.name, "efs-csi-driver") - oidc_arn = module.eks.oidc_provider_arn - oidc_url = trimprefix(module.eks.cluster_oidc_issuer_url, "https://") - efs_csi_namespace = try(var.eks.efs_csi.namespace, "kube-system") + efs_csi_name = try(var.eks.efs_csi.name, "efs-csi-driver") + oidc_arn = module.eks.oidc_provider_arn + oidc_url = trimprefix(module.eks.cluster_oidc_issuer_url, "https://") + efs_csi_namespace = try(var.eks.efs_csi.namespace, "kube-system") controller = { controller = { create = true diff --git a/kubernetes/aws/eks/variables.tf b/kubernetes/aws/eks/variables.tf index c3777382e..4fcfae645 100644 --- a/kubernetes/aws/eks/variables.tf +++ b/kubernetes/aws/eks/variables.tf @@ -113,31 +113,31 @@ variable "eks" { version = string }) efs_csi = object({ - name = string - namespace = string - image_pull_secrets = string - node_selector = any - repository = string - version = string - docker_images = object({ - efs_csi = object({ - image = string - tag = string - }) - livenessprobe = object({ - image = string - tag = string - }) - node_driver_registrar = object({ - image = string - tag = string - }) - external_provisioner = object({ - image = string - tag = string + name = string + namespace = string + image_pull_secrets = string + node_selector = any + repository = string + version = string + docker_images = object({ + 
efs_csi = object({ + image = string + tag = string + }) + livenessprobe = object({ + image = string + tag = string + }) + node_driver_registrar = object({ + image = string + tag = string + }) + external_provisioner = object({ + image = string + tag = string + }) }) }) - }) encryption_keys = object({ cluster_log_kms_key_id = string cluster_encryption_config = string diff --git a/kubernetes/aws/eks/versions.tf b/kubernetes/aws/eks/versions.tf index a4c552a21..1f1a61933 100644 --- a/kubernetes/aws/eks/versions.tf +++ b/kubernetes/aws/eks/versions.tf @@ -5,6 +5,10 @@ terraform { source = "hashicorp/aws" version = ">= 5.3.0" } + kubernetes = { + source = "hashicorp/kubernetes" + version = ">= 2.13.0" + } helm = { source = "hashicorp/helm" version = ">= 2.10.1" diff --git a/persistent-volume/aws/efs/efs.tf b/persistent-volume/aws/efs/efs.tf index 01f411dd8..ad8b3b3b0 100644 --- a/persistent-volume/aws/efs/efs.tf +++ b/persistent-volume/aws/efs/efs.tf @@ -5,4 +5,3 @@ module "efs" { vpc = var.vpc efs = var.efs } - diff --git a/persistent-volume/aws/efs/outputs.tf b/persistent-volume/aws/efs/outputs.tf index 2e796bdcb..3dd4508f0 100644 --- a/persistent-volume/aws/efs/outputs.tf +++ b/persistent-volume/aws/efs/outputs.tf @@ -2,4 +2,3 @@ output "efs_id" { description = "EFS id for the persistent volume" value = module.efs.id } - From c2d75c350cfaa7ad29f3a117b1a5889b4c9768d3 Mon Sep 17 00:00:00 2001 From: lzianekhodja Date: Tue, 9 Jan 2024 11:53:42 +0100 Subject: [PATCH 07/34] Fix node selector and tolerations for efs csi --- kubernetes/aws/eks/locals.tf | 39 +++++++++++++++++---------------- kubernetes/aws/eks/variables.tf | 1 - 2 files changed, 20 insertions(+), 20 deletions(-) diff --git a/kubernetes/aws/eks/locals.tf b/kubernetes/aws/eks/locals.tf index 58f31d3b4..15292851e 100644 --- a/kubernetes/aws/eks/locals.tf +++ b/kubernetes/aws/eks/locals.tf @@ -24,6 +24,24 @@ locals { aws_node_termination_handler_spot_name = "${var.name}-spot-termination" kubeconfig_output_path = 
coalesce(var.kubeconfig_file, "${path.root}/generated/kubeconfig") + # Node selector + node_selector_keys = keys(var.node_selector) + node_selector_values = values(var.node_selector) + node_selector = { + nodeSelector = var.node_selector + } + efs_csi_tolerations = [ + for index in range(0, length(local.node_selector_keys)) : { + key = local.node_selector_keys[index] + operator = "Equal" + value = local.node_selector_values[index] + effect = "NoSchedule" + } + ] + tolerations = { + tolerations = local.efs_csi_tolerations + } + # EFS CSI efs_csi_name = try(var.eks.efs_csi.name, "efs-csi-driver") oidc_arn = module.eks.oidc_provider_arn @@ -39,8 +57,8 @@ locals { volMetricsOptIn = false podAnnotations = {} resources = {} - nodeSelector = var.eks.efs_csi.node_selector - tolerations = local.tolerations + nodeSelector = var.node_selector + tolerations = local.efs_csi_tolerations affinity = {} serviceAccount = { create = false @@ -63,23 +81,6 @@ locals { ] } - # Node selector - node_selector_keys = keys(var.node_selector) - node_selector_values = values(var.node_selector) - node_selector = { - nodeSelector = var.node_selector - } - tolerations = { - tolerations = [ - for index in range(0, length(local.node_selector_keys)) : { - key = local.node_selector_keys[index] - operator = "Equal" - value = local.node_selector_values[index] - effect = "NoSchedule" - } - ] - } - # Patch coredns patch_coredns_spec = { spec = { diff --git a/kubernetes/aws/eks/variables.tf b/kubernetes/aws/eks/variables.tf index 4fcfae645..ecf8892ce 100644 --- a/kubernetes/aws/eks/variables.tf +++ b/kubernetes/aws/eks/variables.tf @@ -116,7 +116,6 @@ variable "eks" { name = string namespace = string image_pull_secrets = string - node_selector = any repository = string version = string docker_images = object({ From 9028db9f62464be0e79a45a06709b8e199c56ab5 Mon Sep 17 00:00:00 2001 From: lzianekhodja Date: Tue, 9 Jan 2024 12:19:20 +0100 Subject: [PATCH 08/34] Update docker images for addons --- 
kubernetes/aws/eks/efs-csi.tf | 16 ++++++++-------- kubernetes/aws/eks/variables.tf | 34 ++++++++++++++++----------------- 2 files changed, 24 insertions(+), 26 deletions(-) diff --git a/kubernetes/aws/eks/efs-csi.tf b/kubernetes/aws/eks/efs-csi.tf index 31a8a958d..a804d9bcc 100644 --- a/kubernetes/aws/eks/efs-csi.tf +++ b/kubernetes/aws/eks/efs-csi.tf @@ -93,35 +93,35 @@ resource "helm_release" "efs_csi" { set { name = "image.repository" - value = var.eks.efs_csi.docker_images.efs_csi.image + value = var.eks.docker_images.efs_csi.image } set { name = "image.tag" - value = var.eks.efs_csi.docker_images.efs_csi.tag + value = var.eks.docker_images.efs_csi.tag } set { name = "sidecars.livenessProbe.image.repository" - value = var.eks.efs_csi.docker_images.livenessprobe.image + value = var.eks.docker_images.livenessprobe.image } set { name = "sidecars.livenessProbe.image.tag" - value = var.eks.efs_csi.docker_images.livenessprobe.tag + value = var.eks.docker_images.livenessprobe.tag } set { name = "sidecars.nodeDriverRegistrar.image.repository" - value = var.eks.efs_csi.docker_images.node_driver_registrar.image + value = var.eks.docker_images.node_driver_registrar.image } set { name = "sidecars.nodeDriverRegistrar.image.tag" - value = var.eks.efs_csi.docker_images.node_driver_registrar.tag + value = var.eks.docker_images.node_driver_registrar.tag } set { name = "sidecars.csiProvisioner.image.repository" - value = var.eks.efs_csi.docker_images.external_provisioner.image + value = var.eks.docker_images.external_provisioner.image } set { name = "sidecars.csiProvisioner.image.tag" - value = var.eks.efs_csi.docker_images.external_provisioner.tag + value = var.eks.docker_images.external_provisioner.tag } set { name = "imagePullSecrets" diff --git a/kubernetes/aws/eks/variables.tf b/kubernetes/aws/eks/variables.tf index ecf8892ce..89335f2d4 100644 --- a/kubernetes/aws/eks/variables.tf +++ b/kubernetes/aws/eks/variables.tf @@ -89,6 +89,22 @@ variable "eks" { image = string tag 
= string }) + efs_csi = object({ + image = string + tag = string + }) + livenessprobe = object({ + image = string + tag = string + }) + node_driver_registrar = object({ + image = string + tag = string + }) + external_provisioner = object({ + image = string + tag = string + }) }) cluster_autoscaler = object({ expander = string @@ -118,24 +134,6 @@ variable "eks" { image_pull_secrets = string repository = string version = string - docker_images = object({ - efs_csi = object({ - image = string - tag = string - }) - livenessprobe = object({ - image = string - tag = string - }) - node_driver_registrar = object({ - image = string - tag = string - }) - external_provisioner = object({ - image = string - tag = string - }) - }) }) encryption_keys = object({ cluster_log_kms_key_id = string From f876f2a95df2e5f7a9d52d6b5bed2b904686b281 Mon Sep 17 00:00:00 2001 From: lzianekhodja Date: Wed, 10 Jan 2024 09:31:52 +0100 Subject: [PATCH 09/34] Add parameter volume_binding_mode in PV for mongodb --- storage/onpremise/mongodb/persistent-volume.tf | 1 + storage/onpremise/mongodb/variables.tf | 1 + 2 files changed, 2 insertions(+) diff --git a/storage/onpremise/mongodb/persistent-volume.tf b/storage/onpremise/mongodb/persistent-volume.tf index caa0cbfdc..cf938a771 100644 --- a/storage/onpremise/mongodb/persistent-volume.tf +++ b/storage/onpremise/mongodb/persistent-volume.tf @@ -10,6 +10,7 @@ resource "kubernetes_storage_class" "mongodb" { } mount_options = ["tls"] storage_provisioner = var.persistent_volume.storage_provisioner + volume_binding_mode = var.persistent_volume.volume_binding_mode parameters = var.persistent_volume.parameters } diff --git a/storage/onpremise/mongodb/variables.tf b/storage/onpremise/mongodb/variables.tf index 866bed432..2ea019545 100644 --- a/storage/onpremise/mongodb/variables.tf +++ b/storage/onpremise/mongodb/variables.tf @@ -21,6 +21,7 @@ variable "persistent_volume" { description = "Persistent volume info" type = object({ storage_provisioner = string + 
volume_binding_mode = string parameters = map(string) # Resources for PVC resources = object({ From cebf9f7ea3beb8d1bb25c0aac4a0145cc5de99f2 Mon Sep 17 00:00:00 2001 From: lzianekhodja Date: Wed, 10 Jan 2024 16:08:27 +0100 Subject: [PATCH 10/34] Add parameter volume_binding_mode in PV for grafana and prometheurs --- monitoring/onpremise/grafana/persistent-volume.tf | 1 + monitoring/onpremise/grafana/variables.tf | 1 + monitoring/onpremise/prometheus/persistent-volume.tf | 1 + monitoring/onpremise/prometheus/variables.tf | 1 + 4 files changed, 4 insertions(+) diff --git a/monitoring/onpremise/grafana/persistent-volume.tf b/monitoring/onpremise/grafana/persistent-volume.tf index 25dc13818..cd6f91e55 100644 --- a/monitoring/onpremise/grafana/persistent-volume.tf +++ b/monitoring/onpremise/grafana/persistent-volume.tf @@ -10,6 +10,7 @@ resource "kubernetes_storage_class" "grafana" { } mount_options = ["tls"] storage_provisioner = var.persistent_volume.storage_provisioner + volume_binding_mode = var.persistent_volume.volume_binding_mode parameters = var.persistent_volume.parameters } diff --git a/monitoring/onpremise/grafana/variables.tf b/monitoring/onpremise/grafana/variables.tf index 74c81e3f7..c7256cf06 100644 --- a/monitoring/onpremise/grafana/variables.tf +++ b/monitoring/onpremise/grafana/variables.tf @@ -51,6 +51,7 @@ variable "persistent_volume" { description = "Persistent volume info" type = object({ storage_provisioner = string + volume_binding_mode = string parameters = map(string) # Resources for PVC resources = object({ diff --git a/monitoring/onpremise/prometheus/persistent-volume.tf b/monitoring/onpremise/prometheus/persistent-volume.tf index b6516bac7..e4d9d8901 100644 --- a/monitoring/onpremise/prometheus/persistent-volume.tf +++ b/monitoring/onpremise/prometheus/persistent-volume.tf @@ -10,6 +10,7 @@ resource "kubernetes_storage_class" "prometheus" { } mount_options = ["tls"] storage_provisioner = var.persistent_volume.storage_provisioner + 
volume_binding_mode = var.persistent_volume.volume_binding_mode parameters = var.persistent_volume.parameters } diff --git a/monitoring/onpremise/prometheus/variables.tf b/monitoring/onpremise/prometheus/variables.tf index 8184cbb7a..57861dc8c 100644 --- a/monitoring/onpremise/prometheus/variables.tf +++ b/monitoring/onpremise/prometheus/variables.tf @@ -38,6 +38,7 @@ variable "persistent_volume" { description = "Persistent volume info" type = object({ storage_provisioner = string + volume_binding_mode = string parameters = map(string) # Resources for PVC resources = object({ From 19b251c7ef529b86f40dd3c2000c625dd8758d8c Mon Sep 17 00:00:00 2001 From: lzianekhodja Date: Thu, 11 Jan 2024 09:15:03 +0100 Subject: [PATCH 11/34] Fix efs csi --- kubernetes/aws/eks/efs-csi.tf | 13 +++++++++++++ kubernetes/aws/eks/locals.tf | 25 ++++++++++++++++--------- 2 files changed, 29 insertions(+), 9 deletions(-) diff --git a/kubernetes/aws/eks/efs-csi.tf b/kubernetes/aws/eks/efs-csi.tf index a804d9bcc..3c51caa34 100644 --- a/kubernetes/aws/eks/efs-csi.tf +++ b/kubernetes/aws/eks/efs-csi.tf @@ -37,6 +37,19 @@ data "aws_iam_policy_document" "efs_csi_driver" { variable = "aws:ResourceTag/efs.csi.aws.com/cluster" } } + statement { + sid = "TagResource" + actions = [ + "elasticfilesystem:TagResource" + ] + effect = "Allow" + resources = ["*"] + condition { + test = "StringLike" + values = [true] + variable = "aws:ResourceTag/efs.csi.aws.com/cluster" + } + } } resource "aws_iam_policy" "efs_csi_driver" { diff --git a/kubernetes/aws/eks/locals.tf b/kubernetes/aws/eks/locals.tf index 15292851e..34b4f19d0 100644 --- a/kubernetes/aws/eks/locals.tf +++ b/kubernetes/aws/eks/locals.tf @@ -30,16 +30,15 @@ locals { node_selector = { nodeSelector = var.node_selector } - efs_csi_tolerations = [ - for index in range(0, length(local.node_selector_keys)) : { - key = local.node_selector_keys[index] - operator = "Equal" - value = local.node_selector_values[index] - effect = "NoSchedule" - } - ] 
tolerations = { - tolerations = local.efs_csi_tolerations + tolerations = [ + for index in range(0, length(local.node_selector_keys)) : { + key = local.node_selector_keys[index] + operator = "Equal" + value = local.node_selector_values[index] + effect = "NoSchedule" + } + ] } # EFS CSI @@ -47,6 +46,14 @@ locals { oidc_arn = module.eks.oidc_provider_arn oidc_url = trimprefix(module.eks.cluster_oidc_issuer_url, "https://") efs_csi_namespace = try(var.eks.efs_csi.namespace, "kube-system") + efs_csi_tolerations = [ + for index in range(0, length(local.node_selector_keys)) : { + key = local.node_selector_keys[index] + operator = "Equal" + value = local.node_selector_values[index] + effect = "NoSchedule" + } + ] controller = { controller = { create = true From ce62db374dbb7c1ec16cd569f7413e3efd3de906 Mon Sep 17 00:00:00 2001 From: lzianekhodja Date: Thu, 11 Jan 2024 09:23:42 +0100 Subject: [PATCH 12/34] remove node selector for efs csi --- kubernetes/aws/eks/locals.tf | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/kubernetes/aws/eks/locals.tf b/kubernetes/aws/eks/locals.tf index 34b4f19d0..c4ca88534 100644 --- a/kubernetes/aws/eks/locals.tf +++ b/kubernetes/aws/eks/locals.tf @@ -64,8 +64,8 @@ locals { volMetricsOptIn = false podAnnotations = {} resources = {} - nodeSelector = var.node_selector - tolerations = local.efs_csi_tolerations +# nodeSelector = var.node_selector +# tolerations = local.efs_csi_tolerations affinity = {} serviceAccount = { create = false From fad832e6bb6c16d51d974889d90464f4b818fb5f Mon Sep 17 00:00:00 2001 From: lzianekhodja Date: Thu, 11 Jan 2024 09:40:44 +0100 Subject: [PATCH 13/34] remove node selector for efs csi --- kubernetes/aws/eks/efs-csi.tf | 10 ++++++++++ kubernetes/aws/eks/locals.tf | 10 +++++----- 2 files changed, 15 insertions(+), 5 deletions(-) diff --git a/kubernetes/aws/eks/efs-csi.tf b/kubernetes/aws/eks/efs-csi.tf index 3c51caa34..8cd467371 100644 --- a/kubernetes/aws/eks/efs-csi.tf +++ 
b/kubernetes/aws/eks/efs-csi.tf @@ -50,6 +50,16 @@ data "aws_iam_policy_document" "efs_csi_driver" { variable = "aws:ResourceTag/efs.csi.aws.com/cluster" } } + statement { + sid = "Mount" + actions = [ + "elasticfilesystem:ClientRootAccess", + "elasticfilesystem:ClientWrite", + "elasticfilesystem:ClientMount" + ] + effect = "Allow" + resources = ["*"] + } } resource "aws_iam_policy" "efs_csi_driver" { diff --git a/kubernetes/aws/eks/locals.tf b/kubernetes/aws/eks/locals.tf index c4ca88534..8388786d5 100644 --- a/kubernetes/aws/eks/locals.tf +++ b/kubernetes/aws/eks/locals.tf @@ -46,14 +46,14 @@ locals { oidc_arn = module.eks.oidc_provider_arn oidc_url = trimprefix(module.eks.cluster_oidc_issuer_url, "https://") efs_csi_namespace = try(var.eks.efs_csi.namespace, "kube-system") - efs_csi_tolerations = [ + /*efs_csi_tolerations = [ for index in range(0, length(local.node_selector_keys)) : { key = local.node_selector_keys[index] operator = "Equal" value = local.node_selector_values[index] effect = "NoSchedule" } - ] + ]*/ controller = { controller = { create = true @@ -64,9 +64,9 @@ locals { volMetricsOptIn = false podAnnotations = {} resources = {} -# nodeSelector = var.node_selector -# tolerations = local.efs_csi_tolerations - affinity = {} + # nodeSelector = var.node_selector + # tolerations = local.efs_csi_tolerations + affinity = {} serviceAccount = { create = false name = kubernetes_service_account.efs_csi_driver.metadata[0].name From aa7090d154bfd8d283b584db7c92fa37b5591ec3 Mon Sep 17 00:00:00 2001 From: lzianekhodja Date: Thu, 11 Jan 2024 10:59:58 +0100 Subject: [PATCH 14/34] Add implicit dependency in PVC --- monitoring/onpremise/grafana/main.tf | 4 ++-- monitoring/onpremise/grafana/persistent-volume.tf | 4 ++-- monitoring/onpremise/prometheus/main.tf | 4 ++-- monitoring/onpremise/prometheus/persistent-volume.tf | 4 ++-- storage/onpremise/mongodb/main.tf | 4 ++-- storage/onpremise/mongodb/persistent-volume.tf | 4 ++-- 6 files changed, 12 insertions(+), 12 
deletions(-) diff --git a/monitoring/onpremise/grafana/main.tf b/monitoring/onpremise/grafana/main.tf index 65f6a5805..a61384a26 100644 --- a/monitoring/onpremise/grafana/main.tf +++ b/monitoring/onpremise/grafana/main.tf @@ -92,7 +92,7 @@ resource "kubernetes_deployment" "grafana" { sub_path = "dashboard-armonik.json" } dynamic "volume_mount" { - for_each = (var.persistent_volume != null && var.persistent_volume != "" ? [1] : []) + for_each = length(kubernetes_persistent_volume_claim.grafana) > 0 ? [1] : [] content { name = "database" mount_path = "/var/lib/grafana" @@ -128,7 +128,7 @@ resource "kubernetes_deployment" "grafana" { } } dynamic "volume" { - for_each = (var.persistent_volume != null && var.persistent_volume != "" ? [1] : []) + for_each = length(kubernetes_persistent_volume_claim.grafana) > 0 ? [1] : [] content { name = "database" persistent_volume_claim { diff --git a/monitoring/onpremise/grafana/persistent-volume.tf b/monitoring/onpremise/grafana/persistent-volume.tf index cd6f91e55..0e564d253 100644 --- a/monitoring/onpremise/grafana/persistent-volume.tf +++ b/monitoring/onpremise/grafana/persistent-volume.tf @@ -1,5 +1,5 @@ resource "kubernetes_storage_class" "grafana" { - count = (var.persistent_volume != null && var.persistent_volume != "" ? 1 : 0) + count = can(coalesce(var.persistent_volume)) ? 1 : 0 metadata { name = "grafana" labels = { @@ -15,7 +15,7 @@ resource "kubernetes_storage_class" "grafana" { } resource "kubernetes_persistent_volume_claim" "grafana" { - count = length(kubernetes_storage_class.grafana) + count = length(kubernetes_storage_class.grafana) > 0 ? 
1 : 0 metadata { name = "grafana" namespace = var.namespace diff --git a/monitoring/onpremise/prometheus/main.tf b/monitoring/onpremise/prometheus/main.tf index eb270363c..62b304085 100644 --- a/monitoring/onpremise/prometheus/main.tf +++ b/monitoring/onpremise/prometheus/main.tf @@ -73,7 +73,7 @@ resource "kubernetes_deployment" "prometheus" { sub_path = "prometheus.yml" } dynamic "volume_mount" { - for_each = (var.persistent_volume != null && var.persistent_volume != "" ? [1] : []) + for_each = length(kubernetes_persistent_volume_claim.prometheus) > 0 ? [1] : [] content { name = "database" mount_path = "/prometheus/data" @@ -88,7 +88,7 @@ resource "kubernetes_deployment" "prometheus" { } } dynamic "volume" { - for_each = (var.persistent_volume != null && var.persistent_volume != "" ? [1] : []) + for_each = length(kubernetes_persistent_volume_claim.prometheus) > 0 ? [1] : [] content { name = "database" persistent_volume_claim { diff --git a/monitoring/onpremise/prometheus/persistent-volume.tf b/monitoring/onpremise/prometheus/persistent-volume.tf index e4d9d8901..2c4087af7 100644 --- a/monitoring/onpremise/prometheus/persistent-volume.tf +++ b/monitoring/onpremise/prometheus/persistent-volume.tf @@ -1,5 +1,5 @@ resource "kubernetes_storage_class" "prometheus" { - count = (var.persistent_volume != null && var.persistent_volume != "" ? 1 : 0) + count = can(coalesce(var.persistent_volume)) ? 1 : 0 metadata { name = "prometheus" labels = { @@ -15,7 +15,7 @@ resource "kubernetes_storage_class" "prometheus" { } resource "kubernetes_persistent_volume_claim" "prometheus" { - count = length(kubernetes_storage_class.prometheus) + count = length(kubernetes_storage_class.prometheus) > 0 ? 
1 : 0 metadata { name = "prometheus" namespace = var.namespace diff --git a/storage/onpremise/mongodb/main.tf b/storage/onpremise/mongodb/main.tf index 7268098df..2c5354039 100644 --- a/storage/onpremise/mongodb/main.tf +++ b/storage/onpremise/mongodb/main.tf @@ -91,7 +91,7 @@ resource "kubernetes_deployment" "mongodb" { mount_path = "/start/" } dynamic "volume_mount" { - for_each = (var.persistent_volume != null && var.persistent_volume != "" ? [1] : []) + for_each = length(kubernetes_persistent_volume_claim.mongodb) > 0 ? [1] : [] content { name = "database" mount_path = "/data/db" @@ -127,7 +127,7 @@ resource "kubernetes_deployment" "mongodb" { } } dynamic "volume" { - for_each = (var.persistent_volume != null && var.persistent_volume != "" ? [1] : []) + for_each = length(kubernetes_persistent_volume_claim.mongodb) > 0 ? [1] : [] content { name = "database" persistent_volume_claim { diff --git a/storage/onpremise/mongodb/persistent-volume.tf b/storage/onpremise/mongodb/persistent-volume.tf index cf938a771..625ba814e 100644 --- a/storage/onpremise/mongodb/persistent-volume.tf +++ b/storage/onpremise/mongodb/persistent-volume.tf @@ -1,5 +1,5 @@ resource "kubernetes_storage_class" "mongodb" { - count = (var.persistent_volume != null && var.persistent_volume != "" ? 1 : 0) + count = can(coalesce(var.persistent_volume)) ? 1 : 0 metadata { name = "mongodb" labels = { @@ -15,7 +15,7 @@ resource "kubernetes_storage_class" "mongodb" { } resource "kubernetes_persistent_volume_claim" "mongodb" { - count = length(kubernetes_storage_class.mongodb) + count = length(kubernetes_storage_class.mongodb) > 0 ? 
1 : 0 metadata { name = "mongodb" namespace = var.namespace From 0725aae3ea01fd1d9a829d51422103797ae3b4b0 Mon Sep 17 00:00:00 2001 From: lzianekhodja Date: Thu, 11 Jan 2024 11:16:48 +0100 Subject: [PATCH 15/34] update prometheus and grafana --- monitoring/onpremise/grafana/main.tf | 27 +------------------------ monitoring/onpremise/prometheus/main.tf | 20 ------------------ 2 files changed, 1 insertion(+), 46 deletions(-) diff --git a/monitoring/onpremise/grafana/main.tf b/monitoring/onpremise/grafana/main.tf index a61384a26..b7b13fb85 100644 --- a/monitoring/onpremise/grafana/main.tf +++ b/monitoring/onpremise/grafana/main.tf @@ -50,10 +50,6 @@ resource "kubernetes_deployment" "grafana" { name = var.docker_image.image_pull_secrets } } - security_context { - run_as_user = 999 - fs_group = 999 - } container { name = "grafana" image = "${var.docker_image.image}:${var.docker_image.tag}" @@ -82,21 +78,9 @@ resource "kubernetes_deployment" "grafana" { mount_path = "/etc/grafana/grafana.ini" sub_path = "grafana.ini" } - /*volume_mount { - name = "dashboards-json-configmap" - mount_path = "/var/lib/grafana/dashboards/" - }*/ volume_mount { name = "dashboards-json-configmap" - mount_path = "/var/lib/grafana/dashboards/dashboard-armonik.json" - sub_path = "dashboard-armonik.json" - } - dynamic "volume_mount" { - for_each = length(kubernetes_persistent_volume_claim.grafana) > 0 ? [1] : [] - content { - name = "database" - mount_path = "/var/lib/grafana" - } + mount_path = "/var/lib/grafana/dashboards/" } } volume { @@ -127,15 +111,6 @@ resource "kubernetes_deployment" "grafana" { optional = false } } - dynamic "volume" { - for_each = length(kubernetes_persistent_volume_claim.grafana) > 0 ? 
[1] : [] - content { - name = "database" - persistent_volume_claim { - claim_name = kubernetes_persistent_volume_claim.grafana[0].metadata[0].name - } - } - } } } } diff --git a/monitoring/onpremise/prometheus/main.tf b/monitoring/onpremise/prometheus/main.tf index 62b304085..ebc1112ca 100644 --- a/monitoring/onpremise/prometheus/main.tf +++ b/monitoring/onpremise/prometheus/main.tf @@ -50,10 +50,6 @@ resource "kubernetes_deployment" "prometheus" { name = var.docker_image.image_pull_secrets } } - security_context { - run_as_user = 999 - fs_group = 999 - } container { name = "prometheus" image = "${var.docker_image.image}:${var.docker_image.tag}" @@ -72,13 +68,6 @@ resource "kubernetes_deployment" "prometheus" { mount_path = "/etc/prometheus/prometheus.yml" sub_path = "prometheus.yml" } - dynamic "volume_mount" { - for_each = length(kubernetes_persistent_volume_claim.prometheus) > 0 ? [1] : [] - content { - name = "database" - mount_path = "/prometheus/data" - } - } } volume { name = "prometheus-configmap" @@ -87,15 +76,6 @@ resource "kubernetes_deployment" "prometheus" { optional = false } } - dynamic "volume" { - for_each = length(kubernetes_persistent_volume_claim.prometheus) > 0 ? 
[1] : [] - content { - name = "database" - persistent_volume_claim { - claim_name = kubernetes_persistent_volume_claim.prometheus[0].metadata[0].name - } - } - } } } } From 27ab185ddaabe263bb618c360c3f57663ebd6d45 Mon Sep 17 00:00:00 2001 From: lzianekhodja Date: Thu, 11 Jan 2024 11:18:59 +0100 Subject: [PATCH 16/34] update prometheus and grafana --- monitoring/onpremise/grafana/main.tf | 27 ++++++++++++++++++++++++- monitoring/onpremise/prometheus/main.tf | 20 ++++++++++++++++++ 2 files changed, 46 insertions(+), 1 deletion(-) diff --git a/monitoring/onpremise/grafana/main.tf b/monitoring/onpremise/grafana/main.tf index b7b13fb85..a61384a26 100644 --- a/monitoring/onpremise/grafana/main.tf +++ b/monitoring/onpremise/grafana/main.tf @@ -50,6 +50,10 @@ resource "kubernetes_deployment" "grafana" { name = var.docker_image.image_pull_secrets } } + security_context { + run_as_user = 999 + fs_group = 999 + } container { name = "grafana" image = "${var.docker_image.image}:${var.docker_image.tag}" @@ -78,9 +82,21 @@ resource "kubernetes_deployment" "grafana" { mount_path = "/etc/grafana/grafana.ini" sub_path = "grafana.ini" } - volume_mount { + /*volume_mount { name = "dashboards-json-configmap" mount_path = "/var/lib/grafana/dashboards/" + }*/ + volume_mount { + name = "dashboards-json-configmap" + mount_path = "/var/lib/grafana/dashboards/dashboard-armonik.json" + sub_path = "dashboard-armonik.json" + } + dynamic "volume_mount" { + for_each = length(kubernetes_persistent_volume_claim.grafana) > 0 ? [1] : [] + content { + name = "database" + mount_path = "/var/lib/grafana" + } } } volume { @@ -111,6 +127,15 @@ resource "kubernetes_deployment" "grafana" { optional = false } } + dynamic "volume" { + for_each = length(kubernetes_persistent_volume_claim.grafana) > 0 ? 
[1] : [] + content { + name = "database" + persistent_volume_claim { + claim_name = kubernetes_persistent_volume_claim.grafana[0].metadata[0].name + } + } + } } } } diff --git a/monitoring/onpremise/prometheus/main.tf b/monitoring/onpremise/prometheus/main.tf index ebc1112ca..62b304085 100644 --- a/monitoring/onpremise/prometheus/main.tf +++ b/monitoring/onpremise/prometheus/main.tf @@ -50,6 +50,10 @@ resource "kubernetes_deployment" "prometheus" { name = var.docker_image.image_pull_secrets } } + security_context { + run_as_user = 999 + fs_group = 999 + } container { name = "prometheus" image = "${var.docker_image.image}:${var.docker_image.tag}" @@ -68,6 +72,13 @@ resource "kubernetes_deployment" "prometheus" { mount_path = "/etc/prometheus/prometheus.yml" sub_path = "prometheus.yml" } + dynamic "volume_mount" { + for_each = length(kubernetes_persistent_volume_claim.prometheus) > 0 ? [1] : [] + content { + name = "database" + mount_path = "/prometheus/data" + } + } } volume { name = "prometheus-configmap" @@ -76,6 +87,15 @@ resource "kubernetes_deployment" "prometheus" { optional = false } } + dynamic "volume" { + for_each = length(kubernetes_persistent_volume_claim.prometheus) > 0 ? 
[1] : [] + content { + name = "database" + persistent_volume_claim { + claim_name = kubernetes_persistent_volume_claim.prometheus[0].metadata[0].name + } + } + } } } } From 5cd1a4122abc83d78f37938b0e19cc6178c3af24 Mon Sep 17 00:00:00 2001 From: lzianekhodja Date: Thu, 11 Jan 2024 11:23:08 +0100 Subject: [PATCH 17/34] change security context of prometheus --- monitoring/onpremise/prometheus/main.tf | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/monitoring/onpremise/prometheus/main.tf b/monitoring/onpremise/prometheus/main.tf index 62b304085..b40f68c52 100644 --- a/monitoring/onpremise/prometheus/main.tf +++ b/monitoring/onpremise/prometheus/main.tf @@ -51,8 +51,8 @@ resource "kubernetes_deployment" "prometheus" { } } security_context { - run_as_user = 999 - fs_group = 999 + run_as_user = 1000 + fs_group = 2000 } container { name = "prometheus" From 6af014433f21ae61ca347f577668fe69764b766f Mon Sep 17 00:00:00 2001 From: lzianekhodja Date: Thu, 11 Jan 2024 11:28:03 +0100 Subject: [PATCH 18/34] fix security context of prometheus --- monitoring/onpremise/prometheus/main.tf | 2 ++ 1 file changed, 2 insertions(+) diff --git a/monitoring/onpremise/prometheus/main.tf b/monitoring/onpremise/prometheus/main.tf index b40f68c52..e734a5564 100644 --- a/monitoring/onpremise/prometheus/main.tf +++ b/monitoring/onpremise/prometheus/main.tf @@ -52,6 +52,8 @@ resource "kubernetes_deployment" "prometheus" { } security_context { run_as_user = 1000 + run_as_non_root = true + run_as_group = 65534 fs_group = 2000 } container { From 263dbedda973e0dc062671dbb19f3cf016d1b245 Mon Sep 17 00:00:00 2001 From: lzianekhodja Date: Thu, 11 Jan 2024 11:39:02 +0100 Subject: [PATCH 19/34] fix security context of prometheus (1) --- monitoring/onpremise/prometheus/main.tf | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/monitoring/onpremise/prometheus/main.tf b/monitoring/onpremise/prometheus/main.tf index e734a5564..500176135 100644 --- 
a/monitoring/onpremise/prometheus/main.tf +++ b/monitoring/onpremise/prometheus/main.tf @@ -51,10 +51,10 @@ resource "kubernetes_deployment" "prometheus" { } } security_context { - run_as_user = 1000 + run_as_user = 65534 run_as_non_root = true run_as_group = 65534 - fs_group = 2000 + fs_group = 65534 } container { name = "prometheus" From b6b7d420e619a3c6edcc3a89da099a5b37047746 Mon Sep 17 00:00:00 2001 From: lzianekhodja Date: Thu, 11 Jan 2024 13:02:39 +0100 Subject: [PATCH 20/34] variablize security context for mongodb, prometheus, grafana --- monitoring/onpremise/grafana/main.tf | 4 ++-- monitoring/onpremise/grafana/variables.tf | 8 ++++++++ monitoring/onpremise/prometheus/main.tf | 6 +++--- monitoring/onpremise/prometheus/variables.tf | 8 ++++++++ storage/onpremise/mongodb/main.tf | 4 ++-- storage/onpremise/mongodb/variables.tf | 4 ++++ 6 files changed, 27 insertions(+), 7 deletions(-) diff --git a/monitoring/onpremise/grafana/main.tf b/monitoring/onpremise/grafana/main.tf index a61384a26..28045ea5a 100644 --- a/monitoring/onpremise/grafana/main.tf +++ b/monitoring/onpremise/grafana/main.tf @@ -51,8 +51,8 @@ resource "kubernetes_deployment" "grafana" { } } security_context { - run_as_user = 999 - fs_group = 999 + run_as_user = var.security_context.run_as_user + fs_group = var.security_context.fs_group } container { name = "grafana" diff --git a/monitoring/onpremise/grafana/variables.tf b/monitoring/onpremise/grafana/variables.tf index c7256cf06..7cfe4cef1 100644 --- a/monitoring/onpremise/grafana/variables.tf +++ b/monitoring/onpremise/grafana/variables.tf @@ -46,6 +46,14 @@ variable "authentication" { default = false } +variable "security_context" { + description = "security context for MongoDB pods" + type = object({ + run_as_user = number + fs_group = number + }) +} + # Persistent volume variable "persistent_volume" { description = "Persistent volume info" diff --git a/monitoring/onpremise/prometheus/main.tf b/monitoring/onpremise/prometheus/main.tf index 
500176135..b9b2f2440 100644 --- a/monitoring/onpremise/prometheus/main.tf +++ b/monitoring/onpremise/prometheus/main.tf @@ -51,10 +51,10 @@ resource "kubernetes_deployment" "prometheus" { } } security_context { - run_as_user = 65534 + run_as_user = var.security_context.run_as_user run_as_non_root = true - run_as_group = 65534 - fs_group = 65534 + run_as_group = var.security_context.fs_group + fs_group = var.security_context.fs_group } container { name = "prometheus" diff --git a/monitoring/onpremise/prometheus/variables.tf b/monitoring/onpremise/prometheus/variables.tf index 57861dc8c..87c230983 100644 --- a/monitoring/onpremise/prometheus/variables.tf +++ b/monitoring/onpremise/prometheus/variables.tf @@ -33,6 +33,14 @@ variable "metrics_exporter_url" { type = string } +variable "security_context" { + description = "security context for MongoDB pods" + type = object({ + run_as_user = number + fs_group = number + }) +} + # Persistent volume variable "persistent_volume" { description = "Persistent volume info" diff --git a/storage/onpremise/mongodb/main.tf b/storage/onpremise/mongodb/main.tf index 2c5354039..9339a3f83 100644 --- a/storage/onpremise/mongodb/main.tf +++ b/storage/onpremise/mongodb/main.tf @@ -51,8 +51,8 @@ resource "kubernetes_deployment" "mongodb" { } } security_context { - run_as_user = 999 - fs_group = 999 + run_as_user = var.mongodb.security_context.run_as_user + fs_group = var.mongodb.security_context.fs_group } container { name = "mongodb" diff --git a/storage/onpremise/mongodb/variables.tf b/storage/onpremise/mongodb/variables.tf index 2ea019545..bfcb095be 100644 --- a/storage/onpremise/mongodb/variables.tf +++ b/storage/onpremise/mongodb/variables.tf @@ -13,6 +13,10 @@ variable "mongodb" { node_selector = any image_pull_secrets = string replicas_number = number + security_context = object({ + run_as_user = number + fs_group = number + }) }) } From 9d745834965e025b822e646d7f98e721f439c3b4 Mon Sep 17 00:00:00 2001 From: lzianekhodja Date: Thu, 
11 Jan 2024 13:49:13 +0100 Subject: [PATCH 21/34] Update count in storage class of mongodb --- storage/onpremise/mongodb/persistent-volume.tf | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/storage/onpremise/mongodb/persistent-volume.tf b/storage/onpremise/mongodb/persistent-volume.tf index 625ba814e..0a0dca6f4 100644 --- a/storage/onpremise/mongodb/persistent-volume.tf +++ b/storage/onpremise/mongodb/persistent-volume.tf @@ -1,5 +1,5 @@ resource "kubernetes_storage_class" "mongodb" { - count = can(coalesce(var.persistent_volume)) ? 1 : 0 + count = var.persistent_volume != null ? 1 : 0 metadata { name = "mongodb" labels = { From cb5ff43b080364cefbd3f9341078580c8a27f0eb Mon Sep 17 00:00:00 2001 From: lzianekhodja Date: Thu, 11 Jan 2024 13:50:23 +0100 Subject: [PATCH 22/34] Update count in storage class of grafana and prometheus --- monitoring/onpremise/grafana/persistent-volume.tf | 2 +- monitoring/onpremise/prometheus/persistent-volume.tf | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/monitoring/onpremise/grafana/persistent-volume.tf b/monitoring/onpremise/grafana/persistent-volume.tf index 0e564d253..6d3ba607d 100644 --- a/monitoring/onpremise/grafana/persistent-volume.tf +++ b/monitoring/onpremise/grafana/persistent-volume.tf @@ -1,5 +1,5 @@ resource "kubernetes_storage_class" "grafana" { - count = can(coalesce(var.persistent_volume)) ? 1 : 0 + count = var.persistent_volume != null ? 1 : 0 metadata { name = "grafana" labels = { diff --git a/monitoring/onpremise/prometheus/persistent-volume.tf b/monitoring/onpremise/prometheus/persistent-volume.tf index 2c4087af7..0d88f0ef4 100644 --- a/monitoring/onpremise/prometheus/persistent-volume.tf +++ b/monitoring/onpremise/prometheus/persistent-volume.tf @@ -1,5 +1,5 @@ resource "kubernetes_storage_class" "prometheus" { - count = can(coalesce(var.persistent_volume)) ? 1 : 0 + count = var.persistent_volume != null ? 
1 : 0 metadata { name = "prometheus" labels = { From 2a46f5cc59201f5447c148207d7064c7e3637fa0 Mon Sep 17 00:00:00 2001 From: lzianekhodja Date: Thu, 11 Jan 2024 14:42:04 +0100 Subject: [PATCH 23/34] Update EFS CSI deployment --- kubernetes/aws/eks/efs-csi.tf | 40 ++++++++++++++++++++++++++++++----- kubernetes/aws/eks/locals.tf | 6 +++--- 2 files changed, 38 insertions(+), 8 deletions(-) diff --git a/kubernetes/aws/eks/efs-csi.tf b/kubernetes/aws/eks/efs-csi.tf index 8cd467371..75489bbae 100644 --- a/kubernetes/aws/eks/efs-csi.tf +++ b/kubernetes/aws/eks/efs-csi.tf @@ -82,7 +82,7 @@ resource "aws_iam_role" "efs_csi_driver" { Action = "sts:AssumeRoleWithWebIdentity" Condition = { StringEquals = { - #"${local.oidc_url}:aud" = "sts.amazonaws.com" + "${local.oidc_url}:aud" = "sts.amazonaws.com" "${local.oidc_url}:sub" = "system:serviceaccount:${local.efs_csi_namespace}:efs-csi-controller-sa" } } @@ -97,7 +97,7 @@ resource "aws_iam_role_policy_attachment" "efs_csi_driver" { role = aws_iam_role.efs_csi_driver.name } -resource "kubernetes_service_account" "efs_csi_driver" { +resource "kubernetes_service_account" "efs_csi_driver_controller" { metadata { name = "efs-csi-controller-sa" annotations = { @@ -107,9 +107,19 @@ resource "kubernetes_service_account" "efs_csi_driver" { } } +resource "kubernetes_service_account" "efs_csi_driver_node" { + metadata { + name = "efs-csi-node-sa" + annotations = { + "eks.amazonaws.com/role-arn" = aws_iam_role.efs_csi_driver.arn + } + namespace = local.efs_csi_namespace + } +} + resource "helm_release" "efs_csi" { name = "efs-csi" - namespace = kubernetes_service_account.efs_csi_driver.metadata[0].namespace + namespace = kubernetes_service_account.efs_csi_driver_controller.metadata[0].namespace chart = "aws-efs-csi-driver" repository = var.eks.efs_csi.repository version = var.eks.efs_csi.version @@ -150,9 +160,29 @@ resource "helm_release" "efs_csi" { name = "imagePullSecrets" value = var.eks.efs_csi.image_pull_secrets } + set { + name = 
"controller.serviceAccount.create" + value = false + } + set { + name = "controller.serviceAccount.name" + value = kubernetes_service_account.efs_csi_driver_controller.metadata[0].name + } + set { + name = "node.serviceAccount.create" + value = false + } + set { + name = "node.serviceAccount.name" + value = kubernetes_service_account.efs_csi_driver_node.metadata[0].name + } + - values = [ + /*values = [ yamlencode(local.controller) ] - depends_on = [kubernetes_service_account.efs_csi_driver] + depends_on = [ + kubernetes_service_account.efs_csi_driver_controller, + kubernetes_service_account.efs_csi_driver_node + ]*/ } diff --git a/kubernetes/aws/eks/locals.tf b/kubernetes/aws/eks/locals.tf index 8388786d5..7a3812e98 100644 --- a/kubernetes/aws/eks/locals.tf +++ b/kubernetes/aws/eks/locals.tf @@ -53,7 +53,7 @@ locals { value = local.node_selector_values[index] effect = "NoSchedule" } - ]*/ + ] controller = { controller = { create = true @@ -69,13 +69,13 @@ locals { affinity = {} serviceAccount = { create = false - name = kubernetes_service_account.efs_csi_driver.metadata[0].name + name = kubernetes_service_account.efs_csi_driver_controller.metadata[0].name annotations = {} } healthPort = 9909 regionalStsEndpoints = false } - } + }*/ # Custom ENI subnets = { From 4b1c7a85aa80e917205842dcfb0dcce3c4bb5ec9 Mon Sep 17 00:00:00 2001 From: lzianekhodja Date: Thu, 11 Jan 2024 16:21:52 +0100 Subject: [PATCH 24/34] Update EFS CSI deployment --- kubernetes/aws/eks/efs-csi.tf | 15 +++++++++++++++ kubernetes/aws/eks/locals.tf | 8 ++++---- 2 files changed, 19 insertions(+), 4 deletions(-) diff --git a/kubernetes/aws/eks/efs-csi.tf b/kubernetes/aws/eks/efs-csi.tf index 75489bbae..e7ce89d49 100644 --- a/kubernetes/aws/eks/efs-csi.tf +++ b/kubernetes/aws/eks/efs-csi.tf @@ -168,6 +168,21 @@ resource "helm_release" "efs_csi" { name = "controller.serviceAccount.name" value = kubernetes_service_account.efs_csi_driver_controller.metadata[0].name } + set { + name = 
"controller.nodeSelector" + value = var.node_selector + } + set { + name = "controller.nodeSelector" + value = [ + for index in range(0, length(local.node_selector_keys)) : { + key = local.node_selector_keys[index] + operator = "Equal" + value = local.node_selector_values[index] + effect = "NoSchedule" + } + ] + } set { name = "node.serviceAccount.create" value = false diff --git a/kubernetes/aws/eks/locals.tf b/kubernetes/aws/eks/locals.tf index 7a3812e98..83ff93e8b 100644 --- a/kubernetes/aws/eks/locals.tf +++ b/kubernetes/aws/eks/locals.tf @@ -46,7 +46,7 @@ locals { oidc_arn = module.eks.oidc_provider_arn oidc_url = trimprefix(module.eks.cluster_oidc_issuer_url, "https://") efs_csi_namespace = try(var.eks.efs_csi.namespace, "kube-system") - /*efs_csi_tolerations = [ + efs_csi_tolerations = [ for index in range(0, length(local.node_selector_keys)) : { key = local.node_selector_keys[index] operator = "Equal" @@ -64,8 +64,8 @@ locals { volMetricsOptIn = false podAnnotations = {} resources = {} - # nodeSelector = var.node_selector - # tolerations = local.efs_csi_tolerations + nodeSelector = var.node_selector + tolerations = local.efs_csi_tolerations affinity = {} serviceAccount = { create = false @@ -75,7 +75,7 @@ locals { healthPort = 9909 regionalStsEndpoints = false } - }*/ + } # Custom ENI subnets = { From 1fd9d4a8284e57edca7d7ff869342a941ac62cd8 Mon Sep 17 00:00:00 2001 From: lzianekhodja Date: Thu, 11 Jan 2024 16:37:05 +0100 Subject: [PATCH 25/34] clean EFS CSI deployment --- kubernetes/aws/eks/efs-csi.tf | 33 ++++----------------------------- kubernetes/aws/eks/locals.tf | 2 +- 2 files changed, 5 insertions(+), 30 deletions(-) diff --git a/kubernetes/aws/eks/efs-csi.tf b/kubernetes/aws/eks/efs-csi.tf index e7ce89d49..90a4b2234 100644 --- a/kubernetes/aws/eks/efs-csi.tf +++ b/kubernetes/aws/eks/efs-csi.tf @@ -160,29 +160,6 @@ resource "helm_release" "efs_csi" { name = "imagePullSecrets" value = var.eks.efs_csi.image_pull_secrets } - set { - name = 
"controller.serviceAccount.create" - value = false - } - set { - name = "controller.serviceAccount.name" - value = kubernetes_service_account.efs_csi_driver_controller.metadata[0].name - } - set { - name = "controller.nodeSelector" - value = var.node_selector - } - set { - name = "controller.nodeSelector" - value = [ - for index in range(0, length(local.node_selector_keys)) : { - key = local.node_selector_keys[index] - operator = "Equal" - value = local.node_selector_values[index] - effect = "NoSchedule" - } - ] - } set { name = "node.serviceAccount.create" value = false @@ -191,13 +168,11 @@ resource "helm_release" "efs_csi" { name = "node.serviceAccount.name" value = kubernetes_service_account.efs_csi_driver_node.metadata[0].name } - - - /*values = [ + values = [ yamlencode(local.controller) ] depends_on = [ - kubernetes_service_account.efs_csi_driver_controller, - kubernetes_service_account.efs_csi_driver_node - ]*/ + kubernetes_service_account.efs_csi_driver_controller, + kubernetes_service_account.efs_csi_driver_node + ] } diff --git a/kubernetes/aws/eks/locals.tf b/kubernetes/aws/eks/locals.tf index 83ff93e8b..ca884f7dd 100644 --- a/kubernetes/aws/eks/locals.tf +++ b/kubernetes/aws/eks/locals.tf @@ -66,7 +66,7 @@ locals { resources = {} nodeSelector = var.node_selector tolerations = local.efs_csi_tolerations - affinity = {} + affinity = {} serviceAccount = { create = false name = kubernetes_service_account.efs_csi_driver_controller.metadata[0].name From 1482a7388f4a9078010e5e403c772318704e17ce Mon Sep 17 00:00:00 2001 From: lzianekhodja Date: Fri, 12 Jan 2024 11:26:25 +0100 Subject: [PATCH 26/34] test static PV --- .../onpremise/mongodb/persistent-volume.tf | 49 ++++++++++++++++++- 1 file changed, 48 insertions(+), 1 deletion(-) diff --git a/storage/onpremise/mongodb/persistent-volume.tf b/storage/onpremise/mongodb/persistent-volume.tf index 0a0dca6f4..e47ef86da 100644 --- a/storage/onpremise/mongodb/persistent-volume.tf +++ 
b/storage/onpremise/mongodb/persistent-volume.tf @@ -1,4 +1,4 @@ -resource "kubernetes_storage_class" "mongodb" { +/*resource "kubernetes_storage_class" "mongodb" { count = var.persistent_volume != null ? 1 : 0 metadata { name = "mongodb" @@ -34,3 +34,50 @@ resource "kubernetes_persistent_volume_claim" "mongodb" { } } } +*/ + +resource "kubernetes_persistent_volume" "mongodb" { + count = var.persistent_volume != null ? 1 : 0 + metadata { + name = "mongodb" + app = "mongodb" + type = "persistent-volume" + service = "persistent-volume" + } + spec { + access_modes = ["ReadWriteMany"] + capacity = { + storage = var.persistent_volume.resources.requests["storage"] + } + volume_mode = "Filesystem" + storage_class_name = "" + persistent_volume_reclaim_policy = "Delete" + persistent_volume_source { + csi { + driver = "efs.csi.aws.com" + volume_handle = var.persistent_volume.parameters.fileSystemId + } + } + } +} + +resource "kubernetes_persistent_volume_claim" "mongodb" { + count = var.persistent_volume != null ? 
1 : 0 + metadata { + name = "mongodb" + namespace = var.namespace + labels = { + app = "mongodb" + type = "persistent-volume-claim" + service = "persistent-volume" + } + } + spec { + access_modes = ["ReadWriteMany"] + storage_class_name = "" + resources { + requests = var.persistent_volume.resources.requests + limits = var.persistent_volume.resources.limits + } + } +} From 3fcb9277f206a8bd93a8e3691c1a5fd019fb5d43 Mon Sep 17 00:00:00 2001 From: lzianekhodja Date: Fri, 12 Jan 2024 11:27:43 +0100 Subject: [PATCH 27/34] fix test static PV --- storage/onpremise/mongodb/persistent-volume.tf | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/storage/onpremise/mongodb/persistent-volume.tf b/storage/onpremise/mongodb/persistent-volume.tf index e47ef86da..0ae295bf7 100644 --- a/storage/onpremise/mongodb/persistent-volume.tf +++ b/storage/onpremise/mongodb/persistent-volume.tf @@ -40,9 +40,11 @@ resource "kubernetes_persistent_volume" "mongodb" { count = var.persistent_volume != null ? 1 : 0 metadata { name = "mongodb" - app = "mongodb" - type = "persistent-volume" + labels = { + app = "mongodb" + type = "storage-class" service = "persistent-volume" + } } spec { access_modes = ["ReadWriteMany"] From 21434a8ec25c3a5e03a848614700096592d86cdb Mon Sep 17 00:00:00 2001 From: lzianekhodja Date: Fri, 12 Jan 2024 11:48:02 +0100 Subject: [PATCH 28/34] use dynamic PV --- .../onpremise/mongodb/persistent-volume.tf | 51 +------------------ 1 file changed, 1 insertion(+), 50 deletions(-) diff --git a/storage/onpremise/mongodb/persistent-volume.tf b/storage/onpremise/mongodb/persistent-volume.tf index 0ae295bf7..0a0dca6f4 100644 --- a/storage/onpremise/mongodb/persistent-volume.tf +++ b/storage/onpremise/mongodb/persistent-volume.tf @@ -1,4 +1,4 @@ -/*resource "kubernetes_storage_class" "mongodb" { +resource "kubernetes_storage_class" "mongodb" { count = var.persistent_volume != null ? 
1 : 0 metadata { name = "mongodb" @@ -34,52 +34,3 @@ resource "kubernetes_persistent_volume_claim" "mongodb" { } } } -*/ - -resource "kubernetes_persistent_volume" "mongodb" { - count = var.persistent_volume != null ? 1 : 0 - metadata { - name = "mongodb" - labels = { - app = "mongodb" - type = "storage-class" - service = "persistent-volume" - } - } - spec { - access_modes = ["ReadWriteMany"] - capacity = { - storage = var.persistent_volume.resources.requests["storage"] - } - volume_mode = "Filesystem" - storage_class_name = "" - persistent_volume_reclaim_policy = "Delete" - persistent_volume_source { - csi { - driver = "efs.csi.aws.com" - volume_handle = var.persistent_volume.parameters.fileSystemId - } - } - } -} - -resource "kubernetes_persistent_volume_claim" "mongodb" { - count = var.persistent_volume != null ? 1 : 0 - metadata { - name = "mongodb" - namespace = var.namespace - labels = { - app = "mongodb" - type = "persistent-volume-claim" - service = "persistent-volume" - } - } - spec { - access_modes = ["ReadWriteMany"] - storage_class_name = "" - resources { - requests = var.persistent_volume.resources.requests - limits = var.persistent_volume.resources.limits - } - } -} From f45ff4a32c956398c2a581375ac24d570bfd32dd Mon Sep 17 00:00:00 2001 From: lzianekhodja Date: Fri, 12 Jan 2024 11:51:58 +0100 Subject: [PATCH 29/34] use dynamic PV with security context --- monitoring/onpremise/grafana/main.tf | 6 ++++-- storage/onpremise/mongodb/main.tf | 6 ++++-- 2 files changed, 8 insertions(+), 4 deletions(-) diff --git a/monitoring/onpremise/grafana/main.tf b/monitoring/onpremise/grafana/main.tf index 28045ea5a..f1f9f95aa 100644 --- a/monitoring/onpremise/grafana/main.tf +++ b/monitoring/onpremise/grafana/main.tf @@ -51,8 +51,10 @@ resource "kubernetes_deployment" "grafana" { } } security_context { - run_as_user = var.security_context.run_as_user - fs_group = var.security_context.fs_group + run_as_user = var.security_context.run_as_user + run_as_non_root = true + 
run_as_group = var.security_context.fs_group + fs_group = var.security_context.fs_group } container { name = "grafana" diff --git a/storage/onpremise/mongodb/main.tf b/storage/onpremise/mongodb/main.tf index 9339a3f83..842f775be 100644 --- a/storage/onpremise/mongodb/main.tf +++ b/storage/onpremise/mongodb/main.tf @@ -51,8 +51,10 @@ resource "kubernetes_deployment" "mongodb" { } } security_context { - run_as_user = var.mongodb.security_context.run_as_user - fs_group = var.mongodb.security_context.fs_group + run_as_user = var.mongodb.security_context.run_as_user + run_as_non_root = true + run_as_group = var.mongodb.security_context.fs_group + fs_group = var.mongodb.security_context.fs_group } container { name = "mongodb" From a2bb99f8af8f92e3c06ae751d595285ec6acc118 Mon Sep 17 00:00:00 2001 From: lzianekhodja Date: Fri, 12 Jan 2024 12:43:41 +0100 Subject: [PATCH 30/34] update role of efs csi --- kubernetes/aws/eks/efs-csi.tf | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kubernetes/aws/eks/efs-csi.tf b/kubernetes/aws/eks/efs-csi.tf index 90a4b2234..5563db164 100644 --- a/kubernetes/aws/eks/efs-csi.tf +++ b/kubernetes/aws/eks/efs-csi.tf @@ -83,7 +83,7 @@ resource "aws_iam_role" "efs_csi_driver" { Condition = { StringEquals = { "${local.oidc_url}:aud" = "sts.amazonaws.com" - "${local.oidc_url}:sub" = "system:serviceaccount:${local.efs_csi_namespace}:efs-csi-controller-sa" + "${local.oidc_url}:sub" = "system:serviceaccount:${local.efs_csi_namespace}:efs-csi-*" } } } From c8c50c74de00a26dea259224a50909825a4c7e78 Mon Sep 17 00:00:00 2001 From: lzianekhodja Date: Fri, 12 Jan 2024 13:20:03 +0100 Subject: [PATCH 31/34] update role of efs csi --- kubernetes/aws/eks/efs-csi.tf | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/kubernetes/aws/eks/efs-csi.tf b/kubernetes/aws/eks/efs-csi.tf index 5563db164..cc49c2505 100644 --- a/kubernetes/aws/eks/efs-csi.tf +++ b/kubernetes/aws/eks/efs-csi.tf @@ -83,7 +83,7 @@ resource "aws_iam_role" 
"efs_csi_driver" { Condition = { StringEquals = { "${local.oidc_url}:aud" = "sts.amazonaws.com" - "${local.oidc_url}:sub" = "system:serviceaccount:${local.efs_csi_namespace}:efs-csi-*" + "${local.oidc_url}:sub" = "system:serviceaccount:${local.efs_csi_namespace}:efs-csi-controller-sa" } } } @@ -172,7 +172,7 @@ resource "helm_release" "efs_csi" { yamlencode(local.controller) ] depends_on = [ - kubernetes_service_account.efs_csi_driver_controller, - kubernetes_service_account.efs_csi_driver_node + kubernetes_service_account.efs_csi_driver_controller, + kubernetes_service_account.efs_csi_driver_node ] } From aa58419f789cd580d89616d31a4f8aded4fb2069 Mon Sep 17 00:00:00 2001 From: lzianekhodja Date: Fri, 12 Jan 2024 14:05:41 +0100 Subject: [PATCH 32/34] update role of efs csi --- kubernetes/aws/eks/efs-csi.tf | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/kubernetes/aws/eks/efs-csi.tf b/kubernetes/aws/eks/efs-csi.tf index cc49c2505..90a4b2234 100644 --- a/kubernetes/aws/eks/efs-csi.tf +++ b/kubernetes/aws/eks/efs-csi.tf @@ -172,7 +172,7 @@ resource "helm_release" "efs_csi" { yamlencode(local.controller) ] depends_on = [ - kubernetes_service_account.efs_csi_driver_controller, - kubernetes_service_account.efs_csi_driver_node + kubernetes_service_account.efs_csi_driver_controller, + kubernetes_service_account.efs_csi_driver_node ] } From 98f88f63f8e1928bcecfb252824dc37b40bf61f4 Mon Sep 17 00:00:00 2001 From: lzianekhodja Date: Fri, 12 Jan 2024 15:23:26 +0100 Subject: [PATCH 33/34] update path of PVC --- monitoring/onpremise/prometheus/main.tf | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/monitoring/onpremise/prometheus/main.tf b/monitoring/onpremise/prometheus/main.tf index b9b2f2440..e35d76293 100644 --- a/monitoring/onpremise/prometheus/main.tf +++ b/monitoring/onpremise/prometheus/main.tf @@ -78,7 +78,7 @@ resource "kubernetes_deployment" "prometheus" { for_each = length(kubernetes_persistent_volume_claim.prometheus) > 0 ? 
[1] : [] content { name = "database" - mount_path = "/prometheus/data" + mount_path = "/prometheus" } } } From a8ec1d523195f905577944ea8379b357df28815b Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" Date: Mon, 15 Jan 2024 08:30:24 +0000 Subject: [PATCH 34/34] terraform-docs: automated action --- kubernetes/aws/eks/README.md | 11 ++++++++++- monitoring/onpremise/grafana/README.md | 4 ++++ monitoring/onpremise/prometheus/README.md | 4 ++++ persistent-volume/aws/efs/README.md | 5 +---- storage/onpremise/mongodb/README.md | 4 ++-- 5 files changed, 21 insertions(+), 7 deletions(-) diff --git a/kubernetes/aws/eks/README.md b/kubernetes/aws/eks/README.md index 65c13d58b..efcd165ad 100644 --- a/kubernetes/aws/eks/README.md +++ b/kubernetes/aws/eks/README.md @@ -6,6 +6,7 @@ | [terraform](#requirement\_terraform) | >= 1.0 | | [aws](#requirement\_aws) | >= 5.3.0 | | [helm](#requirement\_helm) | >= 2.10.1 | +| [kubernetes](#requirement\_kubernetes) | >= 2.13.0 | | [null](#requirement\_null) | >= 3.2.1 | | [random](#requirement\_random) | >= 3.5.1 | @@ -15,6 +16,7 @@ |------|---------| | [aws](#provider\_aws) | >= 5.3.0 | | [helm](#provider\_helm) | >= 2.10.1 | +| [kubernetes](#provider\_kubernetes) | >= 2.13.0 | | [null](#provider\_null) | >= 3.2.1 | | [random](#provider\_random) | >= 3.5.1 | @@ -34,11 +36,17 @@ | [aws_cloudwatch_event_rule.aws_node_termination_handler_asg](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/cloudwatch_event_rule) | resource | | [aws_cloudwatch_event_rule.aws_node_termination_handler_spot](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/cloudwatch_event_rule) | resource | | [aws_iam_policy.aws_node_termination_handler](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_policy) | resource | +| [aws_iam_policy.efs_csi_driver](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_policy) | resource | | 
[aws_iam_policy.worker_autoscaling](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_policy) | resource | | [aws_iam_policy_attachment.workers_autoscaling](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_policy_attachment) | resource | +| [aws_iam_role.efs_csi_driver](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role) | resource | +| [aws_iam_role_policy_attachment.efs_csi_driver](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role_policy_attachment) | resource | | [helm_release.aws_node_termination_handler](https://registry.terraform.io/providers/hashicorp/helm/latest/docs/resources/release) | resource | | [helm_release.cluster_autoscaler](https://registry.terraform.io/providers/hashicorp/helm/latest/docs/resources/release) | resource | +| [helm_release.efs_csi](https://registry.terraform.io/providers/hashicorp/helm/latest/docs/resources/release) | resource | | [helm_release.eni_config](https://registry.terraform.io/providers/hashicorp/helm/latest/docs/resources/release) | resource | +| [kubernetes_service_account.efs_csi_driver_controller](https://registry.terraform.io/providers/hashicorp/kubernetes/latest/docs/resources/service_account) | resource | +| [kubernetes_service_account.efs_csi_driver_node](https://registry.terraform.io/providers/hashicorp/kubernetes/latest/docs/resources/service_account) | resource | | [null_resource.change_cni_label](https://registry.terraform.io/providers/hashicorp/null/latest/docs/resources/resource) | resource | | [null_resource.patch_coredns](https://registry.terraform.io/providers/hashicorp/null/latest/docs/resources/resource) | resource | | [null_resource.trigger_custom_cni](https://registry.terraform.io/providers/hashicorp/null/latest/docs/resources/resource) | resource | @@ -48,6 +56,7 @@ | 
[aws_availability_zones.available](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/availability_zones) | data source | | [aws_caller_identity.current](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/caller_identity) | data source | | [aws_iam_policy_document.aws_node_termination_handler](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source | +| [aws_iam_policy_document.efs_csi_driver](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source | | [aws_iam_policy_document.worker_autoscaling](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source | | [aws_region.current](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/region) | data source | @@ -59,7 +68,7 @@ | [chart\_namespace](#input\_chart\_namespace) | Version for chart | `string` | `"default"` | no | | [chart\_repository](#input\_chart\_repository) | Path to the charts repository | `string` | `"../../../charts"` | no | | [chart\_version](#input\_chart\_version) | Version for chart | `string` | `"0.1.0"` | no | -| [eks](#input\_eks) | Parameters of AWS EKS |
object({
cluster_version = string
cluster_endpoint_private_access = bool
cluster_endpoint_private_access_cidrs = list(string)
cluster_endpoint_private_access_sg = list(string)
cluster_endpoint_public_access = bool
cluster_endpoint_public_access_cidrs = list(string)
cluster_log_retention_in_days = number
docker_images = object({
cluster_autoscaler = object({
image = string
tag = string
})
instance_refresh = object({
image = string
tag = string
})
})
cluster_autoscaler = object({
expander = string
scale_down_enabled = bool
min_replica_count = number
scale_down_utilization_threshold = number
scale_down_non_empty_candidates_count = number
max_node_provision_time = string
scan_interval = string
scale_down_delay_after_add = string
scale_down_delay_after_delete = string
scale_down_delay_after_failure = string
scale_down_unneeded_time = string
skip_nodes_with_system_pods = bool
version = string
repository = string
namespace = string
})
instance_refresh = object({
namespace = string
repository = string
version = string
})
encryption_keys = object({
cluster_log_kms_key_id = string
cluster_encryption_config = string
ebs_kms_key_id = string
})
map_roles = list(object({
rolearn = string
username = string
groups = list(string)
}))
map_users = list(object({
userarn = string
username = string
groups = list(string)
}))
})
| n/a | yes | +| [eks](#input\_eks) | Parameters of AWS EKS |
object({
cluster_version = string
cluster_endpoint_private_access = bool
cluster_endpoint_private_access_cidrs = list(string)
cluster_endpoint_private_access_sg = list(string)
cluster_endpoint_public_access = bool
cluster_endpoint_public_access_cidrs = list(string)
cluster_log_retention_in_days = number
docker_images = object({
cluster_autoscaler = object({
image = string
tag = string
})
instance_refresh = object({
image = string
tag = string
})
efs_csi = object({
image = string
tag = string
})
livenessprobe = object({
image = string
tag = string
})
node_driver_registrar = object({
image = string
tag = string
})
external_provisioner = object({
image = string
tag = string
})
})
cluster_autoscaler = object({
expander = string
scale_down_enabled = bool
min_replica_count = number
scale_down_utilization_threshold = number
scale_down_non_empty_candidates_count = number
max_node_provision_time = string
scan_interval = string
scale_down_delay_after_add = string
scale_down_delay_after_delete = string
scale_down_delay_after_failure = string
scale_down_unneeded_time = string
skip_nodes_with_system_pods = bool
version = string
repository = string
namespace = string
})
instance_refresh = object({
namespace = string
repository = string
version = string
})
efs_csi = object({
name = string
namespace = string
image_pull_secrets = string
repository = string
version = string
})
encryption_keys = object({
cluster_log_kms_key_id = string
cluster_encryption_config = string
ebs_kms_key_id = string
})
map_roles = list(object({
rolearn = string
username = string
groups = list(string)
}))
map_users = list(object({
userarn = string
username = string
groups = list(string)
}))
})
| n/a | yes | | [eks\_managed\_node\_groups](#input\_eks\_managed\_node\_groups) | List of EKS managed node groups | `any` | `null` | no | | [fargate\_profiles](#input\_fargate\_profiles) | List of fargate profiles | `any` | `null` | no | | [kubeconfig\_file](#input\_kubeconfig\_file) | Kubeconfig file path | `string` | n/a | yes | diff --git a/monitoring/onpremise/grafana/README.md b/monitoring/onpremise/grafana/README.md index 5751dfa2e..ec48d430d 100644 --- a/monitoring/onpremise/grafana/README.md +++ b/monitoring/onpremise/grafana/README.md @@ -27,7 +27,9 @@ No modules. | [kubernetes_config_map.datasources_config](https://registry.terraform.io/providers/hashicorp/kubernetes/latest/docs/resources/config_map) | resource | | [kubernetes_config_map.grafana_ini](https://registry.terraform.io/providers/hashicorp/kubernetes/latest/docs/resources/config_map) | resource | | [kubernetes_deployment.grafana](https://registry.terraform.io/providers/hashicorp/kubernetes/latest/docs/resources/deployment) | resource | +| [kubernetes_persistent_volume_claim.grafana](https://registry.terraform.io/providers/hashicorp/kubernetes/latest/docs/resources/persistent_volume_claim) | resource | | [kubernetes_service.grafana](https://registry.terraform.io/providers/hashicorp/kubernetes/latest/docs/resources/service) | resource | +| [kubernetes_storage_class.grafana](https://registry.terraform.io/providers/hashicorp/kubernetes/latest/docs/resources/storage_class) | resource | | [local_file.dashboards_config_file](https://registry.terraform.io/providers/hashicorp/local/latest/docs/resources/file) | resource | | [local_file.datasources_config_file](https://registry.terraform.io/providers/hashicorp/local/latest/docs/resources/file) | resource | @@ -39,8 +41,10 @@ No modules. | [docker\_image](#input\_docker\_image) | Docker image for Grafana |
object({
image = string
tag = string
image_pull_secrets = string
})
| n/a | yes | | [namespace](#input\_namespace) | Namespace of ArmoniK monitoring | `string` | n/a | yes | | [node\_selector](#input\_node\_selector) | Node selector for Grafana | `any` | `{}` | no | +| [persistent\_volume](#input\_persistent\_volume) | Persistent volume info |
object({
storage_provisioner = string
volume_binding_mode = string
parameters = map(string)
# Resources for PVC
resources = object({
limits = object({
storage = string
})
requests = object({
storage = string
})
})
})
| n/a | yes | | [port](#input\_port) | Port for Grafana service | `string` | n/a | yes | | [prometheus\_url](#input\_prometheus\_url) | Prometheus URL | `string` | n/a | yes | +| [security\_context](#input\_security\_context) | security context for MongoDB pods |
object({
run_as_user = number
fs_group = number
})
| n/a | yes | | [service\_type](#input\_service\_type) | Service type which can be: ClusterIP, NodePort or LoadBalancer | `string` | n/a | yes | ## Outputs diff --git a/monitoring/onpremise/prometheus/README.md b/monitoring/onpremise/prometheus/README.md index 4a9723749..2a03a7c64 100644 --- a/monitoring/onpremise/prometheus/README.md +++ b/monitoring/onpremise/prometheus/README.md @@ -29,7 +29,9 @@ No modules. | [kubernetes_cluster_role_binding.prometheus_ns_armonik](https://registry.terraform.io/providers/hashicorp/kubernetes/latest/docs/resources/cluster_role_binding) | resource | | [kubernetes_config_map.prometheus_config](https://registry.terraform.io/providers/hashicorp/kubernetes/latest/docs/resources/config_map) | resource | | [kubernetes_deployment.prometheus](https://registry.terraform.io/providers/hashicorp/kubernetes/latest/docs/resources/deployment) | resource | +| [kubernetes_persistent_volume_claim.prometheus](https://registry.terraform.io/providers/hashicorp/kubernetes/latest/docs/resources/persistent_volume_claim) | resource | | [kubernetes_service.prometheus](https://registry.terraform.io/providers/hashicorp/kubernetes/latest/docs/resources/service) | resource | +| [kubernetes_storage_class.prometheus](https://registry.terraform.io/providers/hashicorp/kubernetes/latest/docs/resources/storage_class) | resource | | [local_file.prometheus_config_file](https://registry.terraform.io/providers/hashicorp/local/latest/docs/resources/file) | resource | | [random_string.random_resources](https://registry.terraform.io/providers/hashicorp/random/latest/docs/resources/string) | resource | @@ -41,6 +43,8 @@ No modules. 
| [metrics\_exporter\_url](#input\_metrics\_exporter\_url) | URL of metrics exporter | `string` | n/a | yes | | [namespace](#input\_namespace) | Namespace of ArmoniK monitoring | `string` | n/a | yes | | [node\_selector](#input\_node\_selector) | Node selector for Prometheus | `any` | `{}` | no | +| [persistent\_volume](#input\_persistent\_volume) | Persistent volume info |
object({
storage_provisioner = string
volume_binding_mode = string
parameters = map(string)
# Resources for PVC
resources = object({
limits = object({
storage = string
})
requests = object({
storage = string
})
})
})
| n/a | yes | +| [security\_context](#input\_security\_context) | security context for MongoDB pods |
object({
run_as_user = number
fs_group = number
})
| n/a | yes | | [service\_type](#input\_service\_type) | Service type which can be: ClusterIP, NodePort or LoadBalancer | `string` | n/a | yes | ## Outputs diff --git a/persistent-volume/aws/efs/README.md b/persistent-volume/aws/efs/README.md index 17b967486..e277e176e 100644 --- a/persistent-volume/aws/efs/README.md +++ b/persistent-volume/aws/efs/README.md @@ -7,7 +7,6 @@ | [aws](#requirement\_aws) | >= 5.3.0 | | [helm](#requirement\_helm) | >= 2.10.1 | | [kubernetes](#requirement\_kubernetes) | >= 2.21.1 | -| [tls](#requirement\_tls) | >= 4.0.4 | ## Providers @@ -16,7 +15,6 @@ | [aws](#provider\_aws) | >= 5.3.0 | | [helm](#provider\_helm) | >= 2.10.1 | | [kubernetes](#provider\_kubernetes) | >= 2.21.1 | -| [tls](#provider\_tls) | >= 4.0.4 | ## Modules @@ -28,14 +26,13 @@ | Name | Type | |------|------| -| [aws_iam_openid_connect_provider.eks_oidc](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_openid_connect_provider) | resource | | [aws_iam_policy.efs_csi_driver](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_policy) | resource | | [aws_iam_role.efs_csi_driver](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role) | resource | | [aws_iam_role_policy_attachment.efs_csi_driver](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role_policy_attachment) | resource | | [helm_release.efs_csi](https://registry.terraform.io/providers/hashicorp/helm/latest/docs/resources/release) | resource | | [kubernetes_service_account.efs_csi_driver](https://registry.terraform.io/providers/hashicorp/kubernetes/latest/docs/resources/service_account) | resource | +| [aws_iam_openid_connect_provider.eks_oidc](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_openid_connect_provider) | data source | | 
[aws_iam_policy_document.efs_csi_driver](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source | -| [tls_certificate.eks](https://registry.terraform.io/providers/hashicorp/tls/latest/docs/data-sources/certificate) | data source | ## Inputs diff --git a/storage/onpremise/mongodb/README.md b/storage/onpremise/mongodb/README.md index eb890d6fd..4d6ebe401 100644 --- a/storage/onpremise/mongodb/README.md +++ b/storage/onpremise/mongodb/README.md @@ -56,9 +56,9 @@ No modules. | Name | Description | Type | Default | Required | |------|-------------|------|---------|:--------:| -| [mongodb](#input\_mongodb) | Parameters of MongoDB |
object({
image = string
tag = string
node_selector = any
image_pull_secrets = string
replicas_number = number
})
| n/a | yes | +| [mongodb](#input\_mongodb) | Parameters of MongoDB |
object({
image = string
tag = string
node_selector = any
image_pull_secrets = string
replicas_number = number
security_context = object({
run_as_user = number
fs_group = number
})
})
| n/a | yes | | [namespace](#input\_namespace) | Namespace of ArmoniK resources | `string` | n/a | yes | -| [persistent\_volume](#input\_persistent\_volume) | Persistent volume info |
object({
storage_provisioner = string
parameters = map(string)
# Resources for PVC
resources = object({
limits = object({
storage = string
})
requests = object({
storage = string
})
})
})
| n/a | yes | +| [persistent\_volume](#input\_persistent\_volume) | Persistent volume info |
object({
storage_provisioner = string
volume_binding_mode = string
parameters = map(string)
# Resources for PVC
resources = object({
limits = object({
storage = string
})
requests = object({
storage = string
})
})
})
| n/a | yes | | [validity\_period\_hours](#input\_validity\_period\_hours) | Validity period of the certificate in hours | `string` | `"8760"` | no | ## Outputs