From d12d2c038f9c3301e23bbf2fd51e1e21313383f3 Mon Sep 17 00:00:00 2001 From: Spencer Gilbert Date: Mon, 30 Aug 2021 20:46:39 -0400 Subject: [PATCH 01/13] Initial commit for EKS demo Signed-off-by: Spencer Gilbert --- eks-load-balancing/Makefile | 69 +++++++++ eks-load-balancing/aws/iam_policy.json | 207 +++++++++++++++++++++++++ eks-load-balancing/vector/values.yaml | 28 ++++ 3 files changed, 304 insertions(+) create mode 100644 eks-load-balancing/Makefile create mode 100644 eks-load-balancing/aws/iam_policy.json create mode 100644 eks-load-balancing/vector/values.yaml diff --git a/eks-load-balancing/Makefile b/eks-load-balancing/Makefile new file mode 100644 index 0000000..9618162 --- /dev/null +++ b/eks-load-balancing/Makefile @@ -0,0 +1,69 @@ +ifndef ACCOUNT_ID +$(error ACCOUNT_ID is not set) +endif +.PHONY: up +up: eks-up iam-up sa-up alb-up vector-up + +.PHONY: down +down: helm-down sa-down iam-down eks-down + +.PHONY: eks-up +eks-up: + eksctl create cluster --with-oidc --name vector-demo + +.PHONY: iam-up +iam-up: + aws iam create-policy \ + --policy-name AWSLoadBalancerControllerIAMPolicy \ + --policy-document file://aws/iam_policy.json | jq -r .Policy.Arn + +.PHONY: sa-up +sa-up: + eksctl create iamserviceaccount \ + --cluster=vector-demo \ + --namespace=kube-system \ + --name=aws-load-balancer-controller \ + --attach-policy-arn=arn:aws:iam::$(ACCOUNT_ID):policy/AWSLoadBalancerControllerIAMPolicy \ + --override-existing-serviceaccounts \ + --approve +.PHONY: alb-up +alb-up: + helm repo add eks https://aws.github.io/eks-charts && \ + helm repo update + kubectl apply -k "github.com/aws/eks-charts/stable/aws-load-balancer-controller/crds?ref=master" + helm upgrade --install aws-load-balancer-controller eks/aws-load-balancer-controller \ + --set clusterName=vector-demo \ + --set serviceAccount.create=false \ + --set serviceAccount.name=aws-load-balancer-controller \ + --namespace kube-system +.PHONY: vector-up +vector-up: + helm repo add vector https://helm.vector.dev && \ + helm repo update + helm upgrade --install vector vector/vector-aggregator \ + --values vector/values.yaml \ + --namespace vector \ + --create-namespace + +.PHONY: helm-down +helm-down: + helm uninstall aws-load-balancer-controller \ + --namespace kube-system + helm uninstall vector \ + --namespace vector + +.PHONY: sa-down +sa-down: + eksctl delete iamserviceaccount \ + --cluster=vector-demo \ + --namespace=kube-system \ + --name=aws-load-balancer-controller + +.PHONY: iam-down +iam-down: + sleep 15 + aws iam delete-policy --policy-arn arn:aws:iam::$(ACCOUNT_ID):policy/AWSLoadBalancerControllerIAMPolicy + +.PHONY: eks-down +eks-down: + eksctl delete cluster --name=vector-demo diff --git a/eks-load-balancing/aws/iam_policy.json b/eks-load-balancing/aws/iam_policy.json new file mode 100644 index 0000000..c11ff94 --- /dev/null +++ b/eks-load-balancing/aws/iam_policy.json @@ -0,0 +1,207 @@ +{ + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Action": [ + "iam:CreateServiceLinkedRole", + "ec2:DescribeAccountAttributes", + "ec2:DescribeAddresses", + "ec2:DescribeAvailabilityZones", + "ec2:DescribeInternetGateways", + "ec2:DescribeVpcs", + "ec2:DescribeSubnets", + "ec2:DescribeSecurityGroups", + "ec2:DescribeInstances", + "ec2:DescribeNetworkInterfaces", + "ec2:DescribeTags", + "ec2:GetCoipPoolUsage", + "ec2:DescribeCoipPools", + "elasticloadbalancing:DescribeLoadBalancers", + "elasticloadbalancing:DescribeLoadBalancerAttributes", + "elasticloadbalancing:DescribeListeners", + 
"elasticloadbalancing:DescribeListenerCertificates", + "elasticloadbalancing:DescribeSSLPolicies", + "elasticloadbalancing:DescribeRules", + "elasticloadbalancing:DescribeTargetGroups", + "elasticloadbalancing:DescribeTargetGroupAttributes", + "elasticloadbalancing:DescribeTargetHealth", + "elasticloadbalancing:DescribeTags" + ], + "Resource": "*" + }, + { + "Effect": "Allow", + "Action": [ + "cognito-idp:DescribeUserPoolClient", + "acm:ListCertificates", + "acm:DescribeCertificate", + "iam:ListServerCertificates", + "iam:GetServerCertificate", + "waf-regional:GetWebACL", + "waf-regional:GetWebACLForResource", + "waf-regional:AssociateWebACL", + "waf-regional:DisassociateWebACL", + "wafv2:GetWebACL", + "wafv2:GetWebACLForResource", + "wafv2:AssociateWebACL", + "wafv2:DisassociateWebACL", + "shield:GetSubscriptionState", + "shield:DescribeProtection", + "shield:CreateProtection", + "shield:DeleteProtection" + ], + "Resource": "*" + }, + { + "Effect": "Allow", + "Action": [ + "ec2:AuthorizeSecurityGroupIngress", + "ec2:RevokeSecurityGroupIngress" + ], + "Resource": "*" + }, + { + "Effect": "Allow", + "Action": [ + "ec2:CreateSecurityGroup" + ], + "Resource": "*" + }, + { + "Effect": "Allow", + "Action": [ + "ec2:CreateTags" + ], + "Resource": "arn:aws:ec2:*:*:security-group/*", + "Condition": { + "StringEquals": { + "ec2:CreateAction": "CreateSecurityGroup" + }, + "Null": { + "aws:RequestTag/elbv2.k8s.aws/cluster": "false" + } + } + }, + { + "Effect": "Allow", + "Action": [ + "ec2:CreateTags", + "ec2:DeleteTags" + ], + "Resource": "arn:aws:ec2:*:*:security-group/*", + "Condition": { + "Null": { + "aws:RequestTag/elbv2.k8s.aws/cluster": "true", + "aws:ResourceTag/elbv2.k8s.aws/cluster": "false" + } + } + }, + { + "Effect": "Allow", + "Action": [ + "ec2:AuthorizeSecurityGroupIngress", + "ec2:RevokeSecurityGroupIngress", + "ec2:DeleteSecurityGroup" + ], + "Resource": "*", + "Condition": { + "Null": { + "aws:ResourceTag/elbv2.k8s.aws/cluster": "false" + } + } + }, + { + "Effect": "Allow", + "Action": [ + "elasticloadbalancing:CreateLoadBalancer", + "elasticloadbalancing:CreateTargetGroup" + ], + "Resource": "*", + "Condition": { + "Null": { + "aws:RequestTag/elbv2.k8s.aws/cluster": "false" + } + } + }, + { + "Effect": "Allow", + "Action": [ + "elasticloadbalancing:CreateListener", + "elasticloadbalancing:DeleteListener", + "elasticloadbalancing:CreateRule", + "elasticloadbalancing:DeleteRule" + ], + "Resource": "*" + }, + { + "Effect": "Allow", + "Action": [ + "elasticloadbalancing:AddTags", + "elasticloadbalancing:RemoveTags" + ], + "Resource": [ + "arn:aws:elasticloadbalancing:*:*:targetgroup/*/*", + "arn:aws:elasticloadbalancing:*:*:loadbalancer/net/*/*", + "arn:aws:elasticloadbalancing:*:*:loadbalancer/app/*/*" + ], + "Condition": { + "Null": { + "aws:RequestTag/elbv2.k8s.aws/cluster": "true", + "aws:ResourceTag/elbv2.k8s.aws/cluster": "false" + } + } + }, + { + "Effect": "Allow", + "Action": [ + "elasticloadbalancing:AddTags", + "elasticloadbalancing:RemoveTags" + ], + "Resource": [ + "arn:aws:elasticloadbalancing:*:*:listener/net/*/*/*", + "arn:aws:elasticloadbalancing:*:*:listener/app/*/*/*", + "arn:aws:elasticloadbalancing:*:*:listener-rule/net/*/*/*", + "arn:aws:elasticloadbalancing:*:*:listener-rule/app/*/*/*" + ] + }, + { + "Effect": "Allow", + "Action": [ + "elasticloadbalancing:ModifyLoadBalancerAttributes", + "elasticloadbalancing:SetIpAddressType", + "elasticloadbalancing:SetSecurityGroups", + "elasticloadbalancing:SetSubnets", + "elasticloadbalancing:DeleteLoadBalancer", + 
"elasticloadbalancing:ModifyTargetGroup", + "elasticloadbalancing:ModifyTargetGroupAttributes", + "elasticloadbalancing:DeleteTargetGroup" + ], + "Resource": "*", + "Condition": { + "Null": { + "aws:ResourceTag/elbv2.k8s.aws/cluster": "false" + } + } + }, + { + "Effect": "Allow", + "Action": [ + "elasticloadbalancing:RegisterTargets", + "elasticloadbalancing:DeregisterTargets" + ], + "Resource": "arn:aws:elasticloadbalancing:*:*:targetgroup/*/*" + }, + { + "Effect": "Allow", + "Action": [ + "elasticloadbalancing:SetWebAcl", + "elasticloadbalancing:ModifyListener", + "elasticloadbalancing:AddListenerCertificates", + "elasticloadbalancing:RemoveListenerCertificates", + "elasticloadbalancing:ModifyRule" + ], + "Resource": "*" + } + ] +} diff --git a/eks-load-balancing/vector/values.yaml b/eks-load-balancing/vector/values.yaml new file mode 100644 index 0000000..e074c70 --- /dev/null +++ b/eks-load-balancing/vector/values.yaml @@ -0,0 +1,28 @@ +replicas: 3 +# The Vector Aggregator chart defines a +# vector source that is made available to you. +# You do not need to define a log source. +transforms: + # Adjust as necessary. This remap transform parses a JSON + # formatted log message, emitting a log if the contents are + # not valid JSON + # /docs/reference/transforms/ + remap: + type: remap + inputs: ["vector"] + source: | + structured, err = parse_json(.message) + if err != null { + log("Unable to parse JSON: " + err, level: "error") + } else { + . = merge(., object!(structured)) + } +sinks: + # Adjust as necessary. By default we use the console sink + # to print all data. This allows you to see Vector working. + # /docs/reference/sinks/ + stdout: + type: console + inputs: ["remap"] + target: "stdout" + encoding: "json" From 66c0326d4d1fce641f4e73f0f8eb2c91ac5ab861 Mon Sep 17 00:00:00 2001 From: Spencer Gilbert Date: Thu, 9 Sep 2021 20:52:44 -0400 Subject: [PATCH 02/13] Updates with new vector chart Signed-off-by: Spencer Gilbert --- .../Makefile | 48 ++----- eks-aggregator/README.md | 0 eks-aggregator/helm/datadog.yaml | 18 +++ eks-aggregator/helm/vector.yaml | 120 ++++++++++++++++++ .../aws => eks-aggregator}/iam_policy.json | 0 eks-load-balancing/vector/values.yaml | 28 ---- 6 files changed, 148 insertions(+), 66 deletions(-) rename {eks-load-balancing => eks-aggregator}/Makefile (65%) create mode 100644 eks-aggregator/README.md create mode 100644 eks-aggregator/helm/datadog.yaml create mode 100644 eks-aggregator/helm/vector.yaml rename {eks-load-balancing/aws => eks-aggregator}/iam_policy.json (100%) delete mode 100644 eks-load-balancing/vector/values.yaml diff --git a/eks-load-balancing/Makefile b/eks-aggregator/Makefile similarity index 65% rename from eks-load-balancing/Makefile rename to eks-aggregator/Makefile index 9618162..9579866 100644 --- a/eks-load-balancing/Makefile +++ b/eks-aggregator/Makefile @@ -1,24 +1,12 @@ ifndef ACCOUNT_ID $(error ACCOUNT_ID is not set) endif -.PHONY: up -up: eks-up iam-up sa-up alb-up vector-up - -.PHONY: down -down: helm-down sa-down iam-down eks-down - -.PHONY: eks-up -eks-up: +.PHONY: cluster-up +up: eksctl create cluster --with-oidc --name vector-demo - -.PHONY: iam-up -iam-up: aws iam create-policy \ --policy-name AWSLoadBalancerControllerIAMPolicy \ - --policy-document file://aws/iam_policy.json | jq -r .Policy.Arn - -.PHONY: sa-up -sa-up: + --policy-document file://iam_policy.json | jq -r .Policy.Arn eksctl create iamserviceaccount \ --cluster=vector-demo \ --namespace=kube-system \ @@ -26,8 +14,6 @@ sa-up: 
--attach-policy-arn=arn:aws:iam::$(ACCOUNT_ID):policy/AWSLoadBalancerControllerIAMPolicy \ --override-existing-serviceaccounts \ --approve -.PHONY: alb-up -alb-up: helm repo add eks https://aws.github.io/eks-charts && \ helm repo update kubectl apply -k "github.com/aws/eks-charts/stable/aws-load-balancer-controller/crds?ref=master" @@ -36,34 +22,20 @@ alb-up: --set serviceAccount.create=false \ --set serviceAccount.name=aws-load-balancer-controller \ --namespace kube-system -.PHONY: vector-up -vector-up: - helm repo add vector https://helm.vector.dev && \ - helm repo update - helm upgrade --install vector vector/vector-aggregator \ - --values vector/values.yaml \ - --namespace vector \ - --create-namespace + kubectl apply -f "https://github.com/kubernetes-sigs/metrics-server/releases/latest/download/components.yaml" + kubectl create namespace vector + kubectl create namespace datadog -.PHONY: helm-down -helm-down: +.PHONY: cluster-down +down: helm uninstall aws-load-balancer-controller \ --namespace kube-system - helm uninstall vector \ - --namespace vector - -.PHONY: sa-down -sa-down: + kubectl delete namespace vector + kubectl delete namespace datadog eksctl delete iamserviceaccount \ --cluster=vector-demo \ --namespace=kube-system \ --name=aws-load-balancer-controller - -.PHONY: iam-down -iam-down: sleep 15 aws iam delete-policy --policy-arn arn:aws:iam::$(ACCOUNT_ID):policy/AWSLoadBalancerControllerIAMPolicy - -.PHONY: eks-down -eks-down: eksctl delete cluster --name=vector-demo diff --git a/eks-aggregator/README.md b/eks-aggregator/README.md new file mode 100644 index 0000000..e69de29 diff --git a/eks-aggregator/helm/datadog.yaml b/eks-aggregator/helm/datadog.yaml new file mode 100644 index 0000000..4ef71e4 --- /dev/null +++ b/eks-aggregator/helm/datadog.yaml @@ -0,0 +1,18 @@ +datadog: + clusterName: vector-demo + logs: + containerCollectAll: true + enabled: true + orchestratorExplorer: + enabled: true + processAgent: + processCollection: true + prometheusScrape: + enabled: true +agents: + useConfigMap: true + customAgentConfig: + logs_config: + logs_dd_url: "vector-haproxy.vector.svc.cluster.local:8080" + logs_no_ssl: true + use_http: true diff --git a/eks-aggregator/helm/vector.yaml b/eks-aggregator/helm/vector.yaml new file mode 100644 index 0000000..7035841 --- /dev/null +++ b/eks-aggregator/helm/vector.yaml @@ -0,0 +1,120 @@ +role: Stateless-Aggregator + +autoscaling: + enabled: true + minReplicas: 2 + maxReplicas: 5 + +resources: + requests: + cpu: 200m + memory: 256Mi + limits: + cpu: 200m + memory: 256Mi + +env: + - name: DATADOG_API_KEY + valueFrom: + secretKeyRef: + name: vector + key: DATADOG_API_KEY + +podAnnotations: + prometheus.io/scrape: "true" + prometheus.io/port: "9090" + +service: + type: NodePort + +customConfig: + api: + enabled: true + address: 127.0.0.1:8686 + sources: + datadog_agents: + type: datadog_agent + acknowledgements: true + address: 0.0.0.0:8080 + internal_metrics: + type: internal_metrics + transforms: + add_tags: + type: remap + inputs: + - datadog_agents + source: | + .ddtags = string!(.ddtags) + ",sender:vector" + sinks: + to_datadog: + type: datadog_logs + inputs: + - add_tags + default_api_key: "${DATADOG_API_KEY}" + encoding: + codec: json + prom_exporter: + type: prometheus_exporter + inputs: + - internal_metrics + address: 0.0.0.0:9090 + +haproxy: + enabled: true + + podAnnotations: + ad.datadoghq.com/haproxy.logs: '[{"source":"haproxy","service":"vector-haproxy"}]' + ad.datadoghq.com/haproxy.check_names: '["haproxy"]' + 
ad.datadoghq.com/haproxy.init_configs: "[{}]" + ad.datadoghq.com/haproxy.instances: | + [ + { + "use_prometheus": true, + "prometheus_url": "http://%%host%%:1024/metrics" + } + ] + + customConfig: | + global + log stdout local0 + maxconn 4096 + stats socket /tmp/haproxy + hard-stop-after 60s + + defaults + log global + option httplog + option dontlognull + retries 3 + option redispatch + option allbackups + timeout client 5s + timeout server 5s + timeout connect 5s + + resolvers coredns + nameserver dns1 kube-dns.kube-system.svc.cluster.local:53 + resolve_retries 3 + timeout resolve 2s + timeout retry 1s + accepted_payload_size 8192 + hold valid 10s + hold obsolete 15s + + frontend stats + mode http + bind :::1024 + http-request use-service prometheus-exporter if { path /metrics } + + frontend datadog-agents + mode http + bind :::8080 + log global + default_backend datadog-agents + + backend datadog-agents + mode http + balance roundrobin + log global + option tcp-check + server-template srv 10 _datadog-agents._tcp.vector-headless.vector.svc.cluster.local resolvers coredns check diff --git a/eks-load-balancing/aws/iam_policy.json b/eks-aggregator/iam_policy.json similarity index 100% rename from eks-load-balancing/aws/iam_policy.json rename to eks-aggregator/iam_policy.json diff --git a/eks-load-balancing/vector/values.yaml b/eks-load-balancing/vector/values.yaml deleted file mode 100644 index e074c70..0000000 --- a/eks-load-balancing/vector/values.yaml +++ /dev/null @@ -1,28 +0,0 @@ -replicas: 3 -# The Vector Aggregator chart defines a -# vector source that is made available to you. -# You do not need to define a log source. -transforms: - # Adjust as necessary. This remap transform parses a JSON - # formatted log message, emitting a log if the contents are - # not valid JSON - # /docs/reference/transforms/ - remap: - type: remap - inputs: ["vector"] - source: | - structured, err = parse_json(.message) - if err != null { - log("Unable to parse JSON: " + err, level: "error") - } else { - . = merge(., object!(structured)) - } -sinks: - # Adjust as necessary. By default we use the console sink - # to print all data. This allows you to see Vector working. 
- # /docs/reference/sinks/ - stdout: - type: console - inputs: ["remap"] - target: "stdout" - encoding: "json" From 755fc09742029b0ebd58695af7983d35a415ce58 Mon Sep 17 00:00:00 2001 From: Spencer Gilbert Date: Tue, 14 Sep 2021 12:42:18 -0400 Subject: [PATCH 03/13] Use more VRL functions in example remap Signed-off-by: Spencer Gilbert --- eks-aggregator/helm/vector.yaml | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/eks-aggregator/helm/vector.yaml b/eks-aggregator/helm/vector.yaml index 7035841..6ea0b3b 100644 --- a/eks-aggregator/helm/vector.yaml +++ b/eks-aggregator/helm/vector.yaml @@ -44,7 +44,10 @@ customConfig: inputs: - datadog_agents source: | - .ddtags = string!(.ddtags) + ",sender:vector" + .ddtags = parse_key_value!(.ddtags, key_value_delimiter: ":", field_delimiter: ",") + .ddtags.sender = "vector" + .ddtags.vector_aggregator = get_hostname!() + .ddtags = encode_key_value(.ddtags, key_value_delimiter: ":", field_delimiter: ",") sinks: to_datadog: type: datadog_logs From c90a06d0fd0b68bef982ead560a160b7b497b355 Mon Sep 17 00:00:00 2001 From: Spencer Gilbert Date: Tue, 14 Sep 2021 12:42:40 -0400 Subject: [PATCH 04/13] Increase server timeout and don't use nodePort Signed-off-by: Spencer Gilbert --- eks-aggregator/helm/vector.yaml | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/eks-aggregator/helm/vector.yaml b/eks-aggregator/helm/vector.yaml index 6ea0b3b..9791a8a 100644 --- a/eks-aggregator/helm/vector.yaml +++ b/eks-aggregator/helm/vector.yaml @@ -24,9 +24,6 @@ podAnnotations: prometheus.io/scrape: "true" prometheus.io/port: "9090" -service: - type: NodePort - customConfig: api: enabled: true @@ -92,7 +89,7 @@ haproxy: option redispatch option allbackups timeout client 5s - timeout server 5s + timeout server 50s timeout connect 5s resolvers coredns From 94d8f2d910cb9398bdfd9fbd6a4a325833bd907a Mon Sep 17 00:00:00 2001 From: Spencer Gilbert Date: Wed, 15 Sep 2021 12:04:01 -0400 Subject: [PATCH 05/13] Use ALB for load balancing Signed-off-by: Spencer Gilbert --- eks-aggregator/helm/datadog.yaml | 3 ++- eks-aggregator/helm/vector.yaml | 12 +++++++++--- eks-aggregator/http-ingress.yaml | 24 ++++++++++++++++++++++++ 3 files changed, 35 insertions(+), 4 deletions(-) create mode 100644 eks-aggregator/http-ingress.yaml diff --git a/eks-aggregator/helm/datadog.yaml b/eks-aggregator/helm/datadog.yaml index 4ef71e4..7aad3b9 100644 --- a/eks-aggregator/helm/datadog.yaml +++ b/eks-aggregator/helm/datadog.yaml @@ -13,6 +13,7 @@ agents: useConfigMap: true customAgentConfig: logs_config: - logs_dd_url: "vector-haproxy.vector.svc.cluster.local:8080" + logs_dd_url: "internal-k8s-vector-vector-e67b4af534-965797666.us-east-1.elb.amazonaws.com:8080" logs_no_ssl: true use_http: true + use_v2_api: false diff --git a/eks-aggregator/helm/vector.yaml b/eks-aggregator/helm/vector.yaml index 9791a8a..0ff9e41 100644 --- a/eks-aggregator/helm/vector.yaml +++ b/eks-aggregator/helm/vector.yaml @@ -27,7 +27,7 @@ podAnnotations: customConfig: api: enabled: true - address: 127.0.0.1:8686 + address: 0.0.0.0:8686 sources: datadog_agents: type: datadog_agent @@ -45,6 +45,12 @@ customConfig: .ddtags.sender = "vector" .ddtags.vector_aggregator = get_hostname!() .ddtags = encode_key_value(.ddtags, key_value_delimiter: ":", field_delimiter: ",") + + parsed = + parse_json(.message) ?? + parse_syslog(.message) ?? 
+ {} + merge(., object!(parsed)) sinks: to_datadog: type: datadog_logs @@ -60,7 +66,7 @@ customConfig: address: 0.0.0.0:9090 haproxy: - enabled: true + enabled: false podAnnotations: ad.datadoghq.com/haproxy.logs: '[{"source":"haproxy","service":"vector-haproxy"}]' @@ -117,4 +123,4 @@ haproxy: balance roundrobin log global option tcp-check - server-template srv 10 _datadog-agents._tcp.vector-headless.vector.svc.cluster.local resolvers coredns check + server-template srv 5 _datadog-agents._tcp.vector-headless.vector.svc.cluster.local resolvers coredns check diff --git a/eks-aggregator/http-ingress.yaml b/eks-aggregator/http-ingress.yaml new file mode 100644 index 0000000..09c294f --- /dev/null +++ b/eks-aggregator/http-ingress.yaml @@ -0,0 +1,24 @@ +# Temporary resource until https://github.com/vectordotdev/helm-charts/issues/52 +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: vector + annotations: + alb.ingress.kubernetes.io/scheme: internal + alb.ingress.kubernetes.io/healthcheck-port: '8686' + alb.ingress.kubernetes.io/healthcheck-path: /health + alb.ingress.kubernetes.io/listen-ports: '[{"HTTP": 8080}]' + alb.ingress.kubernetes.io/target-type: ip + kubernetes.io/ingress.class: alb +spec: + rules: + - host: internal-k8s-vector-vector-e67b4af534-965797666.us-east-1.elb.amazonaws.com + http: + paths: + - path: / + pathType: Prefix + backend: + service: + name: vector + port: + name: datadog-agents From ae59f5ef9a95e9300b8679ce7f0be38c116e32d9 Mon Sep 17 00:00:00 2001 From: Spencer Gilbert Date: Wed, 15 Sep 2021 12:57:05 -0400 Subject: [PATCH 06/13] Remove unused haproxy values Signed-off-by: Spencer Gilbert --- eks-aggregator/helm/vector.yaml | 60 --------------------------------- 1 file changed, 60 deletions(-) diff --git a/eks-aggregator/helm/vector.yaml b/eks-aggregator/helm/vector.yaml index 0ff9e41..5582573 100644 --- a/eks-aggregator/helm/vector.yaml +++ b/eks-aggregator/helm/vector.yaml @@ -64,63 +64,3 @@ customConfig: inputs: - internal_metrics address: 0.0.0.0:9090 - -haproxy: - enabled: false - - podAnnotations: - ad.datadoghq.com/haproxy.logs: '[{"source":"haproxy","service":"vector-haproxy"}]' - ad.datadoghq.com/haproxy.check_names: '["haproxy"]' - ad.datadoghq.com/haproxy.init_configs: "[{}]" - ad.datadoghq.com/haproxy.instances: | - [ - { - "use_prometheus": true, - "prometheus_url": "http://%%host%%:1024/metrics" - } - ] - - customConfig: | - global - log stdout local0 - maxconn 4096 - stats socket /tmp/haproxy - hard-stop-after 60s - - defaults - log global - option httplog - option dontlognull - retries 3 - option redispatch - option allbackups - timeout client 5s - timeout server 50s - timeout connect 5s - - resolvers coredns - nameserver dns1 kube-dns.kube-system.svc.cluster.local:53 - resolve_retries 3 - timeout resolve 2s - timeout retry 1s - accepted_payload_size 8192 - hold valid 10s - hold obsolete 15s - - frontend stats - mode http - bind :::1024 - http-request use-service prometheus-exporter if { path /metrics } - - frontend datadog-agents - mode http - bind :::8080 - log global - default_backend datadog-agents - - backend datadog-agents - mode http - balance roundrobin - log global - option tcp-check - server-template srv 5 _datadog-agents._tcp.vector-headless.vector.svc.cluster.local resolvers coredns check From c29d05a2c55317842366b956e9210fdc641ae8d1 Mon Sep 17 00:00:00 2001 From: Spencer Gilbert Date: Wed, 15 Sep 2021 13:30:03 -0400 Subject: [PATCH 07/13] Actually assign merged event Signed-off-by: Spencer Gilbert --- 
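Note: in VRL, `merge` returns the merged object rather than mutating the event in place, so without assigning the result back to `.` the parsed fields were silently discarded. A minimal sketch of the difference (hypothetical event, for illustration only):

```
# Hypothetical input event, for illustration only:
# . = { "message": "{\"level\":\"info\"}", "ddtags": "env:prod" }
parsed = object!(parse_json!(.message))

merge(., parsed)     # computes the merged object, but `.` is left unchanged
. = merge(., parsed) # the assignment is what actually updates the event
```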
eks-aggregator/helm/vector.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/eks-aggregator/helm/vector.yaml b/eks-aggregator/helm/vector.yaml index 5582573..5895fbd 100644 --- a/eks-aggregator/helm/vector.yaml +++ b/eks-aggregator/helm/vector.yaml @@ -50,7 +50,7 @@ customConfig: parse_json(.message) ?? parse_syslog(.message) ?? {} - merge(., object!(parsed)) + . = merge(., object!(parsed)) sinks: to_datadog: type: datadog_logs From c5b1a3ff5ae2265279f71c2f69070ff69f8a8e17 Mon Sep 17 00:00:00 2001 From: Spencer Gilbert Date: Thu, 16 Sep 2021 10:26:31 -0400 Subject: [PATCH 08/13] Use new dd_logs sink, more apache parsing Signed-off-by: Spencer Gilbert --- eks-aggregator/helm/datadog.yaml | 1 + eks-aggregator/helm/vector.yaml | 24 ++++++++++++++++++++++-- eks-aggregator/http-ingress.yaml | 1 + 3 files changed, 24 insertions(+), 2 deletions(-) diff --git a/eks-aggregator/helm/datadog.yaml b/eks-aggregator/helm/datadog.yaml index 7aad3b9..2ce8547 100644 --- a/eks-aggregator/helm/datadog.yaml +++ b/eks-aggregator/helm/datadog.yaml @@ -1,4 +1,5 @@ datadog: + logLevel: debug clusterName: vector-demo logs: containerCollectAll: true diff --git a/eks-aggregator/helm/vector.yaml b/eks-aggregator/helm/vector.yaml index 5895fbd..4310fc0 100644 --- a/eks-aggregator/helm/vector.yaml +++ b/eks-aggregator/helm/vector.yaml @@ -1,5 +1,8 @@ role: Stateless-Aggregator +image: + tag: nightly-2021-09-16-distroless-libc + autoscaling: enabled: true minReplicas: 2 @@ -36,7 +39,7 @@ customConfig: internal_metrics: type: internal_metrics transforms: - add_tags: + remap: type: remap inputs: - datadog_agents @@ -49,13 +52,30 @@ customConfig: parsed = parse_json(.message) ?? parse_syslog(.message) ?? + parse_apache_log(.message, format: "common") ?? {} + + if is_integer(parsed.status) { + parsed.http.status_code = del(parsed.status) + if parsed.http.status_code >= 500 && parsed.http.status_code < 600 { parsed.status = "error" } + if parsed.http.status_code >= 400 && parsed.http.status_code < 500 { parsed.status = "warn" } + if parsed.http.status_code >= 300 && parsed.http.status_code < 400 { parsed.status = "info" } + if parsed.http.status_code >= 200 && parsed.http.status_code < 300 { parsed.status = "ok" } + } + + if exists(parsed.method) { parsed.http.method = del(parsed.method) } + if exists(parsed.referer) { parsed.http.referer = del(parsed.referer) } + if exists(parsed.path) { parsed.http.url_details.path = del(parsed.path) } + if exists(parsed.host) { parsed.network.client.ip = del(parsed.host) } + . 
= merge(., object!(parsed)) + + .uuid = uuid_v4() sinks: to_datadog: type: datadog_logs inputs: - - add_tags + - remap default_api_key: "${DATADOG_API_KEY}" encoding: codec: json diff --git a/eks-aggregator/http-ingress.yaml b/eks-aggregator/http-ingress.yaml index 09c294f..3df7290 100644 --- a/eks-aggregator/http-ingress.yaml +++ b/eks-aggregator/http-ingress.yaml @@ -8,6 +8,7 @@ metadata: alb.ingress.kubernetes.io/healthcheck-port: '8686' alb.ingress.kubernetes.io/healthcheck-path: /health alb.ingress.kubernetes.io/listen-ports: '[{"HTTP": 8080}]' + alb.ingress.kubernetes.io/target-group-attributes: load_balancing.algorithm.type=least_outstanding_requests alb.ingress.kubernetes.io/target-type: ip kubernetes.io/ingress.class: alb spec: From 2fe8630abb902c97bcb1a6514643c5938c03dcc8 Mon Sep 17 00:00:00 2001 From: Spencer Gilbert Date: Fri, 17 Sep 2021 16:54:56 -0400 Subject: [PATCH 09/13] Use chart's builtin ingress and update remap to parse agent logs Signed-off-by: Spencer Gilbert --- eks-aggregator/helm/vector.yaml | 55 +++++++++++++++++++++----------- eks-aggregator/http-ingress.yaml | 25 --------------- 2 files changed, 36 insertions(+), 44 deletions(-) delete mode 100644 eks-aggregator/http-ingress.yaml diff --git a/eks-aggregator/helm/vector.yaml b/eks-aggregator/helm/vector.yaml index 4310fc0..4bc7ac8 100644 --- a/eks-aggregator/helm/vector.yaml +++ b/eks-aggregator/helm/vector.yaml @@ -43,40 +43,40 @@ customConfig: type: remap inputs: - datadog_agents + drop_on_abort: true source: | + # Parse the received .ddtags field so we can more easily access the contained tags .ddtags = parse_key_value!(.ddtags, key_value_delimiter: ":", field_delimiter: ",") .ddtags.sender = "vector" .ddtags.vector_aggregator = get_hostname!() - .ddtags = encode_key_value(.ddtags, key_value_delimiter: ":", field_delimiter: ",") - parsed = - parse_json(.message) ?? - parse_syslog(.message) ?? - parse_apache_log(.message, format: "common") ?? - {} + if .service == "agent" { + parsed, err = + parse_grok(.message, s'(?%{YEAR}-%{MONTHNUM}-%{MONTHDAY} %{TIME}) UTC \| %{NOTSPACE:agent} \| %{LOGLEVEL:status} \| \(%{NOTSPACE:filename}:%{NUMBER:lineno} in %{WORD:process}\) \|( %{NOTSPACE:kv} \|)?( - \|)?( \(%{NOTSPACE:pyFilename}:%{NUMBER:pyLineno}\) \|)?%{GREEDYDATA}', remove_empty: true) ?? + parse_grok(.message, s'(?%{YEAR}-%{MONTHNUM}-%{MONTHDAY} %{TIME}) UTC \| %{LOGLEVEL:status} \| \(%{NOTSPACE:filename}:%{NUMBER:lineno} in %{WORD:process}\)%{GREEDYDATA}') ?? + parse_grok(.message, s'(?%{YEAR}-%{MONTHNUM}-%{MONTHDAY} %{TIME}) UTC \| %{NOTSPACE:agent} \| %{LOGLEVEL:status}\s+\| %{WORD:class} \| %{GREEDYDATA}') + if err != null { log("Failed to parse agent log: " + string!(.message), level: "error"); abort } - if is_integer(parsed.status) { - parsed.http.status_code = del(parsed.status) - if parsed.http.status_code >= 500 && parsed.http.status_code < 600 { parsed.status = "error" } - if parsed.http.status_code >= 400 && parsed.http.status_code < 500 { parsed.status = "warn" } - if parsed.http.status_code >= 300 && parsed.http.status_code < 400 { parsed.status = "info" } - if parsed.http.status_code >= 200 && parsed.http.status_code < 300 { parsed.status = "ok" } - } + parsed |= parse_key_value(del(parsed.kv), key_value_delimiter: ":", field_delimiter: ",") ?? 
{} - if exists(parsed.method) { parsed.http.method = del(parsed.method) } - if exists(parsed.referer) { parsed.http.referer = del(parsed.referer) } - if exists(parsed.path) { parsed.http.url_details.path = del(parsed.path) } - if exists(parsed.host) { parsed.network.client.ip = del(parsed.host) } + ts = parse_timestamp!(parsed.timestamp, format: "%F %T") + parsed.timestamp = to_unix_timestamp(ts, unit: "milliseconds") + parsed.lineno = to_int!(parsed.lineno) + if exists(parsed.pyLineno) { parsed.pyLineno = to_int!(parsed.pyLineno) } - . = merge(., object!(parsed)) + . = merge(., parsed) + } - .uuid = uuid_v4() + # Re-encode Datadog tags as a string for the `datadog_logs` sink + .ddtags = encode_key_value!(.ddtags, key_value_delimiter: ":", field_delimiter: ",") sinks: to_datadog: type: datadog_logs inputs: - remap default_api_key: "${DATADOG_API_KEY}" + batch: + timeout_secs: 5 encoding: codec: json prom_exporter: @@ -84,3 +84,20 @@ customConfig: inputs: - internal_metrics address: 0.0.0.0:9090 + +ingress: + enabled: true + annotations: + alb.ingress.kubernetes.io/scheme: internal + alb.ingress.kubernetes.io/healthcheck-port: '8686' + alb.ingress.kubernetes.io/healthcheck-path: /health + alb.ingress.kubernetes.io/listen-ports: '[{"HTTP": 8080}]' + alb.ingress.kubernetes.io/target-type: ip + kubernetes.io/ingress.class: alb + hosts: + - host: internal-k8s-vector-vector-e67b4af534-965797666.us-east-1.elb.amazonaws.com + paths: + - path: / + pathType: Prefix + port: + name: datadog-agents diff --git a/eks-aggregator/http-ingress.yaml b/eks-aggregator/http-ingress.yaml deleted file mode 100644 index 3df7290..0000000 --- a/eks-aggregator/http-ingress.yaml +++ /dev/null @@ -1,25 +0,0 @@ -# Temporary resource until https://github.com/vectordotdev/helm-charts/issues/52 -apiVersion: networking.k8s.io/v1 -kind: Ingress -metadata: - name: vector - annotations: - alb.ingress.kubernetes.io/scheme: internal - alb.ingress.kubernetes.io/healthcheck-port: '8686' - alb.ingress.kubernetes.io/healthcheck-path: /health - alb.ingress.kubernetes.io/listen-ports: '[{"HTTP": 8080}]' - alb.ingress.kubernetes.io/target-group-attributes: load_balancing.algorithm.type=least_outstanding_requests - alb.ingress.kubernetes.io/target-type: ip - kubernetes.io/ingress.class: alb -spec: - rules: - - host: internal-k8s-vector-vector-e67b4af534-965797666.us-east-1.elb.amazonaws.com - http: - paths: - - path: / - pathType: Prefix - backend: - service: - name: vector - port: - name: datadog-agents From 2904c4d459259bf210eaeff1cf0d455064a8b970 Mon Sep 17 00:00:00 2001 From: Spencer Gilbert Date: Fri, 17 Sep 2021 16:57:27 -0400 Subject: [PATCH 10/13] Remove my test alb hostname from values file Signed-off-by: Spencer Gilbert --- eks-aggregator/helm/vector.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/eks-aggregator/helm/vector.yaml b/eks-aggregator/helm/vector.yaml index 4bc7ac8..b35a549 100644 --- a/eks-aggregator/helm/vector.yaml +++ b/eks-aggregator/helm/vector.yaml @@ -95,7 +95,7 @@ ingress: alb.ingress.kubernetes.io/target-type: ip kubernetes.io/ingress.class: alb hosts: - - host: internal-k8s-vector-vector-e67b4af534-965797666.us-east-1.elb.amazonaws.com + - host: vector.mycompany.tld paths: - path: / pathType: Prefix From 903c88db208ab1773a1606f0252d9404fababeab Mon Sep 17 00:00:00 2001 From: Spencer Gilbert Date: Mon, 20 Sep 2021 12:17:33 -0400 Subject: [PATCH 11/13] Add readme for eks demo Signed-off-by: Spencer Gilbert --- eks-aggregator/README.md | 68 ++++++++++++++++++++++++++++++++ 
eks-aggregator/helm/datadog.yaml | 2 +- eks-aggregator/ingress.yaml | 37 +++++++++++++++++ 3 files changed, 106 insertions(+), 1 deletion(-) create mode 100644 eks-aggregator/ingress.yaml diff --git a/eks-aggregator/README.md b/eks-aggregator/README.md index e69de29..a083241 100644 --- a/eks-aggregator/README.md +++ b/eks-aggregator/README.md @@ -0,0 +1,68 @@ +# EKS Stateless Aggregator Demo + +## Prerequisites + +- [helm](https://helm.sh/docs/intro/install/) +- [kubectl](https://kubernetes.io/docs/tasks/tools/) +- [ekctl](https://eksctl.io/introduction/#installation) - if creating a new cluster with the Makefile + +Your EKS cluster will need the [AWS Load Balancer Controller](https://github.com/kubernetes-sigs/aws-load-balancer-controller) installed, this will be installed via the `make` targets or you +can follow [Amazon's instructions](https://docs.aws.amazon.com/eks/latest/userguide/aws-load-balancer-controller.html) for your own cluster. Additionally you'll also need a Datadog API key for Vector and the Datadog Agents. + +## Getting started + +Add the necessary Helm repositories for the Vector and Datadog charts: + +```shell +helm repo add datadog https://helm.datadoghq.com +helm repo add vector https://helm.vector.dev +helm repo update +``` + +If you need to provision an EKS cluster with the AWS load balancer controller, you can use the included Makefile by running: + +```shell +ACCOUNT_ID= make cluster-up +``` + +The following command will install Vector as an Aggregator using an Application Load Balancer to route requests from Datadog Agents. +Vector is configured to process Datadog Agent logs in a similar fashion Datadog's [Pipelines](https://docs.datadoghq.com/logs/log_configuration/pipelines/) +feature, allowing you to move your log processing onto your own hardware. + +```shell +helm upgrade --install vector vector/vector --devel \ + --namespace vector --values helm/vector.yaml \ + --set secrets.generic.DATADOG_API_KEY= \ + --set ingress.hosts[0].host= +``` + +Once your ALB is provisioned you can run the following command to extract it's generated hostname. Run the above command again, substituting your generated hostname in the last `set` option. + +```shell +kubectl --namespace vector get ingress vector \ + --output go-template='{{(index .status.loadBalancer.ingress 0 ).hostname}}' +``` + +Then install your Datadog Agents substituting the hostname from the previous step. + +```shell +helm upgrade --install datadog datadog/dataodg \ + --namespace datadog --values helm/datadog.yaml \ + --set datadog.apiKey= \ + --set agents.customAgentConfig.logs_config.logs_dd_url=":8080" + ``` + +Once all the pods have started, you should begin to see logs being ingested to your [Datadog account](https://app.datadoghq.com/logs) that are being aggregated and parsed by Vector. + +## Cleaning up + +The _cluster-down_ target will delete the Namespaces and Cluster created during this demo. 
+ +```shell +make cluster-down +``` + +## Notes + +- A nightly image is currently used to leverage our rewritten `datadog_logs` sink +- The `--devel` option is used to access our currently _pre-release_ [`vector`](https://github.com/vectordotdev/helm-charts/blob/develop/charts/vector/README.md) chart diff --git a/eks-aggregator/helm/datadog.yaml b/eks-aggregator/helm/datadog.yaml index 2ce8547..3c5d1ee 100644 --- a/eks-aggregator/helm/datadog.yaml +++ b/eks-aggregator/helm/datadog.yaml @@ -14,7 +14,7 @@ agents: useConfigMap: true customAgentConfig: logs_config: - logs_dd_url: "internal-k8s-vector-vector-e67b4af534-965797666.us-east-1.elb.amazonaws.com:8080" + logs_dd_url: "vector.mycompany.tld:8080" logs_no_ssl: true use_http: true use_v2_api: false diff --git a/eks-aggregator/ingress.yaml b/eks-aggregator/ingress.yaml new file mode 100644 index 0000000..5fd2e16 --- /dev/null +++ b/eks-aggregator/ingress.yaml @@ -0,0 +1,37 @@ +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + annotations: + alb.ingress.kubernetes.io/backend-protocol-version: GRPC + alb.ingress.kubernetes.io/certificate-arn: arn:aws:acm:us-east-1:071959437513:certificate/1ec6a7ed-8edc-478a-a6de-be473eb32e11 + alb.ingress.kubernetes.io/healthcheck-path: /vector.Vector/HealthCheck + alb.ingress.kubernetes.io/listen-ports: '[{"HTTPS": 443}, {"HTTPS": 9000}]' + alb.ingress.kubernetes.io/scheme: internet-facing + alb.ingress.kubernetes.io/success-codes: "0" + kubectl.kubernetes.io/last-applied-configuration: | + {"apiVersion":"networking.k8s.io/v1","kind":"Ingress","metadata":{"annotations":{"alb.ingress.kubernetes.io/backend-protocol-version":"GRPC","alb.ingress.kubernetes.io/certificate-arn":"arn:aws:acm:us-east-1:071959437513:certificate/1ec6a7ed-8edc-478a-a6de-be473eb32e11","alb.ingress.kubernetes.io/healthcheck-path":"/vector.Vector/HealthCheck","alb.ingress.kubernetes.io/listen-ports":"[{\"HTTPS\": 443}, {\"HTTPS\": 9000}]","alb.ingress.kubernetes.io/scheme":"internet-facing","kubernetes.io/ingress.class":"alb"},"name":"vector-aggregator","namespace":"vector"},"spec":{"rules":[{"host":"aggregator-demo.vector.dev","http":{"paths":[{"backend":{"service":{"name":"vector-aggregator","port":{"number":80}}},"path":"/*","pathType":"ImplementationSpecific"}]}}]}} + kubernetes.io/ingress.class: alb + creationTimestamp: "2021-08-31T21:34:34Z" + finalizers: + - ingress.k8s.aws/resources + generation: 1 + name: vector-aggregator + namespace: vector + resourceVersion: "187293" + uid: e2be95e2-87c4-47e4-927c-8a470c345569 +spec: + rules: + - host: aggregator-demo.vector.dev + http: + paths: + - backend: + service: + name: vector-aggregator + port: + number: 80 + path: /* + pathType: ImplementationSpecific +status: + loadBalancer: + ingress: + - hostname: k8s-vector-vectorag-3df3b33fae-1263098738.us-east-1.elb.amazonaws.com From 2984a666d6e2056fca94bae9e35302d30adacbaa Mon Sep 17 00:00:00 2001 From: Spencer Gilbert Date: Tue, 21 Sep 2021 10:41:15 -0400 Subject: [PATCH 12/13] Remove old grpc ingress, fix make target names Signed-off-by: Spencer Gilbert --- eks-aggregator/Makefile | 4 ++-- eks-aggregator/ingress.yaml | 37 ------------------------------------- 2 files changed, 2 insertions(+), 39 deletions(-) delete mode 100644 eks-aggregator/ingress.yaml diff --git a/eks-aggregator/Makefile b/eks-aggregator/Makefile index 9579866..0eff92f 100644 --- a/eks-aggregator/Makefile +++ b/eks-aggregator/Makefile @@ -2,7 +2,7 @@ ifndef ACCOUNT_ID $(error ACCOUNT_ID is not set) endif .PHONY: cluster-up -up: +cluster-up: eksctl create 
cluster --with-oidc --name vector-demo aws iam create-policy \ --policy-name AWSLoadBalancerControllerIAMPolicy \ @@ -27,7 +27,7 @@ up: kubectl create namespace datadog .PHONY: cluster-down -down: +cluster-down: helm uninstall aws-load-balancer-controller \ --namespace kube-system kubectl delete namespace vector diff --git a/eks-aggregator/ingress.yaml b/eks-aggregator/ingress.yaml deleted file mode 100644 index 5fd2e16..0000000 --- a/eks-aggregator/ingress.yaml +++ /dev/null @@ -1,37 +0,0 @@ -apiVersion: networking.k8s.io/v1 -kind: Ingress -metadata: - annotations: - alb.ingress.kubernetes.io/backend-protocol-version: GRPC - alb.ingress.kubernetes.io/certificate-arn: arn:aws:acm:us-east-1:071959437513:certificate/1ec6a7ed-8edc-478a-a6de-be473eb32e11 - alb.ingress.kubernetes.io/healthcheck-path: /vector.Vector/HealthCheck - alb.ingress.kubernetes.io/listen-ports: '[{"HTTPS": 443}, {"HTTPS": 9000}]' - alb.ingress.kubernetes.io/scheme: internet-facing - alb.ingress.kubernetes.io/success-codes: "0" - kubectl.kubernetes.io/last-applied-configuration: | - {"apiVersion":"networking.k8s.io/v1","kind":"Ingress","metadata":{"annotations":{"alb.ingress.kubernetes.io/backend-protocol-version":"GRPC","alb.ingress.kubernetes.io/certificate-arn":"arn:aws:acm:us-east-1:071959437513:certificate/1ec6a7ed-8edc-478a-a6de-be473eb32e11","alb.ingress.kubernetes.io/healthcheck-path":"/vector.Vector/HealthCheck","alb.ingress.kubernetes.io/listen-ports":"[{\"HTTPS\": 443}, {\"HTTPS\": 9000}]","alb.ingress.kubernetes.io/scheme":"internet-facing","kubernetes.io/ingress.class":"alb"},"name":"vector-aggregator","namespace":"vector"},"spec":{"rules":[{"host":"aggregator-demo.vector.dev","http":{"paths":[{"backend":{"service":{"name":"vector-aggregator","port":{"number":80}}},"path":"/*","pathType":"ImplementationSpecific"}]}}]}} - kubernetes.io/ingress.class: alb - creationTimestamp: "2021-08-31T21:34:34Z" - finalizers: - - ingress.k8s.aws/resources - generation: 1 - name: vector-aggregator - namespace: vector - resourceVersion: "187293" - uid: e2be95e2-87c4-47e4-927c-8a470c345569 -spec: - rules: - - host: aggregator-demo.vector.dev - http: - paths: - - backend: - service: - name: vector-aggregator - port: - number: 80 - path: /* - pathType: ImplementationSpecific -status: - loadBalancer: - ingress: - - hostname: k8s-vector-vectorag-3df3b33fae-1263098738.us-east-1.elb.amazonaws.com From dc29456d9ac1af78fa4dc87a4f413643e695a90a Mon Sep 17 00:00:00 2001 From: Spencer Gilbert Date: Tue, 21 Sep 2021 16:22:21 -0400 Subject: [PATCH 13/13] Clear up instructions for setting host value Signed-off-by: Spencer Gilbert --- eks-aggregator/README.md | 17 +++++++++++++---- 1 file changed, 13 insertions(+), 4 deletions(-) diff --git a/eks-aggregator/README.md b/eks-aggregator/README.md index a083241..cab4676 100644 --- a/eks-aggregator/README.md +++ b/eks-aggregator/README.md @@ -33,20 +33,29 @@ feature, allowing you to move your log processing onto your own hardware. helm upgrade --install vector vector/vector --devel \ --namespace vector --values helm/vector.yaml \ --set secrets.generic.DATADOG_API_KEY= \ - --set ingress.hosts[0].host= + --set ingress.hosts[0].host=DUMMY_VAL ``` -Once your ALB is provisioned you can run the following command to extract it's generated hostname. Run the above command again, substituting your generated hostname in the last `set` option. +Once your ALB is provisioned you can run the following command to extract it's generated hostname to replace the DUMMY_VAL above. 
 ```shell
-kubectl --namespace vector get ingress vector \
-  --output go-template='{{(index .status.loadBalancer.ingress 0 ).hostname}}'
+export ALB_HOSTNAME=$(kubectl --namespace vector get ingress vector \
+  --output go-template='{{(index .status.loadBalancer.ingress 0 ).hostname}}')
 ```
 
+The following command will upgrade your `vector` release with the created ALB hostname.
+
+```shell
+helm upgrade --install vector vector/vector --devel \
+  --namespace vector --values helm/vector.yaml \
+  --set secrets.generic.DATADOG_API_KEY=<DATADOG API KEY> \
+  --set ingress.hosts[0].host=${ALB_HOSTNAME}
+```
+
 Then install your Datadog Agents substituting the hostname from the previous step.
 
 ```shell
-helm upgrade --install datadog datadog/dataodg \
+helm upgrade --install datadog datadog/datadog \
   --namespace datadog --values helm/datadog.yaml \
   --set datadog.apiKey=<DATADOG API KEY> \
   --set agents.customAgentConfig.logs_config.logs_dd_url="<ALB HOSTNAME>:8080"