From 9d31d75be56bcdf5dedcb2af17c5ad1509a64414 Mon Sep 17 00:00:00 2001 From: ilyes Ajroud Date: Wed, 29 Jan 2025 10:08:45 +0000 Subject: [PATCH 1/9] initial draft --- deploy/kamaji-aws.env | 32 ++ docs/content/guides/kamaji-aws-deployment.md | 455 +++++++++++++++++++ 2 files changed, 487 insertions(+) create mode 100644 deploy/kamaji-aws.env create mode 100644 docs/content/guides/kamaji-aws-deployment.md diff --git a/deploy/kamaji-aws.env b/deploy/kamaji-aws.env new file mode 100644 index 00000000..9fa89e2a --- /dev/null +++ b/deploy/kamaji-aws.env @@ -0,0 +1,32 @@ +# aws parameters +export KAMAJI_REGION=eu-west-3 +export KAMAJI_NODE_NG=${KAMAJI_CLUSTER}-${KAMAJI_REGION}-ng1 +export KAMAJI_NODE_TYPE=m5.large +export KAMAJI_CLUSTER=kamaji +export KAMAJI_VPC_NAME=kamaji-vpc +export KAMAJI_VPC_CIDR=10.0.0.0/16 +export KAMAJI_SUBNET_NAME=kamaji-subnet +export KAMAJI_SUBNET1_ADDRESS=10.0.1.0/24 +export KAMAJI_SUBNET2_ADDRESS=10.0.2.0/24 + +# kamaji parameters +export KAMAJI_NAMESPACE=kamaji-system + +# tenant cluster parameters +export TENANT_NAMESPACE=default +export TENANT_NAME=tenant-00 +export TENANT_DOMAIN=$KAMAJI_REGION.aws.com +export TENANT_VERSION=v1.26.0 +export TENANT_PORT=6443 # port used to expose the tenant api server +export TENANT_PROXY_PORT=8132 # port used to expose the konnectivity server +export TENANT_POD_CIDR=10.36.0.0/16 +export TENANT_SVC_CIDR=10.96.0.0/16 +export TENANT_DNS_SERVICE=10.96.0.10 + +export TENANT_VM_SIZE=Standard_D2ds_v4 +export TENANT_VM_IMAGE=UbuntuLTS +export TENANT_SUBNET_NAME=$TENANT_NAME-subnet +export TENANT_SUBNET_ADDRESS=10.225.0.0/16 +export TENANT_VMSS=$TENANT_NAME-vmss + + diff --git a/docs/content/guides/kamaji-aws-deployment.md b/docs/content/guides/kamaji-aws-deployment.md new file mode 100644 index 00000000..c858e90e --- /dev/null +++ b/docs/content/guides/kamaji-aws-deployment.md @@ -0,0 +1,455 @@ +# Setup Kamaji on aws +This guide will lead you through the process of creating a working Kamaji setup on on AWS. + +The guide requires: + +- a bootstrap machine +- a Kubernetes cluster (EKS) to run the Admin and Tenant Control Planes +- an arbitrary number of machines to host `Tenant`s' workloads + +## Summary + + * [Prepare the bootstrap workspace](#prepare-the-bootstrap-workspace) + * [Access Management Cluster](#access-management-cluster) + * [Install Kamaji](#install-kamaji) + * [Create Tenant Cluster](#create-tenant-cluster) + * [Cleanup](#cleanup) + +## Prepare the bootstrap workspace +On the bootstrap machine, clone the repo and prepare the workspace directory: + +```bash +git clone https://github.com/clastix/kamaji +cd kamaji/deploy +``` + +We assume you have installed on the bootstrap machine: + +- [kubectl](https://kubernetes.io/docs/tasks/tools/#kubectl) +- [kubeadm](https://kubernetes.io/docs/tasks/tools/#kubeadm) +- [helm](https://helm.sh/docs/intro/install/) +- [jq](https://stedolan.github.io/jq/) +- [AWS CLI](https://docs.aws.amazon.com/cli/latest/userguide/getting-started-install.html) +- [eksctl](https://eksctl.io/installation/) + +Make sure you have a valid AWS Account, and login to AWS: + +> The easiest way to get started with AWS is to create [access keys](https://docs.aws.amazon.com/cli/v1/userguide/cli-authentication-user.html#cli-authentication-user-configure.title) associated to your account + +```bash +aws configure +``` + + +## Create Management cluster + +In Kamaji, a Management Cluster is a regular Kubernetes cluster which hosts zero to many Tenant Cluster Control Planes. 
The Management Cluster acts as cockpit for all the Tenant clusters and implements Monitoring, Logging, and Governance of all the Kamaji setup, including all Tenant Clusters. For this guide, we're going to use an instance of AWS Kubernetes Service (EKS) as Management Cluster. + +Throughout the following instructions, shell variables are used to indicate values that you should adjust to your own AWS environment: + +### Create networks + +In this section, we will create the required VPC and the associated subnets that will host the EKS cluster. We will also create the EIP (Elastic IPs) that will be used as IPs for tenant cluster + +```bash +source kamaji-AWS.env +# create vpc +aws ec2 create-vpc --cidr-block $KAMAJI_VPC_CIDR --region $KAMAJI_REGION +# retreive subnet +export KAMAJI_VPC_ID=$(aws ec2 describe-vpcs --filters "Name=cidr-block,Values=$KAMAJI_VPC_CIDR" --query "Vpcs[0].VpcId" --output text) +# create subnets +aws ec2 create-subnet --vpc-id $KAMAJI_VPC_ID --cidr-block $KAMAJI_SUBNET1_ADDRESS --availability-zone ${KAMAJI_REGION}a +aws ec2 create-subnet --vpc-id $KAMAJI_VPC_ID --cidr-block $KAMAJI_SUBNET2_ADDRESS --availability-zone ${KAMAJI_REGION}b +# retreive subnets +export KAMAJI_SUBNET1_ID=$(aws ec2 describe-subnets --filter "Name=vpc-id,Values=$KAMAJI_VPC_ID" --filter "Name=cidr-block,Values=$KAMAJI_SUBNET1_ADDRESS" --query "Subnets[0].SubnetId" --output text) +export KAMAJI_SUBNET2_ID=$(aws ec2 describe-subnets --filter "Name=vpc-id,Values=$KAMAJI_VPC_ID" --filter "Name=cidr-block,Values=$KAMAJI_SUBNET2_ADDRESS" --query "Subnets[0].SubnetId" --output text) + + +export IGW_ID=$(aws ec2 create-internet-gateway --query "InternetGateway.InternetGatewayId" --output text) +aws ec2 attach-internet-gateway --vpc-id $KAMAJI_VPC_ID --internet-gateway-id $IGW_ID + +# create nat gateway and attach it to the VPC + +export EIP_ALLOCATION_ID=$(aws ec2 allocate-address --query 'AllocationId' --output text) + +NAT_GATEWAY_ID=$(aws ec2 create-nat-gateway \ + --subnet-id $KAMAJI_SUBNET1_ID \ + --allocation-id $EIP_ALLOCATION_ID \ + --query 'NatGateway.NatGatewayId' \ + --output text) + +aws ec2 wait nat-gateway-available --nat-gateway-ids $NAT_GATEWAY_ID + +PRIVATE_ROUTE_TABLE_ID=$(aws ec2 describe-route-tables \ + --filters "Name=vpc-id,Values=$KAMAJI_VPC_ID" \ + --query "RouteTables[*].RouteTableId" \ + --output text) + +aws ec2 create-route \ + --route-table-id $PRIVATE_ROUTE_TABLE_ID \ + --destination-cidr-block 0.0.0.0/0 \ + --nat-gateway-id $NAT_GATEWAY_ID + + + +``` +### create EKS cluster +Once the cluster formation succeeds, get credentials to access the cluster as admin + +```bash +cat >eks-cluster.yaml < ${TENANT_NAMESPACE}-${TENANT_NAME}-tcp.yaml < ${TENANT_NAMESPACE}-${TENANT_NAME}.kubeconfig + +kubectl --kubeconfig=${TENANT_NAMESPACE}-${TENANT_NAME}.kubeconfig config \ + set-cluster ${TENANT_NAME} \ + --server https://${TENANT_NAME}.${KAMAJI_REGION}.cloudapp.AWS.com +``` + +and let's check it out: + +``` +kubectl --kubeconfig=${TENANT_NAMESPACE}-${TENANT_NAME}.kubeconfig get svc + +NAMESPACE NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE +default kubernetes ClusterIP 10.32.0.1 443/TCP 6m +``` + +Check out how the Tenant Control Plane advertises itself: + +``` +kubectl --kubeconfig=${TENANT_NAMESPACE}-${TENANT_NAME}.kubeconfig get ep + +NAME ENDPOINTS AGE +kubernetes 10.240.0.100:6443 57m +``` + +### Join worker nodes + +The Tenant Control Plane is made of pods running in the Kamaji Management Cluster. At this point, the Tenant Cluster has no worker nodes. 
So, the next step is to join some worker nodes to the Tenant Control Plane. + +Kamaji does not provide any helper for creation of tenant worker nodes, instead it leverages the [Cluster Management API](https://github.com/kubernetes-sigs/cluster-api). This allows you to create the Tenant Clusters, including worker nodes, in a completely declarative way. Currently, a Cluster API `ControlPlane` provider for AWS is not yet available: check the road-map on the [official repository](https://github.com/clastix/cluster-api-control-plane-provider-kamaji). + +An alternative approach to create and join worker nodes in AWS is to manually create the VMs, turn them into Kubernetes worker nodes and then join through the `kubeadm` command. + +Create an AWS VM Stateful Set to host worker nodes + +```bash +az network vnet subnet create \ + --resource-group $KAMAJI_RG \ + --name $TENANT_SUBNET_NAME \ + --vnet-name $KAMAJI_VNET_NAME \ + --address-prefixes $TENANT_SUBNET_ADDRESS + +az vmss create \ + --name $TENANT_VMSS \ + --resource-group $KAMAJI_RG \ + --image $TENANT_VM_IMAGE \ + --vnet-name $KAMAJI_VNET_NAME \ + --subnet $TENANT_SUBNET_NAME \ + --computer-name-prefix $TENANT_NAME- \ + --load-balancer "" \ + --instance-count 0 + +az vmss update \ + --resource-group $KAMAJI_RG \ + --name $TENANT_VMSS \ + --set virtualMachineProfile.networkProfile.networkInterfaceConfigurations[0].enableIPForwarding=true + +az vmss scale \ + --resource-group $KAMAJI_RG \ + --name $TENANT_VMSS \ + --new-capacity 3 +``` + +Once all the machines are ready, follow the related [documentation](https://kubernetes.io/docs/setup/production-environment/tools/kubeadm/install-kubeadm/) in order to: + +- install `containerd` as container runtime +- install `crictl`, the command line for working with `containerd` +- install `kubectl`, `kubelet`, and `kubeadm` in the desired version + +After the installation is complete on all the nodes, store the entire command of joining in a variable: + +```bash +TENANT_ADDR=$(kubectl -n ${TENANT_NAMESPACE} get svc ${TENANT_NAME} -o json | jq -r ."spec.loadBalancerIP") +JOIN_CMD=$(echo "sudo kubeadm join ${TENANT_ADDR}:6443 ")$(kubeadm --kubeconfig=${TENANT_NAMESPACE}-${TENANT_NAME}.kubeconfig token create --print-join-command |cut -d" " -f4-) +``` + +Use a loop to log in to and run the join command on each node: + +```bash +VMIDS=($(az vmss list-instances \ + --resource-group $KAMAJI_RG \ + --name $TENANT_VMSS \ + --query [].instanceId \ + --output tsv)) + +for i in ${!VMIDS[@]}; do + VMID=${VMIDS[$i]} + az vmss run-command create \ + --name join-tenant-control-plane \ + --vmss-name $TENANT_VMSS \ + --resource-group $KAMAJI_RG \ + --instance-id ${VMID} \ + --script "${JOIN_CMD}" +done +``` + +Checking the nodes: + +```bash +kubectl --kubeconfig=${TENANT_NAMESPACE}-${TENANT_NAME}.kubeconfig get nodes + +NAME STATUS ROLES AGE VERSION +tenant-00-000000 NotReady 112s v1.25.0 +tenant-00-000002 NotReady 92s v1.25.0 +tenant-00-000003 NotReady 71s v1.25.0 +``` + +The cluster needs a [CNI](https://kubernetes.io/docs/concepts/extend-kubernetes/compute-storage-net/network-plugins/) plugin to get the nodes ready. In this guide, we are going to install [calico](https://projectcalico.docs.tigera.io/about/about-calico), but feel free to use one of your taste. 
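Until a CNI is installed, each kubelet reports why its node is `NotReady`; a quick optional diagnostic (a sketch, not a required step) is to look for the network-plugin message in the node conditions:

```bash
# The Ready condition should mention the uninitialized CNI plugin
kubectl --kubeconfig=${TENANT_NAMESPACE}-${TENANT_NAME}.kubeconfig describe nodes | grep -i 'networkready\|network plugin'
```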
+ +Download the latest stable Calico manifest: + +```bash +curl https://raw.githubusercontent.com/projectcalico/calico/v3.24.1/manifests/calico.yaml -O +``` + +As per [documentation](https://projectcalico.docs.tigera.io/reference/public-cloud/AWS), Calico in VXLAN mode is supported on AWS while IPIP packets are blocked by the AWS network fabric. Make sure you edit the manifest above and set the following variables: + +- `CLUSTER_TYPE="k8s"` +- `CALICO_IPV4POOL_IPIP="Never"` +- `CALICO_IPV4POOL_VXLAN="Always"` + +Apply to the Tenant Cluster: + +```bash +kubectl --kubeconfig=${TENANT_NAMESPACE}-${TENANT_NAME}.kubeconfig apply -f calico.yaml +``` + +And after a while, nodes will be ready + +```bash +kubectl --kubeconfig=${TENANT_NAMESPACE}-${TENANT_NAME}.kubeconfig get nodes + +NAME STATUS ROLES AGE VERSION +tenant-00-000000 Ready 3m38s v1.25.0 +tenant-00-000002 Ready 3m18s v1.25.0 +tenant-00-000003 Ready 2m57s v1.25.0 +``` + +## Cleanup +To get rid of the Kamaji infrastructure, remove the RESOURCE_GROUP: + +``` +az group delete --name $KAMAJI_RG --yes --no-wait +``` + +That's all folks! \ No newline at end of file From a7a6bff9fc71a56b780484fb08c6e64118c5839b Mon Sep 17 00:00:00 2001 From: ilyes Ajroud Date: Fri, 31 Jan 2025 18:23:31 +0000 Subject: [PATCH 2/9] feat: update tenant configuration for kamaji deployment --- deploy/kamaji-aws.env | 18 +- docs/content/guides/kamaji-aws-deployment.md | 185 ++++++++++--------- 2 files changed, 103 insertions(+), 100 deletions(-) diff --git a/deploy/kamaji-aws.env b/deploy/kamaji-aws.env index 9fa89e2a..71627eff 100644 --- a/deploy/kamaji-aws.env +++ b/deploy/kamaji-aws.env @@ -13,20 +13,22 @@ export KAMAJI_SUBNET2_ADDRESS=10.0.2.0/24 export KAMAJI_NAMESPACE=kamaji-system # tenant cluster parameters -export TENANT_NAMESPACE=default +export TENANT_NAMESPACE=tenant-00 export TENANT_NAME=tenant-00 -export TENANT_DOMAIN=$KAMAJI_REGION.aws.com -export TENANT_VERSION=v1.26.0 +export TENANT_DOMAIN=internal.kamaji.aws.com +export TENANT_VERSION=v1.30.0 export TENANT_PORT=6443 # port used to expose the tenant api server export TENANT_PROXY_PORT=8132 # port used to expose the konnectivity server export TENANT_POD_CIDR=10.36.0.0/16 export TENANT_SVC_CIDR=10.96.0.0/16 export TENANT_DNS_SERVICE=10.96.0.10 -export TENANT_VM_SIZE=Standard_D2ds_v4 -export TENANT_VM_IMAGE=UbuntuLTS -export TENANT_SUBNET_NAME=$TENANT_NAME-subnet -export TENANT_SUBNET_ADDRESS=10.225.0.0/16 -export TENANT_VMSS=$TENANT_NAME-vmss +export UBUNTU_AMI_ID=ami-06e02ae7bdac6b938 +export TENANT_VM_SIZE=t3.medium +export TENANT_ASG_MIN_SIZE=1 +export TENANT_ASG_MAX_SIZE=1 +export TENANT_ASG_DESIRED_SIZE=1 +export TENANT_SUBNET_ADDRESS=10.0.4.0/24 +export export TENANT_ASG_NAME=$TENANT_NAME-workers diff --git a/docs/content/guides/kamaji-aws-deployment.md b/docs/content/guides/kamaji-aws-deployment.md index c858e90e..2c7b3c8e 100644 --- a/docs/content/guides/kamaji-aws-deployment.md +++ b/docs/content/guides/kamaji-aws-deployment.md @@ -138,12 +138,17 @@ And check you can access: ```bash aws eks update-kubeconfig --region ${KAMAJI_REGION} --name ${KAMAJI_CLUSTER} kubectl cluster-info +# make ebs as a default storage class +kubectl patch storageclass standard -p '{"metadata": {"annotations":{"storageclass.kubernetes.io/is-default-class":"false"}}}' + ``` ### Add route 53 domain In order to easily access to tenant clusters , it is recommended to create a route53 domain or use an existing one if exists ```bash +# for within VPC aws route53 create-hosted-zone --name "$TENANT_DOMAIN" --caller-reference 
$(date +%s) --vpc "VPCRegion=$KAMAJI_REGION,VPCId=$KAMAJI_VPC_ID" + ``` ## Install Kamaji @@ -173,7 +178,10 @@ Setting externalDNS allows to update your DNS records dynamically from an annota helm repo add external-dns https://kubernetes-sigs.github.io/external-dns/ helm repo update -helm install my-external-dns external-dns/external-dns --version 1.15.1 +helm install external-dns external-dns/external-dns \ + --namespace external-dns \ + --create-namespace \ + --version 1.15.1 ``` ## Install Kamaji Controller @@ -190,13 +198,19 @@ helm install kamaji clastix/kamaji -n kamaji-system --create-namespace ### Tenant Control Plane With Kamaji on EKS, the tenant control plane is accessible: +- from management cluster through a `ClusterIP` service - from tenant worker nodes through an internal loadbalancer - from tenant admin user through an external loadbalancer responding to `https://${TENANT_NAME}.${TENANT_NAME}.${TENANT_DOMAIN}:443` Create a tenant control plane of example: ```yaml -cat > ${TENANT_NAMESPACE}-${TENANT_NAME}-tcp.yaml < ${TENANT_NAMESPACE}-${TENANT_NAME}-2.yaml < port: ${TENANT_PORT} certSANs: - ${TENANT_NAME}.${TENANT_DOMAIN} @@ -265,40 +284,26 @@ spec: cpu: 100m memory: 128Mi limits: {} ---- -apiVersion: v1 -kind: Service -metadata: - name: ${TENANT_NAME}-public - namespace: ${TENANT_NAMESPACE} - annotations: - service.beta.kubernetes.io/AWS-dns-label-name: ${TENANT_NAME} -spec: - ports: - - port: 443 - protocol: TCP - targetPort: ${TENANT_PORT} - selector: - kamaji.clastix.io/name: ${TENANT_NAME} - type: LoadBalancer EOF -kubectl -n ${TENANT_NAMESPACE} apply -f ${TENANT_NAMESPACE}-${TENANT_NAME}-tcp.yaml +kubectl -n ${TENANT_NAMESPACE} apply -f ${TENANT_NAMESPACE}-${TENANT_NAME}.yaml ``` Make sure: -- the following annotation: `service.beta.kubernetes.io/AWS-load-balancer-internal=true` is set on the `tcp` service. It tells AWS to expose the service within an internal loadbalancer. +- the following annotation: `external-dns.alpha.kubernetes.io/hostname` is set to create the dns record. It tells AWS to expose the Tenant Control Plane with public domain name: `${TENANT_NAME}.${TENANT_DOMAIN}`. -- the following annotation: `service.beta.kubernetes.io/AWS-dns-label-name=${TENANT_NAME}` is set the public loadbalancer service. It tells AWS to expose the Tenant Control Plane with public domain name: `${TENANT_NAME}.${TENANT_DOMAIN}`. +> Since AWS load Balancer does not support setting LoadBalancerIP, you will get the folowing warning on the service created for the control plane tenant `Error syncing load balancer: failed to ensure load balancer: LoadBalancerIP cannot be specified for AWS ELB`. you can ignore it for now. ### Working with Tenant Control Plane Check the access to the Tenant Control Plane: +> if the domain you used is a private route53 domain make sure to map the public IP of the LB to ${TENANT_NAME}.${TENANT_DOMAIN} in your `/etc/hosts`. 
otherwise kubectl will fail checking ssl certificates + ```bash -curl -k https://${TENANT_NAME}.${KAMAJI_REGION}.cloudapp.AWS.com/healthz -curl -k https://${TENANT_NAME}.${KAMAJI_REGION}.cloudapp.AWS.com/version +curl -k https://${TENANT_NAME}.${TENANT_DOMAIN}/healthz +curl -k https://${TENANT_NAME}.${TENANT_DOMAIN}/version ``` Let's retrieve the `kubeconfig` in order to work with it: @@ -311,7 +316,7 @@ kubectl get secrets -n ${TENANT_NAMESPACE} ${TENANT_NAME}-admin-kubeconfig -o js kubectl --kubeconfig=${TENANT_NAMESPACE}-${TENANT_NAME}.kubeconfig config \ set-cluster ${TENANT_NAME} \ - --server https://${TENANT_NAME}.${KAMAJI_REGION}.cloudapp.AWS.com + --server https://${TENANT_NAME}.${TENANT_DOMAIN} ``` and let's check it out: @@ -319,8 +324,8 @@ and let's check it out: ``` kubectl --kubeconfig=${TENANT_NAMESPACE}-${TENANT_NAME}.kubeconfig get svc -NAMESPACE NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE -default kubernetes ClusterIP 10.32.0.1 443/TCP 6m +NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE +kubernetes ClusterIP 10.96.0.1 443/TCP 38h ``` Check out how the Tenant Control Plane advertises itself: @@ -328,90 +333,88 @@ Check out how the Tenant Control Plane advertises itself: ``` kubectl --kubeconfig=${TENANT_NAMESPACE}-${TENANT_NAME}.kubeconfig get ep -NAME ENDPOINTS AGE -kubernetes 10.240.0.100:6443 57m +NAME ENDPOINTS AGE +kubernetes 172.20.251.60:6443 38h ``` -### Join worker nodes +## Join worker nodes The Tenant Control Plane is made of pods running in the Kamaji Management Cluster. At this point, the Tenant Cluster has no worker nodes. So, the next step is to join some worker nodes to the Tenant Control Plane. -Kamaji does not provide any helper for creation of tenant worker nodes, instead it leverages the [Cluster Management API](https://github.com/kubernetes-sigs/cluster-api). This allows you to create the Tenant Clusters, including worker nodes, in a completely declarative way. Currently, a Cluster API `ControlPlane` provider for AWS is not yet available: check the road-map on the [official repository](https://github.com/clastix/cluster-api-control-plane-provider-kamaji). +Kamaji does not provide any helper for creation of tenant worker nodes, instead it leverages the [Cluster Management API](https://github.com/kubernetes-sigs/cluster-api). This allows you to create the Tenant Clusters, including worker nodes, in a completely declarative way. Currently, a Cluster API `ControlPlane` provider for AWS is available: check the [official documentation](https://github.com/clastix/cluster-api-control-plane-provider-kamaji/blob/master/docs/providers-aws.md). An alternative approach to create and join worker nodes in AWS is to manually create the VMs, turn them into Kubernetes worker nodes and then join through the `kubeadm` command. 
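As a rough sketch of that manual path on Ubuntu hosts (the `v1.30` package repository below is an assumption chosen to match `TENANT_VERSION`; follow the upstream kubeadm installation docs for the full container-runtime configuration):

```bash
# Sketch: prepare a plain Ubuntu VM so it can run `kubeadm join`
sudo apt-get update
sudo apt-get install -y containerd apt-transport-https ca-certificates curl gpg
sudo mkdir -p /etc/apt/keyrings
curl -fsSL https://pkgs.k8s.io/core:/stable:/v1.30/deb/Release.key | sudo gpg --dearmor -o /etc/apt/keyrings/kubernetes-apt-keyring.gpg
echo 'deb [signed-by=/etc/apt/keyrings/kubernetes-apt-keyring.gpg] https://pkgs.k8s.io/core:/stable:/v1.30/deb/ /' | sudo tee /etc/apt/sources.list.d/kubernetes.list
sudo apt-get update
sudo apt-get install -y kubelet kubeadm kubectl
sudo apt-mark hold kubelet kubeadm kubectl
```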
-Create an AWS VM Stateful Set to host worker nodes +### Create the kubeadm join command +Run the following command to get the `kubeadm` join command that will be used on the worker tenant nodes: ```bash -az network vnet subnet create \ - --resource-group $KAMAJI_RG \ - --name $TENANT_SUBNET_NAME \ - --vnet-name $KAMAJI_VNET_NAME \ - --address-prefixes $TENANT_SUBNET_ADDRESS - -az vmss create \ - --name $TENANT_VMSS \ - --resource-group $KAMAJI_RG \ - --image $TENANT_VM_IMAGE \ - --vnet-name $KAMAJI_VNET_NAME \ - --subnet $TENANT_SUBNET_NAME \ - --computer-name-prefix $TENANT_NAME- \ - --load-balancer "" \ - --instance-count 0 - -az vmss update \ - --resource-group $KAMAJI_RG \ - --name $TENANT_VMSS \ - --set virtualMachineProfile.networkProfile.networkInterfaceConfigurations[0].enableIPForwarding=true - -az vmss scale \ - --resource-group $KAMAJI_RG \ - --name $TENANT_VMSS \ - --new-capacity 3 +TENANT_ADDR=$(kubectl -n ${TENANT_NAMESPACE} get svc ${TENANT_NAME} -o json | jq -r ."spec.loadBalancerIP") +JOIN_CMD=$(echo "sudo kubeadm join ${TENANT_ADDR}:6443 ")$(kubeadm --kubeconfig=${TENANT_NAMESPACE}-${TENANT_NAME}.kubeconfig token create --ttl 0 --print-join-command |cut -d" " -f4-) ``` -Once all the machines are ready, follow the related [documentation](https://kubernetes.io/docs/setup/production-environment/tools/kubeadm/install-kubeadm/) in order to: +> setting `--ttl=0` on the `kubeadm token create` will guarantee that the token will never expires and can be used every time. -- install `containerd` as container runtime -- install `crictl`, the command line for working with `containerd` -- install `kubectl`, `kubelet`, and `kubeadm` in the desired version +### create tenant worker nodes ASG -After the installation is complete on all the nodes, store the entire command of joining in a variable: +Create an AWS autoscaling group to host tenant worker nodes: ```bash -TENANT_ADDR=$(kubectl -n ${TENANT_NAMESPACE} get svc ${TENANT_NAME} -o json | jq -r ."spec.loadBalancerIP") -JOIN_CMD=$(echo "sudo kubeadm join ${TENANT_ADDR}:6443 ")$(kubeadm --kubeconfig=${TENANT_NAMESPACE}-${TENANT_NAME}.kubeconfig token create --print-join-command |cut -d" " -f4-) -``` - -Use a loop to log in to and run the join command on each node: +aws ec2 create-subnet --vpc-id $KAMAJI_VPC_ID --cidr-block $TENANT_SUBNET_ADDRESS --availability-zone ${KAMAJI_REGION}a + +export TENANT_SUBNET_ID=$(aws ec2 describe-subnets --filter "Name=vpc-id,Values=$KAMAJI_VPC_ID" --filter "Name=cidr-block,Values=$TENANT_SUBNET_ADDRESS" --query "Subnets[0].SubnetId" --output text) + +USER_DATA=$(cat < Note: we're using the `userdata` in order to bootstrap the worker nodes. You can follow the [documentation](https://kubernetes.io/docs/setup/production-environment/tools/kubeadm/install-kubeadm/) for manual bootstrapping + Checking the nodes: ```bash kubectl --kubeconfig=${TENANT_NAMESPACE}-${TENANT_NAME}.kubeconfig get nodes -NAME STATUS ROLES AGE VERSION -tenant-00-000000 NotReady 112s v1.25.0 -tenant-00-000002 NotReady 92s v1.25.0 -tenant-00-000003 NotReady 71s v1.25.0 +NAME STATUS ROLES AGE VERSION +ip-10-0-1-49 NotReady 56m v1.30.9 ``` The cluster needs a [CNI](https://kubernetes.io/docs/concepts/extend-kubernetes/compute-storage-net/network-plugins/) plugin to get the nodes ready. In this guide, we are going to install [calico](https://projectcalico.docs.tigera.io/about/about-calico), but feel free to use one of your taste. 
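The next steps download the Calico manifest and adjust a couple of its environment variables; if you prefer to script those edits, a `sed` sketch is below (it assumes each `value:` sits on the line right after its `name:`, so verify it against the manifest version you download):

```bash
# Sketch: flip the default IPv4 pool from IPIP to VXLAN in calico.yaml
sed -i '/CALICO_IPV4POOL_IPIP/{n;s/"Always"/"Never"/}' calico.yaml
sed -i '/CALICO_IPV4POOL_VXLAN/{n;s/"Never"/"Always"/}' calico.yaml
```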
@@ -440,16 +443,14 @@ And after a while, nodes will be ready kubectl --kubeconfig=${TENANT_NAMESPACE}-${TENANT_NAME}.kubeconfig get nodes NAME STATUS ROLES AGE VERSION -tenant-00-000000 Ready 3m38s v1.25.0 -tenant-00-000002 Ready 3m18s v1.25.0 -tenant-00-000003 Ready 2m57s v1.25.0 +ip-10-0-1-49 Ready 56m v1.30.9 ``` ## Cleanup To get rid of the Kamaji infrastructure, remove the RESOURCE_GROUP: ``` -az group delete --name $KAMAJI_RG --yes --no-wait +TODO ``` That's all folks! \ No newline at end of file From 546368ab8e71b83f4346ff2802ca5a54fc68976d Mon Sep 17 00:00:00 2001 From: ilyesAj Date: Mon, 3 Feb 2025 19:43:37 +0100 Subject: [PATCH 3/9] simplify documentation using eksctl --- deploy/kamaji-aws.env | 15 +-- docs/content/guides/kamaji-aws-deployment.md | 126 +++++++++---------- 2 files changed, 64 insertions(+), 77 deletions(-) diff --git a/deploy/kamaji-aws.env b/deploy/kamaji-aws.env index 71627eff..f63ef515 100644 --- a/deploy/kamaji-aws.env +++ b/deploy/kamaji-aws.env @@ -1,13 +1,14 @@ # aws parameters export KAMAJI_REGION=eu-west-3 +export KAMAJI_AZ=eu-west-3a +export KAMAJI_CLUSTER_VERSION="1.32" +export KAMAJI_CLUSTER=kamaji-2 export KAMAJI_NODE_NG=${KAMAJI_CLUSTER}-${KAMAJI_REGION}-ng1 export KAMAJI_NODE_TYPE=m5.large -export KAMAJI_CLUSTER=kamaji -export KAMAJI_VPC_NAME=kamaji-vpc -export KAMAJI_VPC_CIDR=10.0.0.0/16 -export KAMAJI_SUBNET_NAME=kamaji-subnet -export KAMAJI_SUBNET1_ADDRESS=10.0.1.0/24 -export KAMAJI_SUBNET2_ADDRESS=10.0.2.0/24 +export KAMAJI_VPC_NAME=eksctl-${KAMAJI_CLUSTER}-cluster/VPC +export KAMAJI_VPC_CIDR=192.168.0.0/16 +export KAMAJI_PUBLIC_SUBNET_NAME=eksctl-${KAMAJI_CLUSTER}-cluster/SubnetPublicEUWEST3A + # kamaji parameters export KAMAJI_NAMESPACE=kamaji-system @@ -16,7 +17,7 @@ export KAMAJI_NAMESPACE=kamaji-system export TENANT_NAMESPACE=tenant-00 export TENANT_NAME=tenant-00 export TENANT_DOMAIN=internal.kamaji.aws.com -export TENANT_VERSION=v1.30.0 +export TENANT_VERSION=v1.30.2 export TENANT_PORT=6443 # port used to expose the tenant api server export TENANT_PROXY_PORT=8132 # port used to expose the konnectivity server export TENANT_POD_CIDR=10.36.0.0/16 diff --git a/docs/content/guides/kamaji-aws-deployment.md b/docs/content/guides/kamaji-aws-deployment.md index 2c7b3c8e..86bc40a5 100644 --- a/docs/content/guides/kamaji-aws-deployment.md +++ b/docs/content/guides/kamaji-aws-deployment.md @@ -47,54 +47,21 @@ In Kamaji, a Management Cluster is a regular Kubernetes cluster which hosts zero Throughout the following instructions, shell variables are used to indicate values that you should adjust to your own AWS environment: -### Create networks - -In this section, we will create the required VPC and the associated subnets that will host the EKS cluster. 
We will also create the EIP (Elastic IPs) that will be used as IPs for tenant cluster - -```bash -source kamaji-AWS.env -# create vpc -aws ec2 create-vpc --cidr-block $KAMAJI_VPC_CIDR --region $KAMAJI_REGION -# retreive subnet -export KAMAJI_VPC_ID=$(aws ec2 describe-vpcs --filters "Name=cidr-block,Values=$KAMAJI_VPC_CIDR" --query "Vpcs[0].VpcId" --output text) -# create subnets -aws ec2 create-subnet --vpc-id $KAMAJI_VPC_ID --cidr-block $KAMAJI_SUBNET1_ADDRESS --availability-zone ${KAMAJI_REGION}a -aws ec2 create-subnet --vpc-id $KAMAJI_VPC_ID --cidr-block $KAMAJI_SUBNET2_ADDRESS --availability-zone ${KAMAJI_REGION}b -# retreive subnets -export KAMAJI_SUBNET1_ID=$(aws ec2 describe-subnets --filter "Name=vpc-id,Values=$KAMAJI_VPC_ID" --filter "Name=cidr-block,Values=$KAMAJI_SUBNET1_ADDRESS" --query "Subnets[0].SubnetId" --output text) -export KAMAJI_SUBNET2_ID=$(aws ec2 describe-subnets --filter "Name=vpc-id,Values=$KAMAJI_VPC_ID" --filter "Name=cidr-block,Values=$KAMAJI_SUBNET2_ADDRESS" --query "Subnets[0].SubnetId" --output text) - - -export IGW_ID=$(aws ec2 create-internet-gateway --query "InternetGateway.InternetGatewayId" --output text) -aws ec2 attach-internet-gateway --vpc-id $KAMAJI_VPC_ID --internet-gateway-id $IGW_ID - -# create nat gateway and attach it to the VPC - -export EIP_ALLOCATION_ID=$(aws ec2 allocate-address --query 'AllocationId' --output text) - -NAT_GATEWAY_ID=$(aws ec2 create-nat-gateway \ - --subnet-id $KAMAJI_SUBNET1_ID \ - --allocation-id $EIP_ALLOCATION_ID \ - --query 'NatGateway.NatGatewayId' \ - --output text) - -aws ec2 wait nat-gateway-available --nat-gateway-ids $NAT_GATEWAY_ID - -PRIVATE_ROUTE_TABLE_ID=$(aws ec2 describe-route-tables \ - --filters "Name=vpc-id,Values=$KAMAJI_VPC_ID" \ - --query "RouteTables[*].RouteTableId" \ - --output text) +### create EKS cluster -aws ec2 create-route \ - --route-table-id $PRIVATE_ROUTE_TABLE_ID \ - --destination-cidr-block 0.0.0.0/0 \ - --nat-gateway-id $NAT_GATEWAY_ID +In order to create quickly an EKS cluster, we will use `eksctl` provided by AWS. `eksctl` is a simple CLI tool for creating and managing clusters on EKS - +`eksctl` will provision for you: +- A dedicated VPC on `192.168.0.0/16` CIDR +- 3 private subnets and 3 public subnets in 3 different availability zones +- NAT Gateway for the private subnets, An internet gateway for the public ones +- the required route tables to associate the subnets with the IGW and the NAT gateways +- Provision the EKS cluster +- Provision worker nodes and associate them to your cluster +- Optionally creates the required IAM policies for your addons and attach them to the node +- Optionally adds the eks addons to your cluster -``` -### create EKS cluster -Once the cluster formation succeeds, get credentials to access the cluster as admin +For our use case, we will create an EKS cluster with the following configuration: ```bash cat >eks-cluster.yaml < ${TENANT_NAMESPACE}-${TENANT_NAME}-2.yaml < ${TENANT_NAMESPACE}-${TENANT_NAME}.yaml < + address: ${TENANT_PUBLIC_IP} port: ${TENANT_PORT} certSANs: - ${TENANT_NAME}.${TENANT_DOMAIN} @@ -291,9 +274,12 @@ kubectl -n ${TENANT_NAMESPACE} apply -f ${TENANT_NAMESPACE}-${TENANT_NAME}.yaml Make sure: +- Tenant Control Plane will expose the API server using a public IP address through a network loadbalancer. +it is important to provide a static public IP address for the API server in order to make it reachable from the outside world. + - the following annotation: `external-dns.alpha.kubernetes.io/hostname` is set to create the dns record. 
It tells AWS to expose the Tenant Control Plane with public domain name: `${TENANT_NAME}.${TENANT_DOMAIN}`. -> Since AWS load Balancer does not support setting LoadBalancerIP, you will get the folowing warning on the service created for the control plane tenant `Error syncing load balancer: failed to ensure load balancer: LoadBalancerIP cannot be specified for AWS ELB`. you can ignore it for now. +> Since AWS load Balancer does not support setting LoadBalancerIP, you will get the following warning on the service created for the control plane tenant `Error syncing load balancer: failed to ensure load balancer: LoadBalancerIP cannot be specified for AWS ELB`. you can ignore it for now. ### Working with Tenant Control Plane @@ -302,8 +288,9 @@ Check the access to the Tenant Control Plane: > if the domain you used is a private route53 domain make sure to map the public IP of the LB to ${TENANT_NAME}.${TENANT_DOMAIN} in your `/etc/hosts`. otherwise kubectl will fail checking ssl certificates ```bash -curl -k https://${TENANT_NAME}.${TENANT_DOMAIN}/healthz -curl -k https://${TENANT_NAME}.${TENANT_DOMAIN}/version +curl -k https://${TENANT_PUBLIC_IP}:${TENANT_PORT}/version +curl -k https://${TENANT_NAME}.${TENANT_DOMAIN}:${TENANT_PORT}/healthz +curl -k https://${TENANT_NAME}.${TENANT_DOMAIN}:${TENANT_PORT}/version ``` Let's retrieve the `kubeconfig` in order to work with it: @@ -316,7 +303,7 @@ kubectl get secrets -n ${TENANT_NAMESPACE} ${TENANT_NAME}-admin-kubeconfig -o js kubectl --kubeconfig=${TENANT_NAMESPACE}-${TENANT_NAME}.kubeconfig config \ set-cluster ${TENANT_NAME} \ - --server https://${TENANT_NAME}.${TENANT_DOMAIN} + --server https://${TENANT_NAME}.${TENANT_DOMAIN}:${TENANT_PORT} ``` and let's check it out: @@ -333,8 +320,8 @@ Check out how the Tenant Control Plane advertises itself: ``` kubectl --kubeconfig=${TENANT_NAMESPACE}-${TENANT_NAME}.kubeconfig get ep -NAME ENDPOINTS AGE -kubernetes 172.20.251.60:6443 38h +NAME ENDPOINTS AGE +kubernetes 13.37.33.12:6443 3m22s ``` ## Join worker nodes @@ -360,8 +347,7 @@ JOIN_CMD=$(echo "sudo kubeadm join ${TENANT_ADDR}:6443 ")$(kubeadm --kubeconfig= Create an AWS autoscaling group to host tenant worker nodes: ```bash -aws ec2 create-subnet --vpc-id $KAMAJI_VPC_ID --cidr-block $TENANT_SUBNET_ADDRESS --availability-zone ${KAMAJI_REGION}a - +aws ec2 create-subnet --vpc-id $KAMAJI_VPC_ID --cidr-block $TENANT_SUBNET_ADDRESS --availability-zone ${KAMAJI_AZ} export TENANT_SUBNET_ID=$(aws ec2 describe-subnets --filter "Name=vpc-id,Values=$KAMAJI_VPC_ID" --filter "Name=cidr-block,Values=$TENANT_SUBNET_ADDRESS" --query "Subnets[0].SubnetId" --output text) USER_DATA=$(cat < Date: Sat, 8 Feb 2025 23:50:44 +0100 Subject: [PATCH 4/9] use CAPA images && fmt --- deploy/kamaji-aws.env | 4 +- docs/content/guides/kamaji-aws-deployment.md | 102 ++++++++----------- 2 files changed, 44 insertions(+), 62 deletions(-) diff --git a/deploy/kamaji-aws.env b/deploy/kamaji-aws.env index f63ef515..37c98002 100644 --- a/deploy/kamaji-aws.env +++ b/deploy/kamaji-aws.env @@ -4,10 +4,11 @@ export KAMAJI_AZ=eu-west-3a export KAMAJI_CLUSTER_VERSION="1.32" export KAMAJI_CLUSTER=kamaji-2 export KAMAJI_NODE_NG=${KAMAJI_CLUSTER}-${KAMAJI_REGION}-ng1 -export KAMAJI_NODE_TYPE=m5.large +export KAMAJI_NODE_TYPE=t3.medium export KAMAJI_VPC_NAME=eksctl-${KAMAJI_CLUSTER}-cluster/VPC export KAMAJI_VPC_CIDR=192.168.0.0/16 export KAMAJI_PUBLIC_SUBNET_NAME=eksctl-${KAMAJI_CLUSTER}-cluster/SubnetPublicEUWEST3A +export 
KAMAJI_PRIVATE_SUBNET_NAME=eksctl-${KAMAJI_CLUSTER}-cluster/SubnetPrivateEUWEST3A # kamaji parameters @@ -24,7 +25,6 @@ export TENANT_POD_CIDR=10.36.0.0/16 export TENANT_SVC_CIDR=10.96.0.0/16 export TENANT_DNS_SERVICE=10.96.0.10 -export UBUNTU_AMI_ID=ami-06e02ae7bdac6b938 export TENANT_VM_SIZE=t3.medium export TENANT_ASG_MIN_SIZE=1 export TENANT_ASG_MAX_SIZE=1 diff --git a/docs/content/guides/kamaji-aws-deployment.md b/docs/content/guides/kamaji-aws-deployment.md index 86bc40a5..c2464ecd 100644 --- a/docs/content/guides/kamaji-aws-deployment.md +++ b/docs/content/guides/kamaji-aws-deployment.md @@ -16,6 +16,7 @@ The guide requires: * [Cleanup](#cleanup) ## Prepare the bootstrap workspace + On the bootstrap machine, clone the repo and prepare the workspace directory: ```bash @@ -26,11 +27,11 @@ cd kamaji/deploy We assume you have installed on the bootstrap machine: - [kubectl](https://kubernetes.io/docs/tasks/tools/#kubectl) -- [kubeadm](https://kubernetes.io/docs/tasks/tools/#kubeadm) - [helm](https://helm.sh/docs/intro/install/) - [jq](https://stedolan.github.io/jq/) - [AWS CLI](https://docs.aws.amazon.com/cli/latest/userguide/getting-started-install.html) - [eksctl](https://eksctl.io/installation/) +- [clusterawsadm](https://github.com/kubernetes-sigs/cluster-api-provider-aws/releases) Make sure you have a valid AWS Account, and login to AWS: @@ -40,8 +41,7 @@ Make sure you have a valid AWS Account, and login to AWS: aws configure ``` - -## Create Management cluster +## Create Management cluster In Kamaji, a Management Cluster is a regular Kubernetes cluster which hosts zero to many Tenant Cluster Control Planes. The Management Cluster acts as cockpit for all the Tenant clusters and implements Monitoring, Logging, and Governance of all the Kamaji setup, including all Tenant Clusters. For this guide, we're going to use an instance of AWS Kubernetes Service (EKS) as Management Cluster. @@ -51,7 +51,8 @@ Throughout the following instructions, shell variables are used to indicate valu In order to create quickly an EKS cluster, we will use `eksctl` provided by AWS. `eksctl` is a simple CLI tool for creating and managing clusters on EKS -`eksctl` will provision for you: +`eksctl` will provision for you: + - A dedicated VPC on `192.168.0.0/16` CIDR - 3 private subnets and 3 public subnets in 3 different availability zones - NAT Gateway for the private subnets, An internet gateway for the public ones @@ -97,9 +98,11 @@ EOF eks create cluster -f eks-cluster.yaml ``` -Please note : + +Please note : + - the `aws-ebs-csi-driver` addon is required to use EBS volumes as persistent volumes . This will be mainly used to store the tenant control plane data using default data store `etcd`. -- We created a node group with 1 node in one availability zone to simplify the setup. +- We created a node group with 1 node in one availability zone to simplify the setup. 
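Because the default `etcd` DataStore persists on EBS-backed volumes, it is worth confirming the CSI addon actually reached the `ACTIVE` state before moving on (an optional check):

```bash
# The addon must report ACTIVE before PersistentVolumeClaims can bind
aws eks describe-addon \
  --cluster-name ${KAMAJI_CLUSTER} \
  --addon-name aws-ebs-csi-driver \
  --region ${KAMAJI_REGION} \
  --query 'addon.status' --output text
```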
### Access to the management cluster @@ -112,7 +115,9 @@ kubectl cluster-info kubectl patch storageclass gp2 -p '{"metadata": {"annotations":{"storageclass.kubernetes.io/is-default-class":"true"}}}' ``` -### Add route 53 domain + +### (optional) Add route 53 domain + In order to easily access to tenant clusters , it is recommended to create a route53 domain or use an existing one if exists ```bash @@ -120,6 +125,7 @@ In order to easily access to tenant clusters , it is recommended to create a rou aws route53 create-hosted-zone --name "$TENANT_DOMAIN" --caller-reference $(date +%s) --vpc "VPCRegion=$KAMAJI_REGION,VPCId=$KAMAJI_VPC_ID" ``` + ## Install Kamaji Follow the [Getting Started](../getting-started.md) to install Cert Manager and the Kamaji Controller. @@ -139,11 +145,10 @@ helm install \ --set installCRDs=true ``` -### Install externalDNS +### (optional) Install externalDNS Setting externalDNS allows to update your DNS records dynamically from an annotation that you add in the service within EKS. Run the following commands to install externalDNS helm chart: - ```bash helm repo add external-dns https://kubernetes-sigs.github.io/external-dns/ @@ -153,6 +158,7 @@ helm install external-dns external-dns/external-dns \ --create-namespace \ --version 1.15.1 ``` + ## Install Kamaji Controller Installing Kamaji via Helm charts is the preferred way. Run the following commands to install a stable release of Kamaji: @@ -182,6 +188,7 @@ export TENANT_PUBLIC_IP=$(aws ec2 describe-addresses --allocation-ids $TENANT_EI ``` + On the next step, we will create a Tenant Control Plane with the following configuration: ```yaml @@ -328,7 +335,7 @@ kubernetes 13.37.33.12:6443 3m22s The Tenant Control Plane is made of pods running in the Kamaji Management Cluster. At this point, the Tenant Cluster has no worker nodes. So, the next step is to join some worker nodes to the Tenant Control Plane. -Kamaji does not provide any helper for creation of tenant worker nodes, instead it leverages the [Cluster Management API](https://github.com/kubernetes-sigs/cluster-api). This allows you to create the Tenant Clusters, including worker nodes, in a completely declarative way. Currently, a Cluster API `ControlPlane` provider for AWS is available: check the [official documentation](https://github.com/clastix/cluster-api-control-plane-provider-kamaji/blob/master/docs/providers-aws.md). +Kamaji does not provide any helper for creation of tenant worker nodes, instead it leverages the [Cluster Management API](https://github.com/kubernetes-sigs/cluster-api). This allows you to create the Tenant Clusters, including worker nodes, in a completely declarative way. Currently, a Cluster API `ControlPlane` provider for AWS is available: check the [official documentation](https://github.com/clastix/cluster-api-control-plane-provider-kamaji/blob/master/docs/providers-aws.md). An alternative approach to create and join worker nodes in AWS is to manually create the VMs, turn them into Kubernetes worker nodes and then join through the `kubeadm` command. @@ -342,65 +349,38 @@ JOIN_CMD=$(echo "sudo kubeadm join ${TENANT_ADDR}:6443 ")$(kubeadm --kubeconfig= > setting `--ttl=0` on the `kubeadm token create` will guarantee that the token will never expires and can be used every time. -### create tenant worker nodes ASG +### create tenant worker nodes -Create an AWS autoscaling group to host tenant worker nodes: +In this section, we will use AMI provided by CAPA (Cluster API Provider AWS) to create the worker nodes. 
Those AMIs are built using [image builder](https://github.com/kubernetes-sigs/image-builder/tree/main) and contains all the necessary components to join the cluster. ```bash -aws ec2 create-subnet --vpc-id $KAMAJI_VPC_ID --cidr-block $TENANT_SUBNET_ADDRESS --availability-zone ${KAMAJI_AZ} -export TENANT_SUBNET_ID=$(aws ec2 describe-subnets --filter "Name=vpc-id,Values=$KAMAJI_VPC_ID" --filter "Name=cidr-block,Values=$TENANT_SUBNET_ADDRESS" --query "Subnets[0].SubnetId" --output text) - -USER_DATA=$(cat <> worker-user-data.sh +#!/bin/bash + $JOIN_CMD EOF -) -USER_DATA_ENCODED=$(echo "$USER_DATA" | base64) - -LAUNCH_TEMPLATE_ID=$(aws ec2 create-launch-template \ - --launch-template-name "$LAUNCH_TEMPLATE_NAME" \ - --version-description "Initial version" \ - --launch-template-data "{ - \"ImageId\": \"$UBUNTU_AMI_ID\", - \"InstanceType\": \"$TENANT_VM_SIZE\", - \"SecurityGroupIds\": [\"$SECURITY_GROUP_ID\"], - \"UserData\": \"$USER_DATA_ENCODED\" - }" \ - --query 'LaunchTemplate.LaunchTemplateId' --output text) - - aws autoscaling create-auto-scaling-group \ - --auto-scaling-group-name "$TENANT_ASG_NAME" \ - --launch-template "LaunchTemplateId=$LAUNCH_TEMPLATE_ID,Version=1" \ - --min-size $TENANT_ASG_MIN_SIZE \ - --max-size $TENANT_ASG_MAX_SIZE \ - --desired-capacity $TENANT_ASG_DESIRED_CAPACITY \ - --vpc-zone-identifier "$TENANT_SUBNET_ID" \ + +aws ec2 run-instances --image-id $WORKER_AMI --instance-type "t2.medium" --user-data $(cat worker-user-data.sh | base64 -w0) --network-interfaces '{"SubnetId":'"'${KAMAJI_PRIVATE_SUBNET_ID}'"',"AssociatePublicIpAddress":false,"DeviceIndex":0,"Groups":[""]}' --count "1" ``` -> Note: we're using the `userdata` in order to bootstrap the worker nodes. You can follow the [documentation](https://kubernetes.io/docs/setup/production-environment/tools/kubeadm/install-kubeadm/) for manual bootstrapping +> we have used user data to run the `kubeadm join` command on the instance boot. This will make sure that the worker node will join the cluster automatically. + -Checking the nodes: +> make sure to replace `` with the security group id that allows the worker nodes to communicate with the public IP of the tenant control plane + +Checking the nodes in the Tenant Cluster: ```bash kubectl --kubeconfig=${TENANT_NAMESPACE}-${TENANT_NAME}.kubeconfig get nodes -NAME STATUS ROLES AGE VERSION -ip-10-0-1-49 NotReady 56m v1.30.9 +NAME STATUS ROLES AGE VERSION +ip-192-168-153-94 NotReady 56m v1.30.2 ``` The cluster needs a [CNI](https://kubernetes.io/docs/concepts/extend-kubernetes/compute-storage-net/network-plugins/) plugin to get the nodes ready. In this guide, we are going to install [calico](https://projectcalico.docs.tigera.io/about/about-calico), but feel free to use one of your taste. @@ -428,15 +408,17 @@ And after a while, nodes will be ready ```bash kubectl --kubeconfig=${TENANT_NAMESPACE}-${TENANT_NAME}.kubeconfig get nodes -NAME STATUS ROLES AGE VERSION -ip-10-0-1-49 Ready 56m v1.30.9 +NAME STATUS ROLES AGE VERSION +ip-192-168-153-94 Ready 59m v1.30.2 ``` ## Cleanup + To get rid of the Kamaji infrastructure, remove the RESOURCE_GROUP: -``` -TODO +```bash +eksctl delete cluster -f eks-cluster.yaml + ``` That's all folks! 
\ No newline at end of file From e1ecbbd103ee8d914961d911ef26bddc9c54498d Mon Sep 17 00:00:00 2001 From: ilyesAj Date: Sat, 8 Feb 2025 23:56:57 +0100 Subject: [PATCH 5/9] fmt --- docs/content/guides/kamaji-aws-deployment.md | 24 +++++++++++--------- 1 file changed, 13 insertions(+), 11 deletions(-) diff --git a/docs/content/guides/kamaji-aws-deployment.md b/docs/content/guides/kamaji-aws-deployment.md index c2464ecd..15eae1a2 100644 --- a/docs/content/guides/kamaji-aws-deployment.md +++ b/docs/content/guides/kamaji-aws-deployment.md @@ -1,4 +1,5 @@ # Setup Kamaji on aws + This guide will lead you through the process of creating a working Kamaji setup on on AWS. The guide requires: @@ -56,7 +57,7 @@ In order to create quickly an EKS cluster, we will use `eksctl` provided by AWS. - A dedicated VPC on `192.168.0.0/16` CIDR - 3 private subnets and 3 public subnets in 3 different availability zones - NAT Gateway for the private subnets, An internet gateway for the public ones -- the required route tables to associate the subnets with the IGW and the NAT gateways +- The required route tables to associate the subnets with the IGW and the NAT gateways - Provision the EKS cluster - Provision worker nodes and associate them to your cluster - Optionally creates the required IAM policies for your addons and attach them to the node @@ -101,7 +102,7 @@ eks create cluster -f eks-cluster.yaml Please note : -- the `aws-ebs-csi-driver` addon is required to use EBS volumes as persistent volumes . This will be mainly used to store the tenant control plane data using default data store `etcd`. +- The `aws-ebs-csi-driver` addon is required to use EBS volumes as persistent volumes . This will be mainly used to store the tenant control plane data using default data store `etcd`. - We created a node group with 1 node in one availability zone to simplify the setup. ### Access to the management cluster @@ -171,7 +172,7 @@ helm install kamaji clastix/kamaji -n kamaji-system --create-namespace ## Create Tenant Cluster -Now that our management cluster is up and running, we can create a Tenant Cluster. A Tenant Cluster is a Kubernetes cluster that is managed by Kamaji. +Now that our management cluster is up and running, we can create a Tenant Cluster. A Tenant Cluster is a Kubernetes cluster that is managed by Kamaji. ### Tenant Control Plane @@ -281,10 +282,10 @@ kubectl -n ${TENANT_NAMESPACE} apply -f ${TENANT_NAMESPACE}-${TENANT_NAME}.yaml Make sure: -- Tenant Control Plane will expose the API server using a public IP address through a network loadbalancer. +- Tenant Control Plane will expose the API server using a public IP address through a network loadbalancer. it is important to provide a static public IP address for the API server in order to make it reachable from the outside world. -- the following annotation: `external-dns.alpha.kubernetes.io/hostname` is set to create the dns record. It tells AWS to expose the Tenant Control Plane with public domain name: `${TENANT_NAME}.${TENANT_DOMAIN}`. +- The following annotation: `external-dns.alpha.kubernetes.io/hostname` is set to create the dns record. It tells AWS to expose the Tenant Control Plane with public domain name: `${TENANT_NAME}.${TENANT_DOMAIN}`. > Since AWS load Balancer does not support setting LoadBalancerIP, you will get the following warning on the service created for the control plane tenant `Error syncing load balancer: failed to ensure load balancer: LoadBalancerIP cannot be specified for AWS ELB`. you can ignore it for now. 
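To verify that ExternalDNS actually published the record, you can query the hosted zone directly (a sketch; it assumes the `${TENANT_DOMAIN}` zone exists and your AWS credentials can read Route53):

```bash
# Sketch: look up the record ExternalDNS should have created
ZONE_ID=$(aws route53 list-hosted-zones-by-name --dns-name "${TENANT_DOMAIN}" --query 'HostedZones[0].Id' --output text)
aws route53 list-resource-record-sets --hosted-zone-id "${ZONE_ID}" \
  --query "ResourceRecordSets[?Name=='${TENANT_NAME}.${TENANT_DOMAIN}.']"
```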
@@ -292,7 +293,7 @@ it is important to provide a static public IP address for the API server in orde Check the access to the Tenant Control Plane: -> if the domain you used is a private route53 domain make sure to map the public IP of the LB to ${TENANT_NAME}.${TENANT_DOMAIN} in your `/etc/hosts`. otherwise kubectl will fail checking ssl certificates +> If the domain you used is a private route53 domain make sure to map the public IP of the LB to ${TENANT_NAME}.${TENANT_DOMAIN} in your `/etc/hosts`. otherwise kubectl will fail checking ssl certificates ```bash curl -k https://${TENANT_PUBLIC_IP}:${TENANT_PORT}/version @@ -339,15 +340,16 @@ Kamaji does not provide any helper for creation of tenant worker nodes, instead An alternative approach to create and join worker nodes in AWS is to manually create the VMs, turn them into Kubernetes worker nodes and then join through the `kubeadm` command. -### Create the kubeadm join command +### generate kubeadm join command + +To join the worker nodes to the Tenant Control Plane, you need to generate the `kubeadm join` command from the Management cluster: -Run the following command to get the `kubeadm` join command that will be used on the worker tenant nodes: ```bash TENANT_ADDR=$(kubectl -n ${TENANT_NAMESPACE} get svc ${TENANT_NAME} -o json | jq -r ."spec.loadBalancerIP") JOIN_CMD=$(echo "sudo kubeadm join ${TENANT_ADDR}:6443 ")$(kubeadm --kubeconfig=${TENANT_NAMESPACE}-${TENANT_NAME}.kubeconfig token create --ttl 0 --print-join-command |cut -d" " -f4-) ``` -> setting `--ttl=0` on the `kubeadm token create` will guarantee that the token will never expires and can be used every time. +> Setting `--ttl=0` on the `kubeadm token create` will guarantee that the token will never expires and can be used every time. ### create tenant worker nodes @@ -369,10 +371,10 @@ aws ec2 run-instances --image-id $WORKER_AMI --instance-type "t2.medium" --user- ``` -> we have used user data to run the `kubeadm join` command on the instance boot. This will make sure that the worker node will join the cluster automatically. +> We have used user data to run the `kubeadm join` command on the instance boot. This will make sure that the worker node will join the cluster automatically. -> make sure to replace `` with the security group id that allows the worker nodes to communicate with the public IP of the tenant control plane +> Make sure to replace `` with the security group id that allows the worker nodes to communicate with the public IP of the tenant control plane Checking the nodes in the Tenant Cluster: From 1329a60846dded0a435bc6b1e12444a624bc12b9 Mon Sep 17 00:00:00 2001 From: ilyesAj Date: Sat, 8 Feb 2025 23:58:06 +0100 Subject: [PATCH 6/9] fmt --- docs/content/guides/kamaji-aws-deployment.md | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/docs/content/guides/kamaji-aws-deployment.md b/docs/content/guides/kamaji-aws-deployment.md index 15eae1a2..7a866a2c 100644 --- a/docs/content/guides/kamaji-aws-deployment.md +++ b/docs/content/guides/kamaji-aws-deployment.md @@ -48,7 +48,7 @@ In Kamaji, a Management Cluster is a regular Kubernetes cluster which hosts zero Throughout the following instructions, shell variables are used to indicate values that you should adjust to your own AWS environment: -### create EKS cluster +### Create EKS cluster In order to create quickly an EKS cluster, we will use `eksctl` provided by AWS. 
`eksctl` is a simple CLI tool for creating and managing clusters on EKS @@ -146,7 +146,7 @@ helm install \ --set installCRDs=true ``` -### (optional) Install externalDNS +### (optional) Install ExternalDNS Setting externalDNS allows to update your DNS records dynamically from an annotation that you add in the service within EKS. Run the following commands to install externalDNS helm chart: @@ -316,7 +316,7 @@ kubectl --kubeconfig=${TENANT_NAMESPACE}-${TENANT_NAME}.kubeconfig config \ and let's check it out: -``` +```bash kubectl --kubeconfig=${TENANT_NAMESPACE}-${TENANT_NAME}.kubeconfig get svc NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE @@ -325,7 +325,7 @@ kubernetes ClusterIP 10.96.0.1 443/TCP 38h Check out how the Tenant Control Plane advertises itself: -``` +```bash kubectl --kubeconfig=${TENANT_NAMESPACE}-${TENANT_NAME}.kubeconfig get ep NAME ENDPOINTS AGE @@ -340,7 +340,7 @@ Kamaji does not provide any helper for creation of tenant worker nodes, instead An alternative approach to create and join worker nodes in AWS is to manually create the VMs, turn them into Kubernetes worker nodes and then join through the `kubeadm` command. -### generate kubeadm join command +### Generate kubeadm join command To join the worker nodes to the Tenant Control Plane, you need to generate the `kubeadm join` command from the Management cluster: @@ -351,7 +351,7 @@ JOIN_CMD=$(echo "sudo kubeadm join ${TENANT_ADDR}:6443 ")$(kubeadm --kubeconfig= > Setting `--ttl=0` on the `kubeadm token create` will guarantee that the token will never expires and can be used every time. -### create tenant worker nodes +### Create tenant worker nodes In this section, we will use AMI provided by CAPA (Cluster API Provider AWS) to create the worker nodes. Those AMIs are built using [image builder](https://github.com/kubernetes-sigs/image-builder/tree/main) and contains all the necessary components to join the cluster. From 2639fc72e33e93a1b6696ff4987fc30a7700b3c5 Mon Sep 17 00:00:00 2001 From: ilyesAj Date: Thu, 13 Feb 2025 17:03:58 +0100 Subject: [PATCH 7/9] fmt --- docs/content/guides/kamaji-aws-deployment.md | 34 ++++++++++---------- docs/mkdocs.yml | 1 + 2 files changed, 18 insertions(+), 17 deletions(-) diff --git a/docs/content/guides/kamaji-aws-deployment.md b/docs/content/guides/kamaji-aws-deployment.md index 7a866a2c..7ce8be27 100644 --- a/docs/content/guides/kamaji-aws-deployment.md +++ b/docs/content/guides/kamaji-aws-deployment.md @@ -44,7 +44,7 @@ aws configure ## Create Management cluster -In Kamaji, a Management Cluster is a regular Kubernetes cluster which hosts zero to many Tenant Cluster Control Planes. The Management Cluster acts as cockpit for all the Tenant clusters and implements Monitoring, Logging, and Governance of all the Kamaji setup, including all Tenant Clusters. For this guide, we're going to use an instance of AWS Kubernetes Service (EKS) as Management Cluster. +In Kamaji, a Management Cluster is a regular Kubernetes cluster which hosts zero to many Tenant Cluster Control Planes. The Management Cluster acts as a cockpit for all the Tenant clusters and implements monitoring, logging, and governance of all the Kamaji setups, including all Tenant Clusters. For this guide, we're going to use an instance of AWS Kubernetes Service (EKS) as a Management Cluster. Throughout the following instructions, shell variables are used to indicate values that you should adjust to your own AWS environment: @@ -61,7 +61,7 @@ In order to create quickly an EKS cluster, we will use `eksctl` provided by AWS. 
- Provision the EKS cluster - Provision worker nodes and associate them to your cluster - Optionally creates the required IAM policies for your addons and attach them to the node -- Optionally adds the eks addons to your cluster +- Optionally, install the EKS add-ons to your cluster For our use case, we will create an EKS cluster with the following configuration: @@ -102,7 +102,7 @@ eks create cluster -f eks-cluster.yaml Please note : -- The `aws-ebs-csi-driver` addon is required to use EBS volumes as persistent volumes . This will be mainly used to store the tenant control plane data using default data store `etcd`. +- The `aws-ebs-csi-driver` addon is required to use EBS volumes as persistent volumes. This will be mainly used to store the tenant control plane data using the _default_ `etcd` DataStore. - We created a node group with 1 node in one availability zone to simplify the setup. ### Access to the management cluster @@ -119,7 +119,7 @@ kubectl patch storageclass gp2 -p '{"metadata": {"annotations":{"storageclass.ku ### (optional) Add route 53 domain -In order to easily access to tenant clusters , it is recommended to create a route53 domain or use an existing one if exists +In order to easily access tenant clusters, it is recommended to create a Route53 domain or use an existing one if it exists ```bash # for within VPC @@ -148,7 +148,7 @@ helm install \ ### (optional) Install ExternalDNS -Setting externalDNS allows to update your DNS records dynamically from an annotation that you add in the service within EKS. Run the following commands to install externalDNS helm chart: +ExternalDNS allows updating your DNS records dynamically from an annotation that you add in the service within EKS. Run the following commands to install the ExternalDNS Helm chart: ```bash @@ -170,13 +170,13 @@ helm repo update helm install kamaji clastix/kamaji -n kamaji-system --create-namespace ``` -## Create Tenant Cluster +## Create a Tenant Cluster Now that our management cluster is up and running, we can create a Tenant Cluster. A Tenant Cluster is a Kubernetes cluster that is managed by Kamaji. ### Tenant Control Plane -A tenant cluster are made of a `Tenant Control Plane` and an arbitrary number of worker nodes. The `Tenant Control Plane` is a Kubernetes cluster that is managed by Kamaji and is responsible for running the Tenant's workloads. +A tenant cluster is made of a `Tenant Control Plane` and an arbitrary number of worker nodes. The `Tenant Control Plane` is a Kubernetes Control Plane managed by Kamaji and responsible for running the Tenant's workloads. Before creating a Tenant Control Plane, you need to define some variables: @@ -190,7 +190,7 @@ export TENANT_PUBLIC_IP=$(aws ec2 describe-addresses --allocation-ids $TENANT_EI ``` -On the next step, we will create a Tenant Control Plane with the following configuration: +In the next step, we will create a Tenant Control Plane with the following configuration: ```yaml cat > ${TENANT_NAMESPACE}-${TENANT_NAME}.yaml < Since AWS load Balancer does not support setting LoadBalancerIP, you will get the following warning on the service created for the control plane tenant `Error syncing load balancer: failed to ensure load balancer: LoadBalancerIP cannot be specified for AWS ELB`. you can ignore it for now. 
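Before connecting, it can help to confirm the Tenant Control Plane reports a `Ready` status (`tcp` is the short name Kamaji registers for `TenantControlPlane`):

```bash
# The STATUS column should read Ready once the control plane pods are up
kubectl -n ${TENANT_NAMESPACE} get tcp ${TENANT_NAME}
```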
@@ -293,7 +293,7 @@ it is important to provide a static public IP address for the API server in orde Check the access to the Tenant Control Plane: -> If the domain you used is a private route53 domain make sure to map the public IP of the LB to ${TENANT_NAME}.${TENANT_DOMAIN} in your `/etc/hosts`. otherwise kubectl will fail checking ssl certificates +> If the domain you used is a private route53 domain make sure to map the public IP of the LB to `${TENANT_NAME}.${TENANT_DOMAIN}` in your `/etc/hosts`. otherwise, `kubectl` will fail to check SSL certificates ```bash curl -k https://${TENANT_PUBLIC_IP}:${TENANT_PORT}/version @@ -336,7 +336,7 @@ kubernetes 13.37.33.12:6443 3m22s The Tenant Control Plane is made of pods running in the Kamaji Management Cluster. At this point, the Tenant Cluster has no worker nodes. So, the next step is to join some worker nodes to the Tenant Control Plane. -Kamaji does not provide any helper for creation of tenant worker nodes, instead it leverages the [Cluster Management API](https://github.com/kubernetes-sigs/cluster-api). This allows you to create the Tenant Clusters, including worker nodes, in a completely declarative way. Currently, a Cluster API `ControlPlane` provider for AWS is available: check the [official documentation](https://github.com/clastix/cluster-api-control-plane-provider-kamaji/blob/master/docs/providers-aws.md). +Kamaji does not provide any helper for the creation of tenant worker nodes, instead, it leverages the [Cluster Management API](https://github.com/kubernetes-sigs/cluster-api). This allows you to create the Tenant Clusters, including worker nodes, in a completely declarative way. Currently, a Cluster API `ControlPlane` provider for AWS is available: check the [official documentation](https://github.com/clastix/cluster-api-control-plane-provider-kamaji/blob/master/docs/providers-aws.md). An alternative approach to create and join worker nodes in AWS is to manually create the VMs, turn them into Kubernetes worker nodes and then join through the `kubeadm` command. @@ -349,11 +349,13 @@ TENANT_ADDR=$(kubectl -n ${TENANT_NAMESPACE} get svc ${TENANT_NAME} -o json | jq JOIN_CMD=$(echo "sudo kubeadm join ${TENANT_ADDR}:6443 ")$(kubeadm --kubeconfig=${TENANT_NAMESPACE}-${TENANT_NAME}.kubeconfig token create --ttl 0 --print-join-command |cut -d" " -f4-) ``` -> Setting `--ttl=0` on the `kubeadm token create` will guarantee that the token will never expires and can be used every time. +> Setting `--ttl=0` on the `kubeadm token create` will guarantee that the token will never expire and can be used every time. +> +> It's not intended for production-grade setups. ### Create tenant worker nodes -In this section, we will use AMI provided by CAPA (Cluster API Provider AWS) to create the worker nodes. Those AMIs are built using [image builder](https://github.com/kubernetes-sigs/image-builder/tree/main) and contains all the necessary components to join the cluster. +In this section, we will use AMI provided by CAPA (Cluster API Provider AWS) to create the worker nodes. Those AMIs are built using [image builder](https://github.com/kubernetes-sigs/image-builder/tree/main) and contain all the necessary components to join the cluster. ```bash @@ -416,11 +418,9 @@ ip-192-168-153-94 Ready 59m v1.30.2 ## Cleanup -To get rid of the Kamaji infrastructure, remove the RESOURCE_GROUP: +To get rid of the whole Kamaji infrastructure, remove the EKS cluster: ```bash eksctl delete cluster -f eks-cluster.yaml -``` - That's all folks! 
\ No newline at end of file diff --git a/docs/mkdocs.yml b/docs/mkdocs.yml index 30d20220..6cc39eaf 100644 --- a/docs/mkdocs.yml +++ b/docs/mkdocs.yml @@ -60,6 +60,7 @@ nav: - 'Guides': - guides/index.md - guides/kamaji-azure-deployment.md + - guides/kamaji-aws-deployment.md - guides/alternative-datastore.md - guides/kamaji-gitops-flux.md - guides/upgrade.md From 24c44d99eaff20c6457176a016668655bceb88cd Mon Sep 17 00:00:00 2001 From: Dario Tranchitella Date: Thu, 13 Feb 2025 17:31:46 +0100 Subject: [PATCH 8/9] Update docs/content/guides/kamaji-aws-deployment.md --- docs/content/guides/kamaji-aws-deployment.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/content/guides/kamaji-aws-deployment.md b/docs/content/guides/kamaji-aws-deployment.md index 7ce8be27..3824c070 100644 --- a/docs/content/guides/kamaji-aws-deployment.md +++ b/docs/content/guides/kamaji-aws-deployment.md @@ -1,4 +1,4 @@ -# Setup Kamaji on aws +# Setup Kamaji on AWS This guide will lead you through the process of creating a working Kamaji setup on on AWS. From f42fe5fbfa4de6b0d22f44741befbc24d457db8c Mon Sep 17 00:00:00 2001 From: Dario Tranchitella Date: Thu, 13 Feb 2025 17:31:53 +0100 Subject: [PATCH 9/9] Update deploy/kamaji-aws.env --- deploy/kamaji-aws.env | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/deploy/kamaji-aws.env b/deploy/kamaji-aws.env index 37c98002..b9738e2a 100644 --- a/deploy/kamaji-aws.env +++ b/deploy/kamaji-aws.env @@ -30,6 +30,4 @@ export TENANT_ASG_MIN_SIZE=1 export TENANT_ASG_MAX_SIZE=1 export TENANT_ASG_DESIRED_SIZE=1 export TENANT_SUBNET_ADDRESS=10.0.4.0/24 -export export TENANT_ASG_NAME=$TENANT_NAME-workers - - +export TENANT_ASG_NAME=$TENANT_NAME-workers
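With the environment file cleaned up, a quick sanity check that the variables load as expected (run from the repository root) might look like:

```bash
# Sketch: source the env file and list the exported variables
source deploy/kamaji-aws.env
env | grep -E '^(KAMAJI|TENANT)_' | sort
```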