From 47a642ceaa020ac27f6efb1c90bf7e192c04ed9f Mon Sep 17 00:00:00 2001
From: Andi Skrgat
Date: Wed, 15 Jan 2025 13:03:15 +0100
Subject: [PATCH 1/3] Improve AKS and AWS docs

---
 charts/memgraph-high-availability/README.md   | 19 ++--
 .../memgraph-high-availability/aks/README.md  | 98 +++++++++++++++++--
 .../memgraph-high-availability/aws/README.md  | 37 ++++++-
 charts/memgraph-high-availability/values.yaml |  8 +-
 4 files changed, 138 insertions(+), 24 deletions(-)

diff --git a/charts/memgraph-high-availability/README.md b/charts/memgraph-high-availability/README.md
index ed99eef..5cae7b1 100644
--- a/charts/memgraph-high-availability/README.md
+++ b/charts/memgraph-high-availability/README.md
@@ -20,6 +20,17 @@ Or you can modify a `values.yaml` file and override the desired values:
 helm install memgraph/memgraph-high-availability -f values.yaml
 ```
 
+## Upgrading the Memgraph HA Helm Chart
+
+If you installed the Helm chart with a `values.yaml` file, upgrade it with:
+```
+helm upgrade memgraph/memgraph-high-availability -f values.yaml
+```
+
+If you installed it with `--set`, upgrade it with:
+```
+helm upgrade memgraph/memgraph-high-availability --set ,memgraph.image.tag=
+```
 
 ## Configuration Options
 
@@ -72,12 +83,6 @@ For the `data` and `coordinators` sections, each item in the list has the follow
 
-The `args` section contains a list of arguments for the instance. The default values are the same for all instances:
-
-```markdown
-- "--also-log-to-stderr"
-- "--log-level=TRACE"
-- "--replication-restore-state-on-startup=true"
-```
+The `args` section contains a list of arguments for starting the Memgraph instance.
 
 For all available database settings, refer to the [Configuration settings reference guide](https://memgraph.com/docs/memgraph/reference-guide/configuration).
 
diff --git a/charts/memgraph-high-availability/aks/README.md b/charts/memgraph-high-availability/aks/README.md
index 2802b5d..469fefd 100644
--- a/charts/memgraph-high-availability/aks/README.md
+++ b/charts/memgraph-high-availability/aks/README.md
@@ -1,9 +1,9 @@
 ## Description
 
 This guide instructs users on how to deploy Memgraph HA to Azure AKS. It serves only as a starting point and there are many ways possible to extend
-what is currently here. In this setup each Memgraph database is deployed to separate, `Standard_A2_v2` node.
+what is currently here. In this setup each Memgraph database is deployed to a separate `Standard_A2_v2` node.
 
-## Installation
+## Installing tools
 
 You will need:
 - [azure-cli](https://learn.microsoft.com/en-us/cli/azure/)
@@ -63,7 +63,85 @@ kubectl label nodes aks-nodepool1-65392319-vmss000003 role=data-node
 kubectl label nodes aks-nodepool1-65392319-vmss000004 role=data-node
 ```
 
-## Deploy Memgraph HA
+In the following chapters, we will go over the most common deployment types:
+
+## Service type = IngressNginx
+
+The most cost-friendly way to expose a Memgraph HA cluster in K8s is to use an IngressNginx controller. This controller can route TCP traffic carrying the Bolt
+protocol to the Memgraph K8s services. To achieve this, it uses only a single LoadBalancer, which means there is only a single external IP for connecting to the cluster.
+Users can connect to any coordinator or data instance by using a distinct Bolt port for each instance.
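+
+The next step installs the chart with a long `--set` string. As an illustrative aside only, the same settings could instead be kept in a `values.yaml` override. The sketch below
+simply mirrors the `memgraph.affinity.nodeSelection` and `memgraph.externalAccessConfig.*` flags used in the install command; check the chart's own `values.yaml` for the authoritative keys:
+
+```
+# Sketch of a values override mirroring the --set flags used in the next step.
+memgraph:
+  affinity:
+    nodeSelection: true
+  externalAccessConfig:
+    dataInstance:
+      serviceType: IngressNginx
+    coordinator:
+      serviceType: IngressNginx
+```
+
+Such a file could then be passed with `helm install mem-ha-test ./charts/memgraph-high-availability -f values.yaml`.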
+
+The first step is to install the Memgraph HA chart:
+
+```
+helm install mem-ha-test ./charts/memgraph-high-availability --set \
+memgraph.env.MEMGRAPH_ENTERPRISE_LICENSE=,\
+memgraph.env.MEMGRAPH_ORGANIZATION_NAME=,memgraph.affinity.nodeSelection=true,\
+memgraph.externalAccessConfig.dataInstance.serviceType=IngressNginx,memgraph.externalAccessConfig.coordinator.serviceType=IngressNginx
+```
+
+Next, install the `IngressNginx` controller:
+
+```
+helm upgrade --install ingress-nginx ingress-nginx \
+--repo https://kubernetes.github.io/ingress-nginx \
+--namespace ingress-nginx --create-namespace \
+--set controller.tcp.services.configMapNamespace=ingress-nginx \
+--set controller.tcp.services.configMapName=tcp-services
+```
+
+After installing the Memgraph HA chart and the IngressNginx controller, we need to customize the configuration of the newly created controller Service and Deployment
+objects. Open the `ingress-nginx-controller` service for editing by running:
+```
+kubectl edit svc ingress-nginx-controller -n ingress-nginx
+```
+
+Locate the `spec.ports` section and append the following port configuration:
+```
+- name: data-0
+  port: 9000
+  targetPort: 9000
+  protocol: TCP
+- name: data-1
+  port: 9001
+  targetPort: 9001
+  protocol: TCP
+- name: coord-1
+  port: 9011
+  targetPort: 9011
+  protocol: TCP
+- name: coord-2
+  port: 9012
+  targetPort: 9012
+  protocol: TCP
+- name: coord-3
+  port: 9013
+  targetPort: 9013
+  protocol: TCP
+```
+
+After you are done, save and close the file. Next, open the `ingress-nginx-controller` deployment by running:
+```
+kubectl edit deployment ingress-nginx-controller -n ingress-nginx
+```
+
+Locate the `args` section and append the following configuration option:
+```
+- --tcp-services-configmap=ingress-nginx/tcp-services
+```
+
+If you get stuck, more info can be found [here](https://kubernetes.github.io/ingress-nginx/user-guide/exposing-tcp-udp-services/). Save and close the file.
+The only remaining step is to connect the Memgraph instances into a cluster. For that, we need to find out which external IP the LoadBalancer uses. You can
+find it by running `kubectl get svc -o=wide -A`.
+
+```
+ADD COORDINATOR 1 WITH CONFIG {"bolt_server": ":9011", "management_server": "memgraph-coordinator-1.default.svc.cluster.local:10000", "coordinator_server": "memgraph-coordinator-1.default.svc.cluster.local:12000"};
+ADD COORDINATOR 2 WITH CONFIG {"bolt_server": ":9012", "management_server": "memgraph-coordinator-2.default.svc.cluster.local:10000", "coordinator_server": "memgraph-coordinator-2.default.svc.cluster.local:12000"};
+ADD COORDINATOR 3 WITH CONFIG {"bolt_server": ":9013", "management_server": "memgraph-coordinator-3.default.svc.cluster.local:10000", "coordinator_server": "memgraph-coordinator-3.default.svc.cluster.local:12000"};
+REGISTER INSTANCE instance_0 WITH CONFIG {"bolt_server": ":9000", "management_server": "memgraph-data-0.default.svc.cluster.local:10000", "replication_server": "memgraph-data-0.default.svc.cluster.local:20000"};
+REGISTER INSTANCE instance_1 WITH CONFIG {"bolt_server": ":9001", "management_server": "memgraph-data-1.default.svc.cluster.local:10000", "replication_server": "memgraph-data-1.default.svc.cluster.local:20000"};
+SET INSTANCE instance_1 TO MAIN;
+```
+
+## Service type = LoadBalancer
 
 After preparing nodes, we can deploy Memgraph HA cluster by using `helm install` command. We will specify affinity options so that node labels are used and so that each data and coordinator instance is exposed through LoadBalancer.
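+
+The `helm install` command itself is unchanged for this scenario. As an illustrative sketch only, reusing the flags from the IngressNginx section and assuming the chart's
+`externalAccessConfig.*.serviceType` keys also accept `LoadBalancer` (with `<license>` and `<organization>` as placeholders), it could look roughly like:
+
+```
+helm install mem-ha-test ./charts/memgraph-high-availability --set \
+memgraph.env.MEMGRAPH_ENTERPRISE_LICENSE=<license>,\
+memgraph.env.MEMGRAPH_ORGANIZATION_NAME=<organization>,memgraph.affinity.nodeSelection=true,\
+memgraph.externalAccessConfig.dataInstance.serviceType=LoadBalancer,memgraph.externalAccessConfig.coordinator.serviceType=LoadBalancer
+```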
@@ -107,6 +185,7 @@ For the host enter external ip of `memgraph-coordinator-1-external` and port is
 we only need to change 'bolt\_server' part to use LoadBalancers' external IP.
 
 ```
+ADD COORDINATOR 1 WITH CONFIG {"bolt_server": "172.205.93.228:7687", "management_server": "memgraph-coordinator-1.default.svc.cluster.local:10000", "coordinator_server": "memgraph-coordinator-1.default.svc.cluster.local:12000"};
 ADD COORDINATOR 2 WITH CONFIG {"bolt_server": "4.209.216.240:7687", "management_server": "memgraph-coordinator-2.default.svc.cluster.local:10000", "coordinator_server": "memgraph-coordinator-2.default.svc.cluster.local:12000"};
 ADD COORDINATOR 3 WITH CONFIG {"bolt_server": "68.219.15.104:7687", "management_server": "memgraph-coordinator-3.default.svc.cluster.local:10000", "coordinator_server": "memgraph-coordinator-3.default.svc.cluster.local:12000"};
 REGISTER INSTANCE instance_1 WITH CONFIG {"bolt_server": "68.219.11.242:7687", "management_server": "memgraph-data-0.default.svc.cluster.local:10000", "replication_server": "memgraph-data-0.default.svc.cluster.local:20000"};
@@ -117,9 +196,9 @@ SET INSTANCE instance_1 TO MAIN;
 The output of `SHOW INSTANCES` should then look similar to:
 
 ```
-| name            | bolt_server                                              | coordinator_server                                        | management_server                                         | health | role     | last_succ_resp_ms |
-|-----------------|----------------------------------------------------------|-----------------------------------------------------------|-----------------------------------------------------------|--------|----------|-------------------|
-| "coordinator_1" | "memgraph-coordinator-1.default.svc.cluster.local:7687"  | "memgraph-coordinator-1.default.svc.cluster.local:12000"  | "memgraph-coordinator-1.default.svc.cluster.local:10000"  | "up"   | "leader" | 0                 |
+| name            | bolt_server                                              | coordinator_server                                        | management_server                                         | health  | role      | last_succ_resp_ms |
+|-----------------|----------------------------------------------------------|-----------------------------------------------------------|-----------------------------------------------------------|---------|-----------|-------------------|
+| "coordinator_1" | "172.205.93.228:7687"                                    | "memgraph-coordinator-1.default.svc.cluster.local:12000"  | "memgraph-coordinator-1.default.svc.cluster.local:10000"  | "up"    | "leader"  | 0                 |
 | "coordinator_2" | "4.209.216.240:7687"                                     | "memgraph-coordinator-2.default.svc.cluster.local:12000"  | "memgraph-coordinator-2.default.svc.cluster.local:10000"  | "up"    | "follower"| 550               |
 | "coordinator_3" | "68.219.15.104:7687"                                     | "memgraph-coordinator-3.default.svc.cluster.local:12000"  | "memgraph-coordinator-3.default.svc.cluster.local:10000"  | "up"    | "follower"| 26                |
 | "instance_1"    | "68.219.11.242:7687"                                     | ""                                                        | "memgraph-data-0.default.svc.cluster.local:10000"         | "up"    | "main"    | 917               |
@@ -128,15 +207,16 @@ The output of `SHOW INSTANCES` should then look similar to:
 
 ## Using CommonLoadBalancer
 
-When using 'CommonLoadBalancer', all three coordinators will be behind a single LoadBalancer. To connect the cluster, open Lab and use Memgraph
+When using 'CommonLoadBalancer', all three coordinators will be behind a single LoadBalancer, while each data instance gets its own LoadBalancer. To connect to the cluster, open Lab and use the Memgraph
 instance type of connection. For the host enter external IP of `memgraph-coordinator-1-external` and port is 7687. Again,
 we only need to change 'bolt\_server' part to use LoadBalancers' external IP. When connecting to CommonLoadBalancer, K8 will automatically route you to one of coordinators.
-To see on which coordinator did you end route, run `show instances`. If for example, the output of show instances says you are connected to
+To see which coordinator you were routed to, run `SHOW INSTANCE`. If, for example, the output of the query says you are connected to
 coordinator 2, we need to add coordinators 1 and 3. Registering data instances stays exactly the same.
 
 ```
+ADD COORDINATOR 2 WITH CONFIG {"bolt_server": ":7687", "management_server": "memgraph-coordinator-2.default.svc.cluster.local:10000", "coordinator_server": "memgraph-coordinator-2.default.svc.cluster.local:12000"};
 ADD COORDINATOR 1 WITH CONFIG {"bolt_server": ":7687", "management_server": "memgraph-coordinator-1.default.svc.cluster.local:10000", "coordinator_server": "memgraph-coordinator-1.default.svc.cluster.local:12000"};
-ADD COORDINATOR 3 WITH CONFIG {"bolt_server": "CommonLoadBalancer-IP:7687", "management_server": "memgraph-coordinator-3.default.svc.cluster.local:10000", "coordinator_server": "memgraph-coordinator-3.default.svc.cluster.local:12000"};
+ADD COORDINATOR 3 WITH CONFIG {"bolt_server": ":7687", "management_server": "memgraph-coordinator-3.default.svc.cluster.local:10000", "coordinator_server": "memgraph-coordinator-3.default.svc.cluster.local:12000"};
 REGISTER INSTANCE instance_1 WITH CONFIG {"bolt_server": "68.219.11.242:7687", "management_server": "memgraph-data-0.default.svc.cluster.local:10000", "replication_server": "memgraph-data-0.default.svc.cluster.local:20000"};
 REGISTER INSTANCE instance_2 WITH CONFIG {"bolt_server": "68.219.13.145:7687", "management_server": "memgraph-data-1.default.svc.cluster.local:10000", "replication_server": "memgraph-data-1.default.svc.cluster.local:20000"};
 SET INSTANCE instance_1 TO MAIN;
diff --git a/charts/memgraph-high-availability/aws/README.md b/charts/memgraph-high-availability/aws/README.md
index 1f2ff24..3229ced 100644
--- a/charts/memgraph-high-availability/aws/README.md
+++ b/charts/memgraph-high-availability/aws/README.md
@@ -1,6 +1,6 @@
 ## Description
 
-This guide instructs users on how to deploy Memgraph HA to AWS EKS. It serves only as a starting point and there are many ways possible to extend what is currently here. In this setup
+This guide instructs users on how to deploy Memgraph HA to AWS EKS using `NodePort` services. It serves only as a starting point and there are many ways possible to extend what is currently here. In this setup
 each Memgraph database is deployed to separate, `t3.small` node in the `eu-west-1` AWS region.
 
 ## Installation
@@ -61,13 +61,26 @@ aws iam attach-role-policy --role-name eksctl-test-cluster-ha-nodegroup-s-NodeIn
 aws iam list-attached-role-policies --role-name eksctl-test-cluster-ha-nodegroup-s-NodeInstanceRole-
 ```
 
-It is also important to create Inbound Rule in the Security Group attached to the eksctl cluster which will allow TCP traffic
+When using `NodePort` services, it is important to create an Inbound Rule in the Security Group attached to the eksctl cluster that allows TCP traffic
 on ports 30000-32767. We find it easiest to modify this by going to the EC2 Dashboard.
+
+## Label nodes
+
+This guide uses the `nodeSelection` affinity option. Make sure to label the nodes where you want coordinators deployed with the role `coordinator-node`
+and the nodes where you want data instances deployed with the role `data-node`.
+
+Example:
+```
+kubectl label nodes node-000000 role=coordinator-node
+kubectl label nodes node-000001 role=coordinator-node
+kubectl label nodes node-000002 role=coordinator-node
+kubectl label nodes node-000003 role=data-node
+kubectl label nodes node-000004 role=data-node
+```
 
 ## Deploy Memgraph cluster
 
-The only step left is to deploy the cluster using
+We can now install the Memgraph HA chart using the following command:
 
 ```
 helm install mem-ha-test ./charts/memgraph-high-availability --set \
 memgraph.env.MEMGRAPH_ENTERPRISE_LICENSE=, \
 memgraph.env.MEMGRAPH_ORGANIZATION_NAME=, \
 memgraph.data.volumeClaim.storagePVCClassName=gp2, \
@@ -77,7 +90,23 @@ memgraph.data.volumeClaim.storagePVCClassName=gp2, \
 memgraph.coordinators.volumeClaim.storagePVCClassName=gp2, \
 memgraph.data.volumeClaim.logPVCClassName=gp2, \
 memgraph.coordinators.volumeClaim.logPVCClassName=gp2, \
-memgraph.affinity.enabled=true
+memgraph.affinity.nodeSelection=true, \
+memgraph.externalAccessConfig.dataInstance.serviceType=NodePort, \
+memgraph.externalAccessConfig.coordinator.serviceType=NodePort
 ```
 
+The only remaining step is to connect the instances into a cluster:
+
+```
+ADD COORDINATOR 1 WITH CONFIG {"bolt_server": ":7687", "management_server": "memgraph-coordinator-1.default.svc.cluster.local:10000", "coordinator_server": "memgraph-coordinator-1.default.svc.cluster.local:12000"};
+ADD COORDINATOR 2 WITH CONFIG {"bolt_server": ":7687", "management_server": "memgraph-coordinator-2.default.svc.cluster.local:10000", "coordinator_server": "memgraph-coordinator-2.default.svc.cluster.local:12000"};
+ADD COORDINATOR 3 WITH CONFIG {"bolt_server": ":7687", "management_server": "memgraph-coordinator-3.default.svc.cluster.local:10000", "coordinator_server": "memgraph-coordinator-3.default.svc.cluster.local:12000"};
+REGISTER INSTANCE instance_1 WITH CONFIG {"bolt_server": ":7687", "management_server": "memgraph-data-0.default.svc.cluster.local:10000", "replication_server": "memgraph-data-0.default.svc.cluster.local:20000"};
+REGISTER INSTANCE instance_2 WITH CONFIG {"bolt_server": ":7687", "management_server": "memgraph-data-1.default.svc.cluster.local:10000", "replication_server": "memgraph-data-1.default.svc.cluster.local:20000"};
+SET INSTANCE instance_1 TO MAIN;
+```
 
 You can check the state of the cluster with `kubectl get pods -o wide`.
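+
+The `bolt_server` values above need a real node IP and the NodePort assigned to each service. As an illustrative sketch only (service names are assumed to follow the
+`-external` pattern used in the AKS guide), you can look both up with standard kubectl commands:
+
+```
+# External IPs of the worker nodes (EXTERNAL-IP column)
+kubectl get nodes -o wide
+
+# NodePort assigned to each exposed service (PORT(S) column, e.g. 7687:3xxxx/TCP)
+kubectl get svc
+```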
+
+
diff --git a/charts/memgraph-high-availability/values.yaml b/charts/memgraph-high-availability/values.yaml
index 044c46f..fec0aba 100644
--- a/charts/memgraph-high-availability/values.yaml
+++ b/charts/memgraph-high-availability/values.yaml
@@ -1,7 +1,7 @@
 memgraph:
   image:
-    repository: memgraph/memgraph
-    tag: 2.22.0
+    repository: memgraphacrha.azurecr.io/memgraph/memgraph
+    tag: 2.22.0_31_b64b5e79c
     pullPolicy: IfNotPresent
   env:
     MEMGRAPH_ENTERPRISE_LICENSE: ""
@@ -23,7 +23,7 @@ memgraph:
       storagePVCSize: "1Gi"
       logPVCClassName: ""
       logPVC: true
-      logPVCSize: "256Mi"
+      logPVCSize: "1Gi"
   coordinators:
     volumeClaim:
       storagePVCClassName: ""
@@ -31,7 +31,7 @@ memgraph:
       storagePVCSize: "1Gi"
       logPVCClassName: ""
       logPVC: true
-      logPVCSize: "256Mi"
+      logPVCSize: "1Gi"
   ports:
     boltPort: 7687
     managementPort: 10000

From 5a27fa964861f6df5e30a19687edef695242571d Mon Sep 17 00:00:00 2001
From: Andi Skrgat
Date: Thu, 16 Jan 2025 13:11:36 +0100
Subject: [PATCH 2/3] Fix values

---
 charts/memgraph-high-availability/values.yaml | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/charts/memgraph-high-availability/values.yaml b/charts/memgraph-high-availability/values.yaml
index fec0aba..0f59953 100644
--- a/charts/memgraph-high-availability/values.yaml
+++ b/charts/memgraph-high-availability/values.yaml
@@ -1,7 +1,7 @@
 memgraph:
   image:
-    repository: memgraphacrha.azurecr.io/memgraph/memgraph
-    tag: 2.22.0_31_b64b5e79c
+    repository: memgraph/memgraph
+    tag: 2.22.0
     pullPolicy: IfNotPresent
   env:
     MEMGRAPH_ENTERPRISE_LICENSE: ""

From 762b1a4ad9b0afb075c173f1cb1270df128f5f88 Mon Sep 17 00:00:00 2001
From: Andi Skrgat
Date: Fri, 17 Jan 2025 11:59:52 +0100
Subject: [PATCH 3/3] Add notes for recovering data

---
 charts/memgraph-high-availability/README.md   |   2 +-
 .../memgraph-high-availability/aks/README.md  | 111 ++++++++++++++++++
 .../memgraph-high-availability/aws/README.md  |   2 -
 .../templates/NOTES.txt                       |  31 +----
 charts/memgraph-high-availability/values.yaml |   2 +-
 5 files changed, 115 insertions(+), 33 deletions(-)

diff --git a/charts/memgraph-high-availability/README.md b/charts/memgraph-high-availability/README.md
index 5cae7b1..6441254 100644
--- a/charts/memgraph-high-availability/README.md
+++ b/charts/memgraph-high-availability/README.md
@@ -83,6 +83,6 @@ For the `data` and `coordinators` sections, each item in the list has the follow
 
-The `args` section contains a list of arguments for starting the Memgraph instance.
+The `args` section contains a list of arguments for starting the Memgraph instance.
 
 For all available database settings, refer to the [Configuration settings reference guide](https://memgraph.com/docs/memgraph/reference-guide/configuration).
 
diff --git a/charts/memgraph-high-availability/aks/README.md b/charts/memgraph-high-availability/aks/README.md
index 469fefd..5731d31 100644
--- a/charts/memgraph-high-availability/aks/README.md
+++ b/charts/memgraph-high-availability/aks/README.md
@@ -221,3 +221,114 @@ REGISTER INSTANCE instance_1 WITH CONFIG {"bolt_server": "68.219.11.242:7687", "
 REGISTER INSTANCE instance_2 WITH CONFIG {"bolt_server": "68.219.13.145:7687", "management_server": "memgraph-data-1.default.svc.cluster.local:10000", "replication_server": "memgraph-data-1.default.svc.cluster.local:20000"};
 SET INSTANCE instance_1 TO MAIN;
 ```
+
+## Memgraph HA storage model
+
+Each Memgraph instance stores its data and logs on two separate volumes.
+You usually don't want to inspect the data directory manually; you only want to be able to recover data when starting a new instance. For working with persistent data, Kubernetes uses
+persistent volumes (PV) and persistent volume claims (PVC). You can think of persistent volumes as the actual storage where the data is stored,
+while persistent volume claims are requests to attach PVs to your pods. You can find more details about the concept of storage in
+Kubernetes [here](https://kubernetes.io/docs/concepts/storage/persistent-volumes/). At the moment, the HA chart uses dynamically created
+PVCs which won't get deleted when the chart is uninstalled. However, if you run `kubectl delete pvc -A`, the underlying
+persistent volumes will also be deleted since the default reclaim policy is Delete. This also means that when you upgrade the chart, all data is preserved.
+
+Inspecting logs can be very valuable, for example when sending a bug report after a pod crash. In that case, `kubectl logs` doesn't help because
+it doesn't show logs from before the crash.
+
+There are two possible ways to get to your PVs. Note that you can retrieve the data directory in the same way as the logs, so the
+following two chapters apply in both cases.
+
+### Attaching disk to VM for Azure Disk storage
+
+Azure Disk is the default storage class on Azure AKS. It is block storage and doesn't allow simultaneous access from multiple pods. Therefore, in order to retrieve logs,
+we will create a snapshot of the disk, create a temporary virtual machine and attach a copy of the disk to the newly created VM.
+
+Let's say the coordinator-1 pod crashed and you want to send us the logs so we can figure out what happened. Run
+
+```
+kubectl get pv -A
+```
+
+to find the ID of the PV that coordinator 1 uses. The PV's ID also serves as the name of the disk used as the underlying
+storage. We will use this information to create a snapshot of the disk:
+
+```
+az snapshot create \
+  --resource-group \
+  --source /subscriptions//resourceGroups//providers/Microsoft.Compute/disks/ \
+  --name coord1-log-snapshot
+```
+
+If you are not sure about the resource group of the disk, you can run:
+
+```
+az disk list --output table
+```
+
+to find it out. Using the created snapshot, we will create a new disk with the following command:
+
+```
+az disk create \
+  --resource-group \
+  --source coord1-log-snapshot \
+  --name coord1-log-disk \
+  --zone 1
+```
+
+The next step is creating a virtual machine; any reasonable default settings will work. It is only important that it is in the same region as the newly created disk copy.
+Note that one VM can be used for attaching as many disks as you want, so you don't need to create a separate VM every time. For creating the VM we used the Azure Portal. After you have created
+the VM, you can attach the disk to it using:
+
+```
+az vm disk attach \
+  --resource-group \
+  --vm-name \
+  --disk /subscriptions//resourceGroups//providers/Microsoft.Compute/disks/coord1-log-disk
+```
+
+SSH into the VM; you should be able to see your disk (usually named sdc or sdd) by running `lsblk`. Create a new directory and mount the disk:
+```
+sudo mkdir /mnt/coord1
+sudo mount /dev/ /mnt/coord1
+```
+You can now copy the data to your local machine using scp.
+
+### Creating a debug pod for Azure File storage
+
+When using Azure File storage, the easiest way to retrieve data is to create a debug pod which attaches to a PV and mounts it locally.
+To support this, you need to use the Azure File type of storage with the PVC access mode set to `ReadWriteMany`.
+The default storage class uses Azure Disk, which is block storage that behaves like a physical disk and doesn't allow multiple pods to mount it simultaneously.
+An example of a debug pod for retrieving data from coordinator 1 looks something like this:
+```
+apiVersion: v1
+kind: Pod
+metadata:
+  name: debug-pod
+  namespace: 
+spec:
+  tolerations:
+  - operator: "Exists"
+  containers:
+  - name: debug-container
+    image: busybox
+    command: [ "/bin/sh", "-c", "--" ]
+    args: [ "while true; do sleep 30; done;" ]
+    volumeMounts:
+    - name: my-debug-volume
+      mountPath: /coord1-logs
+  volumes:
+  - name: my-debug-volume
+    persistentVolumeClaim:
+      claimName: memgraph-coordinator-1-log-storage-memgraph-coordinator-1-0
+```
+Note that you need to set `metadata.namespace` to the namespace where your instances are installed. Start the pod with:
+```
+kubectl apply -f debug-pod.yaml -n 
+```
+
+and log into it with:
+```
+kubectl exec -it debug-pod -- /bin/sh
+```
+
+Your data should now be visible in the `/coord1-logs` directory.
diff --git a/charts/memgraph-high-availability/aws/README.md b/charts/memgraph-high-availability/aws/README.md
index 3229ced..22dba0d 100644
--- a/charts/memgraph-high-availability/aws/README.md
+++ b/charts/memgraph-high-availability/aws/README.md
@@ -108,5 +108,3 @@ SET INSTANCE instance_1 TO MAIN;
 ```
 
 You can check the state of the cluster with `kubectl get pods -o wide`.
-
-
diff --git a/charts/memgraph-high-availability/templates/NOTES.txt b/charts/memgraph-high-availability/templates/NOTES.txt
index 1b0b336..1e0e459 100644
--- a/charts/memgraph-high-availability/templates/NOTES.txt
+++ b/charts/memgraph-high-availability/templates/NOTES.txt
@@ -11,34 +11,7 @@ Make sure your are connecting to the correct ip address and port. For details ch
 To start, you should add coordinators and register data instances in order to completely setup cluster. Depending on whether you use LoadBalancers
 or NodePorts to expose your service, queries will be slightly different. In both cases you only need to modify 'bolt_server' part of the query while
 'management_server', 'coordinator_server' and 'replication_server' will stay the same. If you are connecting via Lab, select 'Memgraph instance'
-type of connection when adding instances to the cluster.
-
-
-NodePort configuration example
-
-ADD COORDINATOR 2 WITH CONFIG {"bolt_server": ":", "management_server": "memgraph-coordinator-2.default.svc.cluster.local:10000", "coordinator_server": "memgraph-coordinator-2.default.svc.cluster.local:12000"};
-REGISTER INSTANCE instance_1 WITH CONFIG {"bolt_server": ":", "management_server": "memgraph-data-0.default.svc.cluster.local:10000", "replication_server": "memgraph-data-0.default.svc.cluster.local:20000"};
-
-
-LoadBalancer configuration example
-
-ADD COORDINATOR 2 WITH CONFIG {"bolt_server": ":7687", "management_server": "memgraph-coordinator-2.default.svc.cluster.local:10000", "coordinator_server": "memgraph-coordinator-2.default.svc.cluster.local:12000"};
-REGISTER INSTANCE instance_1 WITH CONFIG {"bolt_server": ":7687", "management_server": "memgraph-data-0.default.svc.cluster.local:10000", "replication_server": "memgraph-data-0.default.svc.cluster.local:20000"};
-
-
-CommonLoadBalancer configuration example
-
-ADD COORDINATOR 2 WITH CONFIG {"bolt_server": ":7687", "management_server": "memgraph-coordinator-2.default.svc.cluster.local:10000", "coordinator_server": "memgraph-coordinator-2.default.svc.cluster.local:12000"};
-REGISTER INSTANCE instance_1 WITH CONFIG {"bolt_server": ":7687", "management_server": "memgraph-data-0.default.svc.cluster.local:10000", "replication_server": "memgraph-data-0.default.svc.cluster.local:20000"};
-
-IngressNginx
-
-If you are using ingress-nginx there are several steps we need to do in order to make use of it.
-
-ADD COORDINATOR 2 WITH CONFIG {"bolt_server": ":9012", "management_server": "memgraph-coordinator-2.default.svc.cluster.local:10000", "coordinator_server": "memgraph-coordinator-2.default.svc.cluster.local:12000"};
-REGISTER INSTANCE instance_1 WITH CONFIG {"bolt_server": ":9001", "management_server": "memgraph-data-0.default.svc.cluster.local:10000", "replication_server": "memgraph-data-0.default.svc.cluster.local:20000"};
-
-
-If you are connecting via Lab, specify your coordinator instance IP address and port in Memgraph Lab GUI and select Memgraph HA cluster type of connection.
+type of connection when adding instances to the cluster. To use bolt+routing instead, specify your coordinator instance IP address and port in the Memgraph Lab GUI and select the Memgraph HA
+cluster type of connection.
 
 If you are using minikube, you can find out your node ip using `minikube ip`.
diff --git a/charts/memgraph-high-availability/values.yaml b/charts/memgraph-high-availability/values.yaml
index 0f59953..696e467 100644
--- a/charts/memgraph-high-availability/values.yaml
+++ b/charts/memgraph-high-availability/values.yaml
@@ -1,7 +1,7 @@
 memgraph:
   image:
     repository: memgraph/memgraph
-    tag: 2.22.0 
+    tag: 2.22.0
     pullPolicy: IfNotPresent
   env:
     MEMGRAPH_ENTERPRISE_LICENSE: ""