diff --git a/Dockerfile b/Dockerfile index a30e213c5..ab279e456 100644 --- a/Dockerfile +++ b/Dockerfile @@ -28,8 +28,13 @@ COPY controllers/ controllers/ RUN GOOS=linux GOARCH=amd64 CGO_ENABLED=0 go build -o /src/${PROJECT_NAME} \ -ldflags "-X ${REPO_PATH}/pkg/version.Version=${VERSION} -X ${REPO_PATH}/pkg/version.GitSHA=${GIT_SHA}" main.go -FROM ${DOCKER_REGISTRY:+$DOCKER_REGISTRY/}alpine:${ALPINE_VERSION} AS final +# Backup script +COPY build_backup/backup.sh /zookeeper/backup.sh +RUN chmod +x /zookeeper/backup.sh +# Install tools for backup +RUN apk update && apk add findutils tar +FROM ${DOCKER_REGISTRY:+$DOCKER_REGISTRY/}alpine:${ALPINE_VERSION} AS final ARG PROJECT_NAME=zookeeper-operator diff --git a/Makefile b/Makefile index b1d37124a..7d0b8dca1 100644 --- a/Makefile +++ b/Makefile @@ -104,6 +104,7 @@ generate: # sync crd generated to helm-chart echo '{{- if .Values.crd.create }}' > charts/zookeeper-operator/templates/zookeeper.pravega.io_zookeeperclusters_crd.yaml cat config/crd/bases/zookeeper.pravega.io_zookeeperclusters.yaml >> charts/zookeeper-operator/templates/zookeeper.pravega.io_zookeeperclusters_crd.yaml + cat config/crd/bases/zookeeper.pravega.io_zookeeperbackups.yaml >> charts/zookeeper-operator/templates/zookeeper.pravega.io_zookeeperclusters_crd.yaml echo '{{- end }}' >> charts/zookeeper-operator/templates/zookeeper.pravega.io_zookeeperclusters_crd.yaml diff --git a/PROJECT b/PROJECT index 20941e6f8..52ef199a1 100644 --- a/PROJECT +++ b/PROJECT @@ -1,13 +1,22 @@ domain: zookeeper.pravega.io layout: - go.kubebuilder.io/v3 +plugins: + manifests.sdk.operatorframework.io/v2: {} + scorecard.sdk.operatorframework.io/v2: {} +projectName: zookeeper-operator repo: github.com/pravega/zookeeper-operator resources: - group: zookeeper.pravega.io kind: ZookeeperCluster version: v1beta1 +- api: + crdVersion: v1 + namespaced: true + controller: true + domain: zookeeper.pravega.io + group: zookeeper.pravega.io + kind: ZookeeperBackup + path: 
github.com/pravega/zookeeper-operator/api/v1beta1 + version: v1beta1 version: "3" -plugins: - manifests.sdk.operatorframework.io/v2: {} - scorecard.sdk.operatorframework.io/v2: {} -projectName: zookeeper-operator diff --git a/README.md b/README.md index 49479125e..9022c7bd0 100644 --- a/README.md +++ b/README.md @@ -18,6 +18,7 @@ The project is currently alpha. While no breaking API changes are currently plan * [Upgrade the Zookeeper Operator](#upgrade-the-operator) * [Uninstall the Operator](#uninstall-the-operator) * [The AdminServer](#the-adminserver) + * [Automatic backup (tech preview)](#automatic-backup-tech-preview) * [Development](#development) * [Build the Operator Image](#build-the-operator-image) * [Direct Access to Cluster](#direct-access-to-the-cluster) @@ -366,6 +367,40 @@ The list of available commands are /commands/zabstate ``` +### Automatic backup (tech preview) +A dedicated controller responsible for automatic backups of Zookeeper data was implemented in the Zookeeper operator. Current +implementation provides periodic copying of transaction logs and snapshots (on-disk representation of the data) to +a dedicated persistent volume with the specified storage class within the Kubernetes cluster. Advantages and disadvantages of such an +approach are described in [this article](https://www.elastic.co/blog/zookeeper-backup-a-treatise). +Example CR of a Zookeeper backup: +```yaml +apiVersion: "zookeeper.pravega.io/v1beta1" +kind: "ZookeeperBackup" +metadata: + name: "example-backup" +spec: + zookeeperCluster: "zookeeper-cluster" + schedule: "0 0 */1 * *" + backupsToKeep: "7" + dataStorageClass: "backup-class" + image: + repository: "pravega/zkbackup" + tag: "0.1" +``` + +Parameters: +- *zookeeperCluster* (required) - name of the Zookeeper cluster to back up. +- *schedule* (optional) - the schedule in Cron format. +- *backupsToKeep* (optional) - number of stored backups. +- *dataStorageClass* (required) - storage class used for the persistent volume for backups.
Storage class and related provisioner should be configured separately. +- *image* (optional) - image for backup procedure. + +Backup controller takes following responsibilities: +- Provide cluster health check and cancel backup operation if required. +- Detect ZK leader pod and prepare/reconfigure CronJob configuration (Backup pod should land on node where leader is elected). +- Schedule CronJob with a backup script. +- Provide mechanism to periodic checks to make sure CronJob configuration is updated and valid (for example in case of new leader election) + ## Development ### Build the operator image diff --git a/api/v1beta1/deepcopy_test.go b/api/v1beta1/deepcopy_test.go index 52edca265..23f7fa47c 100644 --- a/api/v1beta1/deepcopy_test.go +++ b/api/v1beta1/deepcopy_test.go @@ -325,3 +325,107 @@ var _ = Describe("ZookeeperCluster DeepCopy", func() { }) }) + +var _ = Describe("ZookeeperBackup DeepCopy", func() { + Context("with defaults", func() { + var ( + str1, str1a, str1b, str2, str2a, str2b string + zkBk, zkBkCopy, zkBkCopy2 *v1beta1.ZookeeperBackup + ) + BeforeEach(func() { + zkBk = &v1beta1.ZookeeperBackup{ + ObjectMeta: metav1.ObjectMeta{ + Name: "example", + Namespace: "default", + }, + } + zkBk.Spec = v1beta1.ZookeeperBackupSpec{ + ZookeeperCluster: "ZK_Cluster1", + } + + zkBk.WithDefaults() + zkBkCopy = zkBk.DeepCopy() + zkBkCopy2 = zkBk.DeepCopy() + + str1 = zkBk.Spec.ZookeeperCluster + str1a = zkBkCopy.Spec.ZookeeperCluster + str1b = zkBkCopy2.Spec.ZookeeperCluster + + str2 = "0 12 */1 * *" + zkBk.Spec.Schedule = str2 + zkBkCopy.Spec = *zkBk.Spec.DeepCopy() + zkBk.Spec.DeepCopyInto(&zkBkCopy2.Spec) + str2a = zkBkCopy.Spec.Schedule + str2b = zkBkCopy2.Spec.Schedule + + zkBkCopy.Status = *zkBk.Status.DeepCopy() + zkBk.Status.DeepCopyInto(&zkBkCopy.Status) + + }) + It("value of str1, str1a and str1b should be equal", func() { + Ω(str1a).To(Equal(str1)) + Ω(str1b).To(Equal(str1)) + }) + It("value of str2, str2a and str2b should be 0 12 */1 * *", func() { + 
Ω(str2a).To(Equal(str2)) + Ω(str2b).To(Equal(str2)) + }) + + It("checking of nil DeepCopy for ZookeeperBackup", func() { + var zk *v1beta1.ZookeeperBackup + zkCopy := zk.DeepCopy() + Ω(zkCopy).To(BeNil()) + }) + + It("checking of DeepCopyObject for ZookeeperBackup", func() { + zkObj := zkBk.DeepCopyObject() + Ω(zkObj.GetObjectKind().GroupVersionKind().Version).To(Equal("")) + }) + It("checking of nil DeepCopyObject for ZookeeperBackup", func() { + var zk *v1beta1.ZookeeperBackup + zkCopy := zk.DeepCopyObject() + Ω(zkCopy).To(BeNil()) + }) + + It("checking of nil DeepCopy for ZookeeperBackupList", func() { + var backupList *v1beta1.ZookeeperBackupList + backupList2 := backupList.DeepCopy() + Ω(backupList2).To(BeNil()) + }) + + It("checking of DeepCopyObject for ZookeeperBackupList", func() { + var backupList v1beta1.ZookeeperBackupList + backupList.ResourceVersion = "v1beta1" + backupList2 := backupList.DeepCopyObject() + Ω(backupList2).ShouldNot(BeNil()) + }) + It("checking of DeepCopyObject for ZookeeperBackupList with items", func() { + var backupList v1beta1.ZookeeperBackupList + backupList.ResourceVersion = "v1beta1" + backupList.Items = []v1beta1.ZookeeperBackup{ + { + Spec: v1beta1.ZookeeperBackupSpec{}, + }, + } + backupList2 := backupList.DeepCopyObject() + Ω(backupList2).ShouldNot(BeNil()) + }) + It("checking of nil DeepCopyObject for ZookeeperBackupList", func() { + var backupList *v1beta1.ZookeeperBackupList + backupList2 := backupList.DeepCopyObject() + Ω(backupList2).To(BeNil()) + }) + + It("checking of nil DeepCopy for ZookeeperBackupSpec", func() { + var backupSpec *v1beta1.ZookeeperBackupSpec + backupSpec2 := backupSpec.DeepCopy() + Ω(backupSpec2).To(BeNil()) + }) + + It("checking of nil DeepCopy for ZookeeperBackupStatus", func() { + var backupStatus *v1beta1.ZookeeperBackupStatus + backupStatus2 := backupStatus.DeepCopy() + Ω(backupStatus2).To(BeNil()) + }) + }) +}) diff --git a/api/v1beta1/zookeeperbackup_types.go 
b/api/v1beta1/zookeeperbackup_types.go new file mode 100644 index 000000000..d0e10ebe1 --- /dev/null +++ b/api/v1beta1/zookeeperbackup_types.go @@ -0,0 +1,102 @@ +/** + * Copyright (c) 2018 Dell Inc., or its subsidiaries. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + */ + +package v1beta1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// ZookeeperBackupSpec defines the desired state of ZookeeperBackup +type ZookeeperBackupSpec struct { + // INSERT ADDITIONAL SPEC FIELDS - desired state of cluster + // Important: Run "make" to regenerate code after modifying this file + + // Name of the ZookeeperCluster to backup + // +kubebuilder:validation:Required + ZookeeperCluster string `json:"zookeeperCluster"` + // Schedule in Cron format + // +kubebuilder:default:="0 0 */1 * *" + // +optional + Schedule string `json:"schedule,omitempty"` + // Number of backups to store + // +kubebuilder:default:="7" + // +optional + BackupsToKeep string `json:"backupsToKeep,omitempty"` + // Data Storage Capacity + // +kubebuilder:default:="1Gi" + // +optional + DataCapacity string `json:"dataCapacity,omitempty"` + // Data Storage Class name + // +kubebuilder:validation:Required + DataStorageClass string `json:"dataStorageClass,omitempty"` + + // Image for backup procedure + Image ContainerImage `json:"image,omitempty"` +} + +func (s *ZookeeperBackupSpec) withDefaults() (changed bool) { + if s.Schedule == "" { + s.Schedule = "0 0 */1 * *" + changed = true + } + if s.BackupsToKeep == "" { + s.BackupsToKeep = "7" + changed = true + } + if s.DataCapacity == "" { + s.DataCapacity = "1Gi" + changed = true + } + if s.Image.Repository == "" { + s.Image.Repository = "pravega/zookeeper-operator" + changed = true + } + if s.Image.Tag == "" { + s.Image.Tag = "latest" + 
changed = true + } + return changed +} + +// ZookeeperBackupStatus defines the observed state of ZookeeperBackup +type ZookeeperBackupStatus struct { + // INSERT ADDITIONAL STATUS FIELD - define observed state of cluster + // Important: Run "make" to regenerate code after modifying this file +} + +//+kubebuilder:object:root=true +//+kubebuilder:subresource:status + +// ZookeeperBackup is the Schema for the zookeeperbackups API +type ZookeeperBackup struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec ZookeeperBackupSpec `json:"spec,omitempty"` + Status ZookeeperBackupStatus `json:"status,omitempty"` +} + +func (z *ZookeeperBackup) WithDefaults() bool { + return z.Spec.withDefaults() +} + +//+kubebuilder:object:root=true + +// ZookeeperBackupList contains a list of ZookeeperBackup +type ZookeeperBackupList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []ZookeeperBackup `json:"items"` +} + +func init() { + SchemeBuilder.Register(&ZookeeperBackup{}, &ZookeeperBackupList{}) +} diff --git a/api/v1beta1/zookeeperbackup_types_test.go b/api/v1beta1/zookeeperbackup_types_test.go new file mode 100644 index 000000000..663971b71 --- /dev/null +++ b/api/v1beta1/zookeeperbackup_types_test.go @@ -0,0 +1,58 @@ +/** + * Copyright (c) 2018 Dell Inc., or its subsidiaries. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + */ + +package v1beta1_test + +import ( + "github.com/pravega/zookeeper-operator/api/v1beta1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" +) + +var _ = Describe("ZookeeperBackup Types", func() { + var zkBk v1beta1.ZookeeperBackup + BeforeEach(func() { + zkBk = v1beta1.ZookeeperBackup{ + ObjectMeta: metav1.ObjectMeta{ + Name: "example", + }, + } + }) + + Context("#WithDefaults", func() { + var changed bool + BeforeEach(func() { + changed = zkBk.WithDefaults() + }) + + It("should return as changed", func() { + Ω(changed).To(BeTrue()) + }) + + It("should have a default schedule (every day)", func() { + Ω(zkBk.Spec.Schedule).To(BeEquivalentTo("0 0 */1 * *")) + }) + + It("should have a default BackupsToKeep number", func() { + Ω(zkBk.Spec.BackupsToKeep).To(BeEquivalentTo("7")) + }) + + It("should have a default DataCapacity size", func() { + Ω(zkBk.Spec.DataCapacity).To(BeEquivalentTo("1Gi")) + }) + + It("should have a default image for backup", func() { + Ω(zkBk.Spec.Image.Repository).To(BeEquivalentTo("pravega/zookeeper-operator")) + Ω(zkBk.Spec.Image.Tag).To(BeEquivalentTo("latest")) + }) + }) +}) diff --git a/api/v1beta1/zz_generated.deepcopy.go b/api/v1beta1/zz_generated.deepcopy.go index 409d989b1..643bd79a0 100644 --- a/api/v1beta1/zz_generated.deepcopy.go +++ b/api/v1beta1/zz_generated.deepcopy.go @@ -291,6 +291,96 @@ func (in *Probes) DeepCopy() *Probes { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ZookeeperBackup) DeepCopyInto(out *ZookeeperBackup) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + out.Spec = in.Spec + out.Status = in.Status +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ZookeeperBackup. +func (in *ZookeeperBackup) DeepCopy() *ZookeeperBackup { + if in == nil { + return nil + } + out := new(ZookeeperBackup) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *ZookeeperBackup) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ZookeeperBackupList) DeepCopyInto(out *ZookeeperBackupList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ZookeeperBackup, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ZookeeperBackupList. +func (in *ZookeeperBackupList) DeepCopy() *ZookeeperBackupList { + if in == nil { + return nil + } + out := new(ZookeeperBackupList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ZookeeperBackupList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ZookeeperBackupSpec) DeepCopyInto(out *ZookeeperBackupSpec) { + *out = *in + out.Image = in.Image +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ZookeeperBackupSpec. +func (in *ZookeeperBackupSpec) DeepCopy() *ZookeeperBackupSpec { + if in == nil { + return nil + } + out := new(ZookeeperBackupSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ZookeeperBackupStatus) DeepCopyInto(out *ZookeeperBackupStatus) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ZookeeperBackupStatus. 
+func (in *ZookeeperBackupStatus) DeepCopy() *ZookeeperBackupStatus { + if in == nil { + return nil + } + out := new(ZookeeperBackupStatus) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ZookeeperCluster) DeepCopyInto(out *ZookeeperCluster) { *out = *in diff --git a/build_backup/backup.sh b/build_backup/backup.sh new file mode 100755 index 000000000..02606113c --- /dev/null +++ b/build_backup/backup.sh @@ -0,0 +1,68 @@ +#!/bin/sh + +echo "--------------- Check required env variables ---------------" +_req_envs=" +Required parameters: \n +BACKUPDIR: $BACKUPDIR \n +ZOOKEEPERDATADIR: $ZOOKEEPERDATADIR \n +BACKUPS_TO_KEEP: $BACKUPS_TO_KEEP \n +" +echo -e "$_req_envs" + +if [ -z "$BACKUPDIR" ] || [ -z "$ZOOKEEPERDATADIR" ] || [ -z "$BACKUPS_TO_KEEP" ]; then + echo -e "Some required env variables aren't defined.\n" + exit 1 +fi + +echo "--------------- Check BACKUPS_TO_KEEP variable ---------------" +if ! [[ $BACKUPS_TO_KEEP =~ ^[0-9]+$ ]] ; + then echo "Error: $BACKUPS_TO_KEEP not a number" >&2; exit 1 +fi +# Create backup directory if absent. +# ---------------------------------- +echo "--------------- Check backup/tmp dirs ---------------" +if [ ! -d "$BACKUPDIR" ] && [ ! -e "$BACKUPDIR" ]; then + mkdir -p "$BACKUPDIR" +else + printf "Backup directory $BACKUPDIR is existed.\n" +fi + +# TO DO: provide additional check of zookeeper health + +# Backup and create tar archive +# ------------------------------ +echo "--------------- Backup ---------------" +TIMESTAMP=$( date +"%Y%m%d%H%M%S" ) +# Include the timestamp in the filename +FILENAME="$BACKUPDIR/zookeeper-$TIMESTAMP.tar.gz" + +tar -zcvf $FILENAME -P $ZOOKEEPERDATADIR > /dev/null 2>&1 +RC=$? 
+ +if [ $RC -ne 0 ]; then + printf "Error generating tar archive.\n" + exit 1 +else + printf "Successfully created a backup tar file.\n" +fi + + +# Cleanup old backups +# ------------------- +echo "--------------- Cleanup ---------------" +echo "List of backups:" +cd $BACKUPDIR + +BACKUPS_AMOUNT=`find . -path "*/zookeeper-*.tar.gz" -type f -printf "\n%AD %AT %p" | wc -l` +TO_DELETE=$(( $BACKUPS_AMOUNT - $BACKUPS_TO_KEEP )) +if [ $TO_DELETE -gt 0 ] ; then + echo "Keeping only $BACKUPS_TO_KEEP full backups" + ls -t zookeeper-*.tar.gz | tail -n -$TO_DELETE | xargs -d '\n' rm -rf +else + echo "There are less backups than required, nothing to delete." +fi + +# Cleanup old backups +# ------------------- +echo "--------------- Current backups ---------------" +ls -lt diff --git a/charts/zookeeper-operator/templates/zookeeper.pravega.io_zookeeperclusters_crd.yaml b/charts/zookeeper-operator/templates/zookeeper.pravega.io_zookeeperclusters_crd.yaml index 03555d8d9..217364cf1 100644 --- a/charts/zookeeper-operator/templates/zookeeper.pravega.io_zookeeperclusters_crd.yaml +++ b/charts/zookeeper-operator/templates/zookeeper.pravega.io_zookeeperclusters_crd.yaml @@ -5849,4 +5849,86 @@ spec: storage: true subresources: status: {} +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.9.0 + creationTimestamp: null + name: zookeeperbackups.zookeeper.pravega.io +spec: + group: zookeeper.pravega.io + names: + kind: ZookeeperBackup + listKind: ZookeeperBackupList + plural: zookeeperbackups + singular: zookeeperbackup + scope: Namespaced + versions: + - name: v1beta1 + schema: + openAPIV3Schema: + description: ZookeeperBackup is the Schema for the zookeeperbackups API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: ZookeeperBackupSpec defines the desired state of ZookeeperBackup + properties: + backupsToKeep: + default: "7" + description: Number of backups to store + type: string + dataCapacity: + default: 1Gi + description: Data Storage Capacity + type: string + dataStorageClass: + description: Data Storage Class name + type: string + image: + description: Image for backup procedure + properties: + pullPolicy: + description: PullPolicy describes a policy for if/when to pull + a container image + enum: + - Always + - Never + - IfNotPresent + type: string + repository: + type: string + tag: + type: string + type: object + schedule: + default: 0 0 */1 * * + description: Schedule in Cron format + type: string + zookeeperCluster: + description: Name of the ZookeeperCluster to backup + type: string + required: + - zookeeperCluster + type: object + status: + description: ZookeeperBackupStatus defines the observed state of ZookeeperBackup + type: object + type: object + served: true + storage: true + subresources: + status: {} {{- end }} diff --git a/config/crd/bases/zookeeper.pravega.io_zookeeperbackups.yaml b/config/crd/bases/zookeeper.pravega.io_zookeeperbackups.yaml new file mode 100644 index 000000000..4dbba0c14 --- /dev/null +++ b/config/crd/bases/zookeeper.pravega.io_zookeeperbackups.yaml @@ -0,0 +1,82 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.9.0 + creationTimestamp: null + 
name: zookeeperbackups.zookeeper.pravega.io +spec: + group: zookeeper.pravega.io + names: + kind: ZookeeperBackup + listKind: ZookeeperBackupList + plural: zookeeperbackups + singular: zookeeperbackup + scope: Namespaced + versions: + - name: v1beta1 + schema: + openAPIV3Schema: + description: ZookeeperBackup is the Schema for the zookeeperbackups API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: ZookeeperBackupSpec defines the desired state of ZookeeperBackup + properties: + backupsToKeep: + default: "7" + description: Number of backups to store + type: string + dataCapacity: + default: 1Gi + description: Data Storage Capacity + type: string + dataStorageClass: + description: Data Storage Class name + type: string + image: + description: Image for backup procedure + properties: + pullPolicy: + description: PullPolicy describes a policy for if/when to pull + a container image + enum: + - Always + - Never + - IfNotPresent + type: string + repository: + type: string + tag: + type: string + type: object + schedule: + default: 0 0 */1 * * + description: Schedule in Cron format + type: string + zookeeperCluster: + description: Name of the ZookeeperCluster to backup + type: string + required: + - zookeeperCluster + type: object + status: + description: ZookeeperBackupStatus defines the 
observed state of ZookeeperBackup + type: object + type: object + served: true + storage: true + subresources: + status: {} diff --git a/config/crd/kustomization.yaml b/config/crd/kustomization.yaml index 2d4e4fd53..aae2b494d 100644 --- a/config/crd/kustomization.yaml +++ b/config/crd/kustomization.yaml @@ -3,16 +3,19 @@ # It should be run by config/default resources: - bases/zookeeper.pravega.io_zookeeperclusters.yaml +- bases/zookeeper.pravega.io_zookeeperbackups.yaml # +kubebuilder:scaffold:crdkustomizeresource patchesStrategicMerge: # [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix. # patches here are for enabling the conversion webhook for each CRD #- patches/webhook_in_zookeeperclusters.yaml +#- patches/webhook_in_zookeeperbackups.yaml # +kubebuilder:scaffold:crdkustomizewebhookpatch # [CERTMANAGER] To enable webhook, uncomment all the sections with [CERTMANAGER] prefix. # patches here are for enabling the CA injection for each CRD #- patches/cainjection_in_zookeeperclusters.yaml +#- patches/cainjection_in_zookeeperbackups.yaml # +kubebuilder:scaffold:crdkustomizecainjectionpatch diff --git a/config/rbac/role.yaml b/config/rbac/role.yaml index 21ba69ead..330b5bc21 100644 --- a/config/rbac/role.yaml +++ b/config/rbac/role.yaml @@ -5,6 +5,32 @@ metadata: creationTimestamp: null name: manager-role rules: +- apiGroups: + - zookeeper.pravega.io.zookeeper.pravega.io + resources: + - zookeeperbackups + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - zookeeper.pravega.io.zookeeper.pravega.io + resources: + - zookeeperbackups/finalizers + verbs: + - update +- apiGroups: + - zookeeper.pravega.io.zookeeper.pravega.io + resources: + - zookeeperbackups/status + verbs: + - get + - patch + - update - apiGroups: - zookeeper.pravega.io.zookeeper.pravega.io resources: diff --git a/config/samples/pravega/zookeeper_v1beta1_zookeeperbackup_cr.yaml 
b/config/samples/pravega/zookeeper_v1beta1_zookeeperbackup_cr.yaml new file mode 100644 index 000000000..cf9e4c8ac --- /dev/null +++ b/config/samples/pravega/zookeeper_v1beta1_zookeeperbackup_cr.yaml @@ -0,0 +1,12 @@ +apiVersion: zookeeper.pravega.io/v1beta1 +kind: ZookeeperBackup +metadata: + name: example-zookeeperbackup +spec: + zookeeperCluster: "tf-zookeeper" + schedule: "0 0 */1 * *" + backupsToKeep: "7" + dataStorageClass: "backup-class" + image: + repository: "pravega/zkbackup" + tag: "0.1" diff --git a/controllers/controllers_suite_test.go b/controllers/controllers_suite_test.go new file mode 100644 index 000000000..934931b42 --- /dev/null +++ b/controllers/controllers_suite_test.go @@ -0,0 +1,23 @@ +/** + * Copyright (c) 2021 Dell Inc., or its subsidiaries. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + */ + +package controllers + +import ( + "testing" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" +) + +func TestZookeepercluster(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "Zookeeper Controllers Spec") +} diff --git a/controllers/zookeeperbackup_controller.go b/controllers/zookeeperbackup_controller.go new file mode 100644 index 000000000..b13e41fba --- /dev/null +++ b/controllers/zookeeperbackup_controller.go @@ -0,0 +1,405 @@ +/** + * Copyright (c) 2018 Dell Inc., or its subsidiaries. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (&the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + */ +package controllers + +import ( + "context" + "encoding/json" + "fmt" + "github.com/go-logr/logr" + zookeeperv1beta1 "github.com/pravega/zookeeper-operator/api/v1beta1" + "io" + batchv1 "k8s.io/api/batch/v1" + batchv1beta1 "k8s.io/api/batch/v1beta1" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + "net/http" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + logf "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + "strconv" + "strings" + + "github.com/mitchellh/hashstructure/v2" +) + +// ReconcileTime is the delay between reconciliations +const PVCSuffix = "-pvc" + +var logBk = logf.Log.WithName("controller_zookeeperbackup") +var hash uint64 + +type LeaderGetter func(hostname string, port int32) (string, error) + +// ZookeeperBackupReconciler reconciles a ZookeeperBackup object +type ZookeeperBackupReconciler struct { + Client client.Client + Scheme *runtime.Scheme + Log logr.Logger + LeaderGetter LeaderGetter +} + +//+kubebuilder:rbac:groups=zookeeper.pravega.io.zookeeper.pravega.io,resources=zookeeperbackups,verbs=get;list;watch;create;update;patch;delete +//+kubebuilder:rbac:groups=zookeeper.pravega.io.zookeeper.pravega.io,resources=zookeeperbackups/status,verbs=get;update;patch +//+kubebuilder:rbac:groups=zookeeper.pravega.io.zookeeper.pravega.io,resources=zookeeperbackups/finalizers,verbs=update + +func (r *ZookeeperBackupReconciler) Reconcile(_ context.Context, request reconcile.Request) (reconcile.Result, error) { + r.Log = logBk.WithValues("Request.Namespace", request.Namespace, "Request.Name", request.Name) + r.Log.Info("Reconciling 
ZookeeperBackup") + + // Fetch the ZookeeperBackup instance + zookeeperBackup := &zookeeperv1beta1.ZookeeperBackup{} + err := r.Client.Get(context.TODO(), request.NamespacedName, zookeeperBackup) + if err != nil { + if errors.IsNotFound(err) { + // Request object not found, could have been deleted after reconcile request. + // Owned objects are automatically garbage collected. For additional cleanup logic use finalizers. + // Return and don't requeue + return reconcile.Result{}, nil + } + // Error reading the object - requeue the request. + return reconcile.Result{}, err + } + changed := zookeeperBackup.WithDefaults() + if changed { + r.Log.Info("Setting default settings for zookeeper-backup") + if err := r.Client.Update(context.TODO(), zookeeperBackup); err != nil { + return reconcile.Result{}, err + } + return reconcile.Result{Requeue: true}, nil + } + + // Check if zookeeper cluster exists + foundZookeeperCluster := &zookeeperv1beta1.ZookeeperCluster{} + zkCluster := zookeeperBackup.Spec.ZookeeperCluster + err = r.Client.Get(context.TODO(), types.NamespacedName{Name: zkCluster, Namespace: zookeeperBackup.Namespace}, foundZookeeperCluster) + if err != nil && errors.IsNotFound(err) { + r.Log.Error(err, fmt.Sprintf("Zookeeper cluster '%s' not found", zkCluster)) + return reconcile.Result{}, err + } + + // Define a new PVC object + pvc := newPVCForZookeeperBackup(zookeeperBackup) + // Set ZookeeperBackup instance as the owner and controller + if err := controllerutil.SetControllerReference(zookeeperBackup, pvc, r.Scheme); err != nil { + r.Log.Error(err, fmt.Sprintf("Can't set reference for pvc '%s'", pvc.Name)) + return reconcile.Result{}, err + } + if pvc.Annotations == nil { + pvc.Annotations = make(map[string]string) + } + + // Calculate hash of PVC Spec + hash, err = hashstructure.Hash(pvc.Spec, hashstructure.FormatV2, nil) + if err != nil { + return reconcile.Result{}, err + } + pvcHashStr := strconv.FormatUint(hash, 10) + + // Check if PVC already created + 
foundPVC := &corev1.PersistentVolumeClaim{} + err = r.Client.Get(context.TODO(), types.NamespacedName{Name: pvc.Name, Namespace: pvc.Namespace}, foundPVC) + if err != nil && errors.IsNotFound(err) { + r.Log.Info("Creating a new PersistenVolumeClaim") + pvc.Annotations["last-applied-hash"] = pvcHashStr + err = r.Client.Create(context.TODO(), pvc) + if err != nil { + r.Log.Error(err, fmt.Sprintf("Can't create pvc '%s'", pvc.Name)) + return reconcile.Result{}, err + } + } else if err != nil { + return reconcile.Result{}, err + } else { + // Check if pvc requires to be updated + if foundPVC.Annotations["last-applied-hash"] == pvcHashStr { + r.Log.Info("PVC already exists and looks updated", "pvc.Namespace", foundPVC.Namespace, "pvc.Name", foundPVC.Name) + } else { + pvc.Annotations["last-applied-hash"] = pvcHashStr + r.Log.Info("Update PVC", "Namespace", pvc.Namespace, "Name", pvc.Name) + err = r.Client.Update(context.TODO(), pvc) + if err != nil { + r.Log.Error(err, fmt.Sprintf("PVC '%s' cannot be updated", pvc.Name)) + return reconcile.Result{}, err + } + } + } + + // Define a new CronJob object + cronJob := newCronJobForCR(zookeeperBackup) + // Set ZookeeperBackup instance as the owner and controller + if err := controllerutil.SetControllerReference(zookeeperBackup, cronJob, r.Scheme); err != nil { + return reconcile.Result{}, err + } + + // Check Zookeeper Cluster status + if foundZookeeperCluster.Spec.Replicas != foundZookeeperCluster.Status.ReadyReplicas { + r.Log.Info(fmt.Sprintf("Not all cluster replicas are ready: %d/%d. 
Suspend CronJob", + foundZookeeperCluster.Status.ReadyReplicas, foundZookeeperCluster.Status.Replicas)) + *cronJob.Spec.Suspend = true + } + + // Get zookeeper service hostname/ip and port + svcAdminName := foundZookeeperCluster.GetAdminServerServiceName() + foundSvcAdmin := &corev1.Service{} + err = r.Client.Get(context.TODO(), types.NamespacedName{ + Name: svcAdminName, + Namespace: foundZookeeperCluster.Namespace, + }, foundSvcAdmin) + if err != nil { + r.Log.Error(err, fmt.Sprintf("Can't get Zookeeper admin service '%s'", svcAdminName)) + return reconcile.Result{}, err + } + + adminIp := foundSvcAdmin.Spec.ClusterIP + svcPort := GetServicePortByName(foundSvcAdmin, "tcp-admin-server") + + // Get host with zookeeper leader + leaderHostname, err := r.LeaderGetter(adminIp, svcPort.Port) + if err != nil { + r.Log.Error(err, "Leader hostname can't be found") + return reconcile.Result{}, err + } + r.Log.Info(fmt.Sprintf("Leader hostname: %s", leaderHostname)) + + // Landing backup pod on the same node with leader + podList := &corev1.PodList{} + opts := []client.ListOption{ + client.InNamespace(request.NamespacedName.Namespace), + client.MatchingLabels{"app": zkCluster}, + } + err = r.Client.List(context.TODO(), podList, opts...) + if err != nil { + if errors.IsNotFound(err) { + msg := fmt.Sprintf("Pods cannot be found by label app:%s", zookeeperBackup.Name) + r.Log.Error(err, msg) + } + return reconcile.Result{}, err + } + + leaderFound := false + for _, pod := range podList.Items { + if pod.Spec.Hostname == leaderHostname { + leaderFound = true + r.Log.Info(fmt.Sprintf("Leader was found. 
Pod: %s (node: %s)", pod.Name, pod.Spec.NodeName)) + // Set appropriate NodeSelector and PVC ClaimName + cronJob.Spec.JobTemplate.Spec.Template.Spec.NodeSelector = + map[string]string{"kubernetes.io/hostname": pod.Spec.NodeName} + vol := GetVolumeByName(cronJob.Spec.JobTemplate.Spec.Template.Spec.Volumes, "zookeeperbackup-data") + vol.VolumeSource.PersistentVolumeClaim.ClaimName = "data-" + pod.Name + break + } + } + if !leaderFound { + r.Log.Info("Pod with leader role wasn't found. Suspend CronJob") + *cronJob.Spec.Suspend = true + } + + if cronJob.Annotations == nil { + cronJob.Annotations = make(map[string]string) + } + + // Calculate hash of CronJob Spec + hash, err := hashstructure.Hash(cronJob.Spec, hashstructure.FormatV2, nil) + if err != nil { + return reconcile.Result{}, err + } + hashStr := strconv.FormatUint(hash, 10) + + // Check if this CronJob already exists + foundCJ := &batchv1beta1.CronJob{} + err = r.Client.Get(context.TODO(), types.NamespacedName{Name: cronJob.Name, Namespace: cronJob.Namespace}, foundCJ) + if err != nil && errors.IsNotFound(err) { + r.Log.Info("Creating a new CronJob", "CronJob.Namespace", cronJob.Namespace, "CronJob.Name", cronJob.Name) + cronJob.Annotations["last-applied-hash"] = hashStr + err = r.Client.Create(context.TODO(), cronJob) + if err != nil { + return reconcile.Result{}, err + } + } else if err != nil { + return reconcile.Result{}, err + } else { + // Check if CronJob requires to be updated + if foundCJ.Annotations["last-applied-hash"] == hashStr { + r.Log.Info("CronJob already exists and looks updated", "CronJob.Namespace", foundCJ.Namespace, "CronJob.Name", foundCJ.Name) + } else { + cronJob.Annotations["last-applied-hash"] = hashStr + r.Log.Info("Update CronJob", "Namespace", cronJob.Namespace, "Name", cronJob.Name) + err = r.Client.Update(context.TODO(), cronJob) + if err != nil { + r.Log.Error(err, "CronJob cannot be updated") + return reconcile.Result{}, err + } + } + } + + // Requeue + 
r.Log.Info(fmt.Sprintf("Rerun reconcile after %s sec.", ReconcileTime)) + return reconcile.Result{RequeueAfter: ReconcileTime}, nil +} + +func GetLeader(hostname string, port int32) (string, error) { + resp, err := http.Get(fmt.Sprintf("http://%s:%d/commands/leader", hostname, port)) + if err != nil { + return "", err + } + defer resp.Body.Close() + body, err := io.ReadAll(resp.Body) + if err != nil { + return "", err + } + var result map[string]interface{} + err = json.Unmarshal(body, &result) + if err != nil { + return "", err + } + leaderIp := result["leader_ip"].(string) + if err != nil && errors.IsNotFound(err) { + return "", err + } + leaderHostname := strings.Split(leaderIp, ".")[0] + return leaderHostname, nil +} + +func GetServicePortByName(service *corev1.Service, name string) *corev1.ServicePort { + for _, port := range service.Spec.Ports { + if port.Name == name { + return &port + } + } + return nil +} + +func GetVolumeByName(volumes []corev1.Volume, name string) *corev1.Volume { + for _, vol := range volumes { + if vol.Name == name { + return &vol + } + } + return nil +} + +// newPVCForZookeeperBackup returns pvc definition +func newPVCForZookeeperBackup(cr *zookeeperv1beta1.ZookeeperBackup) *corev1.PersistentVolumeClaim { + labels := map[string]string{ + "app": cr.Name, + } + pvc := &corev1.PersistentVolumeClaim{ + ObjectMeta: metav1.ObjectMeta{ + Name: cr.Name + PVCSuffix, + Namespace: cr.Namespace, + Labels: labels, + }, + Spec: corev1.PersistentVolumeClaimSpec{ + AccessModes: []corev1.PersistentVolumeAccessMode{ + corev1.PersistentVolumeAccessMode("ReadWriteOnce"), + }, + Resources: corev1.ResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceStorage: resource.MustParse(cr.Spec.DataCapacity), + }, + }, + StorageClassName: &cr.Spec.DataStorageClass, + }, + } + return pvc +} + +// newCronJobForCR returns a cronJob with the same name/namespace as the cr +func newCronJobForCR(cr *zookeeperv1beta1.ZookeeperBackup) *batchv1beta1.CronJob 
{ + labels := map[string]string{ + "app": cr.Name, + } + suspend := false + backupMountPath := "/var/backup" + return &batchv1beta1.CronJob{ + ObjectMeta: metav1.ObjectMeta{ + Name: cr.Name + "-backup", + Namespace: cr.Namespace, + Labels: labels, + }, + Spec: batchv1beta1.CronJobSpec{ + Schedule: cr.Spec.Schedule, + Suspend: &suspend, + JobTemplate: batchv1beta1.JobTemplateSpec{ + Spec: batchv1.JobSpec{ + Template: corev1.PodTemplateSpec{ + Spec: corev1.PodSpec{ + RestartPolicy: "Never", + Containers: []corev1.Container{ + { + Name: "run-zookeeperbackup", + Image: cr.Spec.Image.ToString(), + ImagePullPolicy: cr.Spec.Image.PullPolicy, + VolumeMounts: []corev1.VolumeMount{ + { + Name: "zookeeperbackup-vol", + MountPath: backupMountPath, + }, + { + Name: "zookeeperbackup-data", + MountPath: "/data", + }, + }, + Env: []corev1.EnvVar{ + { + Name: "BACKUPDIR", + Value: backupMountPath, + }, + { + Name: "ZOOKEEPERDATADIR", + Value: "/data/version-2/", + }, + { + Name: "BACKUPS_TO_KEEP", + Value: cr.Spec.BackupsToKeep, + }, + }, + Command: []string{"/zookeeper/backup.sh"}, + }, + }, + NodeName: "", + Volumes: []corev1.Volume{ + { + Name: "zookeeperbackup-vol", + VolumeSource: corev1.VolumeSource{ + PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{ + ClaimName: cr.Name + PVCSuffix, + }, + }, + }, + { + Name: "zookeeperbackup-data", + VolumeSource: corev1.VolumeSource{ + PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{ + ClaimName: "", + }, + }, + }, + }, + }, + }, + }, + }, + }, + } +} + +// SetupWithManager sets up the controller with the Manager. +func (r *ZookeeperBackupReconciler) SetupWithManager(mgr ctrl.Manager) error { + return ctrl.NewControllerManagedBy(mgr). + For(&zookeeperv1beta1.ZookeeperBackup{}). 
+ Complete(r) +} diff --git a/controllers/zookeeperbackup_controller_test.go b/controllers/zookeeperbackup_controller_test.go new file mode 100644 index 000000000..56179d9da --- /dev/null +++ b/controllers/zookeeperbackup_controller_test.go @@ -0,0 +1,315 @@ +/** + * Copyright (c) 2018 Dell Inc., or its subsidiaries. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + */ + +package controllers + +import ( + "context" + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + "github.com/pravega/zookeeper-operator/api/v1beta1" + batchv1beta1 "k8s.io/api/batch/v1beta1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/kubernetes/scheme" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client/fake" + "sigs.k8s.io/controller-runtime/pkg/reconcile" +) + +const ( + ZkClusterName = "zk-cluster" + ZkBackupName = "zk-backup" + Namespace = "default" + Hostname = "node-0" +) + +func MockGetLeader(_ string, _ int32) (string, error) { + return Hostname, nil +} + +var _ = Describe("ZookeeperBackup controller", func() { + + var ( + s = scheme.Scheme + mockZkClient = new(MockZookeeperClient) + rCl *ZookeeperClusterReconciler + rBk *ZookeeperBackupReconciler + ) + + Context("Reconcile", func() { + var ( + cl client.Client + res reconcile.Result + req reconcile.Request + zkBk *v1beta1.ZookeeperBackup + zkCl *v1beta1.ZookeeperCluster + leaderPod *corev1.Pod + ) + + BeforeEach(func() { + req = reconcile.Request{ + NamespacedName: types.NamespacedName{ + Name: ZkBackupName, + Namespace: Namespace, + }, + } + zkCl = &v1beta1.ZookeeperCluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: ZkClusterName, + Namespace: Namespace, + }, + } + zkBk = &v1beta1.ZookeeperBackup{ + 
ObjectMeta: metav1.ObjectMeta{ + Name: ZkBackupName, + Namespace: Namespace, + }, + Spec: v1beta1.ZookeeperBackupSpec{ + ZookeeperCluster: ZkClusterName, + }, + } + leaderPod = &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: ZkClusterName + "-0", + Namespace: Namespace, + Labels: map[string]string{ + "app": ZkClusterName, + }, + }, + Spec: corev1.PodSpec{ + Hostname: Hostname, + }, + } + s.AddKnownTypes(v1beta1.GroupVersion, zkCl, zkBk) + }) + + When("ZK cluster isn't deployed", func() { + var ( + errBk error + ) + + BeforeEach(func() { + zkBk.WithDefaults() + cl = fake.NewClientBuilder().WithScheme(scheme.Scheme).WithRuntimeObjects(zkBk).Build() + rBk = &ZookeeperBackupReconciler{Client: cl, Scheme: s, LeaderGetter: MockGetLeader} + _, errBk = rBk.Reconcile(context.TODO(), req) + }) + + It("should raise an error", func() { + Ω(errBk).To(HaveOccurred()) + }) + + }) + + When("ZK Cluster is deployed", func() { + var ( + resCl reconcile.Result + reqCl reconcile.Request + errCl error + ) + + Context("and empty backup spec", func() { + var ( + errBk error + err error + ) + + BeforeEach(func() { + zkCl.WithDefaults() + cl = fake.NewClientBuilder().WithScheme(scheme.Scheme).WithRuntimeObjects(zkCl, zkBk).Build() + rCl = &ZookeeperClusterReconciler{Client: cl, Scheme: s, ZkClient: mockZkClient} + reqCl = reconcile.Request{NamespacedName: types.NamespacedName{Name: ZkClusterName, Namespace: Namespace}} + resCl, errCl = rCl.Reconcile(context.TODO(), reqCl) + Ω(errCl).To(BeNil()) + Ω(resCl.RequeueAfter).To(Equal(ReconcileTime)) + rBk = &ZookeeperBackupReconciler{Client: cl, Scheme: s, LeaderGetter: MockGetLeader} + res, errBk = rBk.Reconcile(context.TODO(), req) + }) + + It("shouldn't error", func() { + Ω(errBk).To(BeNil()) + }) + + It("should set the default spec options", func() { + foundZkBk := &v1beta1.ZookeeperBackup{} + err = cl.Get(context.TODO(), req.NamespacedName, foundZkBk) + Ω(err).To(BeNil()) + Ω(foundZkBk.Spec.BackupsToKeep).To(BeEquivalentTo("7")) + }) + 
+ It("should requeue the request", func() { + Ω(res.Requeue).To(BeTrue()) + }) + }) + + Context("with default backup specs", func() { + var ( + errBk error + err error + jobSpec *batchv1beta1.CronJob + ) + + BeforeEach(func() { + jobSpec = newCronJobForCR(zkBk) + + zkCl.WithDefaults() + zkCl.Status.ReadyReplicas = 3 + zkBk.WithDefaults() + cl = fake.NewClientBuilder().WithScheme(scheme.Scheme).WithRuntimeObjects(zkCl, zkBk, leaderPod).Build() + rCl = &ZookeeperClusterReconciler{Client: cl, Scheme: s, ZkClient: mockZkClient} + reqCl = reconcile.Request{NamespacedName: types.NamespacedName{Name: ZkClusterName, Namespace: Namespace}} + resCl, errCl = rCl.Reconcile(context.TODO(), reqCl) + Ω(errCl).To(BeNil()) + Ω(resCl.RequeueAfter).To(Equal(ReconcileTime)) + rBk = &ZookeeperBackupReconciler{Client: cl, Scheme: s, LeaderGetter: MockGetLeader} + res, errBk = rBk.Reconcile(context.TODO(), req) + }) + + It("shouldn't error", func() { + Ω(errBk).To(BeNil()) + }) + + It("should create PVC", func() { + pvcSpec := newPVCForZookeeperBackup(zkBk) + foundPVC := &corev1.PersistentVolumeClaim{} + err = cl.Get(context.TODO(), types.NamespacedName{Name: pvcSpec.Name, Namespace: Namespace}, foundPVC) + Ω(err).To(BeNil()) + }) + + It("should create CronJob", func() { + foundCronJob := &batchv1beta1.CronJob{} + err = cl.Get(context.TODO(), types.NamespacedName{Name: jobSpec.Name, Namespace: Namespace}, foundCronJob) + Ω(err).To(BeNil()) + Ω(*foundCronJob.Spec.Suspend).Should(BeFalse()) + }) + + It("should requeue after ReconcileTime delay", func() { + Ω(res.RequeueAfter).To(Equal(ReconcileTime)) + }) + + Context("pvc parameters have changed", func() { + + BeforeEach(func() { + zkBk.Spec.DataStorageClass = "backup-hdd" + err = cl.Update(context.TODO(), zkBk) + Ω(err).To(BeNil()) + res, errBk = rBk.Reconcile(context.TODO(), req) + Ω(errBk).To(BeNil()) + }) + + It("should update pvc", func() { + pvcSpec := newPVCForZookeeperBackup(zkBk) + foundPVC := &corev1.PersistentVolumeClaim{} + err 
= cl.Get(context.TODO(), types.NamespacedName{Name: pvcSpec.Name, Namespace: Namespace}, foundPVC) + Ω(err).To(BeNil()) + Ω(*foundPVC.Spec.StorageClassName).Should(BeEquivalentTo(zkBk.Spec.DataStorageClass)) + }) + + }) + + Context("cronJob parameters have changed", func() { + + BeforeEach(func() { + zkBk.Spec.Schedule = "0 12 */1 * *" + zkBk.Spec.BackupsToKeep = "15" + err = cl.Update(context.TODO(), zkBk) + Ω(err).To(BeNil()) + res, errBk = rBk.Reconcile(context.TODO(), req) + Ω(errBk).To(BeNil()) + }) + + It("should update cronJob", func() { + foundCronJob := &batchv1beta1.CronJob{} + err = cl.Get(context.TODO(), types.NamespacedName{Name: jobSpec.Name, Namespace: Namespace}, foundCronJob) + Ω(err).To(BeNil()) + envVars := foundCronJob.Spec.JobTemplate.Spec.Template.Spec.Containers[0].Env + for _, envVar := range envVars { + if envVar.Name == "BACKUPS_TO_KEEP" { + Ω(envVar.Value).Should(BeEquivalentTo(zkBk.Spec.BackupsToKeep)) + } + } + Ω(foundCronJob.Spec.Schedule).Should(BeEquivalentTo(zkBk.Spec.Schedule)) + Ω(*foundCronJob.Spec.Suspend).Should(BeFalse()) + }) + }) + + Context("ZK cluster becomes not Ready", func() { + + BeforeEach(func() { + err = cl.Get(context.TODO(), types.NamespacedName{Name: zkCl.Name, Namespace: zkCl.Namespace}, zkCl) + Ω(err).To(BeNil()) + zkCl.Status.Replicas = 3 + zkCl.Status.ReadyReplicas = 2 + err = cl.Update(context.TODO(), zkCl) + Ω(err).To(BeNil()) + res, errBk = rBk.Reconcile(context.TODO(), req) + Ω(errBk).To(BeNil()) + }) + + It("should suspend cronJob", func() { + foundCronJob := &batchv1beta1.CronJob{} + err = cl.Get(context.TODO(), types.NamespacedName{Name: jobSpec.Name, Namespace: Namespace}, foundCronJob) + Ω(err).To(BeNil()) + Ω(*foundCronJob.Spec.Suspend).Should(BeTrue()) + }) + }) + + Context("can't find pod with leader", func() { + BeforeEach(func() { + err = cl.Delete(context.TODO(), leaderPod) + Ω(err).To(BeNil()) + res, errBk = rBk.Reconcile(context.TODO(), req) + Ω(errBk).To(BeNil()) + }) + + It("should suspend 
cronJob", func() { + foundCronJob := &batchv1beta1.CronJob{} + err = cl.Get(context.TODO(), types.NamespacedName{Name: jobSpec.Name, Namespace: Namespace}, foundCronJob) + Ω(err).To(BeNil()) + Ω(*foundCronJob.Spec.Suspend).Should(BeTrue()) + }) + }) + }) + }) + + Context("Checking result when request namespace does not contains zookeeper backup", func() { + var ( + errBk error + ) + + BeforeEach(func() { + cl = fake.NewClientBuilder().WithScheme(scheme.Scheme).WithRuntimeObjects(zkBk).Build() + rBk = &ZookeeperBackupReconciler{Client: cl, Scheme: s, LeaderGetter: MockGetLeader} + req.NamespacedName.Namespace = "temp" + res, errBk = rBk.Reconcile(context.TODO(), req) + }) + It("should have false in reconcile result", func() { + Ω(res.Requeue).To(BeFalse()) + Ω(errBk).To(BeNil()) + }) + }) + + Context("ZK backup isn't registered", func() { + var ( + errBk error + ) + cl = fake.NewClientBuilder().WithScheme(scheme.Scheme).Build() + rBk = &ZookeeperBackupReconciler{Client: cl, Scheme: s, LeaderGetter: MockGetLeader} + res, errBk = rBk.Reconcile(context.TODO(), req) + + It("should raise an error and shouldn't requeue the request", func() { + Ω(errBk).To(HaveOccurred()) + Ω(res.Requeue).To(BeFalse()) + }) + }) + }) +}) diff --git a/controllers/zookeepercluster_controller_test.go b/controllers/zookeepercluster_controller_test.go index fe530dbd7..0e082b60a 100644 --- a/controllers/zookeepercluster_controller_test.go +++ b/controllers/zookeepercluster_controller_test.go @@ -13,7 +13,6 @@ package controllers import ( "context" "os" - "testing" "time" "github.com/pravega/zookeeper-operator/api/v1beta1" @@ -33,11 +32,6 @@ import ( . 
"github.com/onsi/gomega" ) -func TestZookeepercluster(t *testing.T) { - RegisterFailHandler(Fail) - RunSpecs(t, "ZookeeperCluster Controller Spec") -} - type MockZookeeperClient struct { // dummy struct } diff --git a/go.mod b/go.mod index 1f60fd171..df77347a4 100644 --- a/go.mod +++ b/go.mod @@ -5,6 +5,7 @@ go 1.18 require ( github.com/ghodss/yaml v1.0.0 github.com/go-logr/logr v1.2.3 + github.com/mitchellh/hashstructure/v2 v2.0.2 github.com/onsi/ginkgo v1.16.5 github.com/onsi/gomega v1.19.0 github.com/operator-framework/operator-lib v0.11.0 diff --git a/go.sum b/go.sum index 2388f754b..326b2c4e4 100644 --- a/go.sum +++ b/go.sum @@ -26,6 +26,7 @@ cloud.google.com/go v0.93.3/go.mod h1:8utlLll2EF5XMAV15woO4lSbWQlk8rer9aLOfLh7+Y cloud.google.com/go v0.94.1/go.mod h1:qAlAugsXlC+JWO+Bke5vCtc9ONxjQT3drlTTnAplMW4= cloud.google.com/go v0.97.0/go.mod h1:GF7l59pYBVlXQIBLx3a761cZ41F9bBH3JUlihCt2Udc= cloud.google.com/go v0.99.0/go.mod h1:w0Xx2nLzqWJPuozYQX+hFfCSI8WioryfRDzkoI/Y2ZA= +cloud.google.com/go v0.100.2 h1:t9Iw5QH5v4XtlEQaCtUY7x6sCABps8sW0acw7e2WQ6Y= cloud.google.com/go v0.100.2/go.mod h1:4Xra9TjzAeYHrl5+oeLlzbM2k3mjVhZh4UqTZ//w99A= cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= @@ -88,11 +89,13 @@ github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJmJgSg28kpZDP6UIiPt0e0Oz0kqKNGyRaWEPv84= +github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ= github.com/buger/jsonparser v1.1.1/go.mod 
h1:6RYKKt7H4d4+iWqouImQ9R2FZql3VbhNgx27UK13J/0= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/certifi/gocertifi v0.0.0-20191021191039-0944d244cd40/go.mod h1:sGbDF6GwGcLpkNXPUTkMRoywsNa/ol15pxFe6ERfguA= github.com/certifi/gocertifi v0.0.0-20200922220541-2c3bb06c6054/go.mod h1:sGbDF6GwGcLpkNXPUTkMRoywsNa/ol15pxFe6ERfguA= +github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE= @@ -155,12 +158,14 @@ github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoD github.com/form3tech-oss/jwt-go v3.2.3+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= +github.com/fsnotify/fsnotify v1.5.1/go.mod h1:T3375wBYaZdLLcVNkcVbzGHY7f1l/uK5T5Ai1i3InKU= github.com/fsnotify/fsnotify v1.5.4 h1:jRbGcIw6P2Meqdwuo0H1p6JVLbL5DHKAKlYndzMwVZI= github.com/fsnotify/fsnotify v1.5.4/go.mod h1:OVB6XrOHzAwXMpEM7uPOzcehqUV2UqJxmVXmkdnm1bU= github.com/getkin/kin-openapi v0.76.0/go.mod h1:660oXbgy5JFMKreazJaQTw7o+X00qeSyhcnluiMv+Xg= github.com/getsentry/raven-go v0.2.0/go.mod h1:KungGk8q33+aIAZUIVWZDr2OfAEBsO49PX4NzFV5kcQ= github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/go-bindata/go-bindata/v3 v3.1.3/go.mod h1:1/zrpXsLD8YDIbhZRqXzm1Ghc7NhEvIN9+Z6R5/xH4I= github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= github.com/go-gl/glfw/v3.3/glfw 
v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= @@ -278,6 +283,7 @@ github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLe github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= @@ -324,6 +330,7 @@ github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpO github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= +github.com/imdario/mergo v0.3.12/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= github.com/imdario/mergo v0.3.13 h1:lFzP57bqS/wsqKssCGmtLAb8A0wKjLGrve2q3PPVcBk= github.com/imdario/mergo v0.3.13/go.mod h1:4lJ1jqUDcsbIECGy0RUJAXNIhg+6ocWgb1ALK2O4oXg= github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= @@ -374,6 +381,8 @@ github.com/mitchellh/go-homedir v1.0.0/go.mod 
h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrk github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg= +github.com/mitchellh/hashstructure/v2 v2.0.2 h1:vGKWl0YJqUNxE8d+h8f6NJLcCJrgbhC4NcD46KavDd4= +github.com/mitchellh/hashstructure/v2 v2.0.2/go.mod h1:MG3aRVU/N29oo/V/IhBX8GR/zz4kQkprJgF2EVszyDE= github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY= github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= @@ -403,15 +412,21 @@ github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY= +github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0= github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU= +github.com/onsi/ginkgo/v2 v2.0.0/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3/PpL4c= github.com/onsi/ginkgo/v2 v2.1.3 h1:e/3Cwtogj0HA+25nMP1jCMDIf8RtRYbGwGGuBIFztkc= +github.com/onsi/ginkgo/v2 v2.1.3/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3/PpL4c= github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= 
+github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY= +github.com/onsi/gomega v1.18.1/go.mod h1:0q+aL8jAiMXy9hbwj2mr5GziHiwhAIQpFmmtT5hitRs= github.com/onsi/gomega v1.19.0 h1:4ieX6qQjPP/BfC3mpsAtIGGlxTWPeA3Inl/7DtXw1tw= github.com/onsi/gomega v1.19.0/go.mod h1:LY+I3pBVzYsTBU1AnDwOSxaYi9WoWiqgwooUqq9yPro= github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= +github.com/operator-framework/api v0.15.0/go.mod h1:scnY9xqSeCsOdtJtNoHIXd7OtHZ14gj1hkDA4+DlgLY= github.com/operator-framework/operator-lib v0.11.0 h1:eYzqpiOfq9WBI4Trddisiq/X9BwCisZd3rIzmHRC9Z8= github.com/operator-framework/operator-lib v0.11.0/go.mod h1:RpyKhFAoG6DmKTDIwMuO6pI3LRc8IE9rxEYWy476o6g= github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= @@ -546,8 +561,10 @@ go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/atomic v1.9.0 h1:ECmE8Bn/WFTYwEW/bpKD3M8VtR/zQVbavAoalC1PYyE= go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A= +go.uber.org/goleak v1.1.11-0.20210813005559-691160354723/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= go.uber.org/goleak v1.1.11/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= go.uber.org/goleak v1.1.12 h1:gZAh5/EyT/HQwlpkCy6wTpqfH9H8Lz8zbm3dZh+OyzA= +go.uber.org/goleak v1.1.12/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= go.uber.org/multierr v1.8.0 h1:dg6GjLku4EH+249NNmoIciG9N/jURbDG+pFlTkhzIC8= @@ -555,6 +572,7 @@ go.uber.org/multierr v1.8.0/go.mod h1:7EAYxJLBy9rStEaz58O2t4Uvip6FSURkq8/ppBp95a go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= go.uber.org/zap 
v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo= go.uber.org/zap v1.19.0/go.mod h1:xg/QME4nWcxGxrpdeYfq7UvYrLh66cuVKdrbD1XF/NI= +go.uber.org/zap v1.19.1/go.mod h1:j3DNczoxDZroyBnOT1L/Q79cfUMGZxlv/9dzN7SM1rI= go.uber.org/zap v1.21.0 h1:WefMeulhovoZ2sYXz7st6K0sLj7bBhpiFaud4r4zST8= go.uber.org/zap v1.21.0/go.mod h1:wjWOCqI0f2ZZrJF/UufIOkiC8ii6tm1iqIsLo76RfJw= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= @@ -647,6 +665,7 @@ golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= +golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= @@ -1064,16 +1083,23 @@ honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWh honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +k8s.io/api v0.24.0/go.mod h1:5Jl90IUrJHUJYEMANRURMiVvJ0g7Ax7r3R1bqO8zx8I= k8s.io/api v0.24.1 h1:BjCMRDcyEYz03joa3K1+rbshwh1Ay6oB53+iUx2H8UY= k8s.io/api v0.24.1/go.mod h1:JhoOvNiLXKTPQ60zh2g0ewpA+bnEYf5q44Flhquh4vQ= +k8s.io/apiextensions-apiserver v0.24.0/go.mod 
h1:iuVe4aEpe6827lvO6yWQVxiPSpPoSKVjkq+MIdg84cM= k8s.io/apiextensions-apiserver v0.24.1 h1:5yBh9+ueTq/kfnHQZa0MAo6uNcPrtxPMpNQgorBaKS0= k8s.io/apiextensions-apiserver v0.24.1/go.mod h1:A6MHfaLDGfjOc/We2nM7uewD5Oa/FnEbZ6cD7g2ca4Q= +k8s.io/apimachinery v0.24.0/go.mod h1:82Bi4sCzVBdpYjyI4jY6aHX+YCUchUIrZrXKedjd2UM= k8s.io/apimachinery v0.24.1 h1:ShD4aDxTQKN5zNf8K1RQ2u98ELLdIW7jEnlO9uAMX/I= k8s.io/apimachinery v0.24.1/go.mod h1:82Bi4sCzVBdpYjyI4jY6aHX+YCUchUIrZrXKedjd2UM= +k8s.io/apiserver v0.24.0/go.mod h1:WFx2yiOMawnogNToVvUYT9nn1jaIkMKj41ZYCVycsBA= k8s.io/apiserver v0.24.1/go.mod h1:dQWNMx15S8NqJMp0gpYfssyvhYnkilc1LpExd/dkLh0= +k8s.io/client-go v0.24.0/go.mod h1:VFPQET+cAFpYxh6Bq6f4xyMY80G6jKKktU6G0m00VDw= k8s.io/client-go v0.24.1 h1:w1hNdI9PFrzu3OlovVeTnf4oHDt+FJLd9Ndluvnb42E= k8s.io/client-go v0.24.1/go.mod h1:f1kIDqcEYmwXS/vTbbhopMUbhKp2JhOeVTfxgaCIlF8= +k8s.io/code-generator v0.24.0/go.mod h1:dpVhs00hTuTdTY6jvVxvTFCk6gSMrtfRydbhZwHI15w= k8s.io/code-generator v0.24.1/go.mod h1:dpVhs00hTuTdTY6jvVxvTFCk6gSMrtfRydbhZwHI15w= +k8s.io/component-base v0.24.0/go.mod h1:Dgazgon0i7KYUsS8krG8muGiMVtUZxG037l1MKyXgrA= k8s.io/component-base v0.24.1 h1:APv6W/YmfOWZfo+XJ1mZwep/f7g7Tpwvdbo9CQLDuts= k8s.io/component-base v0.24.1/go.mod h1:DW5vQGYVCog8WYpNob3PMmmsY8A3L9QZNg4j/dV3s38= k8s.io/gengo v0.0.0-20210813121822-485abfe95c7c/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= diff --git a/main.go b/main.go index 2126c607e..9af8ed74e 100644 --- a/main.go +++ b/main.go @@ -138,6 +138,14 @@ func main() { log.Error(err, "unable to create controller", "controller", "ZookeeperCluster") os.Exit(1) } + if err = (&controllers.ZookeeperBackupReconciler{ + Client: mgr.GetClient(), + Scheme: mgr.GetScheme(), + LeaderGetter: controllers.GetLeader, + }).SetupWithManager(mgr); err != nil { + log.Error(err, "unable to create controller", "controller", "ZookeeperBackup") + os.Exit(1) + } // +kubebuilder:scaffold:builder log.Info("starting manager")