From 5e334017e0460b7ef443f26f1b4eb81fef649f2a Mon Sep 17 00:00:00 2001
From: arshadda
Date: Wed, 15 Jan 2025 10:57:43 +0530
Subject: [PATCH] Add UTs for failure domain

---
 .../kubeadm/internal/control_plane_test.go    | 264 ++++++++++++++
 .../internal/controllers/scale_test.go        | 343 ++++++++++++++++++
 2 files changed, 607 insertions(+)

diff --git a/controlplane/kubeadm/internal/control_plane_test.go b/controlplane/kubeadm/internal/control_plane_test.go
index a456bd102fd6..d1db601ae6ed 100644
--- a/controlplane/kubeadm/internal/control_plane_test.go
+++ b/controlplane/kubeadm/internal/control_plane_test.go
@@ -24,13 +24,16 @@ import (
 	. "github.com/onsi/gomega"
 	corev1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
 	"k8s.io/utils/ptr"
+	"sigs.k8s.io/controller-runtime/pkg/client"
 
 	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
 	controlplanev1 "sigs.k8s.io/cluster-api/controlplane/kubeadm/api/v1beta1"
 	"sigs.k8s.io/cluster-api/controlplane/kubeadm/internal/etcd"
 	"sigs.k8s.io/cluster-api/util/collections"
 	"sigs.k8s.io/cluster-api/util/conditions"
+	"sigs.k8s.io/cluster-api/util/patch"
 )
 
 func TestControlPlane(t *testing.T) {
@@ -64,6 +67,12 @@ func TestControlPlane(t *testing.T) {
 			controlPlane.Machines.Insert(machine("machine-5", withFailureDomain("unknown")))
 			g.Expect(*controlPlane.FailureDomainWithMostMachines(ctx, controlPlane.Machines)).To(Equal("unknown"))
 		})
+
+		t.Run("With failure domains set to nil", func(*testing.T) {
+			g := NewWithT(t)
+			controlPlane.Cluster.Status.FailureDomains = nil
+			g.Expect(*controlPlane.FailureDomainWithMostMachines(ctx, controlPlane.Machines)).To(Equal("one"))
+		})
 	})
 
 	t.Run("MachinesUpToDate", func(t *testing.T) {
@@ -144,6 +153,173 @@ func TestControlPlane(t *testing.T) {
 		g.Expect(err).NotTo(HaveOccurred())
 		g.Expect(fd).To(Equal(ptr.To("two"))) // deleted up-to-date machines (m4) should not be counted when picking the next failure domain for scale up
 	})
+
+	t.Run("Next failure domain for scale up is nil when there are no control plane failure domains", func(t *testing.T) {
+		g := NewWithT(t)
+		cluster := clusterv1.Cluster{
+			Status: clusterv1.ClusterStatus{
+				FailureDomains: clusterv1.FailureDomains{
+					"one": failureDomain(false),
+				},
+			},
+		}
+		kcp := &controlplanev1.KubeadmControlPlane{
+			Spec: controlplanev1.KubeadmControlPlaneSpec{
+				Version: "v1.31.0",
+			},
+		}
+		machines := collections.Machines{
+			"machine-1": &clusterv1.Machine{
+				ObjectMeta: metav1.ObjectMeta{Name: "m1", DeletionTimestamp: ptr.To(metav1.Now())}, // m1 is being deleted
+				Spec: clusterv1.MachineSpec{
+					Version:           ptr.To("v1.31.0"),
+					FailureDomain:     ptr.To("one"),
+					InfrastructureRef: corev1.ObjectReference{Kind: "GenericInfrastructureMachine", APIVersion: "infrastructure.cluster.x-k8s.io/v1beta1", Name: "m1"},
+				}},
+		}
+		controlPlane, err := NewControlPlane(ctx, nil, env.GetClient(), &cluster, kcp, machines)
+		g.Expect(err).NotTo(HaveOccurred())
+		fd, err := controlPlane.NextFailureDomainForScaleUp(ctx)
+		g.Expect(err).NotTo(HaveOccurred())
+		g.Expect(fd).To(BeNil())
+	})
+
+	t.Run("ControlPlane returns infra error", func(t *testing.T) {
+		g := NewWithT(t)
+		cluster := clusterv1.Cluster{
+			Status: clusterv1.ClusterStatus{
+				FailureDomains: clusterv1.FailureDomains{
+					"one":   failureDomain(true),
+					"two":   failureDomain(true),
+					"three": failureDomain(true),
+				},
+			},
+		}
+		kcp := &controlplanev1.KubeadmControlPlane{
+			Spec: controlplanev1.KubeadmControlPlaneSpec{
+				Version: "v1.31.0",
+			},
+		}
+		machines := collections.Machines{
+			"machine-1": &clusterv1.Machine{
+				ObjectMeta: metav1.ObjectMeta{Name: "m1"},
+				Spec: clusterv1.MachineSpec{
+					Version:           ptr.To("v1.31.0"),
+					FailureDomain:     ptr.To("one"),
+					InfrastructureRef: corev1.ObjectReference{Name: "m1"},
+				}},
+		}
+		_, err := NewControlPlane(ctx, nil, env.GetClient(), &cluster, kcp, machines)
+		g.Expect(err).To(HaveOccurred())
+	})
+
+	t.Run("When infra and bootstrap config exist", func(t *testing.T) {
+		g := NewWithT(t)
+		ns, err := env.CreateNamespace(ctx, "test-machine-watches")
+		kcp := &controlplanev1.KubeadmControlPlane{
+			Spec: controlplanev1.KubeadmControlPlaneSpec{
+				Version: "v1.31.0",
+			},
+		}
+
+		g.Expect(err).ToNot(HaveOccurred())
+
+		infraMachine := &unstructured.Unstructured{
+			Object: map[string]interface{}{
+				"kind":       "GenericInfrastructureMachine",
+				"apiVersion": "infrastructure.cluster.x-k8s.io/v1beta1",
+				"metadata": map[string]interface{}{
+					"name":      "infra-config1",
+					"namespace": ns.Name,
+				},
+				"spec": map[string]interface{}{
+					"providerID": "test://id-1",
+				},
+				"status": map[string]interface{}{
+					"ready": true,
+					"addresses": []interface{}{
+						map[string]interface{}{
+							"type":    "InternalIP",
+							"address": "10.0.0.1",
+						},
+					},
+				},
+			},
+		}
+
+		bootstrap := &unstructured.Unstructured{
+			Object: map[string]interface{}{
+				"kind":       "KubeadmConfig",
+				"apiVersion": "bootstrap.cluster.x-k8s.io/v1beta1",
+				"metadata": map[string]interface{}{
+					"name":      "bootstrap-config-machinereconcile",
+					"namespace": ns.Name,
+				},
+				"spec": map[string]interface{}{
+					"providerID": "test://id-1",
+				},
+				"status": map[string]interface{}{
+					"ready": true,
+				},
+			},
+		}
+
+		testCluster := &clusterv1.Cluster{
+			ObjectMeta: metav1.ObjectMeta{Name: "test-cluster", Namespace: ns.Name},
+			Status: clusterv1.ClusterStatus{
+				FailureDomains: clusterv1.FailureDomains{
+					"one":   failureDomain(true),
+					"two":   failureDomain(true),
+					"three": failureDomain(true),
+				},
+			},
+		}
+
+		g.Expect(env.Create(ctx, infraMachine)).To(Succeed())
+		g.Expect(env.Create(ctx, bootstrap)).To(Succeed())
+
+		defer func(do ...client.Object) {
+			g.Expect(env.Cleanup(ctx, do...)).To(Succeed())
+		}(ns, bootstrap, infraMachine)
+
+		// Patch infra machine ready
+		patchHelper, err := patch.NewHelper(infraMachine, env)
+		g.Expect(err).ShouldNot(HaveOccurred())
+		g.Expect(unstructured.SetNestedField(infraMachine.Object, true, "status", "ready")).To(Succeed())
+		g.Expect(patchHelper.Patch(ctx, infraMachine, patch.WithStatusObservedGeneration{})).To(Succeed())
+
+		// Patch bootstrap ready
+		patchHelper, err = patch.NewHelper(bootstrap, env)
+		g.Expect(err).ShouldNot(HaveOccurred())
+		g.Expect(unstructured.SetNestedField(bootstrap.Object, true, "status", "ready")).To(Succeed())
+		g.Expect(patchHelper.Patch(ctx, bootstrap, patch.WithStatusObservedGeneration{})).To(Succeed())
+
+		machines := collections.Machines{
+			"machine-1": &clusterv1.Machine{
+				ObjectMeta: metav1.ObjectMeta{Name: "m1",
+					Namespace: ns.Name},
+				Spec: clusterv1.MachineSpec{
+					InfrastructureRef: corev1.ObjectReference{
+						APIVersion: "infrastructure.cluster.x-k8s.io/v1beta1",
+						Kind:       "GenericInfrastructureMachine",
+						Name:       "infra-config1",
+						Namespace:  ns.Name,
+					},
+					Bootstrap: clusterv1.Bootstrap{
+						ConfigRef: &corev1.ObjectReference{
+							APIVersion: "bootstrap.cluster.x-k8s.io/v1beta1",
+							Kind:       "KubeadmConfig",
+							Name:       "bootstrap-config-machinereconcile",
+							Namespace:  ns.Name,
+						},
+					},
+				},
+			},
+		}
+
+		_, err = NewControlPlane(ctx, nil, env.GetClient(), testCluster, kcp, machines)
+		g.Expect(err).NotTo(HaveOccurred())
+	})
 }
 
 func TestHasMachinesToBeRemediated(t *testing.T) {
@@ -324,6 +500,94 @@ func TestStatusToLogKeyAndValues(t *testing.T) {
 	g.Expect(got[3]).To(Equal("m1, m2, m3"))
 }
 
+func TestMachineInFailureDomainWithMostMachines(t *testing.T) {
+	t.Run("Machines in Failure Domain", func(t *testing.T) {
+		machines := collections.Machines{
+			"machine-3": &clusterv1.Machine{
+				ObjectMeta: metav1.ObjectMeta{Name: "m3"},
+				Spec: clusterv1.MachineSpec{
+					Version:           ptr.To("v1.31.0"),
+					FailureDomain:     ptr.To("three"),
+					InfrastructureRef: corev1.ObjectReference{Kind: "GenericInfrastructureMachine", APIVersion: "infrastructure.cluster.x-k8s.io/v1beta1", Name: "m3"},
+				}},
+		}
+
+		c := &ControlPlane{
+			KCP: &controlplanev1.KubeadmControlPlane{},
+			Cluster: &clusterv1.Cluster{
+				Status: clusterv1.ClusterStatus{
+					FailureDomains: clusterv1.FailureDomains{
+						"three": failureDomain(false),
+					},
+				},
+			},
+			Machines: collections.Machines{
+				"machine-3": machine("machine-3", withFailureDomain("three")),
+			},
+		}
+
+		g := NewWithT(t)
+		_, err := c.MachineInFailureDomainWithMostMachines(ctx, machines)
+		g.Expect(err).NotTo(HaveOccurred())
+	})
+	t.Run("Return error when no control plane machine is found", func(t *testing.T) {
+		machines := collections.Machines{}
+
+		c := &ControlPlane{
+			KCP: &controlplanev1.KubeadmControlPlane{},
+			Cluster: &clusterv1.Cluster{
+				Status: clusterv1.ClusterStatus{
+					FailureDomains: clusterv1.FailureDomains{},
+				},
+			},
+			Machines: collections.Machines{},
+		}
+
+		g := NewWithT(t)
+		_, err := c.MachineInFailureDomainWithMostMachines(ctx, machines)
+		g.Expect(err).To(HaveOccurred())
+	})
+}
+func TestMachineWithDeleteAnnotation(t *testing.T) {
+	t.Run("Machines having delete annotation set", func(t *testing.T) {
+		machines := collections.Machines{
+			"machine-1": &clusterv1.Machine{
+				ObjectMeta: metav1.ObjectMeta{Name: "m1",
+					Annotations: map[string]string{
+						"cluster.x-k8s.io/delete-machine": "",
+					},
+				},
+				Spec: clusterv1.MachineSpec{
+					Version:           ptr.To("v1.31.0"),
+					FailureDomain:     ptr.To("one"),
+					InfrastructureRef: corev1.ObjectReference{Kind: "GenericInfrastructureMachine", APIVersion: "infrastructure.cluster.x-k8s.io/v1beta1", Name: "m1"},
+				}},
+			"machine-2": &clusterv1.Machine{
+				ObjectMeta: metav1.ObjectMeta{Name: "m2",
+					Annotations: map[string]string{
+						"cluster.x-k8s.io/delete-machine": "",
+					},
+				},
+				Spec: clusterv1.MachineSpec{
+					Version:           ptr.To("v1.31.0"),
+					FailureDomain:     ptr.To("two"),
+					InfrastructureRef: corev1.ObjectReference{Kind: "GenericInfrastructureMachine", APIVersion: "infrastructure.cluster.x-k8s.io/v1beta1", Name: "m2"},
+				}},
+		}
+
+		c := ControlPlane{
+			Machines: machines,
+			Cluster: &clusterv1.Cluster{
+				Status: clusterv1.ClusterStatus{},
+			},
+		}
+
+		g := NewWithT(t)
+		annotatedMachines := c.MachineWithDeleteAnnotation(machines)
+		g.Expect(annotatedMachines).NotTo(BeNil())
+	})
+}
+
 type machineOpt func(*clusterv1.Machine)
 
 func failureDomain(controlPlane bool) clusterv1.FailureDomainSpec {
diff --git a/controlplane/kubeadm/internal/controllers/scale_test.go b/controlplane/kubeadm/internal/controllers/scale_test.go
index f74b69439027..28eba02214c0 100644
--- a/controlplane/kubeadm/internal/controllers/scale_test.go
+++ b/controlplane/kubeadm/internal/controllers/scale_test.go
@@ -23,6 +23,7 @@ import (
 	"time"
 
 	. "github.com/onsi/gomega"
"github.com/onsi/gomega" + "github.com/pkg/errors" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" @@ -109,6 +110,99 @@ func TestKubeadmControlPlaneReconciler_initializeControlPlane(t *testing.T) { g.Expect(kubeadmConfig.Spec.ClusterConfiguration.FeatureGates).To(BeComparableTo(map[string]bool{internal.ControlPlaneKubeletLocalMode: true})) } +func TestKubeadmControlPlaneReconciler_initializeControlPlane_Error(t *testing.T) { + t.Run("Return error when parsing kubernetes version", func(t *testing.T) { + setup := func(t *testing.T, g *WithT) *corev1.Namespace { + t.Helper() + + t.Log("Creating the namespace") + ns, err := env.CreateNamespace(ctx, "test-kcp-reconciler-initializecontrolplane") + g.Expect(err).ToNot(HaveOccurred()) + + return ns + } + + teardown := func(t *testing.T, g *WithT, ns *corev1.Namespace) { + t.Helper() + + t.Log("Deleting the namespace") + g.Expect(env.Delete(ctx, ns)).To(Succeed()) + } + + g := NewWithT(t) + namespace := setup(t, g) + defer teardown(t, g, namespace) + + cluster, kcp, genericInfrastructureMachineTemplate := createClusterWithControlPlane(namespace.Name) + // Try to break version + kcp.Spec.Version = "1+foobar-0" + g.Expect(env.CreateAndWait(ctx, genericInfrastructureMachineTemplate, client.FieldOwner("manager"))).To(Succeed()) + kcp.UID = types.UID(util.RandomString(10)) + + r := &KubeadmControlPlaneReconciler{ + Client: env, + recorder: record.NewFakeRecorder(32), + managementClusterUncached: &fakeManagementCluster{ + Management: &internal.Management{Client: env}, + Workload: &fakeWorkloadCluster{}, + }, + } + controlPlane := &internal.ControlPlane{ + Cluster: cluster, + KCP: kcp, + } + + result, err := r.initializeControlPlane(ctx, controlPlane) + g.Expect(result).To(BeComparableTo(ctrl.Result{Requeue: false})) + g.Expect(err).To(HaveOccurred()) + }) + t.Run("Return error when cloning control plane Machine", func(t *testing.T) { + setup := func(t *testing.T, g *WithT) *corev1.Namespace { + t.Helper() + + t.Log("Creating the namespace") + ns, err := env.CreateNamespace(ctx, "test-kcp-reconciler-initializecontrolplane") + g.Expect(err).ToNot(HaveOccurred()) + + return ns + } + + teardown := func(t *testing.T, g *WithT, ns *corev1.Namespace) { + t.Helper() + + t.Log("Deleting the namespace") + g.Expect(env.Delete(ctx, ns)).To(Succeed()) + } + + g := NewWithT(t) + namespace := setup(t, g) + defer teardown(t, g, namespace) + + cluster, kcp, genericInfrastructureMachineTemplate := createClusterWithControlPlane(namespace.Name) + // Try to break Infra Cloning + kcp.Spec.MachineTemplate.InfrastructureRef.Name = "something_invalid" + g.Expect(env.CreateAndWait(ctx, genericInfrastructureMachineTemplate, client.FieldOwner("manager"))).To(Succeed()) + kcp.UID = types.UID(util.RandomString(10)) + + r := &KubeadmControlPlaneReconciler{ + Client: env, + recorder: record.NewFakeRecorder(32), + managementClusterUncached: &fakeManagementCluster{ + Management: &internal.Management{Client: env}, + Workload: &fakeWorkloadCluster{}, + }, + } + controlPlane := &internal.ControlPlane{ + Cluster: cluster, + KCP: kcp, + } + + result, err := r.initializeControlPlane(ctx, controlPlane) + g.Expect(result).To(BeComparableTo(ctrl.Result{Requeue: false})) + g.Expect(err).To(HaveOccurred()) + }) +} + func TestKubeadmControlPlaneReconciler_scaleUpControlPlane(t *testing.T) { t.Run("creates a control plane Machine if preflight checks pass", func(t *testing.T) { setup := func(t *testing.T, g *WithT) *corev1.Namespace { @@ -248,6 
 			g.Expect(m).To(BeComparableTo(bm))
 		}
 	})
+	t.Run("Return error when parsing kubernetes version", func(t *testing.T) {
+		setup := func(t *testing.T, g *WithT) *corev1.Namespace {
+			t.Helper()
+
+			t.Log("Creating the namespace")
+			ns, err := env.CreateNamespace(ctx, "test-kcp-reconciler-scaleupcontrolplane")
+			g.Expect(err).ToNot(HaveOccurred())
+
+			return ns
+		}
+
+		teardown := func(t *testing.T, g *WithT, ns *corev1.Namespace) {
+			t.Helper()
+
+			t.Log("Deleting the namespace")
+			g.Expect(env.Delete(ctx, ns)).To(Succeed())
+		}
+
+		g := NewWithT(t)
+		namespace := setup(t, g)
+		defer teardown(t, g, namespace)
+
+		cluster, kcp, genericInfrastructureMachineTemplate := createClusterWithControlPlane(namespace.Name)
+		// Try to break version
+		kcp.Spec.Version = "1+foobar-0"
+		g.Expect(env.CreateAndWait(ctx, genericInfrastructureMachineTemplate, client.FieldOwner("manager"))).To(Succeed())
+		kcp.UID = types.UID(util.RandomString(10))
+		setKCPHealthy(kcp)
+
+		fmc := &fakeManagementCluster{
+			Machines: collections.New(),
+			Workload: &fakeWorkloadCluster{},
+		}
+
+		for i := range 2 {
+			m, _ := createMachineNodePair(fmt.Sprintf("test-%d", i), cluster, kcp, true)
+			setMachineHealthy(m)
+			fmc.Machines.Insert(m)
+		}
+
+		r := &KubeadmControlPlaneReconciler{
+			Client:                    env,
+			managementCluster:         fmc,
+			managementClusterUncached: fmc,
+			recorder:                  record.NewFakeRecorder(32),
+		}
+		controlPlane := &internal.ControlPlane{
+			KCP:      kcp,
+			Cluster:  cluster,
+			Machines: fmc.Machines,
+		}
+
+		result, err := r.scaleUpControlPlane(ctx, controlPlane)
+		g.Expect(result).To(BeComparableTo(ctrl.Result{Requeue: false}))
+		g.Expect(err).To(HaveOccurred())
+	})
+	t.Run("Return error when cloning control plane Machine", func(t *testing.T) {
+		setup := func(t *testing.T, g *WithT) *corev1.Namespace {
+			t.Helper()
+
+			t.Log("Creating the namespace")
+			ns, err := env.CreateNamespace(ctx, "test-kcp-reconciler-scaleupcontrolplane")
+			g.Expect(err).ToNot(HaveOccurred())
+
+			return ns
+		}
+
+		teardown := func(t *testing.T, g *WithT, ns *corev1.Namespace) {
+			t.Helper()
+
+			t.Log("Deleting the namespace")
+			g.Expect(env.Delete(ctx, ns)).To(Succeed())
+		}
+
+		g := NewWithT(t)
+		namespace := setup(t, g)
+		defer teardown(t, g, namespace)
+
+		cluster, kcp, genericInfrastructureMachineTemplate := createClusterWithControlPlane(namespace.Name)
+		// Try to break Infra Cloning
+		kcp.Spec.MachineTemplate.InfrastructureRef.Name = "something_invalid"
+		g.Expect(env.CreateAndWait(ctx, genericInfrastructureMachineTemplate, client.FieldOwner("manager"))).To(Succeed())
+		kcp.UID = types.UID(util.RandomString(10))
+		setKCPHealthy(kcp)
+
+		fmc := &fakeManagementCluster{
+			Machines: collections.New(),
+			Workload: &fakeWorkloadCluster{},
+		}
+
+		for i := range 2 {
+			m, _ := createMachineNodePair(fmt.Sprintf("test-%d", i), cluster, kcp, true)
+			setMachineHealthy(m)
+			fmc.Machines.Insert(m)
+		}
+
+		r := &KubeadmControlPlaneReconciler{
+			Client:                    env,
+			managementCluster:         fmc,
+			managementClusterUncached: fmc,
+			recorder:                  record.NewFakeRecorder(32),
+		}
+		controlPlane := &internal.ControlPlane{
+			KCP:      kcp,
+			Cluster:  cluster,
+			Machines: fmc.Machines,
+		}
+
+		result, err := r.scaleUpControlPlane(ctx, controlPlane)
+		g.Expect(result).To(BeComparableTo(ctrl.Result{Requeue: false}))
+		g.Expect(err).To(HaveOccurred())
+	})
+}
+
+func TestKubeadmControlPlaneReconciler_scaleDownControlPlane_Error(t *testing.T) {
+	t.Run("Return error when failing to select machine for scale down", func(t *testing.T) {
+		g := NewWithT(t)
+		fakeClient := newFakeClient()
+
+		r := &KubeadmControlPlaneReconciler{
+			recorder:            record.NewFakeRecorder(32),
+			Client:              fakeClient,
+			SecretCachingClient: fakeClient,
+			managementCluster: &fakeManagementCluster{
+				Workload: &fakeWorkloadCluster{},
+			},
+		}
+
+		kcp := &controlplanev1.KubeadmControlPlane{
+			Spec: controlplanev1.KubeadmControlPlaneSpec{
+				Version: "v1.19.1",
+			},
+		}
+		setKCPHealthy(kcp)
+		controlPlane := &internal.ControlPlane{
+			KCP:     kcp,
+			Cluster: &clusterv1.Cluster{},
+		}
+		controlPlane.InjectTestManagementCluster(r.managementCluster)
+
+		result, err := r.scaleDownControlPlane(context.Background(), controlPlane, controlPlane.Machines)
+		g.Expect(err).To(HaveOccurred())
+		g.Expect(result).To(BeComparableTo(ctrl.Result{Requeue: false}))
+	})
+	t.Run("Return error when deleting control plane machine", func(t *testing.T) {
+		g := NewWithT(t)
+
+		machines := map[string]*clusterv1.Machine{
+			"two":   machine("two", withTimestamp(time.Now())),
+			"three": machine("three", withTimestamp(time.Now())),
+		}
+		setMachineHealthy(machines["two"])
+		setMachineHealthy(machines["three"])
+		fakeClient := newFakeClient(machines["two"], machines["three"])
+
+		r := &KubeadmControlPlaneReconciler{
+			recorder: record.NewFakeRecorder(32),
+			Client: &fClient{Client: fakeClient,
+				deleteError: errors.New("delete error")},
+			SecretCachingClient: fakeClient,
+			managementCluster: &fakeManagementCluster{
+				Workload: &fakeWorkloadCluster{},
+			},
+		}
+
+		cluster := &clusterv1.Cluster{}
+		kcp := &controlplanev1.KubeadmControlPlane{
+			Spec: controlplanev1.KubeadmControlPlaneSpec{
+				Version: "v1.19.1",
+			},
+		}
+		controlPlane := &internal.ControlPlane{
+			KCP:      kcp,
+			Cluster:  cluster,
+			Machines: machines,
+		}
+		controlPlane.InjectTestManagementCluster(r.managementCluster)
+
+		result, err := r.scaleDownControlPlane(context.Background(), controlPlane, machines)
+		g.Expect(err).To(HaveOccurred())
+		g.Expect(result).To(BeComparableTo(ctrl.Result{Requeue: false}))
+	})
+
+	t.Run("Return error when creating client for workload cluster", func(t *testing.T) {
+		g := NewWithT(t)
+
+		machines := map[string]*clusterv1.Machine{
+			"two":   machine("two", withTimestamp(time.Now())),
+			"three": machine("three", withTimestamp(time.Now())),
+		}
+		setMachineHealthy(machines["two"])
+		setMachineHealthy(machines["three"])
+		fakeClient := newFakeClient(machines["two"], machines["three"])
+
+		r := &KubeadmControlPlaneReconciler{
+			recorder:            record.NewFakeRecorder(32),
+			Client:              &fClient{Client: fakeClient},
+			SecretCachingClient: fakeClient,
+			managementCluster: &fakeManagementCluster{
+				Workload:    &fakeWorkloadCluster{},
+				WorkloadErr: errors.New("get error"),
+			},
+		}
+
+		cluster := &clusterv1.Cluster{}
+		kcp := &controlplanev1.KubeadmControlPlane{
+			Spec: controlplanev1.KubeadmControlPlaneSpec{
+				Version: "v1.19.1",
+			},
+		}
+		controlPlane := &internal.ControlPlane{
+			KCP:      kcp,
+			Cluster:  cluster,
+			Machines: machines,
+		}
+		controlPlane.InjectTestManagementCluster(r.managementCluster)
+
+		result, err := r.scaleDownControlPlane(context.Background(), controlPlane, machines)
+		g.Expect(err).To(HaveOccurred())
+		g.Expect(result).To(BeComparableTo(ctrl.Result{Requeue: false}))
+	})
 }
 
 func TestKubeadmControlPlaneReconciler_scaleDownControlPlane_NoError(t *testing.T) {
@@ -769,3 +1084,31 @@ func withTimestamp(t time.Time) machineOpt {
 		m.CreationTimestamp = metav1.NewTime(t)
 	}
 }
+
+type fClient struct {
+	client.Client
+	getError    error
+	listError   error
+	deleteError error
+}
+
+func (fc *fClient) Get(ctx context.Context, key client.ObjectKey, obj client.Object, opts ...client.GetOption) error {
+	if fc.getError != nil {
+		return fc.getError
+	}
+	return fc.Client.Get(ctx, key, obj, opts...)
+}
+
+func (fc *fClient) List(ctx context.Context, list client.ObjectList, opts ...client.ListOption) error {
+	if fc.listError != nil {
+		return fc.listError
+	}
+	return fc.Client.List(ctx, list, opts...)
+}
+
+func (fc *fClient) Delete(ctx context.Context, obj client.Object, opts ...client.DeleteOption) error {
+	if fc.deleteError != nil {
+		return fc.deleteError
+	}
+	return fc.Client.Delete(ctx, obj, opts...)
+}