Fix the issue that ownerref is not set with ignorefields (#794)
Signed-off-by: Jian Qiu <[email protected]>
qiujian16 authored Jan 10, 2025
1 parent 0acf030 commit 11896cc
Showing 2 changed files with 51 additions and 9 deletions.
8 changes: 2 additions & 6 deletions pkg/work/spoke/apply/server_side_apply.go
@@ -95,12 +95,8 @@ func (c *ServerSideApply) Apply(
         ctx, required.GetName(), metav1.GetOptions{})
     switch {
     case errors.IsNotFound(err):
-        // if object is not found, directly apply without removing ignore fields in the object.
-        obj, createErr := c.client.
-            Resource(gvr).
-            Namespace(required.GetNamespace()).
-            Apply(ctx, required.GetName(), requiredOriginal, metav1.ApplyOptions{FieldManager: fieldManager, Force: force})
-        return obj, createErr
+        // if the object is not found, apply requiredOriginal so the ignore fields are kept on create
+        required = requiredOriginal
     case err != nil:
         return nil, err
     case len(existing.GetAnnotations()) > 0:
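Before this change, the NotFound branch applied the object and returned immediately instead of falling through to the shared apply path, where (per the commit title) the owner reference ends up being attached; with ignoreFields configured, resources were therefore created without an owner reference. The fix assigns required = requiredOriginal and lets the rest of Apply run. For reference, a minimal sketch, not the repository's code, of the dynamic-client server-side apply call this path ends in; the function name and parameters are stand-ins:

package main

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/client-go/dynamic"
)

// serverSideApply is a minimal sketch of a dynamic-client server-side apply.
// In the fixed code the agent can still decorate `required` (for example
// with owner references) before a call like this one; the old early return
// skipped whatever happens after the switch.
func serverSideApply(ctx context.Context, client dynamic.Interface,
	gvr schema.GroupVersionResource, required *unstructured.Unstructured,
	fieldManager string, force bool) (*unstructured.Unstructured, error) {
	return client.Resource(gvr).
		Namespace(required.GetNamespace()).
		Apply(ctx, required.GetName(), required,
			metav1.ApplyOptions{FieldManager: fieldManager, Force: force})
}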
52 changes: 49 additions & 3 deletions test/e2e/work_workload_test.go
@@ -22,6 +22,7 @@ import (
     "k8s.io/apimachinery/pkg/runtime/schema"
     "k8s.io/apimachinery/pkg/types"
     "k8s.io/apimachinery/pkg/util/rand"
+    applyappsv1 "k8s.io/client-go/applyconfigurations/apps/v1"
     "k8s.io/utils/pointer"

     workapiv1 "open-cluster-management.io/api/work/v1"
@@ -787,7 +788,7 @@ var _ = ginkgo.Describe("Work agent", ginkgo.Label("work-agent", "sanity-check")
                     IgnoreFields: []workapiv1.IgnoreField{
                         {
                             Condition: workapiv1.IgnoreFieldsConditionOnSpokeChange,
-                            JSONPaths: []string{"spec.replicas"},
+                            JSONPaths: []string{".spec.replicas"},
                         },
                     },
                 },
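The one-character change above is the point of this hunk: the ignore-fields entry is a JSONPath evaluated from the manifest root, and this commit fixes the test to use the dotted form ".spec.replicas" that the path parser expects. A sketch of the whole manifest config entry as it could be built with the open-cluster-management.io/api/work/v1 types; the resource identifier values are hypothetical examples, not taken from the test:

package main

import workapiv1 "open-cluster-management.io/api/work/v1"

// manifestConfig sketches the update strategy the test configures: server-side
// apply with spec.replicas ignored once the field changes on the spoke.
var manifestConfig = workapiv1.ManifestConfigOption{
	ResourceIdentifier: workapiv1.ResourceIdentifier{
		Group:     "apps",
		Resource:  "deployments",
		Namespace: "default", // hypothetical
		Name:      "deploy1", // hypothetical
	},
	UpdateStrategy: &workapiv1.UpdateStrategy{
		Type: workapiv1.UpdateStrategyTypeServerSideApply,
		ServerSideApply: &workapiv1.ServerSideApplyConfig{
			IgnoreFields: []workapiv1.IgnoreField{{
				Condition: workapiv1.IgnoreFieldsConditionOnSpokeChange,
				JSONPaths: []string{".spec.replicas"},
			}},
		},
	},
}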
@@ -806,6 +807,9 @@ var _ = ginkgo.Describe("Work agent", ginkgo.Label("work-agent", "sanity-check")
                 if *deploy.Spec.Replicas != int32(2) {
                     return fmt.Errorf("expected 2 replicas, got %d", *deploy.Spec.Replicas)
                 }
+                if len(deploy.OwnerReferences) == 0 {
+                    return fmt.Errorf("expected owner references, got none")
+                }
                 return nil
             }).ShouldNot(gomega.HaveOccurred())

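The new assertion only requires that some owner reference be present, which is what the original bug broke. Since the work agent tracks applied resources through an AppliedManifestWork owner, a stricter check could match that kind explicitly; a hedged sketch, where the "AppliedManifestWork" kind string is an assumption based on OCM's garbage-collection design, not something this diff asserts:

package main

import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

// hasAppliedManifestWorkOwner reports whether the object carries the owner
// reference the work agent sets. Kind "AppliedManifestWork" is an assumption;
// the test above only checks that the owner reference list is non-empty.
func hasAppliedManifestWorkOwner(obj metav1.Object) bool {
	for _, ref := range obj.GetOwnerReferences() {
		if ref.Kind == "AppliedManifestWork" {
			return true
		}
	}
	return false
}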
@@ -835,6 +839,15 @@ var _ = ginkgo.Describe("Work agent", ginkgo.Label("work-agent", "sanity-check")
                 return nil
             }).ShouldNot(gomega.HaveOccurred())

+            ginkgo.By("update deployment with new replicas")
+            specConfig := applyappsv1.DeploymentSpec().WithReplicas(5)
+            applyConfig := applyappsv1.Deployment(deployment.Name, deployment.Namespace).WithSpec(specConfig)
+            _, err = spoke.KubeClient.AppsV1().Deployments(deployment.Namespace).Apply(context.Background(), applyConfig, metav1.ApplyOptions{
+                FieldManager: "another-actor",
+                Force:        true,
+            })
+            gomega.Expect(err).ToNot(gomega.HaveOccurred())
+
             ginkgo.By("update work with new replicas and type to onSpokePresent")
             gomega.Eventually(func() error {
                 actualWork, err := hub.WorkClient.WorkV1().ManifestWorks(universalClusterName).Get(context.Background(), workName, metav1.GetOptions{})
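The nine added lines above set up the actual regression scenario: a second field manager ("another-actor") force-applies only spec.replicas, which transfers ownership of that field away from the work agent; the test then switches the ignore-field condition to onSpokePresent and expects the spoke-side value to survive. A self-contained sketch of the same apply pattern, where the client, namespace, and name are stand-ins:

package main

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	applyappsv1 "k8s.io/client-go/applyconfigurations/apps/v1"
	"k8s.io/client-go/kubernetes"
)

// scaleAsAnotherActor server-side-applies only spec.replicas under a separate
// field manager. Force: true takes ownership of the field even if another
// manager (here, the work agent) currently owns it.
func scaleAsAnotherActor(ctx context.Context, client kubernetes.Interface,
	namespace, name string, replicas int32) error {
	spec := applyappsv1.DeploymentSpec().WithReplicas(replicas)
	cfg := applyappsv1.Deployment(name, namespace).WithSpec(spec)
	_, err := client.AppsV1().Deployments(namespace).Apply(ctx, cfg,
		metav1.ApplyOptions{FieldManager: "another-actor", Force: true})
	return err
}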
@@ -850,17 +863,50 @@ var _ = ginkgo.Describe("Work agent", ginkgo.Label("work-agent", "sanity-check")
                 return err
             }).ShouldNot(gomega.HaveOccurred())

+            // wait until the work has been applied
+            gomega.Eventually(func() error {
+                actualWork, err := hub.WorkClient.WorkV1().ManifestWorks(universalClusterName).Get(context.Background(), workName, metav1.GetOptions{})
+                if err != nil {
+                    return err
+                }
+                appliedCond := meta.FindStatusCondition(actualWork.Status.Conditions, workapiv1.WorkApplied)
+                if appliedCond == nil {
+                    return fmt.Errorf("expected a work applied condition")
+                }
+                if appliedCond.ObservedGeneration != actualWork.Generation {
+                    return fmt.Errorf("expected work to be applied for generation %d", actualWork.Generation)
+                }
+                return nil
+            }).ShouldNot(gomega.HaveOccurred())
+
             // Check deployment status, it should not be changed
             gomega.Eventually(func() error {
                 deploy, err := spoke.KubeClient.AppsV1().Deployments(deployment.Namespace).Get(context.Background(), deployment.Name, metav1.GetOptions{})
                 if err != nil {
                     return err
                 }
-                if *deploy.Spec.Replicas != int32(3) {
-                    return fmt.Errorf("expected 3 replicas, got %d", *deploy.Spec.Replicas)
+                if *deploy.Spec.Replicas != int32(5) {
+                    return fmt.Errorf("expected 5 replicas, got %d", *deploy.Spec.Replicas)
                 }
                 return nil
             }).ShouldNot(gomega.HaveOccurred())
+
+            err = hub.WorkClient.WorkV1().ManifestWorks(universalClusterName).Delete(
+                context.Background(), work.Name, metav1.DeleteOptions{})
+            gomega.Expect(err).ToNot(gomega.HaveOccurred())
+
+            // Check deployment is removed
+            gomega.Eventually(func() error {
+                _, err := spoke.KubeClient.AppsV1().Deployments(deployment.Namespace).Get(
+                    context.Background(), deployment.Name, metav1.GetOptions{})
+                if errors.IsNotFound(err) {
+                    return nil
+                }
+                if err != nil {
+                    return err
+                }
+                return fmt.Errorf("resource still exists")
+            }).ShouldNot(gomega.HaveOccurred())
         })
     })
 })
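Two details in this final hunk carry the test's logic: the Applied condition is only trusted once its observedGeneration matches the work's generation (otherwise the status could still describe the previous spec), and the expected replica count is 5, the value written by "another-actor", confirming the agent left the ignored field alone. A standalone sketch of the generation-aware check; the helper name is illustrative:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/meta"
	workapiv1 "open-cluster-management.io/api/work/v1"
)

// workAppliedForGeneration mirrors the test's wait loop: the Applied condition
// must be reported for the work's current generation before the deployment's
// state is inspected.
func workAppliedForGeneration(work *workapiv1.ManifestWork) error {
	cond := meta.FindStatusCondition(work.Status.Conditions, workapiv1.WorkApplied)
	if cond == nil {
		return fmt.Errorf("expected a %s condition", workapiv1.WorkApplied)
	}
	if cond.ObservedGeneration != work.Generation {
		return fmt.Errorf("applied condition observed generation %d, want %d",
			cond.ObservedGeneration, work.Generation)
	}
	return nil
}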