fix(e2e): resolve instability issues in end-to-end tests #2517

Merged: 1 commit, Jan 21, 2025
14 changes: 14 additions & 0 deletions test/e2e/common/05_tektonhubdeployment_test.go
@@ -55,6 +55,7 @@ type TektonHubTestSuite struct {
resourceNames utils.ResourceNames
clients *utils.Clients
deployments []string
pvcs []string
dbMigrationJobName string
interval time.Duration
timeout time.Duration
@@ -79,6 +80,9 @@ func NewTektonHubTestSuite(t *testing.T) *TektonHubTestSuite {
"tekton-hub-api",
"tekton-hub-ui",
},
pvcs: []string{
"tekton-hub-api",
},
dbMigrationJobName: "tekton-hub-db-migration",
interval: 5 * time.Second,
timeout: 5 * time.Minute,
@@ -302,6 +306,16 @@ func (s *TektonHubTestSuite) undeploy(databaseNamespace string) {
err := resources.WaitForDeploymentDeletion(s.clients.KubeClient, deploymentName, namespace, pollInterval, timeout)
require.NoError(t, err)
}
// verify pvcs are removed
for _, pvcName := range s.pvcs {
namespace := s.resourceNames.TargetNamespace
if databaseNamespace != "" {
// no need to verify external database removal
continue
}
err := resources.WaitForPVCDeletion(s.clients.KubeClient, pvcName, namespace, pollInterval, timeout)
require.NoError(t, err)
}
// verify migration job is removed
err := resources.WaitForJobDeletion(s.clients.KubeClient, s.dbMigrationJobName, s.resourceNames.TargetNamespace, pollInterval, timeout)
require.NoError(t, err)
16 changes: 16 additions & 0 deletions test/resources/deployment.go
@@ -126,3 +126,19 @@ func WaitForDeploymentDeletion(kubeClient kubernetes.Interface, name, namespace

return wait.PollUntilContextTimeout(context.TODO(), interval, timeout, true, verifyFunc)
}

// WaitForPVCDeletion waits for the PVC to be deleted.
func WaitForPVCDeletion(kubeClient kubernetes.Interface, name, namespace string, interval, timeout time.Duration) error {
verifyFunc := func(ctx context.Context) (bool, error) {
_, err := kubeClient.CoreV1().PersistentVolumeClaims(namespace).Get(context.TODO(), name, metav1.GetOptions{})
if err != nil {
if apierrs.IsNotFound(err) {
return true, nil
}
return false, err
}
return false, nil
}

return wait.PollUntilContextTimeout(context.TODO(), interval, timeout, true, verifyFunc)
}
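For orientation, a hedged usage sketch of the new helper outside the suite shown above; the namespace and durations here are illustrative assumptions, not values taken from this PR:

// Wait until the Tekton Hub API PVC is gone after the TektonHub CR is deleted.
// "tekton-hub-api" matches the suite's pvcs list; the namespace is assumed.
err := resources.WaitForPVCDeletion(clients.KubeClient, "tekton-hub-api",
	"tekton-operator", 5*time.Second, 5*time.Minute)
require.NoError(t, err)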
25 changes: 22 additions & 3 deletions test/resources/pod.go
@@ -21,6 +21,7 @@ import (
"time"

core "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/kubernetes"
@@ -37,6 +38,18 @@ func DeletePodByLabelSelector(kubeClient kubernetes.Interface, labelSelector, na
if err != nil {
return err
}
// wait for the pod to be deleted
err = wait.PollUntilContextTimeout(context.TODO(), 5*time.Second, 3*time.Minute, true,
func(ctx context.Context) (bool, error) {
_, err := kubeClient.CoreV1().Pods(namespace).Get(context.TODO(), pod.GetName(), metav1.GetOptions{})
if err != nil && apierrors.IsNotFound(err) {
return true, nil
}
return false, nil
})
if err != nil {
return err
}
}
return nil
}
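The diff only shows the tail of DeletePodByLabelSelector, so here is a rough sketch of the whole flow after this change; the pod-listing lines above the hunk are an assumption (they are not part of this PR's diff), and the imports are the ones already shown in pod.go:

// Sketch: delete every pod matching the selector, then poll until each deleted
// pod is actually gone (NotFound), so callers never observe a terminating pod.
func DeletePodByLabelSelector(kubeClient kubernetes.Interface, labelSelector, namespace string) error {
	pods, err := kubeClient.CoreV1().Pods(namespace).List(context.TODO(),
		metav1.ListOptions{LabelSelector: labelSelector})
	if err != nil {
		return err
	}
	for _, pod := range pods.Items {
		if err := kubeClient.CoreV1().Pods(namespace).Delete(context.TODO(),
			pod.GetName(), metav1.DeleteOptions{}); err != nil {
			return err
		}
		// New in this PR: block until the pod object has disappeared.
		err = wait.PollUntilContextTimeout(context.TODO(), 5*time.Second, 3*time.Minute, true,
			func(ctx context.Context) (bool, error) {
				_, err := kubeClient.CoreV1().Pods(namespace).Get(context.TODO(), pod.GetName(), metav1.GetOptions{})
				if err != nil && apierrors.IsNotFound(err) {
					return true, nil
				}
				return false, nil
			})
		if err != nil {
			return err
		}
	}
	return nil
}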
@@ -49,11 +62,17 @@ func WaitForPodByLabelSelector(kubeClient kubernetes.Interface, labelSelector, n
}

for _, pod := range pods.Items {
-if pod.Status.Phase == core.PodRunning {
-return true, nil
+if pod.Status.Phase != core.PodRunning {
+return false, nil
}
+// Only when the Ready status of the Pod is True is the Pod considered Ready
+for _, c := range pod.Status.Conditions {
+if c.Type == core.PodReady && c.Status != core.ConditionTrue {
+return false, nil
+}
+}
}
-return false, nil
+return true, nil
}

return wait.PollUntilContextTimeout(context.TODO(), interval, timeout, true, verifyFunc)
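Phase == Running alone can be misleading: a pod may be Running while its readiness probe still fails, which is why the new code also inspects the PodReady condition. A minimal standalone illustration of that check (the helper name is an assumption, and it is slightly stricter than the PR's inline loop because a missing Ready condition also counts as not ready):

// isPodReady reports whether a pod is Running and its Ready condition is True.
func isPodReady(pod core.Pod) bool {
	if pod.Status.Phase != core.PodRunning {
		return false
	}
	for _, c := range pod.Status.Conditions {
		if c.Type == core.PodReady {
			return c.Status == core.ConditionTrue
		}
	}
	return false
}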
18 changes: 17 additions & 1 deletion test/resources/tektonconfigs.go
@@ -181,6 +181,7 @@ func verifyNoTektonConfigCR(clients *utils.Clients) error {
}

func WaitForTektonConfigReady(client operatorV1alpha1.TektonConfigInterface, name string, interval, timeout time.Duration) error {
readyCount := 0
isReady := func(ctx context.Context) (bool, error) {
configCR, err := client.Get(context.TODO(), name, metav1.GetOptions{})
if err != nil {
@@ -189,7 +190,22 @@ func WaitForTektonConfigReady(client operatorV1alpha1.TektonConfigInterface, nam
}
return false, err
}
-return configCR.Status.IsReady(), nil
+if configCR.Status.IsReady() {
+readyCount++
+} else {
+readyCount = 0
+}
+// This needs to be confirmed multiple times because:
+// 1. The previous Pod may have just restarted, and the new Pod has not
+// yet entered the Reconcile logic to modify the resource status.
+// 2. Even if the Pod has started properly, it may not have entered the
+// Reconcile logic yet, which could also lead to instability.
+// For example:
+// In the testing logic of TektonHub, it immediately deletes the
+// CR of TektonHub and then polls whether the CR has been cleaned up.
+// At this point, the tekton-operator enters the Reconcile logic and
+// automatically creates the CR of TektonHub again, causing the test to fail.
+return readyCount >= 3, nil
}

return wait.PollUntilContextTimeout(context.TODO(), interval, timeout, true, isReady)
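The "ready several times in a row" idea is a general debouncing pattern for polling flaky state. A minimal sketch of the same pattern as a reusable helper, with the name and the consecutive-success threshold as assumptions rather than anything this PR adds:

// waitForStableCondition succeeds only after check has returned true `stable`
// times in a row; a false observation resets the counter, an error aborts the wait.
func waitForStableCondition(ctx context.Context, interval, timeout time.Duration,
	stable int, check func(ctx context.Context) (bool, error)) error {
	consecutive := 0
	return wait.PollUntilContextTimeout(ctx, interval, timeout, true,
		func(ctx context.Context) (bool, error) {
			ok, err := check(ctx)
			if err != nil {
				return false, err
			}
			if ok {
				consecutive++
			} else {
				consecutive = 0
			}
			return consecutive >= stable, nil
		})
}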