From a8a7559a9630871270c91d34e64692167e1fa9b5 Mon Sep 17 00:00:00 2001
From: Satyajit Bulage
Date: Fri, 10 Jan 2025 21:04:13 +0530
Subject: [PATCH] Revert "Updated existing 2.7 CI job" (testing purposes)

Signed-off-by: Satyajit Bulage
---
 .github/workflows/master-e2e.yaml             | 34 ++++++++++++--
 .github/workflows/ui-rm_head_2.7.yaml         | 18 +++++---
 tests/cypress/e2e/unit_tests/p0_fleet.spec.ts |  4 +-
 tests/e2e/upgrade_test.go                     | 47 +++++++++++++++++++
 4 files changed, 90 insertions(+), 13 deletions(-)

diff --git a/.github/workflows/master-e2e.yaml b/.github/workflows/master-e2e.yaml
index 267b0519..8d40c493 100644
--- a/.github/workflows/master-e2e.yaml
+++ b/.github/workflows/master-e2e.yaml
@@ -329,14 +329,34 @@ jobs:
 
           # Export values
           echo "rancher_image_version=${RANCHER_VERSION}" >> ${GITHUB_OUTPUT}
-          # Check application
-          make e2e-check-app
         else
           # Needed to be sure that Github Action will see the failure
           false
         fi
+      - name: Extract component versions/information after upgrade
+        id: upgraded_component
+        run: |
+          # Extract Rancher Manager version
+          RM_UPGRADED_VERSION=$(kubectl get pod \
+            --namespace cattle-system \
+            -l app=rancher \
+            -o jsonpath={.items[*].status.containerStatuses[*].image} 2> /dev/null || true)
+          # Extract Fleet App version and images from local cluster
+          FLEET_APP_UPGRADED_VERSION="$(helm list -n cattle-fleet-system -o json 2> /dev/null \
+            | jq -r '.[] | .chart' \
+            | sort -V \
+            | uniq \
+            | tail -n 2 \
+            | head -n 1)"
+          for ns in {cattle-fleet-system,cattle-fleet-local-system}; do
+            FLEET_UPGRADED_IMAGES+="$(kubectl get pods -n $ns -o jsonpath='{.items[*].spec.containers[*].image}' 2> /dev/null) "
+          done
+          # Export values
+          echo "rm_upgraded_version=${RM_UPGRADED_VERSION}" >> ${GITHUB_OUTPUT}
+          echo "fleet_app_upgraded_version=${FLEET_APP_UPGRADED_VERSION}" >> ${GITHUB_OUTPUT}
+          echo "fleet_images_after_upgrade=${FLEET_UPGRADED_IMAGES}" >> ${GITHUB_OUTPUT}
       - name: Cypress tests - On Upgraded Rancher
-        id: rancher_upgrade
+        id: cypress_result_after_rancher_upgrade
         if: ${{ inputs.rancher_upgrade != '' }}
         env:
           BROWSER: chrome
@@ -406,8 +426,12 @@
         echo "K3s version for Rancher Manager: ${{ env.INSTALL_K3S_VERSION }}" >> ${GITHUB_STEP_SUMMARY}
         echo "K3d version for downstream cluster: ${{ env.INSTALL_K3S_VERSION }}" >> ${GITHUB_STEP_SUMMARY}
         if ${{ inputs.rancher_upgrade != '' }}; then
-          echo "# Rancher Manager Upgrade Information" >> ${GITHUB_STEP_SUMMARY}
-          echo "Rancher Manager Upgraded Version: ${{ inputs.rancher_upgrade }}" >> ${GITHUB_STEP_SUMMARY}
+          echo "### Rancher Manager Upgrade Information" >> ${GITHUB_STEP_SUMMARY}
+          echo "Rancher Manager Installed Version: ${{ inputs.rancher_version }}" >> ${GITHUB_STEP_SUMMARY}
+          echo "Rancher Manager Upgraded Version: ${{ steps.upgraded_component.outputs.rm_upgraded_version }}" >> ${GITHUB_STEP_SUMMARY}
+          echo "### Fleet (After Upgrade)" >> ${GITHUB_STEP_SUMMARY}
+          echo "Fleet App version: ${{ steps.upgraded_component.outputs.fleet_app_upgraded_version }}" >> ${GITHUB_STEP_SUMMARY}
+          echo "Fleet images in local cluster: ${{ steps.upgraded_component.outputs.fleet_images_after_upgrade }}" >> ${GITHUB_STEP_SUMMARY}
         fi
 
   delete-runner:
diff --git a/.github/workflows/ui-rm_head_2.7.yaml b/.github/workflows/ui-rm_head_2.7.yaml
index 91aba189..0d83cd60 100644
--- a/.github/workflows/ui-rm_head_2.7.yaml
+++ b/.github/workflows/ui-rm_head_2.7.yaml
@@ -1,5 +1,5 @@
 # This workflow calls the master E2E workflow with custom variables
-name: UI-RM_head_2.7
+name: UI-RM_head_upgrade
 run-name: ${{ github.event_name == 'workflow_dispatch' && format('`{0}` on `{1}` - `{2}` destroy={3}', inputs.rancher_version, inputs.upstream_cluster_version, inputs.grep_test_by_tag, inputs.destroy_runner) || github.workflow }}
 
 on:
@@ -14,13 +14,18 @@ on:
         default: true
         type: boolean
       rancher_version:
-        description: Rancher version channel/version/head_version latest/latest, latest/2.7.10[-rc2], prime/2.7.12, prime/devel/2.7, prime-optimus/2.7.13-rc4
-        default: latest/devel/2.7
+        description: Rancher version channel/version/head_version latest/latest, latest/2.y.x[-rc1], prime/2.y.x, prime/devel/2.y, alpha/2.y.0-alphaZ
+        default: latest/devel/2.9
         type: string
         required: true
+      rancher_upgrade:
+        description: Rancher Manager channel/version to upgrade to
+        default: latest/devel/2.10
+        required: true
+        type: string
       upstream_cluster_version:
         description: K3s upstream cluster version where to install Rancher
-        default: v1.26.10+k3s2
+        default: v1.28.8+k3s1
         type: string
         required: true
       grep_test_by_tag:
@@ -52,7 +57,8 @@ jobs:
       cluster_name: cluster-k3s
       # WARNING, VALUES BELOW ARE HARDCODED FOR RUNS SCHEDULED BY CRON
       destroy_runner: ${{ github.event_name == 'schedule' && true || inputs.destroy_runner }}
-      upstream_cluster_version: ${{ inputs.upstream_cluster_version || 'v1.26.10+k3s2' }}
-      rancher_version: ${{ inputs.rancher_version || 'latest/devel/2.7' }}
+      upstream_cluster_version: ${{ inputs.upstream_cluster_version || 'v1.28.8+k3s1' }}
+      rancher_version: ${{ inputs.rancher_version || 'latest/devel/2.9' }}
+      rancher_upgrade: ${{ inputs.rancher_upgrade || 'latest/devel/2.10' }}
       qase_run_id: ${{ inputs.qase_run_id || 'auto' }}
       grep_test_by_tag: ${{ inputs.grep_test_by_tag || '@login @p0 @p1 @rbac' }}
diff --git a/tests/cypress/e2e/unit_tests/p0_fleet.spec.ts b/tests/cypress/e2e/unit_tests/p0_fleet.spec.ts
index b09d0d29..fbbbb905 100644
--- a/tests/cypress/e2e/unit_tests/p0_fleet.spec.ts
+++ b/tests/cypress/e2e/unit_tests/p0_fleet.spec.ts
@@ -23,7 +23,7 @@ export const path = "nginx"
 beforeEach(() => {
   cy.login();
   cy.visit('/');
-  cy.deleteAllFleetRepos();
+  // cy.deleteAllFleetRepos();
 });
 
 Cypress.config();
@@ -196,7 +196,7 @@ describe('Test gitrepos with cabundle', { tags: '@p0' }, () => {
   );
 
   qase(144,
-    it("Fleet-144 Test cabundle secrets are not created without TLS certificate", { tags: '@fleet-144' }, () => {;
+    it.only("Fleet-144 Test cabundle secrets are not created without TLS certificate", { tags: '@fleet-144' }, () => {;
 
       const repoName = 'local-144-test-cabundle-secrets-not-created'
       const repoUrl = 'https://github.com/rancher/fleet-examples'
diff --git a/tests/e2e/upgrade_test.go b/tests/e2e/upgrade_test.go
index 33d8e1bc..a818601d 100644
--- a/tests/e2e/upgrade_test.go
+++ b/tests/e2e/upgrade_test.go
@@ -15,6 +15,7 @@ limitations under the License.
 package e2e_test
 
 import (
+	"fmt"
 	"strings"
 	"time"
 
@@ -46,6 +47,17 @@ var _ = Describe("E2E - Upgrading Rancher Manager", Label("upgrade-rancher-manag
 		versionBeforeUpgrade, err := kubectl.RunWithoutErr(getImageVersion...)
 		Expect(err).To(Not(HaveOccurred()))
 
+		// Get the Fleet version before upgrade
+		getFleetImageVersion := []string{"get", "pod",
+			"--namespace", "cattle-fleet-system",
+			"-l", "app=fleet-controller",
+			"-o", "jsonpath={.items[*].status.containerStatuses[*].image}",
+		}
+
+		// Run kubectl to capture the Fleet image version before upgrade
+		fleetVersionBeforeUpgrade, err := kubectl.RunWithoutErr(getFleetImageVersion...)
+		Expect(err).To(Not(HaveOccurred()))
+
 		// Upgrade Rancher Manager
 		// NOTE: Don't check the status, we can have false-positive here...
 		// Better to check the rollout after the upgrade, it will fail if the upgrade failed
@@ -90,5 +102,40 @@ var _ = Describe("E2E - Upgrading Rancher Manager", Label("upgrade-rancher-manag
 		versionAfterUpgrade, err := kubectl.RunWithoutErr(getImageVersion...)
 		Expect(err).To(Not(HaveOccurred()))
 		Expect(versionAfterUpgrade).To(Not(Equal(versionBeforeUpgrade)))
+
+		// Check whether the fleet-controller deployment has finished rolling out
+		isFleetControllerUpgradeComplete := func() bool {
+			// Query the rollout status of the fleet-controller deployment
+			rolloutStatus, err := kubectl.RunWithoutErr(
+				"rollout",
+				"--namespace", "cattle-fleet-system",
+				"status", "deployment/fleet-controller",
+			)
+			if err != nil {
+				return false
+			}
+
+			// Check if the rollout has completed successfully
+			return strings.Contains(rolloutStatus, `deployment "fleet-controller" successfully rolled out`)
+		}
+
+		// Wait for the upgrade to complete by checking that the Fleet rollout is done
+		Eventually(isFleetControllerUpgradeComplete, tools.SetTimeout(10*time.Minute), 20*time.Second).Should(BeTrue())
+
+		// Wait until kubectl reports all container images for the
+		// fleet-controller pods again (3 images expected after the upgrade)
+		Eventually(func() int {
+			fleetCmdOut, _ := kubectl.RunWithoutErr(getFleetImageVersion...)
+			return len(strings.Fields(fleetCmdOut))
+		}, tools.SetTimeout(5*time.Minute), 10*time.Second).Should(Equal(3))
+
+		// Get the Fleet version after upgrade
+		// and check that it differs from the version before upgrade
+		Eventually(func() string {
+			fleetVersionAfterUpgrade, err := kubectl.RunWithoutErr(getFleetImageVersion...)
+			Expect(err).To(Not(HaveOccurred()))
+			fmt.Println("Current Fleet version after upgrade:", fleetVersionAfterUpgrade) // Debugging output
+			return fleetVersionAfterUpgrade
+		}, 10*time.Minute, 5*time.Second).Should(Not(Equal(fleetVersionBeforeUpgrade)))
 	})
 })
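
For debugging outside the CI harness, the rollout check this patch adds to upgrade_test.go can be reproduced as a standalone program. The sketch below is an illustration, not part of the patch: it swaps the e2e framework's kubectl.RunWithoutErr helper for plain os/exec, and assumes kubectl is on PATH with a kubeconfig pointing at the upgraded cluster.

// rollout_check.go - standalone sketch of the fleet-controller rollout
// polling pattern used in upgrade_test.go above.
package main

import (
	"fmt"
	"os/exec"
	"strings"
	"time"
)

// rolloutComplete runs `kubectl rollout status` once and reports whether
// the fleet-controller deployment has fully rolled out.
func rolloutComplete() bool {
	out, err := exec.Command("kubectl",
		"rollout", "status",
		"--namespace", "cattle-fleet-system",
		"deployment/fleet-controller",
		"--timeout", "10s",
	).CombinedOutput()
	if err != nil {
		return false
	}
	return strings.Contains(string(out), `deployment "fleet-controller" successfully rolled out`)
}

func main() {
	// Poll every 20 seconds for up to 10 minutes, mirroring the
	// Eventually(..., 10*time.Minute, 20*time.Second) call in the test.
	deadline := time.Now().Add(10 * time.Minute)
	for time.Now().Before(deadline) {
		if rolloutComplete() {
			fmt.Println("fleet-controller successfully rolled out")
			return
		}
		time.Sleep(20 * time.Second)
	}
	fmt.Println("timed out waiting for fleet-controller rollout")
}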