Updated existing 2.7 CI job testing purpose
Signed-off-by: Satyajit Bulage <[email protected]>
sbulage committed Jan 22, 2025
1 parent aba839a commit a8a7559
Showing 4 changed files with 89 additions and 13 deletions.
34 changes: 29 additions & 5 deletions .github/workflows/master-e2e.yaml
@@ -329,14 +329,34 @@ jobs:
# Export values
echo "rancher_image_version=${RANCHER_VERSION}" >> ${GITHUB_OUTPUT}
# Check application
make e2e-check-app
else
# Needed to be sure that Github Action will see the failure
false
fi
- name: Extract component versions/information after Upgrade
id: upgraded_component
run: |
# Extract Rancher Manager version
RM_UPGRADED_VERSION=$(kubectl get pod \
--namespace cattle-system \
-l app=rancher \
-o jsonpath={.items[*].status.containerStatuses[*].image} 2> /dev/null || true)
# Extract Fleet App version and images from local cluster
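# NOTE: helm lists both the fleet and fleet-crd charts here; after the
# version sort, `tail -n 2 | head -n 1` keeps the fleet entry, which
# sorts just before fleet-crd (assumes only these two charts are present)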
FLEET_APP_UPGRADED_VERSION="$(helm list -n cattle-fleet-system -o json 2> /dev/null \
| jq -r '.[] | .chart' \
| sort -V \
| uniq \
| tail -n 2 \
| head -n 1)"
for ns in {cattle-fleet-system,cattle-fleet-local-system}; do
FLEET_UPGRADED_IMAGES+="$(kubectl get pods -n $ns -o jsonpath='{.items[*].spec.containers[*].image}' 2> /dev/null) "
done
# Export values
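# Anything appended to ${GITHUB_OUTPUT} becomes a step output, read back
# later as steps.upgraded_component.outputs.<key> in the summary step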
echo "rm_upgraded_version=${RM_UPGRADED_VERSION}" >> ${GITHUB_OUTPUT}
echo "fleet_app_upgraded_version=${FLEET_APP_UPGRADED_VERSION}" >> ${GITHUB_OUTPUT}
echo "fleet_images_after_upgrade=${FLEET_UPGRADED_IMAGES}" >> ${GITHUB_OUTPUT}
- name: Cypress tests - On Upgraded Rancher
-id: rancher_upgrade
+id: cypress_result_after_rancher_upgrade
if: ${{ inputs.rancher_upgrade != '' }}
env:
BROWSER: chrome
@@ -406,8 +426,12 @@ jobs:
echo "K3s version for Rancher Manager: ${{ env.INSTALL_K3S_VERSION }}" >> ${GITHUB_STEP_SUMMARY}
echo "K3d version for downstream cluster: ${{ env.INSTALL_K3S_VERSION }}" >> ${GITHUB_STEP_SUMMARY}
if ${{ inputs.rancher_upgrade != '' }}; then
echo "# Rancher Manager Upgrade Information" >> ${GITHUB_STEP_SUMMARY}
echo "Rancher Manager Upgraded Version: ${{ inputs.rancher_upgrade }}" >> ${GITHUB_STEP_SUMMARY}
echo "### Rancher Manager Upgrade Information" >> ${GITHUB_STEP_SUMMARY}
echo "Rancher Manager Installed Version: ${{ inputs.rancher_version }}" >> ${GITHUB_STEP_SUMMARY}
echo "Rancher Manager Upgraded Version: ${{ steps.upgraded_component.outputs.rm_upgraded_version }}" >> ${GITHUB_STEP_SUMMARY}
echo "### Fleet (After Upgrade)" >> ${GITHUB_STEP_SUMMARY}
echo "Fleet App version: ${{ steps.upgraded_component.outputs.fleet_app_upgraded_version }}" >> ${GITHUB_STEP_SUMMARY}
echo "Fleet images in local cluster: ${{ steps.upgraded_component.outputs.fleet_images_after_upgrade }}" >> ${GITHUB_STEP_SUMMARY}
fi
delete-runner:
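The extraction and summary steps above communicate through step outputs. A minimal sketch of the pattern (step and output names below are illustrative, not taken from the workflow):

    - name: Extract versions
      id: versions
      run: |
        # Write a key=value pair into the step's output file
        FLEET_CHART="$(helm list -n cattle-fleet-system -o json | jq -r '.[0].chart')"
        echo "fleet_chart=${FLEET_CHART}" >> ${GITHUB_OUTPUT}
    - name: Summarize
      run: |
        # Read the value back through the steps context
        echo "Fleet chart: ${{ steps.versions.outputs.fleet_chart }}" >> ${GITHUB_STEP_SUMMARY}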
18 changes: 12 additions & 6 deletions .github/workflows/ui-rm_head_2.7.yaml
@@ -1,5 +1,5 @@
# This workflow calls the master E2E workflow with custom variables
-name: UI-RM_head_2.7
+name: UI-RM_head_upgrade
run-name: ${{ github.event_name == 'workflow_dispatch' && format('`{0}` on `{1}` - `{2}` destroy={3}', inputs.rancher_version, inputs.upstream_cluster_version, inputs.grep_test_by_tag, inputs.destroy_runner) || github.workflow }}

on:
@@ -14,13 +14,18 @@ on:
default: true
type: boolean
rancher_version:
-description: Rancher version channel/version/head_version latest/latest, latest/2.7.10[-rc2], prime/2.7.12, prime/devel/2.7, prime-optimus/2.7.13-rc4
-default: latest/devel/2.7
+description: Rancher version channel/version/head_version latest/latest, latest/2.y.x[-rc1], prime/2.y.x, prime/devel/2.y, alpha/2.y.0-alphaZ
+default: latest/devel/2.9
type: string
required: true
rancher_upgrade:
description: Rancher Manager channel/version to upgrade to
default: latest/devel/2.10
required: true
type: string
upstream_cluster_version:
description: K3s upstream cluster version where to install Rancher
-default: v1.26.10+k3s2
+default: v1.28.8+k3s1
type: string
required: true
grep_test_by_tag:
@@ -52,7 +57,8 @@ jobs:
cluster_name: cluster-k3s
# WARNING, VALUES BELOW ARE HARDCODED FOR RUNS SCHEDULED BY CRON
destroy_runner: ${{ github.event_name == 'schedule' && true || inputs.destroy_runner }}
-upstream_cluster_version: ${{ inputs.upstream_cluster_version || 'v1.26.10+k3s2' }}
-rancher_version: ${{ inputs.rancher_version || 'latest/devel/2.7' }}
+upstream_cluster_version: ${{ inputs.upstream_cluster_version || 'v1.28.8+k3s1' }}
+rancher_version: ${{ inputs.rancher_version || 'latest/devel/2.9' }}
+rancher_upgrade: ${{ inputs.rancher_upgrade || 'latest/devel/2.10' }}
qase_run_id: ${{ inputs.qase_run_id || 'auto' }}
grep_test_by_tag: ${{ inputs.grep_test_by_tag || '@login @p0 @p1 @rbac' }}
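The "|| 'default'" expressions above are what let the cron schedule reuse this workflow: on schedule events the inputs context is empty, so each expression falls back to the hardcoded value, while workflow_dispatch runs keep the user-supplied one. A minimal sketch of the idiom (job name illustrative):

    jobs:
      e2e:
        uses: ./.github/workflows/master-e2e.yaml
        with:
          # Empty on cron-triggered runs, so the right-hand default wins
          rancher_version: ${{ inputs.rancher_version || 'latest/devel/2.9' }}
          # "cond && a || b" acts as a ternary: scheduled runs always destroy the runner
          destroy_runner: ${{ github.event_name == 'schedule' && true || inputs.destroy_runner }}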
4 changes: 2 additions & 2 deletions tests/cypress/e2e/unit_tests/p0_fleet.spec.ts
@@ -23,7 +23,7 @@ export const path = "nginx"
beforeEach(() => {
cy.login();
cy.visit('/');
-cy.deleteAllFleetRepos();
+// cy.deleteAllFleetRepos();
});

Cypress.config();
@@ -196,7 +196,7 @@ describe('Test gitrepos with cabundle', { tags: '@p0' }, () => {
);

qase(144,
it("Fleet-144 Test cabundle secrets are not created without TLS certificate", { tags: '@fleet-144' }, () => {;
it.only("Fleet-144 Test cabundle secrets are not created without TLS certificate", { tags: '@fleet-144' }, () => {;

const repoName = 'local-144-test-cabundle-secrets-not-created'
const repoUrl = 'https://github.com/rancher/fleet-examples'
46 changes: 46 additions & 0 deletions tests/e2e/upgrade_test.go
@@ -15,6 +15,7 @@ limitations under the License.
package e2e_test

import (
"fmt"
"strings"
"time"

@@ -46,6 +47,16 @@ var _ = Describe("E2E - Upgrading Rancher Manager", Label("upgrade-rancher-manag
versionBeforeUpgrade, err := kubectl.RunWithoutErr(getImageVersion...)
Expect(err).To(Not(HaveOccurred()))

// Get Fleet version before upgrade
getFleetImageVersion := []string{"get", "pod",
"--namespace", "cattle-fleet-system",
"-l", "app=fleet-controller",
"-o", "jsonpath={.items[*].status.containerStatuses[*].image}",
}

// Execute the kubectl command to get the Fleet version before upgrade
fleetVersionBeforeUpgrade, err := kubectl.RunWithoutErr(getFleetImageVersion...)
Expect(err).To(Not(HaveOccurred()))

// Upgrade Rancher Manager
// NOTE: Don't check the status, we can have false-positive here...
// Better to check the rollout after the upgrade, it will fail if the upgrade failed
@@ -90,5 +101,40 @@ var _ = Describe("E2E - Upgrading Rancher Manager", Label("upgrade-rancher-manag
versionAfterUpgrade, err := kubectl.RunWithoutErr(getImageVersion...)
Expect(err).To(Not(HaveOccurred()))
Expect(versionAfterUpgrade).To(Not(Equal(versionBeforeUpgrade)))

// Function to check if all Fleet pods are updated and running the new version
isFleetControllerUpgradeComplete := func() bool {
// Check the rollout status of Fleet pods to ensure they are updated
rolloutStatus, err := kubectl.RunWithoutErr(
"rollout",
"--namespace", "cattle-fleet-system",
"status", "deployment/fleet-controller",
)
if err != nil {
return false
}

// Check if the rollout has completed successfully
return strings.Contains(rolloutStatus, `deployment "fleet-controller" successfully rolled out`)
}

// Wait for the upgrade to complete by checking if the Fleet rollout is complete
Eventually(isFleetControllerUpgradeComplete, tools.SetTimeout(10*time.Minute), 20*time.Second).Should(BeTrue())

// Wait until the fleet-controller pod reports the expected number
// of container images (the jsonpath should return three image fields)
Eventually(func() int {
fleetCmdOut, _ := kubectl.RunWithoutErr(getFleetImageVersion...)
return len(strings.Fields(fleetCmdOut))
}, tools.SetTimeout(5*time.Minute), 10*time.Second).Should(Equal(3))

// Get the Fleet version after upgrade
// and check that it differs from the version before the upgrade
Eventually(func() string {
fleetVersionAfterUpgrade, err := kubectl.RunWithoutErr(getFleetImageVersion...)
Expect(err).To(Not(HaveOccurred()))
fmt.Println("Current Fleet version after upgrade:", fleetVersionAfterUpgrade) // Debugging output
return fleetVersionAfterUpgrade
}, 10*time.Minute, 5*time.Second).Should(Not(Equal(fleetVersionBeforeUpgrade)))
})
})
