e2e, arm64: make the e2e tests work on the Arm64 platform #3577

Merged: 8 commits, Jan 15, 2025
1 change: 0 additions & 1 deletion BUILD.bazel
@@ -81,7 +81,6 @@ container_bundle(
"$(container_prefix)/cdi-func-test-registry-populate:$(container_tag)": "//tools/cdi-func-test-registry-init:cdi-func-test-registry-populate-image",
"$(container_prefix)/cdi-func-test-registry:$(container_tag)": "//tools/cdi-func-test-registry-init:cdi-func-test-registry-image",
"$(container_prefix)/imageio-init:$(container_tag)": "//tools/imageio-init:imageio-init-image",
"$(container_prefix)/vcenter-simulator:$(container_tag)": "//tools/vddk-test:vcenter-simulator",
"$(container_prefix)/cdi-func-test-tinycore:$(container_tag)": "//tests:cdi-func-test-tinycore",
"$(container_prefix)/cdi-func-test-imageio:$(container_tag)": "//tools/image-io:cdi-func-test-imageio-image",
"$(container_prefix)/cdi-func-test-cirros-qcow2:$(container_tag)": "//tests:cdi-func-test-cirros-qcow2",
4 changes: 3 additions & 1 deletion automation/test.sh
@@ -44,6 +44,9 @@ if [[ $TARGET =~ openshift-.* ]]; then
elif [[ $TARGET =~ k8s-.* ]]; then
export KUBEVIRT_NUM_NODES=2
export KUBEVIRT_MEMORY_SIZE=8192
elif [[ $TARGET =~ kind-.* ]]; then
export KUBEVIRT_NUM_NODES=1
export KIND_PORT_MAPPING=31001:31002
fi

if [ ! -d "cluster-up/cluster/$KUBEVIRT_PROVIDER" ]; then
@@ -68,7 +71,6 @@ if [[ -z "$UPGRADE_FROM" ]] && [[ -z "$RANDOM_CR" ]]; then
fi
echo "Upgrading from versions: $UPGRADE_FROM"
fi
export KUBEVIRT_NUM_NODES=2

kubectl() { cluster-up/kubectl.sh "$@"; }

23 changes: 19 additions & 4 deletions cluster-sync/ephemeral_provider.sh
@@ -25,6 +25,17 @@ function seed_images(){

}

# For the Kind provider, we need to configure hostname resolution for the local image registry in the CoreDNS service.
# This ensures that local container images can be successfully pulled into Kubernetes pods during certain e2e tests.
function setup_hostname_resolution_for_registry {
host_name="registry"
host_ip=$(${CDI_CRI} inspect -f '{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}' $(${CDI_CRI} ps|grep registry|awk '{print $1}'))
_kubectl patch configmap coredns \
-n kube-system \
--type merge \
-p "{\"data\":{\"Corefile\":\".:53 {\n errors\n health {\n lameduck 5s\n }\n ready\n kubernetes cluster.local in-addr.arpa ip6.arpa {\n pods insecure\n fallthrough in-addr.arpa ip6.arpa\n ttl 30\n }\n prometheus :9153\n forward . /etc/resolv.conf {\n max_concurrent 1000\n }\n cache 30\n loop\n reload\n loadbalance\n hosts {\n $host_ip $host_name\n fallthrough\n }\n}\"}}"
}
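
# Illustrative sketch, not part of this change: one way to confirm the CoreDNS patch above took effect
# is to resolve the registry hostname from a short-lived pod (pod name and busybox tag are arbitrary):
function verify_registry_resolution {
  _kubectl run registry-dns-check --rm -i --restart=Never --image=busybox:1.36 -- nslookup registry
}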

function verify() {
echo 'Wait until all nodes are ready'
until [[ $(_kubectl get nodes --no-headers | wc -l) -eq $(_kubectl get nodes --no-headers | grep " Ready" | wc -l) ]]; do
@@ -38,10 +49,14 @@ function configure_storage() {
}

function configure_hpp() {
for i in $(seq 1 ${KUBEVIRT_NUM_NODES}); do
./cluster-up/ssh.sh "node$(printf "%02d" ${i})" "sudo mkdir -p /var/hpvolumes"
./cluster-up/ssh.sh "node$(printf "%02d" ${i})" "sudo chcon -t container_file_t -R /var/hpvolumes"
done
if [[ $KUBEVIRT_PROVIDER =~ kind.* ]]; then
./cluster-up/ssh.sh ${KUBEVIRT_PROVIDER}-control-plane mkdir -p /var/hpvolumes
else
for i in $(seq 1 ${KUBEVIRT_NUM_NODES}); do
./cluster-up/ssh.sh "node$(printf "%02d" ${i})" "sudo mkdir -p /var/hpvolumes"
./cluster-up/ssh.sh "node$(printf "%02d" ${i})" "sudo chcon -t container_file_t -R /var/hpvolumes"
done
fi
HPP_RELEASE=$(get_latest_release "kubevirt/hostpath-provisioner-operator")
_kubectl apply -f https://github.com/kubevirt/hostpath-provisioner-operator/releases/download/$HPP_RELEASE/namespace.yaml
#install cert-manager
49 changes: 45 additions & 4 deletions cluster-sync/sync.sh
@@ -37,11 +37,21 @@ PULL_POLICY=${PULL_POLICY:-IfNotPresent}
# have to refactor/rewrite any of the code that works currently.
MANIFEST_REGISTRY=$DOCKER_PREFIX
if [ "${KUBEVIRT_PROVIDER}" != "external" ]; then
registry=${IMAGE_REGISTRY:-localhost:$(_port registry)}
if [[ $KUBEVIRT_PROVIDER =~ kind.* ]]; then
registry=${IMAGE_REGISTRY:-localhost:5000}
else
registry=${IMAGE_REGISTRY:-localhost:$(_port registry)}
fi
DOCKER_PREFIX=${registry}
MANIFEST_REGISTRY="registry:5000"
fi

# When the KubeVirt provider is kind, we set up cluster-level hostname resolution for the registry so
# that pods can reach registry:5000.
if [[ $KUBEVIRT_PROVIDER =~ kind.* ]]; then
setup_hostname_resolution_for_registry
fi

if [ "${KUBEVIRT_PROVIDER}" == "external" ]; then
# No kubevirtci local registry, likely using something external
if [[ $(${CDI_CRI} login --help | grep authfile) ]]; then
@@ -96,8 +106,16 @@ function wait_cdi_available {
}

function configure_uploadproxy_override {
host_port=$(./cluster-up/cli.sh ports uploadproxy | xargs)
override="https://127.0.0.1:$host_port"
if [[ $KUBEVIRT_PROVIDER =~ kind.* ]]; then
# To enable port mapping, it must be configured both in the Kind configuration and the uploadProxyURLOverride.
# We use the environment variable KIND_PORT_MAPPING to ensure the setup is applied in both locations.
container_port=$(echo "$KIND_PORT_MAPPING" | awk -F: '{print $1}')
host_port=$(echo "$KIND_PORT_MAPPING" | awk -F: '{print $2}')
override="https://127.0.0.1:$host_port"
else
host_port=$(./cluster-up/cli.sh ports uploadproxy | xargs)
override="https://127.0.0.1:$host_port"
fi
_kubectl patch cdi ${CR_NAME} --type=merge -p '{"spec": {"config": {"uploadProxyURLOverride": "'"$override"'"}}}'
}
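
# Illustrative sketch, not from this PR: the Kind side of KIND_PORT_MAPPING is assumed to be an
# extraPortMappings entry in the kind cluster config, so the NodePort (container_port) is reachable on
# the host at host_port. The file name and how it is wired into cluster-up are hypothetical.
function write_kind_port_mapping_config_example {
  local container_port=$(echo "$KIND_PORT_MAPPING" | awk -F: '{print $1}')
  local host_port=$(echo "$KIND_PORT_MAPPING" | awk -F: '{print $2}')
  cat <<EOF > kind-config-example.yaml
kind: Cluster
apiVersion: kind.x-k8s.io/v1alpha4
nodes:
- role: control-plane
  extraPortMappings:
  - containerPort: ${container_port}
    hostPort: ${host_port}
EOF
}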

@@ -162,6 +180,15 @@ function setup_for_upgrade_testing {
_kubectl apply -f "./_out/manifests/cdi-testing-sa.yaml"
_kubectl apply -f "./_out/manifests/file-host.yaml"
_kubectl apply -f "./_out/manifests/registry-host.yaml"

# In the kind cluster, registry-populate needs additional capabilities to use buildah
if [[ $KUBEVIRT_PROVIDER =~ kind.* ]]; then
_kubectl patch deployment \
-n ${CDI_NAMESPACE} cdi-docker-registry-host \
--type=json \
-p='[{"op": "add", "path": "/spec/template/spec/containers/2/securityContext/capabilities/add", "value": ["SETFCAP", "SYS_ADMIN", "SYS_CHROOT"]}]'
fi

echo "Waiting for testing tools to be ready"
_kubectl wait pod -n ${CDI_NAMESPACE} --for=condition=Ready --all --timeout=${CDI_AVAILABLE_TIMEOUT}s
_kubectl apply -f "./_out/manifests/upgrade-testing-artifacts.yaml"
@@ -181,10 +208,22 @@ if [ "${CDI_SYNC}" == "test-infra" ]; then
_kubectl apply -f "./_out/manifests/sample-populator.yaml"
_kubectl apply -f "./_out/manifests/uploadproxy-nodeport.yaml"

# In the kind cluster, registry-populate needs additional capabilities to use buildah
if [[ $KUBEVIRT_PROVIDER =~ kind.* ]]; then
_kubectl patch deployment \
-n ${CDI_NAMESPACE} cdi-docker-registry-host \
--type=json \
-p='[{"op": "add", "path": "/spec/template/spec/containers/2/securityContext/capabilities/add", "value": ["SETFCAP", "SYS_ADMIN", "SYS_CHROOT"]}]'
fi

# Disable unsupported functest images for s390x
if [ "${ARCHITECTURE}" != "s390x" ]; then
# Imageio test service:
_kubectl apply -f "./_out/manifests/imageio.yaml"
fi

# Do not deploy VDDK on s390x and arm64
if [ "${ARCHITECTURE}" != "s390x" ] && [ "${ARCHITECTURE}" != "aarch64" ]; then
# vCenter (VDDK) test service:
_kubectl apply -f "./_out/manifests/vcenter.yaml"
fi
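
# Illustrative sketch, not from this PR: the Label("VDDK") decorators added to the Go tests pair with
# this guard; on architectures where vcenter.yaml is not deployed, those specs can be excluded with a
# Ginkgo v2 label filter. The ginkgo invocation and test path below are assumptions.
function skip_vddk_specs_example {
  ginkgo --label-filter='!VDDK' ./tests/...
}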
@@ -198,7 +237,9 @@ fi
mkdir -p ./_out/tests
rm -f $OLD_CDI_VER_PODS $NEW_CDI_VER_PODS

seed_images
if [[ ! $KUBEVIRT_PROVIDER =~ kind.* ]]; then
seed_images
fi

# Install CDI
install_cdi
6 changes: 3 additions & 3 deletions tests/datavolume_test.go
@@ -1074,7 +1074,7 @@ var _ = Describe("[vendor:[email protected]][level:component]DataVolume tests",
Message: "Import Complete",
Reason: "Completed",
}}),
Entry("[test_id:5077]succeed creating import dv from VDDK source", dataVolumeTestArguments{
Entry("[test_id:5077]succeed creating import dv from VDDK source", Label("VDDK"), dataVolumeTestArguments{
name: "dv-import-vddk",
size: "1Gi",
url: vcenterURL,
@@ -1202,7 +1202,7 @@ var _ = Describe("[vendor:[email protected]][level:component]DataVolume tests",

testDataVolume(args)
},
Entry("[test_id:5079]should fail with \"AwaitingVDDK\" reason when VDDK image config map is not present", dataVolumeTestArguments{
Entry("[test_id:5079]should fail with \"AwaitingVDDK\" reason when VDDK image config map is not present", Label("VDDK"), dataVolumeTestArguments{
name: "dv-awaiting-vddk",
size: "1Gi",
url: vcenterURL,
@@ -1230,7 +1230,7 @@ var _ = Describe("[vendor:[email protected]][level:component]DataVolume tests",
Type: cdiv1.DataVolumeRunning,
Status: v1.ConditionFalse,
}}),
Entry("[test_id:5080]succeed importing VDDK data volume with init image URL set", dataVolumeTestArguments{
Entry("[test_id:5080]succeed importing VDDK data volume with init image URL set", Label("VDDK"), dataVolumeTestArguments{
name: "dv-import-vddk",
size: "1Gi",
url: vcenterURL,
2 changes: 1 addition & 1 deletion tests/framework/pvc.go
@@ -340,7 +340,7 @@ func (f *Framework) VerifyPermissions(namespace *k8sv1.Namespace, pvc *k8sv1.Per
return f.verifyInPod(namespace, pvc, cmd, func(output, stderr string) (bool, error) {
fmt.Fprintf(ginkgo.GinkgoWriter, "INFO: permissions of disk.img: %s\n", output)

return strings.Compare(output, "-rw-rw----.") == 0, nil
return strings.Contains(output, "-rw-rw----"), nil
})
}

10 changes: 5 additions & 5 deletions tests/import_test.go
@@ -57,7 +57,7 @@ var _ = Describe("[rfe_id:1115][crit:high][vendor:[email protected]][level:compo
ns = f.Namespace.Name
})

DescribeTable("[test_id:2329] Should fail to import images that require too much space", func(uploadURL string) {
DescribeTable("[test_id:2329] Should fail to import images that require too much space", Label("no-kubernetes-in-docker"), func(uploadURL string) {
imageURL := fmt.Sprintf(uploadURL, f.CdiInstallNs)

By(imageURL)
@@ -1268,7 +1268,7 @@ var _ = Describe("Preallocation", func() {
dataVolume.Annotations[controller.AnnPodRetainAfterCompletion] = "true"
return dataVolume
}),
Entry("VddkImport", true, utils.VcenterMD5, utils.DefaultImagePath, func() *cdiv1.DataVolume {
Entry("VddkImport", Label("VDDK"), true, utils.VcenterMD5, utils.DefaultImagePath, func() *cdiv1.DataVolume {
// Find vcenter-simulator pod
pod, err := utils.FindPodByPrefix(f.K8sClient, f.CdiInstallNs, "vcenter-deployment", "app=vcenter")
Expect(err).ToNot(HaveOccurred())
@@ -1609,8 +1609,8 @@ var _ = Describe("Import populator", func() {
Entry("[test_id:11005]with Registry image without preallocation", utils.TinyCoreMD5, createRegistryImportPopulatorCR, false, false),
Entry("[test_id:11006]with ImageIO image with preallocation", Serial, utils.ImageioMD5, createImageIOImportPopulatorCR, true, false),
Entry("[test_id:11007]with ImageIO image without preallocation", Serial, utils.ImageioMD5, createImageIOImportPopulatorCR, false, false),
Entry("[test_id:11008]with VDDK image with preallocation", utils.VcenterMD5, createVDDKImportPopulatorCR, true, false),
Entry("[test_id:11009]with VDDK image without preallocation", utils.VcenterMD5, createVDDKImportPopulatorCR, false, false),
Entry("[test_id:11008]with VDDK image with preallocation", Label("VDDK"), utils.VcenterMD5, createVDDKImportPopulatorCR, true, false),
Entry("[test_id:11009]with VDDK image without preallocation", Label("VDDK"), utils.VcenterMD5, createVDDKImportPopulatorCR, false, false),
Entry("[test_id:11010]with Blank image with preallocation", utils.BlankMD5, createBlankImportPopulatorCR, true, false),
Entry("[test_id:11011]with Blank image without preallocation", utils.BlankMD5, createBlankImportPopulatorCR, false, false),
)
@@ -1658,7 +1658,7 @@ var _ = Describe("Import populator", func() {
Entry("with HTTP image", utils.TinyCoreMD5, createHTTPImportPopulatorCR),
Entry("with Registry image", utils.TinyCoreMD5, createRegistryImportPopulatorCR),
Entry("with ImageIO image", Serial, utils.ImageioMD5, createImageIOImportPopulatorCR),
Entry("with VDDK image", utils.VcenterMD5, createVDDKImportPopulatorCR),
Entry("with VDDK image", Label("VDDK"), utils.VcenterMD5, createVDDKImportPopulatorCR),
Entry("with Blank image", utils.BlankMD5, createBlankImportPopulatorCR),
)

2 changes: 1 addition & 1 deletion tests/upload_test.go
@@ -307,7 +307,7 @@ var _ = Describe("[rfe_id:138][crit:high][vendor:[email protected]][level:compon
Entry("fail given a large physical size QCOW2 file", utils.UploadFileLargePhysicalDiskQcow),
)

DescribeTable("[posneg:negative][test_id:2330]Verify failure on sync upload if virtual size > pvc size", Serial, func(filename string) {
DescribeTable("[posneg:negative][test_id:2330]Verify failure on sync upload if virtual size > pvc size", Label("no-kubernetes-in-docker"), Serial, func(filename string) {
By("Verify PVC annotation says ready")
found, err := utils.WaitPVCPodStatusReady(f.K8sClient, pvc)
Expect(err).ToNot(HaveOccurred())