diff --git a/README.md b/README.md index 19beee534..497771d89 100644 --- a/README.md +++ b/README.md @@ -37,11 +37,8 @@ For Guest UEFI Secure Boot tests, the requirements are: * `openssl` * VM * `chattr` - * `efitools` for uefistored auth var tests - * `util-linux` for uefistored auth var tests in Alpine VMs -* XCP-ng Host (installed by default on XCP-ng 8.2+) - * `uefistored` - * `varstored-tools` + * `efitools` for uefistored (in 8.2) or varstored (in 8.3+) auth var tests + * `util-linux` for uefistored (in 8.2) or varstored (in 8.3+) auth var tests in Alpine VMs Many tests have specific requirements, detailed in a comment at the top of the test file: minimal number of hosts in a pool, number of pools, VMs with specific characteristics (OS, BIOS vs UEFI, additional tools installed in the VM, additional networks in the pool, presence of an unused disk on one host or every host...). Markers, jobs defined in `jobs.py` (`./jobs.py show JOBNAME` will display the requirements and the reference to a VM or VM group), VMs and VM groups defined in `vm-data.py-dist` may all help understanding what tests can run with what VMs. 
@@ -110,7 +107,7 @@ Another example: ``` # Run secure boot tests that require a Unix VM (as opposed to a Windows VM) and that should ideally be run on a large variety of VMs -pytest tests/uefistored -m "multi_vms and unix_vm" --hosts=ip_of_poolmaster --vm=http://path/to/unix_vm_1.xva --vm=http://path/to/unix_vm_2.xva --vm=http://path/to/unix_vm_3.xva +pytest tests/uefi_sb -m "multi_vms and unix_vm" --hosts=ip_of_poolmaster --vm=http://path/to/unix_vm_1.xva --vm=http://path/to/unix_vm_2.xva --vm=http://path/to/unix_vm_3.xva ``` @@ -135,16 +132,20 @@ The output of commands below is given as example and may not reflect the current $ ./jobs.py list main: a group of not-too-long tests that run either without a VM, or with a single small one main-multi: a group of tests that need to run on the largest variety of VMs +packages: tests that packages can be installed correctly quicktest: XAPI's quicktest, not so quick by the way -storage-main: tests all storage drivers (except linstor), but avoids migrations and reboots -storage-migrations: tests migrations with all storage drivers (except linstor) -storage-reboots: storage driver tests that involve rebooting hosts (except linstor and flaky tests) -sb-main: tests uefistored and SecureBoot using a small unix VM (or no VM when none needed) -sb-windows: tests uefistored and SecureBoot using a Windows VM +storage-main: tests all storage drivers, but avoids migrations and reboots +storage-migrations: tests migrations with all storage drivers +storage-reboots: storage driver tests that involve rebooting hosts (except flaky tests) +sb-main: tests uefistored/varstored and SecureBoot using a small unix VM (or no VM when none needed) +sb-certificates: tests certificate propagation to disk by XAPI, and to VMs by uefistored/varstored +sb-windows: tests uefistored/varstored and SecureBoot using a Windows VM sb-unix-multi: checks basic Secure-Boot support on a variety of Unix VMs sb-windows-multi: checks basic Secure-Boot support on a 
variety of Windows VMs tools-unix: tests our unix guest tools on a single small VM tools-unix-multi: tests our unix guest tools on a variety of VMs +xen: Testing of the Xen hypervisor itself +vtpm: Testing vTPM functionalities flaky: tests that usually pass, but sometimes fail unexpectedly ``` @@ -163,7 +164,7 @@ $ ./jobs.py show sb-unix-multi "--vm[]": "multi/uefi_unix" }, "paths": [ - "tests/uefistored" + "tests/uefi_sb" ], "markers": "multi_vms and unix_vm" } @@ -177,16 +178,19 @@ A very important information is also the `--vm` (single VM) or `--vm[]` (multipl There are two more commands that you can use to display information about a job: ``` -$ ./jobs.py collect sb-unix-multi +$ ./jobs.py collect tools-unix [...] -collected 175 items / 170 deselected / 5 selected +collected 6 items - - - - - - + + + + + + + + + ``` This lists the tests that are selected by the job. Tests may be repeated if they will run several times, as in the case of this example because there are 3 VMs to test. I chose a job whose output is small for the sake of documentation conciseness, but the output can be a lot bigger! 
@@ -196,11 +200,11 @@ Lastly, the `run` command with the `--print-only` switch will display the comman ``` # job with default parameters $ ./jobs.py run --print-only sb-unix-multi ip_of_poolmaster -pytest tests/uefistored -m "multi_vms and unix_vm" --hosts=ip_of_poolmaster --vm=http://path/to/vm1.xva --vm=http://path/to/vm2.xva --vm=http://path/to/vm3.xva +pytest tests/uefi_sb -m "multi_vms and unix_vm" --hosts=ip_of_poolmaster --vm=http://path/to/vm1.xva --vm=http://path/to/vm2.xva --vm=http://path/to/vm3.xva # same, but we override the list of VMs $ ./jobs.py run --print-only sb-unix-multi ip_of_poolmaster --vm=http://path/to/vm4.xva -pytest tests/uefistored -m "multi_vms and unix_vm" --hosts=ip_of_poolmaster --vm=http://path/to/vm4.xva +pytest tests/uefi_sb -m "multi_vms and unix_vm" --hosts=ip_of_poolmaster --vm=http://path/to/vm4.xva ``` #### Run a job @@ -221,7 +225,7 @@ Example: ``` # job with default parameters $ ./jobs.py run sb-unix-multi ip_of_poolmaster -pytest tests/uefistored -m "multi_vms and unix_vm" --hosts=ip_of_poolmaster --vm=http://path/to/vm1.xva --vm=http://path/to/vm2.xva --vm=http://path/to/vm3.xva +pytest tests/uefi_sb -m "multi_vms and unix_vm" --hosts=ip_of_poolmaster --vm=http://path/to/vm1.xva --vm=http://path/to/vm2.xva --vm=http://path/to/vm3.xva [... job executes...] ``` @@ -230,7 +234,7 @@ Any parameter added at the end of the command will be passed to `pytest`. Any pa ``` # same, but we override the list of VMs $ ./jobs.py run --print-only sb-unix-multi ip_of_poolmaster --vm=http://path/to/vm4.xva -pytest tests/uefistored -m "multi_vms and unix_vm" --hosts=ip_of_poolmaster --vm=http://path/to/vm4.xva +pytest tests/uefi_sb -m "multi_vms and unix_vm" --hosts=ip_of_poolmaster --vm=http://path/to/vm4.xva [... job executes...] 
``` diff --git a/jobs.py b/jobs.py index fb6af3713..e3ed59570 100755 --- a/jobs.py +++ b/jobs.py @@ -128,13 +128,17 @@ "params": { "--vm": "single/small_vm_efitools", }, - "paths": ["tests/uefistored/test_auth_var.py", "tests/uefistored/test_secure_boot.py"], + "paths": [ + "tests/uefi_sb/test_auth_var.py", + "tests/uefi_sb/test_uefistored_sb.py", + "tests/uefi_sb/test_varstored_sb.py" + ], "markers": "not windows_vm", }, "sb-certificates": { - "description": "[8.3+] tests certificate propagation to disk by XAPI, and to VMs by uefistored/varstored", + "description": "tests certificate propagation to disk by XAPI, and to VMs by uefistored/varstored", "requirements": [ - "A pool >= 8.2.1. On 8.3+, it needs at least two hosts.", + "A pool >= 8.2.1. On 8.3+, it needs at least two hosts. On 8.2, one is enough but more is better.", "On 8.3+ only, a second pool, single-host, available for temporarily joining the first pool.", "A fast-booting unix UEFI VM with efitools.", ], @@ -143,7 +147,7 @@ "params": { "--vm": "single/small_vm_efitools", }, - "paths": ["tests/uefistored/test_cert_inheritance.py"], + "paths": ["tests/uefi_sb/test_uefistored_cert_flow.py", "tests/uefi_sb/test_varstored_cert_flow.py"], }, "sb-windows": { "description": "tests uefistored/varstored and SecureBoot using a Windows VM", @@ -155,7 +159,7 @@ "params": { "--vm": "single/small_vm_windows", }, - "paths": ["tests/uefistored"], + "paths": ["tests/uefi_sb"], "markers": "windows_vm", }, "sb-unix-multi": { @@ -169,7 +173,7 @@ "params": { "--vm[]": "multi/uefi_unix", }, - "paths": ["tests/uefistored"], + "paths": ["tests/uefi_sb"], "markers": "multi_vms and unix_vm", }, "sb-windows-multi": { @@ -182,7 +186,7 @@ "params": { "--vm[]": "multi/uefi_windows", }, - "paths": ["tests/uefistored"], + "paths": ["tests/uefi_sb"], "markers": "multi_vms and windows_vm", }, "tools-unix": { diff --git a/lib/efi.py b/lib/efi.py index ec31f67b2..aef7a32da 100755 --- a/lib/efi.py +++ b/lib/efi.py @@ -4,6 +4,7 @@ import 
atexit import copy +import hashlib import logging import os import shutil @@ -442,6 +443,8 @@ def esl_from_auth_bytes(auth: bytes) -> bytes: """ return auth[auth.index(EFI_CERT_X509_GUID):] +def get_md5sum_from_auth(auth): + return hashlib.md5(esl_from_auth_file(auth)).hexdigest() if __name__ == '__main__': import argparse diff --git a/lib/host.py b/lib/host.py index 398721fba..196852530 100644 --- a/lib/host.py +++ b/lib/host.py @@ -391,6 +391,9 @@ def file_exists(self, filepath, regular_file=True): def binary_exists(self, binary): return self.ssh_with_result(['which', binary]).returncode == 0 + def is_symlink(self, filepath): + return self.ssh_with_result(['test', '-L', filepath]).returncode == 0 + def sr_create(self, sr_type, label, device_config, shared=False, verify=False): params = { 'host-uuid': self.uuid, diff --git a/lib/pool.py b/lib/pool.py index e8eb1bc4a..46afe4902 100644 --- a/lib/pool.py +++ b/lib/pool.py @@ -1,6 +1,8 @@ import logging import traceback +from packaging import version + import lib.commands as commands from lib.common import safe_split, wait_for, wait_for_not @@ -97,6 +99,24 @@ def first_shared_sr(self): return None def save_uefi_certs(self): + """ + Save UEFI certificates in order to restore them later. XCP-ng 8.2 only. + + This method was developed for XCP-ng 8.2, because many secureboot tests were dependent + on the initial state of the pool certificates, due to how certificates propagate. + Also, there were no certificates installed by default (except PK) on XCP-ng 8.2, and + we tried to be nice and restore the initial state after the tests. + + On XCP-ng 8.3+, the tests don't depend so much on the pool certificates, and when they do we + can simply set custom certificates without erasing the default ones, so there's no real need + for saving then restoring the certificates. + The method was not reviewed for XCP-ng 8.3, and tests should be written in a way that is not + dependent on the initial state of pool certificates. 
To prevent ourselves from using a method + that is not appropriate, assert that the version is lower than 8.3. + + This can be revised later if a need for saving custom certificates in 8.3+ arises. + """ + assert self.master.xcp_version < version.parse("8.3"), "this function should only be needed on XCP-ng 8.2" logging.info('Saving pool UEFI certificates') if int(self.master.ssh(["secureboot-certs", "--version"]).split(".")[0]) < 1: @@ -136,6 +156,8 @@ ) def restore_uefi_certs(self): + # See explanation in save_uefi_certs(). + assert self.master.xcp_version < version.parse("8.3"), "this function should only be needed on XCP-ng 8.2" assert self.saved_uefi_certs is not None if len(self.saved_uefi_certs) == 0: logging.info('We need to clear pool UEFI certificates to restore initial state') @@ -156,12 +178,29 @@ self.saved_uefi_certs = None def clear_uefi_certs(self): + """ + Clear UEFI certificates on XCP-ng 8.2. + + On XCP-ng 8.2, clearing the certificates from XAPI doesn't clear them from disk, so we need to do so manually. + + This method is not suitable for XCP-ng 8.3+, where only custom certificates can be modified, and this + must all be done through XAPI (which will delete them from disk on each host automatically). + + For XCP-ng 8.3+, see clear_custom_uefi_certs() + """ + assert self.master.xcp_version < version.parse("8.3"), "function only relevant on XCP-ng 8.2" logging.info('Clearing pool UEFI certificates in XAPI and on hosts disks') self.master.ssh(['secureboot-certs', 'clear']) # remove files on each host for host in self.hosts: host.ssh(['rm', '-f', f'{host.varstore_dir()}/*']) + def clear_custom_uefi_certs(self): + """ Clear Custom UEFI certificates on XCP-ng 8.3+. 
""" + assert self.master.xcp_version >= version.parse("8.3"), "function only relevant on XCP-ng 8.3+" + logging.info('Clearing custom pool UEFI certificates') + self.master.ssh(['secureboot-certs', 'clear']) + def install_custom_uefi_certs(self, auths): host = self.master auths_dict = {} diff --git a/lib/vm.py b/lib/vm.py index bb759b19a..1be62d6b3 100644 --- a/lib/vm.py +++ b/lib/vm.py @@ -392,6 +392,8 @@ def clear_uefi_variables(self): This makes it look like the VM is new, in the eyes of uefistored/varstored, and so it will propagate certs from disk to its NVRAM when it boots next. + + Some VMs will not boot anymore after such an operation. Seen with debian VMs, for example. """ self.param_remove('NVRAM', 'EFI-variables') @@ -536,3 +538,19 @@ def is_in_uefi_shell(self): res_host.ssh(['screen', '-S', session, '-X', 'quit'], check=False) res_host.ssh(['rm', '-f', tmp_file], check=False) return ret + + def set_uefi_setup_mode(self): + # Note that in XCP-ng 8.2, the VM won't stay in setup mode, because uefistored + # will add PK and other certs if available when the guest boots. 
+ logging.info(f"Set VM {self.uuid} to UEFI setup mode") + self.host.ssh(["varstore-sb-state", self.uuid, "setup"]) + + def set_uefi_user_mode(self): + # Setting user mode propagates the host's certificates to the VM + logging.info(f"Set VM {self.uuid} to UEFI user mode") + self.host.ssh(["varstore-sb-state", self.uuid, "user"]) + + def is_cert_present(vm, key): + res = vm.host.ssh(['varstore-get', vm.uuid, efi.get_secure_boot_guid(key).as_str(), key], + check=False, simple_output=False, decode=False) + return res.returncode == 0 diff --git a/tests/uefistored/__init__.py b/tests/uefi_sb/__init__.py similarity index 100% rename from tests/uefistored/__init__.py rename to tests/uefi_sb/__init__.py diff --git a/tests/uefistored/conftest.py b/tests/uefi_sb/conftest.py similarity index 70% rename from tests/uefistored/conftest.py rename to tests/uefi_sb/conftest.py index 2ee44023c..6fe94ed38 100644 --- a/tests/uefistored/conftest.py +++ b/tests/uefi_sb/conftest.py @@ -1,8 +1,11 @@ import logging import pytest +from packaging import version + @pytest.fixture(scope='module') def pool_without_uefi_certs(host): + assert host.xcp_version < version.parse("8.3"), "fixture only relevant on XCP-ng 8.2" pool = host.pool # Save the certs. @@ -22,9 +25,10 @@ def uefi_vm_and_snapshot(uefi_vm): vm = uefi_vm # Any VM that has been booted at least once comes with some - # UEFI variable state, so clear the state of UEFI variables. 
- logging.info('Clear VM UEFI certs and set SB to false') - vm.clear_uefi_variables() + # UEFI variable state, so simply clear the state of + # secure boot specific variables + vm.set_uefi_setup_mode() + logging.info('Set platform.secureboot to false for VM') vm.param_set('platform', 'secureboot', False) snapshot = vm.snapshot() diff --git a/tests/uefistored/test_auth_var.py b/tests/uefi_sb/test_auth_var.py similarity index 100% rename from tests/uefistored/test_auth_var.py rename to tests/uefi_sb/test_auth_var.py diff --git a/tests/uefi_sb/test_uefistored_cert_flow.py b/tests/uefi_sb/test_uefistored_cert_flow.py new file mode 100644 index 000000000..916b7c2ce --- /dev/null +++ b/tests/uefi_sb/test_uefistored_cert_flow.py @@ -0,0 +1,197 @@ +import hashlib +import logging +import pytest + +from .utils import check_disk_cert_md5sum, check_vm_cert_md5sum, generate_keys, revert_vm_state + +# These tests check the behaviour of XAPI and uefistored as they are in XCP-ng 8.2 +# For XCP-ng 8.3 or later, see test_varstored_cert_flow.py + +# Requirements: +# On the test runner: +# - See requirements documented in the project's README.md for Guest UEFI Secure Boot tests +# From --hosts parameter: +# - host: XCP-ng host 8.2.x only (+ updates) +# with UEFI certs either absent, or present and consistent (state will be saved and restored) +# Ideally master of a pool with 2 hosts or more + +pytestmark = pytest.mark.default_vm('mini-linux-x86_64-uefi') + +def install_certs_to_disks(pool, certs_dict, keys): + for host in pool.hosts: + logging.debug('Installing to host %s:' % host.hostname_or_ip) + for key in keys: + value = certs_dict[key].auth + with open(value, 'rb') as f: + hash = hashlib.md5(f.read()).hexdigest() + logging.debug(' - key: %s, value: %s' % (key, hash)) + host.scp(value, f'{host.varstore_dir()}/{key}.auth') + +@pytest.mark.small_vm +@pytest.mark.usefixtures("host_less_than_8_3", "pool_without_uefi_certs") +class TestPoolToDiskCertInheritanceAtVmStart: + 
@pytest.fixture(autouse=True) + def setup_and_cleanup(self, uefi_vm_and_snapshot): + vm, snapshot = uefi_vm_and_snapshot + yield + # Revert the VM, which has the interesting effect of also shutting it down instantly + revert_vm_state(vm, snapshot) + # clear pool certs for next test + vm.host.pool.clear_uefi_certs() + + def test_pool_certs_present_and_disk_certs_absent(self, uefi_vm): + vm = uefi_vm + # start with certs on pool and no certs on host disks + pool_auths = generate_keys(as_dict=True) + vm.host.pool.install_custom_uefi_certs([pool_auths[key] for key in ['PK', 'KEK', 'db', 'dbx']]) + # start a VM so that certs may be synced to disk if appropriate + vm.start() + residence_host = vm.get_residence_host() + logging.info('Check that the certs have been written on the disk of the host that started the VM.') + for key in ['PK', 'KEK', 'db', 'dbx']: + check_disk_cert_md5sum(residence_host, key, pool_auths[key].auth) + + def test_pool_certs_present_and_disk_certs_different(self, uefi_vm): + vm = uefi_vm + # start with different certs on pool and disks + pool_auths = generate_keys(as_dict=True) + disk_auths = generate_keys(as_dict=True) + vm.host.pool.install_custom_uefi_certs([pool_auths[key] for key in ['PK', 'KEK', 'db', 'dbx']]) + logging.info("Installing different certs to hosts disks") + install_certs_to_disks(vm.host.pool, disk_auths, ['PK', 'KEK', 'db', 'dbx']) + # start a VM so that certs may be synced to disk if appropriate + vm.start() + residence_host = vm.get_residence_host() + logging.info('Check that the certs have been updated on the disk of the host that started the VM.') + for key in ['PK', 'KEK', 'db', 'dbx']: + check_disk_cert_md5sum(residence_host, key, pool_auths[key].auth) + + def test_pool_certs_absent_and_disk_certs_present(self, uefi_vm): + vm = uefi_vm + # start with no pool certs and with certs on disks + disk_auths = generate_keys(as_dict=True) + logging.info("Installing certs to hosts disks") + install_certs_to_disks(vm.host.pool, 
disk_auths, ['PK', 'KEK', 'db', 'dbx']) + # start a VM so that certs may be synced to disk if appropriate + vm.start() + residence_host = vm.get_residence_host() + logging.info('Check that the certs on disk have not changed after the VM started.') + for key in ['PK', 'KEK', 'db', 'dbx']: + check_disk_cert_md5sum(residence_host, key, disk_auths[key].auth) + + def test_pool_certs_present_and_some_different_disk_certs_present(self, uefi_vm): + vm = uefi_vm + # start with all certs on pool and just two certs on disks + pool_auths = generate_keys(as_dict=True) + disk_auths = generate_keys(as_dict=True) + vm.host.pool.install_custom_uefi_certs([pool_auths[key] for key in ['PK', 'KEK', 'db', 'dbx']]) + logging.info("Installing different certs to hosts disks") + install_certs_to_disks(vm.host.pool, disk_auths, ['KEK', 'dbx']) + # start a VM so that certs may be synced to disk if appropriate + vm.start() + residence_host = vm.get_residence_host() + logging.info('Check that the certs have been added or updated on the disk of the host that started the VM.') + for key in ['PK', 'KEK', 'db', 'dbx']: + check_disk_cert_md5sum(residence_host, key, pool_auths[key].auth) + + def test_pool_certs_present_except_dbx_and_disk_certs_different(self, uefi_vm): + vm = uefi_vm + # start with no dbx on pool and all, different, certs on disks + pool_auths = generate_keys(as_dict=True) + disk_auths = generate_keys(as_dict=True) + vm.host.pool.install_custom_uefi_certs([pool_auths[key] for key in ['PK', 'KEK', 'db']]) + logging.info("Installing different certs to hosts disks, including a dbx") + install_certs_to_disks(vm.host.pool, disk_auths, ['PK', 'KEK', 'db', 'dbx']) + # start a VM so that certs may be synced to disk if appropriate + vm.start() + residence_host = vm.get_residence_host() + logging.info('Check that the certs have been updated on the disk of the host that started the VM, except dbx.') + for key in ['PK', 'KEK', 'db']: + check_disk_cert_md5sum(residence_host, key, 
pool_auths[key].auth) + check_disk_cert_md5sum(residence_host, 'dbx', disk_auths['dbx'].auth) + + def test_pool_certs_present_and_disk_certs_present_and_same(self, uefi_vm): + vm = uefi_vm + # start with the same certs on pool and on host disks + pool_auths = generate_keys(as_dict=True) + vm.host.pool.install_custom_uefi_certs([pool_auths[key] for key in ['PK', 'KEK', 'db', 'dbx']]) + install_certs_to_disks(vm.host.pool, pool_auths, ['PK', 'KEK', 'db', 'dbx']) + # start a VM so that certs may be synced to disk if appropriate + vm.start() + residence_host = vm.get_residence_host() + logging.info('Check that the certs have been written on the disk of the host that started the VM.') + for key in ['PK', 'KEK', 'db', 'dbx']: + check_disk_cert_md5sum(residence_host, key, pool_auths[key].auth) + + +@pytest.mark.small_vm +@pytest.mark.usefixtures("host_less_than_8_3", "pool_without_uefi_certs") +class TestPoolToVMCertInheritance: + @pytest.fixture(autouse=True) + def setup_and_cleanup(self, uefi_vm_and_snapshot): + vm, snapshot = uefi_vm_and_snapshot + yield + # Revert the VM, which has the interesting effect of also shutting it down instantly + revert_vm_state(vm, snapshot) + # clear pool certs for next test + vm.host.pool.clear_uefi_certs() + + def test_pool_certs_absent_and_vm_certs_absent(self, uefi_vm): + vm = uefi_vm + # start with no certs on pool and no certs in the VM + # start the VM so that certs may be synced to it if appropriate + vm.start() + logging.info("Check that the VM still has no certs") + for key in ['PK', 'KEK', 'db', 'dbx']: + assert not vm.is_cert_present(key) + + def test_pool_certs_present_and_vm_certs_absent(self, uefi_vm): + vm = uefi_vm + # start with certs on pool and no certs in the VM + pool_auths = generate_keys(as_dict=True) + vm.host.pool.install_custom_uefi_certs([pool_auths[key] for key in ['PK', 'KEK', 'db', 'dbx']]) + # start the VM so that certs may be synced to it if appropriate + vm.start() + logging.info("Check that the VM got the 
pool certs") + for key in ['PK', 'KEK', 'db', 'dbx']: + check_vm_cert_md5sum(vm, key, pool_auths[key].auth) + + def test_pool_certs_present_and_vm_certs_present(self, uefi_vm): + vm = uefi_vm + # start with all certs on pool and in the VM + pool_auths = generate_keys(as_dict=True) + vm_auths = generate_keys(as_dict=True) + vm.host.pool.install_custom_uefi_certs([pool_auths[key] for key in ['PK', 'KEK', 'db', 'dbx']]) + vm.install_uefi_certs([vm_auths[key] for key in ['PK', 'KEK', 'db', 'dbx']]) + # start the VM so that certs may be synced to it if appropriate + vm.start() + logging.info("Check that the VM certs are unchanged") + for key in ['PK', 'KEK', 'db', 'dbx']: + check_vm_cert_md5sum(vm, key, vm_auths[key].auth) + + def test_pools_certs_absent_and_vm_certs_present(self, uefi_vm): + vm = uefi_vm + # start with no certs on pool and all certs in the VM + vm_auths = generate_keys(as_dict=True) + vm.install_uefi_certs([vm_auths[key] for key in ['PK', 'KEK', 'db', 'dbx']]) + # start the VM so that certs may be synced to it if appropriate + vm.start() + logging.info("Check that the VM certs are unchanged") + for key in ['PK', 'KEK', 'db', 'dbx']: + check_vm_cert_md5sum(vm, key, vm_auths[key].auth) + + def test_pool_certs_partially_present_and_vm_certs_partially_present(self, uefi_vm): + vm = uefi_vm + # start with some certs on pool and some certs in the VM, partially overlapping + pool_auths = generate_keys(as_dict=True) + vm_auths = generate_keys(as_dict=True) + vm.host.pool.install_custom_uefi_certs([pool_auths[key] for key in ['PK', 'KEK', 'db']]) + # don't ask why the VM only has db and dbx certs. It's for the test. 
+ vm.install_uefi_certs([vm_auths[key] for key in ['db', 'dbx']]) + # start the VM so that certs may be synced to it if appropriate + vm.start() + logging.info("Check that the VM db and dbx certs are unchanged and PK and KEK were updated") + for key in ['PK', 'KEK']: + check_vm_cert_md5sum(vm, key, pool_auths[key].auth) + for key in ['db', 'dbx']: + check_vm_cert_md5sum(vm, key, vm_auths[key].auth) diff --git a/tests/uefistored/test_secure_boot.py b/tests/uefi_sb/test_uefistored_sb.py similarity index 69% rename from tests/uefistored/test_secure_boot.py rename to tests/uefi_sb/test_uefistored_sb.py index 527d4fdf3..21960c08d 100644 --- a/tests/uefistored/test_secure_boot.py +++ b/tests/uefi_sb/test_uefistored_sb.py @@ -3,15 +3,18 @@ from lib.commands import SSHCommandFailed from lib.common import wait_for -from lib.efi import EFIAuth, EFI_AT_ATTRS_BYTES -from .utils import generate_keys, revert_vm_state, VM_SECURE_BOOT_FAILED +from .utils import _test_key_exchanges, boot_and_check_no_sb_errors, boot_and_check_sb_failed, \ + boot_and_check_sb_succeeded, generate_keys, revert_vm_state, sign_efi_bins, VM_SECURE_BOOT_FAILED + +# These tests check the behaviour of XAPI and uefistored as they are in XCP-ng 8.2 +# For XCP-ng 8.3 or later, see test_varstored_sb.py # Requirements: # On the test runner: # - See requirements documented in the project's README.md for Guest UEFI Secure Boot tests # From --hosts parameter: -# - host: XCP-ng host >= 8.2 (+ updates) +# - host: XCP-ng host 8.2.x only (+ updates) # with UEFI certs either absent, or present and consistent (state will be saved and restored) # From --vm parameter # - A UEFI VM to import @@ -19,44 +22,8 @@ pytestmark = pytest.mark.default_vm('mini-linux-x86_64-uefi') -def boot_and_check_sb_failed(vm): - vm.start() - wait_for( - lambda: vm.get_messages(VM_SECURE_BOOT_FAILED), - 'Wait for message %s' % VM_SECURE_BOOT_FAILED - ) - - # If there is a VM_SECURE_BOOT_FAILED message and yet the OS still - # successfully booted, 
this is a uefistored bug - assert vm.is_in_uefi_shell() - -def boot_and_check_no_sb_errors(vm): - vm.start() - vm.wait_for_vm_running_and_ssh_up() - logging.info("Verify there's no %s message" % VM_SECURE_BOOT_FAILED) - assert not vm.get_messages(VM_SECURE_BOOT_FAILED) - -def boot_and_check_sb_succeeded(vm): - boot_and_check_no_sb_errors(vm) - logging.info("Check that SB is enabled according to the OS.") - assert vm.booted_with_secureboot() - -def sign_efi_bins(vm, db): - '''Boots the VM if it is halted, signs the bootloader, and halts the - VM again (if halted was its original state). - ''' - shutdown = not vm.is_running() - if shutdown: - vm.start() - vm.wait_for_vm_running_and_ssh_up() - - logging.info('> Sign bootloader') - vm.sign_efi_bins(db) - - if shutdown: - vm.shutdown(verify=True) - @pytest.mark.small_vm +@pytest.mark.usefixtures("host_less_than_8_3") @pytest.mark.usefixtures("pool_without_uefi_certs", "unix_vm") class TestGuestLinuxUEFISecureBoot: @pytest.fixture(autouse=True) @@ -148,6 +115,7 @@ def test_sb_off_really_means_off(self, uefi_vm): assert not vm.booted_with_secureboot() +@pytest.mark.usefixtures("host_less_than_8_3") @pytest.mark.usefixtures("pool_without_uefi_certs", "windows_vm") class TestGuestWindowsUEFISecureBoot: @pytest.fixture(autouse=True) @@ -177,7 +145,8 @@ def test_windows_succeeds(self, uefi_vm): @pytest.mark.small_vm -@pytest.mark.usefixtures("pool_without_uefi_certs", "xfail_on_xcpng_8_3") +@pytest.mark.usefixtures("host_less_than_8_3") +@pytest.mark.usefixtures("pool_without_uefi_certs") class TestCertsMissingAndSbOn: @pytest.fixture(autouse=True) def setup_and_cleanup(self, uefi_vm_and_snapshot): @@ -235,6 +204,7 @@ def test_only_db_present_but_sb_on(self, uefi_vm): self.check_vm_start_fails_and_uefistored_dies(vm) @pytest.mark.small_vm +@pytest.mark.usefixtures("host_less_than_8_3") @pytest.mark.usefixtures("pool_without_uefi_certs", "unix_vm") class TestUEFIKeyExchange: @pytest.fixture(autouse=True) @@ -246,72 +216,4 @@ 
def setup_and_cleanup(self, uefi_vm_and_snapshot): def test_key_exchanges(self, uefi_vm): vm = uefi_vm - PK = EFIAuth('PK') - null_PK = EFIAuth('PK', is_null=True) - new_PK = EFIAuth('PK') - bad_PK = EFIAuth('PK') - - KEK = EFIAuth('KEK') - null_KEK = EFIAuth('KEK', is_null=True) - - db_from_KEK = EFIAuth('db') - db_from_PK = EFIAuth('db') - null_db_from_KEK = EFIAuth('db', is_null=True) - null_db_from_PK = EFIAuth('db', is_null=True) - - PK.sign_auth(PK) - PK.sign_auth(null_PK) - PK.sign_auth(KEK) - PK.sign_auth(null_KEK) - PK.sign_auth(new_PK) - PK.sign_auth(db_from_PK) - PK.sign_auth(null_db_from_PK) - PK.sign_auth(db_from_KEK) - PK.sign_auth(null_db_from_KEK) - KEK.sign_auth(db_from_KEK) - KEK.sign_auth(null_db_from_KEK) - bad_PK.sign_auth(bad_PK) - - vm.start() - vm.wait_for_vm_running_and_ssh_up() - - # at this point we should have a VM with no certs, on a pool with no certs either - - tests = [ - # Set the PK - (PK, True), - # Clear the PK - (null_PK, True), - # Set the PK again - (PK, True), - # Set a PK with the wrong sig, should fail and PK should be unchanged - (bad_PK, False), - # Set, clear, and reset the KEK - (KEK, True), - (null_KEK, True), - (KEK, True), - # Set and clear the db signed by the KEK - (db_from_KEK, True), - (null_db_from_KEK, True), - # Set and clear the db signed by the PK - (db_from_PK, True), - (null_db_from_PK, True), - # Set a new PK - (new_PK, True), - # Set old PK, should fail due to expired timestamp - (PK, False), - ] - - for i, (auth, should_succeed) in enumerate(tests): - logging.info('> Testing {} ({})'.format(auth.name, i)) - - ok = True - saved_exception = None - try: - vm.set_efi_var(auth.name, auth.guid, - EFI_AT_ATTRS_BYTES, auth.auth_data) - except SSHCommandFailed: - ok = False - - if (should_succeed and not ok) or (ok and not should_succeed): - raise AssertionError('Failed to set {} {}'.format(i, auth.name)) + _test_key_exchanges(vm) diff --git a/tests/uefi_sb/test_varstored_cert_flow.py 
b/tests/uefi_sb/test_varstored_cert_flow.py new file mode 100644 index 000000000..0f9c2de7d --- /dev/null +++ b/tests/uefi_sb/test_varstored_cert_flow.py @@ -0,0 +1,199 @@ +import logging +import pytest + +from lib.common import wait_for + +from .utils import check_disk_cert_md5sum, check_vm_cert_md5sum, generate_keys, revert_vm_state + +# These tests check the behaviour of XAPI and varstored as they are in XCP-ng 8.3 +# For XCP-ng 8.2, see test_uefistored_cert_flow.py + +# Requirements: +# On the test runner: +# - See requirements documented in the project's README.md for Guest UEFI Secure Boot tests +# From --hosts parameter: +# - host: XCP-ng host >= 8.3 +# Master of a, at least, 2 hosts pool +# - hostB1: XCP-ng host >= 8.3 +# This host will be joined and ejected from pool A, it means its state will be completely reinitialized from scratch + +pytestmark = pytest.mark.default_vm('mini-linux-x86_64-uefi') + +@pytest.mark.usefixtures("host_at_least_8_3", "hostA2") +class TestPoolToDiskCertPropagationToAllHosts: + def test_set_pool_certificates(self, host): + keys = ['PK', 'KEK', 'db', 'dbx'] + pool_auths = generate_keys(as_dict=True) + host.pool.install_custom_uefi_certs([pool_auths[key] for key in keys]) + for h in host.pool.hosts: + logging.info(f"Check Pool.set_uefi_certificates updated host {h} certificates in {host.varstore_dir()}.") + assert not h.is_symlink(host.varstore_dir()) + for key in keys: + check_disk_cert_md5sum(h, key, pool_auths[key].auth) + + def test_set_pool_certificates_partial(self, host): + keys = ['PK', 'KEK', 'db'] + missing_key = 'dbx' + pool_auths = generate_keys(as_dict=True) + host.pool.install_custom_uefi_certs([pool_auths[key] for key in keys]) + for h in host.pool.hosts: + logging.info(f"Check Pool.set_uefi_certificates updated host {h} certificates in {host.varstore_dir()}.") + assert not h.is_symlink(host.varstore_dir()) + for key in keys: + check_disk_cert_md5sum(h, key, pool_auths[key].auth) + assert not 
h.file_exists(f'{host.varstore_dir()}/{missing_key}.auth') + + def test_clear_custom_pool_certificates(self, host): + keys = ['PK', 'KEK', 'db', 'dbx'] + pool_auths = generate_keys(as_dict=True) + host.pool.install_custom_uefi_certs([pool_auths[key] for key in keys]) + host.pool.clear_custom_uefi_certs() + for h in host.pool.hosts: + logging.info(f"Check host {h} has no custom certificates on disk.") + assert h.is_symlink(host.varstore_dir()) + +@pytest.mark.small_vm +@pytest.mark.usefixtures("host_at_least_8_3") +class TestVMCertMisc: + @pytest.fixture(autouse=True, scope="function") + def auto_revert_vm(self, uefi_vm_and_snapshot): + vm, snapshot = uefi_vm_and_snapshot + yield + # Revert the VM, which has the interesting effect of also shutting it down instantly + revert_vm_state(vm, snapshot) + + def test_snapshot_revert_restores_certs(self, uefi_vm): + vm = uefi_vm + vm_auths = generate_keys(as_dict=True) + vm.install_uefi_certs([vm_auths[key] for key in ['PK', 'KEK', 'db', 'dbx']]) + snapshot = vm.snapshot() + try: + # clear all certs + vm.set_uefi_setup_mode() + snapshot.revert() + logging.info("Check that the VM certs were restored") + for key in ['PK', 'KEK', 'db', 'dbx']: + check_vm_cert_md5sum(vm, key, vm_auths[key].auth) + finally: + snapshot.destroy() + + def test_vm_import_restores_certs(self, uefi_vm, formatted_and_mounted_ext4_disk): + vm = uefi_vm + vm_auths = generate_keys(as_dict=True) + vm.install_uefi_certs([vm_auths[key] for key in ['PK', 'KEK', 'db', 'dbx']]) + filepath = formatted_and_mounted_ext4_disk + '/test-export-with-uefi-certs.xva' + vm.export(filepath, 'zstd') + vm2 = None + try: + vm2 = vm.host.import_vm(filepath) + logging.info("Check that the VM certs were imported with the VM") + for key in ['PK', 'KEK', 'db', 'dbx']: + check_vm_cert_md5sum(vm2, key, vm_auths[key].auth) + finally: + try: + if vm2 is not None: + logging.info(f"Destroy VM {vm2.uuid}") + vm2.destroy(verify=True) + finally: + vm.host.ssh(['rm', '-f', filepath], 
check=False) + +@pytest.mark.small_vm +@pytest.mark.usefixtures("host_at_least_8_3") +class TestPoolToVMCertInheritance: + @pytest.fixture(autouse=True, scope="function") + def auto_revert_vm(self, uefi_vm_and_snapshot): + vm, snapshot = uefi_vm_and_snapshot + yield + # Revert the VM, which has the interesting effect of also shutting it down instantly + revert_vm_state(vm, snapshot) + + def test_start_vm_without_uefi_vars(self, uefi_vm): + # The only situation where varstored will propagate the certs automatically + # at VM start is when the VM looks like it never started, that is it has no + # UEFI vars at all in its NVRAM. + vm = uefi_vm + vm.clear_uefi_variables() + pool_auths = generate_keys(as_dict=True) + vm.host.pool.install_custom_uefi_certs([pool_auths[key] for key in ['PK', 'KEK', 'db', 'dbx']]) + vm.start() + logging.info("Check that the VM certs were updated") + for key in ['PK', 'KEK', 'db', 'dbx']: + check_vm_cert_md5sum(vm, key, pool_auths[key].auth) + + def test_start_vm_in_setup_mode(self, uefi_vm): + # In setup mode, no cert is set, but other UEFI variables are present. + # varstored will *not* propagate the certs in this case. 
+ vm = uefi_vm + pool_auths = generate_keys(as_dict=True) + vm.host.pool.install_custom_uefi_certs([pool_auths[key] for key in ['PK', 'KEK', 'db', 'dbx']]) + # start the VM so that certs may be synced to it if appropriate + vm.start() + logging.info("Check that the VM certs are unchanged") + for key in ['PK', 'KEK', 'db', 'dbx']: + assert not vm.is_cert_present(key) + + def test_start_vm_which_already_has_pk(self, uefi_vm): + vm = uefi_vm + pool_auths = generate_keys(as_dict=True) + vm.host.pool.install_custom_uefi_certs([pool_auths[key] for key in ['PK', 'KEK', 'db', 'dbx']]) + vm_auths = generate_keys(as_dict=True) + vm.install_uefi_certs([vm_auths['PK']]) + # start the VM so that certs may be synced to it if appropriate + vm.start() + logging.info("Check that the VM certs are unchanged") + check_vm_cert_md5sum(vm, 'PK', vm_auths['PK'].auth) + for key in ['KEK', 'db', 'dbx']: + assert not vm.is_cert_present(key) + + def test_switching_to_user_mode(self, uefi_vm): + vm = uefi_vm + pool_auths = generate_keys(as_dict=True) + vm.host.pool.install_custom_uefi_certs([pool_auths[key] for key in ['PK', 'KEK', 'db', 'dbx']]) + vm.set_uefi_user_mode() + logging.info("Check that the VM certs were updated") + for key in ['PK', 'KEK', 'db', 'dbx']: + check_vm_cert_md5sum(vm, key, pool_auths[key].auth) + + # Now that the VM has had the certs added, let's see what happens + # if we call the command to switch to user mode again. + # But first, change the certs on disk or we won't see any changes. 
+ new_pool_auths = generate_keys(as_dict=True) + vm.host.pool.install_custom_uefi_certs([new_pool_auths[key] for key in ['PK', 'KEK', 'db', 'dbx']]) + vm.set_uefi_user_mode() + logging.info("Check that the VM certs were updated again") + for key in ['PK', 'KEK', 'db', 'dbx']: + check_vm_cert_md5sum(vm, key, new_pool_auths[key].auth) + +@pytest.mark.usefixtures("host_at_least_8_3") +class TestPoolToDiskCertInheritanceOnPoolJoin: + @pytest.fixture(scope='function') + def keys_auths_for_joined_host(self, host, hostB1): + from packaging import version + version_str = "8.3" + if not hostB1.xcp_version >= version.parse(version_str): + raise Exception(f"This test requires a second XCP-ng pool with version >= {version_str}") + + # Install certs before host join + keys = ['PK', 'KEK', 'db', 'dbx'] + pool_auths = generate_keys(as_dict=True) + host.pool.install_custom_uefi_certs([pool_auths[key] for key in keys]) + + logging.info(f"> Join host {hostB1} to pool {host} after certificates installed.") + hostB1.join_pool(host.pool) + joined_host = host.pool.get_host_by_uuid(hostB1.uuid) + yield keys, pool_auths, joined_host + + logging.info(f"< Eject host {joined_host} from pool {host}.") + # Warning: triggers a reboot of ejected host. 
+ host.pool.eject_host(joined_host) + host.pool.clear_custom_uefi_certs() + + def test_host_certificates_updated_after_join(self, keys_auths_for_joined_host): + keys, pool_auths, joined_host = keys_auths_for_joined_host + + for key in keys: + wait_for( + lambda: check_disk_cert_md5sum(joined_host, key, pool_auths[key].auth, do_assert=False), + f"Wait for new host '{key}' key to be identical to pool '{key}' key", + 60 + ) diff --git a/tests/uefi_sb/test_varstored_sb.py b/tests/uefi_sb/test_varstored_sb.py new file mode 100644 index 000000000..e1fa1a048 --- /dev/null +++ b/tests/uefi_sb/test_varstored_sb.py @@ -0,0 +1,156 @@ +import logging +import pytest + +from .utils import _test_key_exchanges, boot_and_check_no_sb_errors, boot_and_check_sb_failed, \ + boot_and_check_sb_succeeded, generate_keys, revert_vm_state, sign_efi_bins + +# These tests check the behaviour of XAPI and varstored as they are in XCP-ng 8.3 +# For XCP-ng 8.2, see test_uefistored_sb.py + +# Requirements: +# On the test runner: +# - See requirements documented in the project's README.md for Guest UEFI Secure Boot tests +# From --hosts parameter: +# - host: XCP-ng host >= 8.3 +# From --vm parameter +# - A UEFI VM to import +# Some tests are Linux-only and some tests are Windows-only. 
+ +pytestmark = pytest.mark.default_vm('mini-linux-x86_64-uefi') + +@pytest.mark.small_vm +@pytest.mark.usefixtures("host_at_least_8_3") +@pytest.mark.usefixtures("unix_vm") +class TestGuestLinuxUEFISecureBoot: + @pytest.fixture(autouse=True) + def setup_and_cleanup(self, uefi_vm_and_snapshot): + vm, snapshot = uefi_vm_and_snapshot + self.PK, self.KEK, self.db, self.dbx = generate_keys() + yield + revert_vm_state(vm, snapshot) + + @pytest.mark.multi_vms # test that SB works on various UEFI unix/linux VMs, not just on `small_vm` + def test_boot_success_when_vm_db_set_and_images_signed(self, uefi_vm): + vm = uefi_vm + vm.install_uefi_certs([self.PK, self.KEK, self.db]) + sign_efi_bins(vm, self.db) + vm.param_set('platform', 'secureboot', True) + boot_and_check_sb_succeeded(vm) + + def test_boot_fails_when_vm_db_set_and_images_unsigned(self, uefi_vm): + vm = uefi_vm + vm.install_uefi_certs([self.PK, self.KEK, self.db]) + vm.param_set('platform', 'secureboot', True) + boot_and_check_sb_failed(vm) + + def test_boot_succeeds_when_vm_certs_set_and_sb_disabled(self, uefi_vm): + vm = uefi_vm + vm.install_uefi_certs([self.PK, self.KEK, self.db]) + vm.param_set('platform', 'secureboot', False) + boot_and_check_no_sb_errors(vm) + + def test_boot_fails_when_vm_dbx_revokes_signed_images(self, uefi_vm): + vm = uefi_vm + vm.install_uefi_certs([self.PK, self.KEK, self.db, self.dbx]) + sign_efi_bins(vm, self.db) + vm.param_set('platform', 'secureboot', True) + boot_and_check_sb_failed(vm) + + def test_boot_success_when_initial_vm_keys_not_signed_by_parent(self, uefi_vm): + vm = uefi_vm + PK, KEK, db, _ = generate_keys(self_signed=True) + vm.install_uefi_certs([PK, KEK, db]) + sign_efi_bins(vm, db) + vm.param_set('platform', 'secureboot', True) + boot_and_check_sb_succeeded(vm) + + def test_sb_off_really_means_off(self, uefi_vm): + vm = uefi_vm + vm.install_uefi_certs([self.PK, self.KEK, self.db]) + sign_efi_bins(vm, self.db) + vm.param_set('platform', 'secureboot', False) + 
vm.start() + vm.wait_for_vm_running_and_ssh_up() + logging.info("Check that SB is NOT enabled according to the OS.") + assert not vm.booted_with_secureboot() + + +@pytest.mark.usefixtures("host_at_least_8_3") +@pytest.mark.usefixtures("windows_vm") +class TestGuestWindowsUEFISecureBoot: + @pytest.fixture(autouse=True) + def setup_and_cleanup(self, uefi_vm_and_snapshot): + vm, snapshot = uefi_vm_and_snapshot + yield + revert_vm_state(vm, snapshot) + + @pytest.mark.small_vm # test on the smallest Windows VM, if that means anything with Windows + def test_windows_fails(self, uefi_vm): + vm = uefi_vm + PK, KEK, db, _ = generate_keys(self_signed=True) + vm.install_uefi_certs([PK, KEK, db]) + vm.param_set('platform', 'secureboot', True) + boot_and_check_sb_failed(vm) + + @pytest.mark.multi_vms # test that SB works on every Windows VM we have + def test_windows_succeeds(self, uefi_vm): + vm = uefi_vm + vm.param_set('platform', 'secureboot', True) + # Install certs in the VM. They must be official MS certs. 
+ # We install them first in the pool with `secureboot-certs install`, which requires internet access + logging.info("Install MS certs on pool with secureboot-certs install") + vm.host.ssh(['secureboot-certs', 'install']) + vm.host.pool.clear_custom_uefi_certs() + # Now install the default pool certs in the VM + vm.set_uefi_user_mode() + boot_and_check_sb_succeeded(vm) + + +@pytest.mark.small_vm +@pytest.mark.usefixtures("host_at_least_8_3") +class TestCertsMissingAndSbOn: + @pytest.fixture(autouse=True) + def setup_and_cleanup(self, uefi_vm_and_snapshot): + vm, snapshot = uefi_vm_and_snapshot + vm.param_set('platform', 'secureboot', True) + yield + revert_vm_state(vm, snapshot) + + def test_setup_mode_and_sb_on(self, uefi_vm): + vm = uefi_vm + vm.set_uefi_setup_mode() + boot_and_check_no_sb_errors(vm) + + def test_only_pk_present_but_sb_on(self, uefi_vm): + vm = uefi_vm + PK, _, _, _ = generate_keys() + vm.install_uefi_certs([PK]) + boot_and_check_sb_failed(vm) + + def test_only_pk_and_kek_present_but_sb_on(self, uefi_vm): + vm = uefi_vm + PK, KEK, _, _ = generate_keys() + vm.install_uefi_certs([PK, KEK]) + boot_and_check_sb_failed(vm) + + def test_only_pk_and_db_present_but_sb_on(self, uefi_vm): + vm = uefi_vm + PK, _, db, _ = generate_keys() + vm.install_uefi_certs([PK, db]) + boot_and_check_sb_succeeded(vm) + +@pytest.mark.small_vm +@pytest.mark.usefixtures("host_at_least_8_3") +@pytest.mark.usefixtures("unix_vm") +class TestUEFIKeyExchange: + @pytest.fixture(autouse=True) + def setup_and_cleanup(self, uefi_vm_and_snapshot): + vm, snapshot = uefi_vm_and_snapshot + yield + revert_vm_state(vm, snapshot) + + def test_key_exchanges(self, uefi_vm): + vm = uefi_vm + vm.set_uefi_setup_mode() + + _test_key_exchanges(vm) diff --git a/tests/uefi_sb/utils.py b/tests/uefi_sb/utils.py new file mode 100644 index 000000000..d9a737729 --- /dev/null +++ b/tests/uefi_sb/utils.py @@ -0,0 +1,175 @@ +import hashlib +import logging + +from lib.commands import SSHCommandFailed +from
lib.common import wait_for +from lib.efi import EFIAuth, EFI_AT_ATTRS_BYTES, get_md5sum_from_auth, get_secure_boot_guid + +VM_SECURE_BOOT_FAILED = 'VM_SECURE_BOOT_FAILED' + +def generate_keys(self_signed=False, as_dict=False): + logging.info('Generating keys' + (' (self signed)' if self_signed else '')) + PK = EFIAuth('PK') + KEK = EFIAuth('KEK') + db = EFIAuth('db') + + if self_signed: + PK.sign_auth(PK) + KEK.sign_auth(KEK) + db.sign_auth(db) + else: + PK.sign_auth(PK) + PK.sign_auth(KEK) + KEK.sign_auth(db) + + # For our tests the dbx blacklists anything signed by the db + dbx = EFIAuth.copy(db, name='dbx') + + if as_dict: + return { + 'PK': PK, + 'KEK': KEK, + 'db': db, + 'dbx': dbx + } + else: + return PK, KEK, db, dbx + +def revert_vm_state(vm, snapshot): + try: + snapshot.revert() + finally: + # Messages may be populated from previous tests and may + # interfere with future tests, so remove them + logging.info('> remove guest SB messages') + vm.rm_messages(VM_SECURE_BOOT_FAILED) + +def boot_and_check_sb_failed(vm): + vm.start() + wait_for( + lambda: vm.get_messages(VM_SECURE_BOOT_FAILED), + 'Wait for message %s' % VM_SECURE_BOOT_FAILED + ) + + # If there is a VM_SECURE_BOOT_FAILED message and yet the OS still + # successfully booted, this is a uefistored bug + assert vm.is_in_uefi_shell() + +def boot_and_check_no_sb_errors(vm): + vm.start() + vm.wait_for_vm_running_and_ssh_up() + logging.info("Verify there's no %s message" % VM_SECURE_BOOT_FAILED) + assert not vm.get_messages(VM_SECURE_BOOT_FAILED) + +def boot_and_check_sb_succeeded(vm): + boot_and_check_no_sb_errors(vm) + logging.info("Check that SB is enabled according to the OS.") + assert vm.booted_with_secureboot() + +def sign_efi_bins(vm, db): + """ + Sign a unix VM's EFI binaries. + + Boots the VM if it is halted, signs the bootloader, and halts the + VM again (if halted was its original state). 
+ """ + shutdown = not vm.is_running() + if shutdown: + vm.start() + vm.wait_for_vm_running_and_ssh_up() + + logging.info('> Sign bootloader') + vm.sign_efi_bins(db) + + if shutdown: + vm.shutdown(verify=True) + +def _test_key_exchanges(vm): + PK = EFIAuth('PK') + null_PK = EFIAuth('PK', is_null=True) + new_PK = EFIAuth('PK') + bad_PK = EFIAuth('PK') + + KEK = EFIAuth('KEK') + null_KEK = EFIAuth('KEK', is_null=True) + + db_from_KEK = EFIAuth('db') + db_from_PK = EFIAuth('db') + null_db_from_KEK = EFIAuth('db', is_null=True) + null_db_from_PK = EFIAuth('db', is_null=True) + + PK.sign_auth(PK) + PK.sign_auth(null_PK) + PK.sign_auth(KEK) + PK.sign_auth(null_KEK) + PK.sign_auth(new_PK) + PK.sign_auth(db_from_PK) + PK.sign_auth(null_db_from_PK) + PK.sign_auth(db_from_KEK) + PK.sign_auth(null_db_from_KEK) + KEK.sign_auth(db_from_KEK) + KEK.sign_auth(null_db_from_KEK) + bad_PK.sign_auth(bad_PK) + + vm.start() + vm.wait_for_vm_running_and_ssh_up() + + # at this point we should have a VM with no certs, on a pool with no certs either + + tests = [ + # Set the PK + (PK, True), + # Clear the PK + (null_PK, True), + # Set the PK again + (PK, True), + # Set a PK with the wrong sig, should fail and PK should be unchanged + (bad_PK, False), + # Set, clear, and reset the KEK + (KEK, True), + (null_KEK, True), + (KEK, True), + # Set and clear the db signed by the KEK + (db_from_KEK, True), + (null_db_from_KEK, True), + # Set and clear the db signed by the PK + (db_from_PK, True), + (null_db_from_PK, True), + # Set a new PK + (new_PK, True), + # Set old PK, should fail due to expired timestamp + (PK, False), + ] + + for i, (auth, should_succeed) in enumerate(tests): + logging.info('> Testing {} ({})'.format(auth.name, i)) + + ok = True + try: + vm.set_efi_var(auth.name, auth.guid, + EFI_AT_ATTRS_BYTES, auth.auth_data) + except SSHCommandFailed: + ok = False + + if (should_succeed and not ok) or (ok and not should_succeed): + raise AssertionError('Failed to set {} {}'.format(i, 
auth.name)) + +def check_disk_cert_md5sum(host, key, reference_file, do_assert=True): + auth_filepath_on_host = f'{host.varstore_dir()}/{key}.auth' + # Honor do_assert=False here too: callers polling via wait_for() need a + # False return, not an AssertionError, while the file is not yet synced. + if not host.file_exists(auth_filepath_on_host): + if do_assert: + raise AssertionError(f'{auth_filepath_on_host} does not exist') + return False + with open(reference_file, 'rb') as rf: + reference_md5 = hashlib.md5(rf.read()).hexdigest() + host_disk_md5 = host.ssh([f'md5sum {auth_filepath_on_host} | cut -d " " -f 1']) + logging.debug('Reference MD5: %s' % reference_md5) + logging.debug('Host disk MD5: %s' % host_disk_md5) + if do_assert: + assert host_disk_md5 == reference_md5 + else: + return host_disk_md5 == reference_md5 + +def check_vm_cert_md5sum(vm, key, reference_file): + res = vm.host.ssh(['varstore-get', vm.uuid, get_secure_boot_guid(key).as_str(), key], + check=False, simple_output=False, decode=False) + assert res.returncode == 0, f"Cert {key} must be present" + reference_md5 = get_md5sum_from_auth(reference_file) + assert hashlib.md5(res.stdout).hexdigest() == reference_md5 diff --git a/tests/uefistored/test_cert_inheritance.py b/tests/uefistored/test_cert_inheritance.py deleted file mode 100644 index 9a2483e7d..000000000 --- a/tests/uefistored/test_cert_inheritance.py +++ /dev/null @@ -1,371 +0,0 @@ -import hashlib -import logging -import pytest - -from lib.efi import get_secure_boot_guid, esl_from_auth_file - -from .utils import generate_keys, revert_vm_state - -# Requirements: -# On the test runner: -# - See requirements documented in the project's README.md for Guest UEFI Secure Boot tests -# From --hosts parameter: -# - host(A1): XCP-ng host >= 8.2 (+ updates) (or >= 8.3 for other tests) -# with UEFI certs either absent, or present and consistent (state will be saved and restored) -# Master of a, at least, 2 hosts pool -# - hostB1: XCP-ng host >= 8.3 (required only if hostA1 is already >=8.3, else no hostB1 is needed) -# This host will be joined and ejected from pool A, it means its state will be completely reinitialized from scratch - -pytestmark =
pytest.mark.default_vm('mini-linux-x86_64-uefi') - -def install_certs_to_disks(pool, certs_dict, keys): - for host in pool.hosts: - logging.debug('Installing to host %s:' % host.hostname_or_ip) - for key in keys: - value = certs_dict[key].auth - with open(value, 'rb') as f: - hash = hashlib.md5(f.read()).hexdigest() - logging.debug(' - key: %s, value: %s' % (key, hash)) - host.scp(value, f'{host.varstore_dir()}/{key}.auth') - -def check_disk_cert_md5sum(host, key, reference_file): - auth_filepath_on_host = f'{host.varstore_dir()}/{key}.auth' - assert host.file_exists(auth_filepath_on_host) - with open(reference_file, 'rb') as rf: - reference_md5 = hashlib.md5(rf.read()).hexdigest() - host_disk_md5 = host.ssh([f'md5sum {auth_filepath_on_host} | cut -d " " -f 1']) - logging.debug('Reference MD5: %s' % reference_md5) - logging.debug('Host disk MD5: %s' % host_disk_md5) - assert host_disk_md5 == reference_md5 - -@pytest.mark.small_vm -@pytest.mark.usefixtures("host_less_than_8_3", "pool_without_uefi_certs") -class TestPoolToDiskCertInheritanceAtVmStart: - @pytest.fixture(autouse=True) - def setup_and_cleanup(self, uefi_vm_and_snapshot): - vm, snapshot = uefi_vm_and_snapshot - yield - # Revert the VM, which has the interesting effect of also shutting it down instantly - revert_vm_state(vm, snapshot) - # clear pool certs for next test - vm.host.pool.clear_uefi_certs() - - def test_pool_certs_present_and_disk_certs_absent(self, uefi_vm): - vm = uefi_vm - # start with certs on pool and no certs on host disks - pool_auths = generate_keys(as_dict=True) - vm.host.pool.install_custom_uefi_certs([pool_auths[key] for key in ['PK', 'KEK', 'db', 'dbx']]) - # start a VM so that certs may be synced to disk if appropriate - vm.start() - residence_host = vm.get_residence_host() - logging.info('Check that the certs have been written on the disk of the host that started the VM.') - for key in ['PK', 'KEK', 'db', 'dbx']: - check_disk_cert_md5sum(residence_host, key, pool_auths[key].auth) 
- - def test_pool_certs_present_and_disk_certs_different(self, uefi_vm): - vm = uefi_vm - # start with different certs on pool and disks - pool_auths = generate_keys(as_dict=True) - disk_auths = generate_keys(as_dict=True) - vm.host.pool.install_custom_uefi_certs([pool_auths[key] for key in ['PK', 'KEK', 'db', 'dbx']]) - logging.info("Installing different certs to hosts disks") - install_certs_to_disks(vm.host.pool, disk_auths, ['PK', 'KEK', 'db', 'dbx']) - # start a VM so that certs may be synced to disk if appropriate - vm.start() - residence_host = vm.get_residence_host() - logging.info('Check that the certs have been updated on the disk of the host that started the VM.') - for key in ['PK', 'KEK', 'db', 'dbx']: - check_disk_cert_md5sum(residence_host, key, pool_auths[key].auth) - - def test_pool_certs_absent_and_disk_certs_present(self, uefi_vm): - vm = uefi_vm - # start with no pool certs and with certs on disks - disk_auths = generate_keys(as_dict=True) - logging.info("Installing certs to hosts disks") - install_certs_to_disks(vm.host.pool, disk_auths, ['PK', 'KEK', 'db', 'dbx']) - # start a VM so that certs may be synced to disk if appropriate - vm.start() - residence_host = vm.get_residence_host() - logging.info('Check that the certs on disk have not changed after the VM started.') - for key in ['PK', 'KEK', 'db', 'dbx']: - check_disk_cert_md5sum(residence_host, key, disk_auths[key].auth) - - def test_pool_certs_present_and_some_different_disk_certs_present(self, uefi_vm): - vm = uefi_vm - # start with all certs on pool and just two certs on disks - pool_auths = generate_keys(as_dict=True) - disk_auths = generate_keys(as_dict=True) - vm.host.pool.install_custom_uefi_certs([pool_auths[key] for key in ['PK', 'KEK', 'db', 'dbx']]) - logging.info("Installing different certs to hosts disks") - install_certs_to_disks(vm.host.pool, disk_auths, ['KEK', 'dbx']) - # start a VM so that certs may be synced to disk if appropriate - vm.start() - residence_host = 
vm.get_residence_host() - logging.info('Check that the certs have been added or updated on the disk of the host that started the VM.') - for key in ['PK', 'KEK', 'db', 'dbx']: - check_disk_cert_md5sum(residence_host, key, pool_auths[key].auth) - - def test_pool_certs_present_except_dbx_and_disk_certs_different(self, uefi_vm): - vm = uefi_vm - # start with no dbx on pool and all, different, certs on disks - pool_auths = generate_keys(as_dict=True) - disk_auths = generate_keys(as_dict=True) - vm.host.pool.install_custom_uefi_certs([pool_auths[key] for key in ['PK', 'KEK', 'db']]) - logging.info("Installing different certs to hosts disks, including a dbx") - install_certs_to_disks(vm.host.pool, disk_auths, ['PK', 'KEK', 'db', 'dbx']) - # start a VM so that certs may be synced to disk if appropriate - vm.start() - residence_host = vm.get_residence_host() - logging.info('Check that the certs have been updated on the disk of the host that started the VM, except dbx.') - for key in ['PK', 'KEK', 'db']: - check_disk_cert_md5sum(residence_host, key, pool_auths[key].auth) - check_disk_cert_md5sum(residence_host, 'dbx', disk_auths[key].auth) - - def test_pool_certs_present_and_disk_certs_present_and_same(self, uefi_vm): - vm = uefi_vm - # start with certs on pool and no certs on host disks - pool_auths = generate_keys(as_dict=True) - vm.host.pool.install_custom_uefi_certs([pool_auths[key] for key in ['PK', 'KEK', 'db', 'dbx']]) - install_certs_to_disks(vm.host.pool, pool_auths, ['PK', 'KEK', 'db', 'dbx']) - # start a VM so that certs may be synced to disk if appropriate - vm.start() - residence_host = vm.get_residence_host() - logging.info('Check that the certs have been written on the disk of the host that started the VM.') - for key in ['PK', 'KEK', 'db', 'dbx']: - check_disk_cert_md5sum(residence_host, key, pool_auths[key].auth) - -@pytest.mark.usefixtures("host_at_least_8_3", "pool_without_uefi_certs") -class TestPoolToDiskCertInheritanceAtXapiStart: - 
@pytest.fixture(autouse=True) - def setup_and_cleanup(self, host): - yield - host.pool.clear_uefi_certs() - - def test_pool_certs_present_and_disk_certs_absent(self, host): - # start with certs on pool and no certs on host disks - pool_auths = generate_keys(as_dict=True) - host.pool.install_custom_uefi_certs([pool_auths[key] for key in ['PK', 'KEK', 'db', 'dbx']]) - # Make sure certs are synced to disk - host.restart_toolstack(verify=True) - logging.info('Check that the certs have been written on the disk of the host.') - for key in ['PK', 'KEK', 'db', 'dbx']: - check_disk_cert_md5sum(host, key, pool_auths[key].auth) - - def test_pool_certs_present_and_disk_certs_different(self, host): - # start with different certs on pool and disks - pool_auths = generate_keys(as_dict=True) - disk_auths = generate_keys(as_dict=True) - host.pool.install_custom_uefi_certs([pool_auths[key] for key in ['PK', 'KEK', 'db', 'dbx']]) - logging.info("Installing different certs to hosts disks") - install_certs_to_disks(host.pool, disk_auths, ['PK', 'KEK', 'db', 'dbx']) - # Make sure certs are synced to disk - host.restart_toolstack(verify=True) - logging.info('Check that the certs have been updated on the disk of the host.') - for key in ['PK', 'KEK', 'db', 'dbx']: - check_disk_cert_md5sum(host, key, pool_auths[key].auth) - - # FIXME: this behaviour will never exist in 8.3: no certs will mean "use the default certs" - @pytest.mark.usefixtures("xfail_on_xcpng_8_3") - def test_pool_certs_absent_and_disk_certs_present(self, host): - # start with no pool certs and with certs on disks - disk_auths = generate_keys(as_dict=True) - logging.info("Installing certs to hosts disks") - install_certs_to_disks(host.pool, disk_auths, ['PK', 'KEK', 'db', 'dbx']) - host.restart_toolstack(verify=True) - logging.info('Check that the certs on disk have been erased since there is none in the pool.') - for key in ['PK', 'KEK', 'db', 'dbx']: - assert not host.file_exists(f'{host.varstore_dir()}/{key}.auth') - - 
def test_pool_certs_present_and_some_different_disk_certs_present(self, host): - # start with all certs on pool and just two certs on disks - pool_auths = generate_keys(as_dict=True) - disk_auths = generate_keys(as_dict=True) - host.pool.install_custom_uefi_certs([pool_auths[key] for key in ['PK', 'KEK', 'db', 'dbx']]) - logging.info("Installing different certs to hosts disks") - install_certs_to_disks(host.pool, disk_auths, ['KEK', 'dbx']) - # Make sure certs are synced to disk - host.restart_toolstack(verify=True) - logging.info('Check that the certs have been added or updated on the disk of the host.') - for key in ['PK', 'KEK', 'db', 'dbx']: - check_disk_cert_md5sum(host, key, pool_auths[key].auth) - - @pytest.mark.usefixtures("xfail_on_xcpng_8_3") - def test_pool_certs_present_except_dbx_and_disk_certs_different(self, host): - # start with no dbx on pool and all, different, certs on disks - pool_auths = generate_keys(as_dict=True) - disk_auths = generate_keys(as_dict=True) - host.pool.install_custom_uefi_certs([pool_auths[key] for key in ['PK', 'KEK', 'db']]) - logging.info("Installing different certs to hosts disks, including a dbx") - install_certs_to_disks(host.pool, disk_auths, ['PK', 'KEK', 'db', 'dbx']) - # Make sure certs are synced to disk - host.restart_toolstack(verify=True) - logging.info("Check host disk certs are in sync with pool's ones") - for key in ['PK', 'KEK', 'db']: - check_disk_cert_md5sum(host, key, pool_auths[key].auth) - - assert not host.file_exists(f'{host.varstore_dir()}/dbx.auth') - - def test_pool_certs_present_and_disk_certs_present_and_same(self, host): - # start with certs on pool and no certs on host disks - pool_auths = generate_keys(as_dict=True) - host.pool.install_custom_uefi_certs([pool_auths[key] for key in ['PK', 'KEK', 'db', 'dbx']]) - install_certs_to_disks(host.pool, pool_auths, ['PK', 'KEK', 'db', 'dbx']) - # Make sure certs are synced to disk - host.restart_toolstack(verify=True) - logging.info('Check that the certs 
have been written on the disk of the host.') - for key in ['PK', 'KEK', 'db', 'dbx']: - check_disk_cert_md5sum(host, key, pool_auths[key].auth) - -@pytest.mark.small_vm -@pytest.mark.usefixtures("pool_without_uefi_certs") -class TestPoolToVMCertInheritance: - @pytest.fixture(autouse=True) - def setup_and_cleanup(self, uefi_vm_and_snapshot): - vm, snapshot = uefi_vm_and_snapshot - yield - # Revert the VM, which has the interesting effect of also shutting it down instantly - revert_vm_state(vm, snapshot) - # clear pool certs for next test - vm.host.pool.clear_uefi_certs() - - def is_vm_cert_present(self, vm, key): - res = vm.host.ssh(['varstore-get', vm.uuid, get_secure_boot_guid(key).as_str(), key], - check=False, simple_output=False, decode=False) - return res.returncode == 0 - - def get_md5sum_from_auth(self, auth): - return hashlib.md5(esl_from_auth_file(auth)).hexdigest() - - def check_vm_cert_md5sum(self, vm, key, reference_file): - res = vm.host.ssh(['varstore-get', vm.uuid, get_secure_boot_guid(key).as_str(), key], - check=False, simple_output=False, decode=False) - assert res.returncode == 0, f"Cert {key} must be present" - reference_md5 = self.get_md5sum_from_auth(reference_file) - assert hashlib.md5(res.stdout).hexdigest() == reference_md5 - - def test_pool_certs_absent_and_vm_certs_absent(self, uefi_vm): - vm = uefi_vm - # start with no certs on pool and no certs in the VM - # start the VM so that certs may be synced to it if appropriate - vm.start() - logging.info("Check that the VM still has no certs") - for key in ['PK', 'KEK', 'db', 'dbx']: - assert not self.is_vm_cert_present(vm, key) - - def test_pool_certs_present_and_vm_certs_absent(self, uefi_vm): - vm = uefi_vm - # start with certs on pool and no certs in the VM - pool_auths = generate_keys(as_dict=True) - vm.host.pool.install_custom_uefi_certs([pool_auths[key] for key in ['PK', 'KEK', 'db', 'dbx']]) - # start the VM so that certs may be synced to it if appropriate - vm.start() - 
logging.info("Check that the VM got the pool certs") - for key in ['PK', 'KEK', 'db', 'dbx']: - self.check_vm_cert_md5sum(vm, key, pool_auths[key].auth) - - def test_pool_certs_present_and_vm_certs_present(self, uefi_vm): - vm = uefi_vm - # start with all certs on pool and in the VM - pool_auths = generate_keys(as_dict=True) - vm_auths = generate_keys(as_dict=True) - vm.host.pool.install_custom_uefi_certs([pool_auths[key] for key in ['PK', 'KEK', 'db', 'dbx']]) - vm.install_uefi_certs([vm_auths[key] for key in ['PK', 'KEK', 'db', 'dbx']]) - # start the VM so that certs may be synced to it if appropriate - vm.start() - logging.info("Check that the VM certs are unchanged") - for key in ['PK', 'KEK', 'db', 'dbx']: - self.check_vm_cert_md5sum(vm, key, vm_auths[key].auth) - - def test_pools_certs_absent_and_vm_certs_present(self, uefi_vm): - vm = uefi_vm - # start with no certs on pool and all certs in the VM - vm_auths = generate_keys(as_dict=True) - vm.install_uefi_certs([vm_auths[key] for key in ['PK', 'KEK', 'db', 'dbx']]) - # start the VM so that certs may be synced to it if appropriate - vm.start() - logging.info("Check that the VM certs are unchanged") - for key in ['PK', 'KEK', 'db', 'dbx']: - self.check_vm_cert_md5sum(vm, key, vm_auths[key].auth) - - @pytest.mark.usefixtures("host_less_than_8_3") - def test_pool_certs_partially_present_and_vm_certs_partially_present(self, uefi_vm): - vm = uefi_vm - # start with some certs on pool and some certs in the VM, partially overlaping - pool_auths = generate_keys(as_dict=True) - vm_auths = generate_keys(as_dict=True) - vm.host.pool.install_custom_uefi_certs([pool_auths[key] for key in ['PK', 'KEK', 'db']]) - # don't ask why the VM only has db and dbx certs. It's for the test. 
- vm.install_uefi_certs([vm_auths[key] for key in ['db', 'dbx']]) - # start the VM so that certs may be synced to it if appropriate - vm.start() - logging.info("Check that the VM db and dbx certs are unchanged and PK and KEK were updated") - for key in ['PK', 'KEK']: - self.check_vm_cert_md5sum(vm, key, pool_auths[key].auth) - for key in ['db', 'dbx']: - self.check_vm_cert_md5sum(vm, key, vm_auths[key].auth) - -@pytest.mark.usefixtures("host_at_least_8_3", "hostA2", "pool_without_uefi_certs") -class TestPoolToDiskCertPropagationToAllHosts: - @pytest.fixture(autouse=True) - def setup_and_cleanup(self, host): - yield - host.pool.clear_uefi_certs() - - def test_set_pool_certificates(self, host): - keys = ['PK', 'KEK', 'db', 'dbx'] - pool_auths = generate_keys(as_dict=True) - host.pool.install_custom_uefi_certs([pool_auths[key] for key in keys]) - for h in host.pool.hosts: - logging.info(f"Check Pool.set_uefi_certificates update host {h} certificate on disk.") - for key in keys: - check_disk_cert_md5sum(h, key, pool_auths[key].auth) - - def test_set_pool_certificates_partial(self, host): - keys = ['PK', 'KEK', 'db'] - missing_key = 'dbx' - pool_auths = generate_keys(as_dict=True) - host.pool.install_custom_uefi_certs([pool_auths[key] for key in keys]) - for h in host.pool.hosts: - logging.info(f"Check Pool.set_uefi_certificates update host {h} certificate on disk.") - for key in keys: - check_disk_cert_md5sum(h, key, pool_auths[key].auth) - assert not h.file_exists(f'{host.varstore_dir()}/{missing_key}.auth') - - def test_clear_certificates_from_pool(self, host): - keys = ['PK', 'KEK', 'db', 'dbx'] - pool_auths = generate_keys(as_dict=True) - host.pool.install_custom_uefi_certs([pool_auths[key] for key in keys]) - host.pool.clear_uefi_certs() - for h in host.pool.hosts: - logging.info(f"Check host {h} has no certificate on disk.") - for key in keys: - assert not h.file_exists(f'{host.varstore_dir()}/{key}.auth') - -@pytest.mark.usefixtures("host_at_least_8_3", 
"pool_without_uefi_certs") -class TestPoolToDiskCertInheritanceOnPoolJoin: - @pytest.fixture(scope='function') - def keys_auths_for_joined_host(self, host, hostB1): - from packaging import version - version_str = "8.3" - if not hostB1.xcp_version >= version.parse(version_str): - pytest.skip(f"This test requires a second XCP-ng pool with version >= {version_str}") - - # Install certs before host join - keys = ['PK', 'KEK', 'db', 'dbx'] - pool_auths = generate_keys(as_dict=True) - host.pool.install_custom_uefi_certs([pool_auths[key] for key in keys]) - - logging.info(f"> Join host {hostB1} to pool {host} after certificates installed.") - hostB1.join_pool(host.pool) - joined_host = host.pool.get_host_by_uuid(hostB1.uuid) - yield keys, pool_auths, joined_host - - logging.info(f"< Eject host {joined_host} from pool {host}.") - # Warning: triggers a reboot of ejected host. - host.pool.eject_host(joined_host) - host.pool.clear_uefi_certs() - - def test_host_certificates_updated_after_join(self, keys_auths_for_joined_host): - keys, pool_auths, joined_host = keys_auths_for_joined_host - for key in keys: - check_disk_cert_md5sum(joined_host, key, pool_auths[key].auth) diff --git a/tests/uefistored/utils.py b/tests/uefistored/utils.py deleted file mode 100644 index 056f05e12..000000000 --- a/tests/uefistored/utils.py +++ /dev/null @@ -1,42 +0,0 @@ -import logging - -from lib.efi import EFIAuth - -VM_SECURE_BOOT_FAILED = 'VM_SECURE_BOOT_FAILED' - -def generate_keys(self_signed=False, as_dict=False): - logging.info('Generating keys' + (' (self signed)' if self_signed else '')) - PK = EFIAuth('PK') - KEK = EFIAuth('KEK') - db = EFIAuth('db') - - if self_signed: - PK.sign_auth(PK) - KEK.sign_auth(KEK) - db.sign_auth(db) - else: - PK.sign_auth(PK) - PK.sign_auth(KEK) - KEK.sign_auth(db) - - # For our tests the dbx blacklists anything signed by the db - dbx = EFIAuth.copy(db, name='dbx') - - if as_dict: - return { - 'PK': PK, - 'KEK': KEK, - 'db': db, - 'dbx': dbx - } - else: - 
return PK, KEK, db, dbx - -def revert_vm_state(vm, snapshot): - try: - snapshot.revert() - finally: - # Messages may be populated from previous tests and may - # interfere with future tests, so remove them - logging.info('> remove guest SB messages') - vm.rm_messages(VM_SECURE_BOOT_FAILED) diff --git a/vm_data.py-dist b/vm_data.py-dist index 5964cd726..671ac577d 100644 --- a/vm_data.py-dist +++ b/vm_data.py-dist @@ -9,7 +9,7 @@ VMS = { "small_vm": "", # small VM on which the guest tools are installable. Excludes alpine currently. "small_vm_unix_tools": "", - # small UEFI VM on which efitools is installed, for some uefistored tests + # small UEFI VM on which efitools is installed, for some uefistored/varstored tests "small_vm_efitools": "", # "small" Windows VM (UEFI) "small_vm_windows": "",