diff --git a/openhcl/underhill_mem/src/lib.rs b/openhcl/underhill_mem/src/lib.rs
index 01b5d05283..9af65c91a8 100644
--- a/openhcl/underhill_mem/src/lib.rs
+++ b/openhcl/underhill_mem/src/lib.rs
@@ -42,6 +42,7 @@ use std::sync::Arc;
 use thiserror::Error;
 use virt::IsolationType;
 use virt_mshv_vtl::ProtectIsolatedMemory;
+use virt_mshv_vtl::TlbFlushLockAccess;
 use vm_topology::memory::MemoryLayout;
 use x86defs::snp::SevRmpAdjust;
 use x86defs::tdx::GpaVmAttributes;
@@ -265,7 +266,7 @@ impl MemoryAcceptor {
 
     /// Apply the initial protections on lower-vtl memory.
     ///
-    /// After initialization, the default protections should be applied.
+    /// After initialization, the default protections should be applied.
     pub fn apply_initial_lower_vtl_protections(
         &self,
         range: MemoryRange,
@@ -337,7 +338,6 @@ impl MemoryAcceptor {
                     permissions: rmpadjust,
                     vtl: vtl.into(),
                 })
-                // TODO SNP: Flush TLB
             }
             GpaVtlPermissions::Tdx((attributes, mask)) => {
                 // For TDX VMs, the permissions apply to the specified VTL.
@@ -375,13 +375,24 @@ struct HypercallOverlay {
     permissions: GpaVtlPermissions,
 }
 
+// TODO CVM GUEST VSM: This type needs to go away, and proper functionality needs
+// to be added here, but resolving the layering with hv1_emulator is complicated.
+struct NoOpTlbFlushLockAccess;
+impl TlbFlushLockAccess for NoOpTlbFlushLockAccess {
+    fn flush(&mut self, _vtl: GuestVtl) {}
+    fn flush_entire(&mut self) {}
+    fn set_wait_for_tlb_locks(&mut self, _vtl: GuestVtl) {}
+}
+
 impl VtlProtectHypercallOverlay for HypercallOverlayProtector {
     fn change_overlay(&self, gpn: u64) {
-        self.protector.change_hypercall_overlay(self.vtl, gpn)
+        self.protector
+            .change_hypercall_overlay(self.vtl, gpn, &mut NoOpTlbFlushLockAccess)
     }
 
     fn disable_overlay(&self) {
-        self.protector.disable_hypercall_overlay(self.vtl)
+        self.protector
+            .disable_hypercall_overlay(self.vtl, &mut NoOpTlbFlushLockAccess)
     }
 }
 
@@ -471,7 +482,12 @@ impl HardwareIsolatedMemoryProtector {
 }
 
 impl ProtectIsolatedMemory for HardwareIsolatedMemoryProtector {
-    fn change_host_visibility(&self, shared: bool, gpns: &[u64]) -> Result<(), (HvError, usize)> {
+    fn change_host_visibility(
+        &self,
+        shared: bool,
+        gpns: &[u64],
+        tlb_access: &mut dyn TlbFlushLockAccess,
+    ) -> Result<(), (HvError, usize)> {
         // Validate the ranges are RAM.
         for &gpn in gpns {
             if !self
@@ -531,7 +547,13 @@ impl ProtectIsolatedMemory for HardwareIsolatedMemoryProtector {
             clear_bitmap.update_bitmap(range, false);
         }
 
-        // TODO SNP: flush concurrent accessors and TLB.
+        // TODO SNP: flush concurrent accessors.
+        if let IsolationType::Snp = self.acceptor.isolation {
+            // We need to ensure that the guest TLB has been fully flushed since
+            // the unaccept operation is not guaranteed to do so in hardware,
+            // and the hypervisor is also not trusted with TLB hygiene.
+            tlb_access.flush_entire();
+        }
 
         // TODO SNP: check list of locks, roll back bitmap changes if there was one.
 
@@ -646,6 +668,7 @@ impl ProtectIsolatedMemory for HardwareIsolatedMemoryProtector {
         &self,
         vtl: GuestVtl,
         vtl_protections: HvMapGpaFlags,
+        tlb_access: &mut dyn TlbFlushLockAccess,
     ) -> Result<(), HvError> {
         // Prevent visibility changes while VTL protections are being
        // applied.
@@ -693,6 +716,10 @@ impl ProtectIsolatedMemory for HardwareIsolatedMemoryProtector {
         self.apply_protections_with_overlay_handling(vtl, &ranges, vtl_protections)
             .expect("applying vtl protections should succeed");
 
+        // Invalidate the entire VTL 0 TLB to ensure that the new permissions
+        // are observed.
+        tlb_access.flush(GuestVtl::Vtl0);
+
         Ok(())
     }
 
@@ -701,6 +728,7 @@ impl ProtectIsolatedMemory for HardwareIsolatedMemoryProtector {
         vtl: GuestVtl,
         gpns: &[u64],
         protections: HvMapGpaFlags,
+        tlb_access: &mut dyn TlbFlushLockAccess,
     ) -> Result<(), (HvError, usize)> {
         // Validate the ranges are RAM.
         for &gpn in gpns {
@@ -737,7 +765,13 @@ impl ProtectIsolatedMemory for HardwareIsolatedMemoryProtector {
         self.apply_protections_with_overlay_handling(vtl, &ranges, protections)
             .expect("applying vtl protections should succeed");
 
-        // TODO CVM GUEST VSM: flush TLB and wait for the tlb lock
+        // Since page protections were modified, we must invalidate the entire
+        // VTL 0 TLB to ensure that the new permissions are observed, and wait for
+        // other CPUs to release all guest mappings before declaring that the VTL
+        // protection change has completed.
+        tlb_access.flush(GuestVtl::Vtl0);
+        tlb_access.set_wait_for_tlb_locks(vtl);
+
         Ok(())
     }
 
@@ -751,7 +785,12 @@ impl ProtectIsolatedMemory for HardwareIsolatedMemoryProtector {
         })
     }
 
-    fn change_hypercall_overlay(&self, vtl: GuestVtl, gpn: u64) {
+    fn change_hypercall_overlay(
+        &self,
+        vtl: GuestVtl,
+        gpn: u64,
+        tlb_access: &mut dyn TlbFlushLockAccess,
+    ) {
         // Should already have written contents to the page via the guest
         // memory object, confirming that this is a guest page
         assert!(self
@@ -806,14 +845,15 @@ impl ProtectIsolatedMemory for HardwareIsolatedMemoryProtector {
             .apply_protections_from_flags(
                 MemoryRange::new(gpn * HV_PAGE_SIZE..(gpn + 1) * HV_PAGE_SIZE),
                 vtl,
-                HV_MAP_GPA_PERMISSIONS_ALL,
+                HV_MAP_GPA_PERMISSIONS_ALL.with_writable(false),
             )
             .expect("applying vtl protections should succeed");
 
-        // TODO CVM GUEST VSM: flush TLB
+        // Flush the guest TLB to ensure that the new permissions are observed.
+        tlb_access.flush(vtl);
     }
 
-    fn disable_hypercall_overlay(&self, vtl: GuestVtl) {
+    fn disable_hypercall_overlay(&self, vtl: GuestVtl, tlb_access: &mut dyn TlbFlushLockAccess) {
         let _lock = self.inner.lock();
 
         let mut overlay = self.hypercall_overlay[vtl].lock();
@@ -825,7 +865,7 @@ impl ProtectIsolatedMemory for HardwareIsolatedMemoryProtector {
 
         *overlay = None;
 
-        // TODO CVM GUEST VSM: flush TLB
+        tlb_access.flush(vtl);
     }
 
     fn set_vtl1_protections_enabled(&self) {
diff --git a/openhcl/virt_mshv_vtl/src/lib.rs b/openhcl/virt_mshv_vtl/src/lib.rs
index d0e7763709..bda3287905 100644
--- a/openhcl/virt_mshv_vtl/src/lib.rs
+++ b/openhcl/virt_mshv_vtl/src/lib.rs
@@ -1255,7 +1255,12 @@ pub struct UhLateParams<'a> {
 /// Trait for CVM-related protections on guest memory.
 pub trait ProtectIsolatedMemory: Send + Sync {
     /// Changes host visibility on guest memory.
-    fn change_host_visibility(&self, shared: bool, gpns: &[u64]) -> Result<(), (HvError, usize)>;
+    fn change_host_visibility(
+        &self,
+        shared: bool,
+        gpns: &[u64],
+        tlb_access: &mut dyn TlbFlushLockAccess,
+    ) -> Result<(), (HvError, usize)>;
 
     /// Queries host visibility on guest memory.
     fn query_host_visibility(
@@ -1274,6 +1279,7 @@ pub trait ProtectIsolatedMemory: Send + Sync {
         &self,
         vtl: GuestVtl,
         protections: HvMapGpaFlags,
+        tlb_access: &mut dyn TlbFlushLockAccess,
     ) -> Result<(), HvError>;
 
     /// Changes the vtl protections on a range of guest memory.
@@ -1282,6 +1288,7 @@ pub trait ProtectIsolatedMemory: Send + Sync {
         vtl: GuestVtl,
         gpns: &[u64],
         protections: HvMapGpaFlags,
+        tlb_access: &mut dyn TlbFlushLockAccess,
     ) -> Result<(), (HvError, usize)>;
 
     /// Retrieves a protector for the hypercall code page overlay for a target
     /// VTL.
     fn hypercall_overlay_protector(
         &self,
         vtl: GuestVtl,
     ) -> Box<dyn VtlProtectHypercallOverlay>;
 
     /// Changes the overlay for the hypercall code page for a target VTL.
-    fn change_hypercall_overlay(&self, vtl: GuestVtl, gpn: u64);
+    fn change_hypercall_overlay(
+        &self,
+        vtl: GuestVtl,
+        gpn: u64,
+        tlb_access: &mut dyn TlbFlushLockAccess,
+    );
 
     /// Disables the overlay for the hypercall code page for a target VTL.
-    fn disable_hypercall_overlay(&self, vtl: GuestVtl);
+    fn disable_hypercall_overlay(&self, vtl: GuestVtl, tlb_access: &mut dyn TlbFlushLockAccess);
 
     /// Alerts the memory protector that vtl 1 is ready to set vtl protections
     /// on lower-vtl memory, and that these protections should be enforced.
@@ -1306,6 +1318,18 @@ pub trait ProtectIsolatedMemory: Send + Sync {
     fn vtl1_protections_enabled(&self) -> bool;
 }
 
+/// Trait for access to TLB flush and lock machinery.
+pub trait TlbFlushLockAccess {
+    /// Flush the entire TLB for all VPs for the given VTL.
+    fn flush(&mut self, vtl: GuestVtl);
+
+    /// Flush the entire TLB for all VPs for all VTLs.
+    fn flush_entire(&mut self);
+
+    /// Causes the specified VTL on the current VP to wait on all TLB locks.
+    fn set_wait_for_tlb_locks(&mut self, vtl: GuestVtl);
+}
+
 /// A partially built partition. Used to allow querying partition capabilities
 /// before fully instantiating the partition.
 pub struct UhProtoPartition<'a> {
diff --git a/openhcl/virt_mshv_vtl/src/processor/hardware_cvm/mod.rs b/openhcl/virt_mshv_vtl/src/processor/hardware_cvm/mod.rs
index 6313a1dac6..c11406f810 100644
--- a/openhcl/virt_mshv_vtl/src/processor/hardware_cvm/mod.rs
+++ b/openhcl/virt_mshv_vtl/src/processor/hardware_cvm/mod.rs
@@ -15,11 +15,13 @@ use crate::validate_vtl_gpa_flags;
 use crate::GuestVsmState;
 use crate::GuestVsmVtl1State;
 use crate::GuestVtl;
+use crate::TlbFlushLockAccess;
 use crate::WakeReason;
 use guestmem::GuestMemory;
 use hv1_emulator::RequestInterrupt;
 use hv1_hypercall::HvRepResult;
 use hv1_structs::ProcessorSet;
+use hvdef::hypercall::HostVisibilityType;
 use hvdef::hypercall::HvFlushFlags;
 use hvdef::hypercall::TranslateGvaResultCode;
 use hvdef::HvCacheType;
@@ -28,19 +30,24 @@ use hvdef::HvMapGpaFlags;
 use hvdef::HvRegisterVsmPartitionConfig;
 use hvdef::HvRegisterVsmVpSecureVtlConfig;
 use hvdef::HvResult;
+use hvdef::HvSynicSint;
 use hvdef::HvVtlEntryReason;
 use hvdef::HvX64RegisterName;
 use hvdef::Vtl;
 use std::iter::zip;
 use virt::io::CpuIo;
 use virt::vp::AccessVpState;
+use virt::x86::MsrError;
 use virt::Processor;
 use virt_support_x86emu::emulate::TranslateGvaSupport;
 use virt_support_x86emu::translate::TranslateCachingInfo;
 use virt_support_x86emu::translate::TranslationRegisters;
 use zerocopy::FromZeros;
 
-impl<T, B: HardwareIsolatedBacking> UhHypercallHandler<'_, '_, T, B> {
+impl<'b, T, B: HardwareIsolatedBacking> UhHypercallHandler<'_, 'b, T, B>
+where
+    UhProcessor<'b, B>: TlbFlushLockAccess,
+{
     pub fn hcvm_enable_partition_vtl(
         &mut self,
         partition_id: u64,
@@ -96,8 +103,11 @@ impl UhHypercallHandler<'_, '_, T, B> {
         // Grant VTL 1 access to lower VTL memory
         tracing::debug!("Granting VTL 1 access to lower VTL memory");
 
-        protector
-            .change_default_vtl_protections(GuestVtl::Vtl1, hvdef::HV_MAP_GPA_PERMISSIONS_ALL)?;
+        protector.change_default_vtl_protections(
+            GuestVtl::Vtl1,
+            hvdef::HV_MAP_GPA_PERMISSIONS_ALL,
+            self.vp,
+        )?;
 
         tracing::debug!("Successfully granted vtl 1 access to lower vtl memory");
 
@@ -105,7 +115,9 @@ impl UhHypercallHandler<'_, '_, T, B> {
 
         Ok(())
     }
+}
 
+impl<T, B: HardwareIsolatedBacking> UhHypercallHandler<'_, '_, T, B> {
     pub fn hcvm_enable_vp_vtl(
         &mut self,
         partition_id: u64,
@@ -535,7 +547,12 @@ impl UhHypercallHandler<'_, '_, T, B> {
             }
         }
     }
+}
 
+impl<'b, T, B: HardwareIsolatedBacking> UhHypercallHandler<'_, 'b, T, B>
+where
+    UhProcessor<'b, B>: TlbFlushLockAccess,
+{
     fn set_vp_register(
         &mut self,
         vtl: GuestVtl,
@@ -715,6 +732,46 @@ impl UhHypercallHandler<'_, '_, T, B> {
     }
 }
 
+impl<'b, T: CpuIo, B: HardwareIsolatedBacking> hv1_hypercall::ModifySparseGpaPageHostVisibility
+    for UhHypercallHandler<'_, 'b, T, B>
+where
+    UhProcessor<'b, B>: TlbFlushLockAccess,
+{
+    fn modify_gpa_visibility(
+        &mut self,
+        partition_id: u64,
+        visibility: HostVisibilityType,
+        gpa_pages: &[u64],
+    ) -> HvRepResult {
+        if partition_id != hvdef::HV_PARTITION_ID_SELF {
+            return Err((HvError::AccessDenied, 0));
+        }
+
+        tracing::debug!(
+            ?visibility,
+            pages = gpa_pages.len(),
+            "modify_gpa_visibility"
+        );
+
+        if self.vp.partition.hide_isolation {
+            return Err((HvError::AccessDenied, 0));
+        }
+
+        let shared = match visibility {
+            HostVisibilityType::PRIVATE => false,
+            HostVisibilityType::SHARED => true,
+            _ => return Err((HvError::InvalidParameter, 0)),
+        };
+
+        self.vp
+            .partition
+            .isolated_memory_protector
+            .as_ref()
+            .ok_or((HvError::AccessDenied, 0))?
+            .change_host_visibility(shared, gpa_pages, self.vp)
+    }
+}
+
 impl<T: CpuIo, B: HardwareIsolatedBacking> UhHypercallHandler<'_, '_, T, B> {
     fn retarget_physical_interrupt(
         &mut self,
@@ -844,8 +901,10 @@ impl hv1_hypercall::RetargetDeviceInterrup
     }
 }
 
-impl<T, B: HardwareIsolatedBacking> hv1_hypercall::SetVpRegisters
-    for UhHypercallHandler<'_, '_, T, B>
+impl<'b, T, B: HardwareIsolatedBacking> hv1_hypercall::SetVpRegisters
+    for UhHypercallHandler<'_, 'b, T, B>
+where
+    UhProcessor<'b, B>: TlbFlushLockAccess,
 {
     fn set_vp_registers(
         &mut self,
@@ -994,8 +1053,10 @@ impl
     }
 }
 
-impl<T, B: HardwareIsolatedBacking> hv1_hypercall::ModifyVtlProtectionMask
-    for UhHypercallHandler<'_, '_, T, B>
+impl<'b, T, B: HardwareIsolatedBacking> hv1_hypercall::ModifyVtlProtectionMask
+    for UhHypercallHandler<'_, 'b, T, B>
+where
+    UhProcessor<'b, B>: TlbFlushLockAccess,
 {
     fn modify_vtl_protection_mask(
         &mut self,
@@ -1048,7 +1109,7 @@ impl hv1_hypercall::ModifyVtlProtectionMask
         // protections on the VTL itself. Therefore, for a hardware CVM,
         // given that only VTL 1 can set the protections, the default
         // permissions should be changed for VTL 0.
-        protector.change_vtl_protections(GuestVtl::Vtl0, gpa_pages, map_flags)
+        protector.change_vtl_protections(GuestVtl::Vtl0, gpa_pages, map_flags, self.vp)
     }
 }
 
@@ -1136,16 +1197,35 @@ impl hv1_hypercall::TranslateVirtualAddressX64
     }
 }
 
-impl<B: HardwareIsolatedBacking> UhProcessor<'_, B> {
-    /// Returns the partition-wide CVM state.
-    pub fn cvm_partition(&self) -> &'_ crate::UhCvmPartitionState {
-        B::cvm_partition_state(self.shared)
-    }
-
-    /// Returns the per-vp cvm inner state for this vp
-    pub fn cvm_vp_inner(&self) -> &'_ crate::UhCvmVpInner {
-        self.cvm_partition()
-            .vp_inner(self.inner.vp_info.base.vp_index.index())
+impl<B: HardwareIsolatedBacking> UhProcessor<'_, B>
+where
+    Self: TlbFlushLockAccess,
+{
+    pub(crate) fn write_msr_cvm(
+        &mut self,
+        msr: u32,
+        value: u64,
+        vtl: GuestVtl,
+    ) -> Result<(), MsrError> {
+        let hv = &mut self.backing.cvm_state_mut().hv[vtl];
+        // If updated is Synic MSR, then check if its proxy or previous was proxy
+        // in either case, we need to update the `proxy_irr_blocked`
+        let mut irr_filter_update = false;
+        if matches!(msr, hvdef::HV_X64_MSR_SINT0..=hvdef::HV_X64_MSR_SINT15) {
+            let sint_curr = HvSynicSint::from(hv.synic.sint((msr - hvdef::HV_X64_MSR_SINT0) as u8));
+            let sint_new = HvSynicSint::from(value);
+            if sint_curr.proxy() || sint_new.proxy() {
+                irr_filter_update = true;
+            }
+        }
+        let r = hv.msr_write(msr, value);
+        if !matches!(r, Err(MsrError::Unknown)) {
+            // Check if proxy filter update was required (in case of SINT writes)
+            if irr_filter_update {
+                self.update_proxy_irr_filter(vtl);
+            }
+        }
+        r
     }
 
     fn set_vsm_partition_config(
@@ -1213,7 +1293,7 @@ impl UhProcessor<'_, B> {
             }
         }
 
-        protector.change_default_vtl_protections(targeted_vtl, protections)?;
+        protector.change_default_vtl_protections(targeted_vtl, protections, self)?;
 
         // TODO GUEST VSM: actually use the enable_vtl_protection value when
         // deciding whether to check vtl access();
@@ -1227,6 +1307,19 @@ impl UhProcessor<'_, B> {
 
         Ok(())
     }
+}
+
+impl<B: HardwareIsolatedBacking> UhProcessor<'_, B> {
+    /// Returns the partition-wide CVM state.
+    pub fn cvm_partition(&self) -> &'_ crate::UhCvmPartitionState {
+        B::cvm_partition_state(self.shared)
+    }
+
+    /// Returns the per-vp cvm inner state for this vp
+    pub fn cvm_vp_inner(&self) -> &'_ crate::UhCvmVpInner {
+        self.cvm_partition()
+            .vp_inner(self.inner.vp_info.base.vp_index.index())
+    }
 
     /// Handle checking for cross-VTL interrupts, preempting VTL 0, and setting
     /// VINA when appropriate. The `is_interrupt_pending` function should return
diff --git a/openhcl/virt_mshv_vtl/src/processor/mod.rs b/openhcl/virt_mshv_vtl/src/processor/mod.rs
index 4046aba06f..9bbac3e8aa 100644
--- a/openhcl/virt_mshv_vtl/src/processor/mod.rs
+++ b/openhcl/virt_mshv_vtl/src/processor/mod.rs
@@ -943,30 +943,6 @@ impl<'a, T: Backing> UhProcessor<'a, T> {
 
     #[cfg(guest_arch = "x86_64")]
     fn write_msr(&mut self, msr: u32, value: u64, vtl: GuestVtl) -> Result<(), MsrError> {
-        if msr & 0xf0000000 == 0x40000000 {
-            if let Some(hv) = self.backing.hv_mut(vtl).as_mut() {
-                // If updated is Synic MSR, then check if its proxy or previous was proxy
-                // in either case, we need to update the `proxy_irr_blocked`
-                let mut irr_filter_update = false;
-                if matches!(msr, hvdef::HV_X64_MSR_SINT0..=hvdef::HV_X64_MSR_SINT15) {
-                    let sint_curr =
-                        HvSynicSint::from(hv.synic.sint((msr - hvdef::HV_X64_MSR_SINT0) as u8));
-                    let sint_new = HvSynicSint::from(value);
-                    if sint_curr.proxy() || sint_new.proxy() {
-                        irr_filter_update = true;
-                    }
-                }
-                let r = hv.msr_write(msr, value);
-                if !matches!(r, Err(MsrError::Unknown)) {
-                    // Check if proxy filter update was required (in case of SINT writes)
-                    if irr_filter_update {
-                        self.update_proxy_irr_filter(vtl);
-                    }
-                    return r;
-                }
-            }
-        }
-
         match msr {
             hvdef::HV_X64_MSR_GUEST_CRASH_CTL => {
                 self.crash_control = hvdef::GuestCrashCtl::from(value);
@@ -1335,44 +1311,6 @@ impl UhHypercallHandler<'_, '_, T, B> {
     }
 }
 
-impl<T: CpuIo, B: HardwareIsolatedBacking> hv1_hypercall::ModifySparseGpaPageHostVisibility
-    for UhHypercallHandler<'_, '_, T, B>
-{
-    fn modify_gpa_visibility(
-        &mut self,
-        partition_id: u64,
-        visibility: HostVisibilityType,
-        gpa_pages: &[u64],
-    ) -> HvRepResult {
-        if partition_id != hvdef::HV_PARTITION_ID_SELF {
-            return Err((HvError::AccessDenied, 0));
-        }
-
-        tracing::debug!(
-            ?visibility,
-            pages = gpa_pages.len(),
-            "modify_gpa_visibility"
-        );
-
-        if self.vp.partition.hide_isolation {
-            return Err((HvError::AccessDenied, 0));
-        }
-
-        let shared = match visibility {
-            HostVisibilityType::PRIVATE => false,
-            HostVisibilityType::SHARED => true,
-            _ => return Err((HvError::InvalidParameter, 0)),
-        };
-
-        self.vp
-            .partition
-            .isolated_memory_protector
-            .as_ref()
-            .ok_or((HvError::AccessDenied, 0))?
-            .change_host_visibility(shared, gpa_pages)
-    }
-}
-
 impl hv1_hypercall::QuerySparseGpaPageHostVisibility
     for UhHypercallHandler<'_, '_, T, B>
 {
diff --git a/openhcl/virt_mshv_vtl/src/processor/snp/mod.rs b/openhcl/virt_mshv_vtl/src/processor/snp/mod.rs
index ea28a8a51d..6a08379967 100644
--- a/openhcl/virt_mshv_vtl/src/processor/snp/mod.rs
+++ b/openhcl/virt_mshv_vtl/src/processor/snp/mod.rs
@@ -18,6 +18,7 @@ use crate::processor::UhProcessor;
 use crate::BackingShared;
 use crate::Error;
 use crate::GuestVtl;
+use crate::TlbFlushLockAccess;
 use crate::UhCvmPartitionState;
 use crate::UhCvmVpState;
 use crate::UhPartitionInner;
@@ -1041,9 +1042,10 @@ impl UhProcessor<'_, SnpBacked> {
                     vtl: entered_from_vtl,
                 })
                 .msr_write(msr, value)
+                .or_else_if_unknown(|| self.write_msr_cvm(msr, value, entered_from_vtl))
                 .or_else_if_unknown(|| self.write_msr(msr, value, entered_from_vtl))
                 .or_else_if_unknown(|| {
-                    self.write_msr_cvm(dev, msr, value, entered_from_vtl)
+                    self.write_msr_snp(dev, msr, value, entered_from_vtl)
                 });
 
                 match r {
@@ -2105,7 +2107,7 @@ impl UhProcessor<'_, SnpBacked> {
         Ok(value)
     }
 
-    fn write_msr_cvm(
+    fn write_msr_snp(
         &mut self,
         _dev: &impl CpuIo,
         msr: u32,
@@ -2366,20 +2368,55 @@ impl UhHypercallHandler<'_, '_, T, SnpBacked> {
         if only_self && flags.non_global_mappings_only() {
             self.vp.runner.vmsa_mut(self.intercepted_vtl).set_pcpu_id(0);
         } else {
-            let rax = SevInvlpgbRax::new()
-                .with_asid_valid(true)
-                .with_global(!flags.non_global_mappings_only());
-            let ecx = SevInvlpgbEcx::new();
-            let edx = SevInvlpgbEdx::new();
-            self.vp
-                .partition
-                .hcl
-                .invlpgb(rax.into(), edx.into(), ecx.into());
+            self.vp.partition.hcl.invlpgb(
+                SevInvlpgbRax::new()
+                    .with_asid_valid(true)
+                    .with_global(!flags.non_global_mappings_only())
+                    .into(),
+                SevInvlpgbEdx::new().into(),
+                SevInvlpgbEcx::new().into(),
+            );
             self.vp.partition.hcl.tlbsync();
         }
     }
 }
 
+impl TlbFlushLockAccess for UhProcessor<'_, SnpBacked> {
+    fn flush(&mut self, vtl: GuestVtl) {
+        // SNP provides no mechanism to flush a single VTL across multiple VPs
+        // Do a flush entire, but only wait on the VTL that was asked for
+        self.partition.hcl.invlpgb(
+            SevInvlpgbRax::new()
+                .with_asid_valid(true)
+                .with_global(true)
+                .into(),
+            SevInvlpgbEdx::new().into(),
+            SevInvlpgbEcx::new().into(),
+        );
+        self.partition.hcl.tlbsync();
+        self.set_wait_for_tlb_locks(vtl);
+    }
+
+    fn flush_entire(&mut self) {
+        self.partition.hcl.invlpgb(
+            SevInvlpgbRax::new()
+                .with_asid_valid(true)
+                .with_global(true)
+                .into(),
+            SevInvlpgbEdx::new().into(),
+            SevInvlpgbEcx::new().into(),
+        );
+        self.partition.hcl.tlbsync();
+        for vtl in [GuestVtl::Vtl0, GuestVtl::Vtl1] {
+            self.set_wait_for_tlb_locks(vtl);
+        }
+    }
+
+    fn set_wait_for_tlb_locks(&mut self, vtl: GuestVtl) {
+        Self::set_wait_for_tlb_locks(self, vtl);
+    }
+}
+
 mod save_restore {
     use super::SnpBacked;
     use super::UhProcessor;
diff --git a/openhcl/virt_mshv_vtl/src/processor/tdx/mod.rs b/openhcl/virt_mshv_vtl/src/processor/tdx/mod.rs
index 920ecd8150..5aab4754c4 100644
--- a/openhcl/virt_mshv_vtl/src/processor/tdx/mod.rs
+++ b/openhcl/virt_mshv_vtl/src/processor/tdx/mod.rs
@@ -16,6 +16,7 @@ use super::UhHypercallHandler;
 use super::UhRunVpError;
 use crate::BackingShared;
 use crate::GuestVtl;
+use crate::TlbFlushLockAccess;
 use crate::UhCvmPartitionState;
 use crate::UhCvmVpState;
 use crate::UhPartitionInner;
@@ -1605,8 +1606,9 @@ impl UhProcessor<'_, TdxBacked> {
                 vtl: intercepted_vtl,
             })
             .msr_write(msr, value)
+            .or_else_if_unknown(|| self.write_msr_cvm(msr, value, intercepted_vtl))
             .or_else_if_unknown(|| self.write_msr(msr, value, intercepted_vtl))
-            .or_else_if_unknown(|| self.write_msr_cvm(msr, value, intercepted_vtl));
+            .or_else_if_unknown(|| self.write_msr_tdx(msr, value, intercepted_vtl));
 
         let inject_gp = match result {
             Ok(()) => false,
@@ -2113,7 +2115,7 @@ impl UhProcessor<'_, TdxBacked> {
         }
     }
 
-    fn write_msr_cvm(&mut self, msr: u32, value: u64, vtl: GuestVtl) -> Result<(), MsrError> {
+    fn write_msr_tdx(&mut self, msr: u32, value: u64, vtl: GuestVtl) -> Result<(), MsrError> {
         let state = &mut self.backing.vtls[vtl].private_regs;
 
         match msr {
@@ -3417,7 +3419,8 @@ impl hv1_hypercall::FlushVirtualAddressListEx
         }
 
         // Send flush IPIs to the specified VPs.
-        self.wake_processors_for_tlb_flush(vtl, (!flags.all_processors()).then_some(processor_set));
+        self.vp
+            .wake_processors_for_tlb_flush(vtl, (!flags.all_processors()).then_some(processor_set));
 
         // Mark that this VP needs to wait for all TLB locks to be released before returning.
         self.vp.set_wait_for_tlb_locks(vtl);
@@ -3465,7 +3468,8 @@ impl hv1_hypercall::FlushVirtualAddressSpaceEx
         }
 
         // Send flush IPIs to the specified VPs.
-        self.wake_processors_for_tlb_flush(vtl, (!flags.all_processors()).then_some(processor_set));
+        self.vp
+            .wake_processors_for_tlb_flush(vtl, (!flags.all_processors()).then_some(processor_set));
 
         // Mark that this VP needs to wait for all TLB locks to be released before returning.
         self.vp.set_wait_for_tlb_locks(vtl);
@@ -3500,7 +3504,9 @@ impl UhHypercallHandler<'_, '_, T, TdxBacked> {
 
         Ok(())
     }
+}
 
+impl UhProcessor<'_, TdxBacked> {
     fn wake_processors_for_tlb_flush(
         &mut self,
         target_vtl: GuestVtl,
@@ -3514,7 +3520,7 @@ impl UhHypercallHandler<'_, '_, T, TdxBacked> {
                 );
             }
             None => {
-                self.wake_processors_for_tlb_flush_inner(target_vtl, 0..self.vp.partition.vps.len())
+                self.wake_processors_for_tlb_flush_inner(target_vtl, 0..self.partition.vps.len())
             }
         }
     }
@@ -3533,10 +3539,10 @@ impl UhHypercallHandler<'_, '_, T, TdxBacked> {
         // for each VP.
         std::sync::atomic::fence(Ordering::SeqCst);
         for target_vp in processors {
-            if self.vp.vp_index().index() as usize != target_vp
-                && self.vp.shared.active_vtl[target_vp].load(Ordering::Relaxed) == target_vtl as u8
+            if self.vp_index().index() as usize != target_vp
+                && self.shared.active_vtl[target_vp].load(Ordering::Relaxed) == target_vtl as u8
             {
-                self.vp.partition.vps[target_vp].wake_vtl2();
+                self.partition.vps[target_vp].wake_vtl2();
             }
         }
     }
@@ -3544,6 +3550,30 @@ impl UhHypercallHandler<'_, '_, T, TdxBacked> {
     }
 }
 
+impl TlbFlushLockAccess for UhProcessor<'_, TdxBacked> {
+    fn flush(&mut self, vtl: GuestVtl) {
+        {
+            self.shared.flush_state[vtl].write().s.flush_entire_counter += 1;
+        }
+        self.wake_processors_for_tlb_flush(vtl, None);
+        self.set_wait_for_tlb_locks(vtl);
+    }
+
+    fn flush_entire(&mut self) {
+        for vtl in [GuestVtl::Vtl0, GuestVtl::Vtl1] {
+            self.shared.flush_state[vtl].write().s.flush_entire_counter += 1;
+        }
+        for vtl in [GuestVtl::Vtl0, GuestVtl::Vtl1] {
+            self.wake_processors_for_tlb_flush(vtl, None);
+            self.set_wait_for_tlb_locks(vtl);
+        }
+    }
+
+    fn set_wait_for_tlb_locks(&mut self, vtl: GuestVtl) {
+        Self::set_wait_for_tlb_locks(self, vtl);
+    }
+}
+
 mod save_restore {
     use super::TdxBacked;
     use super::UhProcessor;