diff --git a/openhcl/virt_mshv_vtl/src/lib.rs b/openhcl/virt_mshv_vtl/src/lib.rs
index 418be3690..d0e776370 100644
--- a/openhcl/virt_mshv_vtl/src/lib.rs
+++ b/openhcl/virt_mshv_vtl/src/lib.rs
@@ -365,14 +365,28 @@ pub struct UhCvmPartitionState {
         with = "|arr| inspect::iter_by_index(arr.iter()).map_value(|bb| inspect::iter_by_index(bb.iter().map(|v| *v)))"
     )]
     tlb_locked_vps: VtlArray<BitBox<AtomicU64>, 2>,
-    /// The current status of TLB locks, per-VP.
-    #[inspect(
-        with = "|vec| inspect::iter_by_index(vec.iter().map(|arr| inspect::iter_by_index(arr.iter())))"
-    )]
-    tlb_lock_info: Vec<VtlArray<TlbLockInfo, 2>>,
+    #[inspect(with = "inspect::iter_by_index")]
+    vps: Vec<UhCvmVpInner>,
     shared_memory: GuestMemory,
 }
 
+#[cfg(guest_arch = "x86_64")]
+impl UhCvmPartitionState {
+    fn vp_inner(&self, vp_index: u32) -> &UhCvmVpInner {
+        &self.vps[vp_index as usize]
+    }
+}
+
+#[cfg(guest_arch = "x86_64")]
+#[derive(Inspect)]
+/// Per-vp state for CVMs.
+pub struct UhCvmVpInner {
+    /// The current status of TLB locks
+    tlb_lock_info: VtlArray<TlbLockInfo, 2>,
+    /// Whether VTL 1 has been enabled on the vp
+    vtl1_enabled: Mutex<bool>,
+}
+
 /// Partition-wide state for CVMs.
 #[cfg(guest_arch = "aarch64")]
 #[derive(Inspect)]
@@ -603,9 +617,6 @@ struct UhVpInner {
     #[inspect(skip)]
     vp_info: TargetVpInfo,
     cpu_index: u32,
-    /// Only modified for hardware CVMs. On other types of VMs, since VTL 2
-    /// doesn't handle EnableVpVtl, there's no obvious place to set this.
-    hcvm_vtl1_enabled: Mutex<bool>,
     #[cfg_attr(guest_arch = "aarch64", allow(dead_code))]
     #[inspect(with = "|arr| inspect::iter_by_index(arr.iter().map(|v| v.lock().is_some()))")]
     hv_start_enable_vtl_vp: VtlArray<Mutex<Option<Box<InitialVpContextX64>>>, 2>,
@@ -1834,15 +1845,18 @@ impl UhProtoPartition<'_> {
         cpuid: cvm_cpuid::CpuidResults,
     ) -> Result<UhCvmPartitionState, Error> {
         let vp_count = params.topology.vp_count() as usize;
-        let tlb_lock_info = (0..vp_count)
-            .map(|_| VtlArray::from_fn(|_| TlbLockInfo::new(vp_count)))
+        let vps = (0..vp_count)
+            .map(|_vp_index| UhCvmVpInner {
+                tlb_lock_info: VtlArray::from_fn(|_| TlbLockInfo::new(vp_count)),
+                vtl1_enabled: Mutex::new(false),
+            })
             .collect();
         let tlb_locked_vps =
             VtlArray::from_fn(|_| BitVec::repeat(false, vp_count).into_boxed_bitslice());
         Ok(UhCvmPartitionState {
             cpuid,
             tlb_locked_vps,
-            tlb_lock_info,
+            vps,
             shared_memory: late_params
                 .shared_memory
                 .clone()
diff --git a/openhcl/virt_mshv_vtl/src/processor/hardware_cvm/apic.rs b/openhcl/virt_mshv_vtl/src/processor/hardware_cvm/apic.rs
index 61bacdda9..4dacac56e 100644
--- a/openhcl/virt_mshv_vtl/src/processor/hardware_cvm/apic.rs
+++ b/openhcl/virt_mshv_vtl/src/processor/hardware_cvm/apic.rs
@@ -64,7 +64,7 @@ pub(crate) fn poll_apic_core<'b, B: HardwareIsolatedBacking, T: ApicBacking<'b,
     // Check VTL enablement inside each block to avoid taking a lock on the hot path,
     // INIT and SIPI are quite cold.
     if init {
-        if !*apic_backing.vp().inner.hcvm_vtl1_enabled.lock() {
+        if !*apic_backing.vp().cvm_vp_inner().vtl1_enabled.lock() {
             debug_assert_eq!(vtl, GuestVtl::Vtl0);
             apic_backing.handle_init(vtl)?;
         }
@@ -72,7 +72,7 @@ pub(crate) fn poll_apic_core<'b, B: HardwareIsolatedBacking, T: ApicBacking<'b,
     if let Some(vector) = sipi {
         if apic_backing.vp().backing.cvm_state_mut().lapics[vtl].activity == MpState::WaitForSipi
         {
-            if !*apic_backing.vp().inner.hcvm_vtl1_enabled.lock() {
+            if !*apic_backing.vp().cvm_vp_inner().vtl1_enabled.lock() {
                 debug_assert_eq!(vtl, GuestVtl::Vtl0);
                 let base = (vector as u64) << 12;
                 let selector = (vector as u16) << 8;
diff --git a/openhcl/virt_mshv_vtl/src/processor/hardware_cvm/mod.rs b/openhcl/virt_mshv_vtl/src/processor/hardware_cvm/mod.rs
index 67f494d9c..a71650cb3 100644
--- a/openhcl/virt_mshv_vtl/src/processor/hardware_cvm/mod.rs
+++ b/openhcl/virt_mshv_vtl/src/processor/hardware_cvm/mod.rs
@@ -158,8 +158,12 @@ impl<T, B: HardwareIsolatedBacking> UhHypercallHandler<'_, '_, T, B> {
 
         // Lock the remote vp state to make sure no other VP is trying to enable
         // VTL 1 on it.
-        let target_vp = &self.vp.partition.vps[vp_index as usize];
-        let mut vtl1_enabled = target_vp.hcvm_vtl1_enabled.lock();
+        let mut vtl1_enabled = self
+            .vp
+            .cvm_partition()
+            .vp_inner(vp_index)
+            .vtl1_enabled
+            .lock();
 
         if *vtl1_enabled {
             return Err(HvError::VtlAlreadyEnabled);
@@ -880,7 +884,7 @@ impl<T, B: HardwareIsolatedBacking> hv1_hypercall::VtlCall for UhHypercallHandle
                 self.intercepted_vtl
             );
             false
-        } else if !*self.vp.inner.hcvm_vtl1_enabled.lock() {
+        } else if !*self.vp.cvm_vp_inner().vtl1_enabled.lock() {
             // VTL 1 must be enabled on the vp
             tracelimit::warn_ratelimited!("vtl call not allowed because vtl 1 is not enabled");
             false
@@ -1132,6 +1136,17 @@ impl<T, B: HardwareIsolatedBacking> hv1_hypercall::TranslateVirtualAddressX64
 }
 
 impl<B: HardwareIsolatedBacking> UhProcessor<'_, B> {
+    /// Returns the partition-wide CVM state.
+    pub fn cvm_partition(&self) -> &'_ crate::UhCvmPartitionState {
+        B::cvm_partition_state(self.shared)
+    }
+
+    /// Returns the per-vp cvm inner state for this vp
+    pub fn cvm_vp_inner(&self) -> &'_ crate::UhCvmVpInner {
+        self.cvm_partition()
+            .vp_inner(self.inner.vp_info.base.vp_index.index())
+    }
+
     fn set_vsm_partition_config(
         &mut self,
         value: HvRegisterVsmPartitionConfig,
@@ -1286,6 +1301,10 @@ impl<B: HardwareIsolatedBacking> UhProcessor<'_, B> {
         Ok(())
     }
 
+    pub(crate) fn hcvm_vtl1_inspectable(&self) -> bool {
+        *self.cvm_vp_inner().vtl1_enabled.lock()
+    }
+
     fn get_vsm_vp_secure_config_vtl(
         &mut self,
         requesting_vtl: GuestVtl,
diff --git a/openhcl/virt_mshv_vtl/src/processor/hardware_cvm/tlb_lock.rs b/openhcl/virt_mshv_vtl/src/processor/hardware_cvm/tlb_lock.rs
index 8cf8cc6cd..1c5296e84 100644
--- a/openhcl/virt_mshv_vtl/src/processor/hardware_cvm/tlb_lock.rs
+++ b/openhcl/virt_mshv_vtl/src/processor/hardware_cvm/tlb_lock.rs
@@ -4,17 +4,12 @@
 //! TLB lock infrastructure support for hardware-isolated partitions.
 
 use crate::processor::HardwareIsolatedBacking;
-use crate::UhCvmPartitionState;
 use crate::UhProcessor;
 use hcl::GuestVtl;
 use hvdef::Vtl;
 use std::sync::atomic::Ordering;
 
-impl<'a, B: HardwareIsolatedBacking> UhProcessor<'a, B> {
-    fn cvm_partition(&self) -> &'a UhCvmPartitionState {
-        B::cvm_partition_state(self.shared)
-    }
-
+impl<B: HardwareIsolatedBacking> UhProcessor<'_, B> {
     /// Causes the specified VTL on the current VP to wait on all TLB locks.
     /// This is typically used to synchronize VTL permission changes with
     /// concurrent instruction emulation.
@@ -24,7 +19,7 @@ impl<'a, B: HardwareIsolatedBacking> UhProcessor<'a, B> {
         // any VP that acquires the lock after this point is guaranteed to see
         // state that this VP has already flushed.
         let self_index = self.vp_index().index() as usize;
-        let self_lock = &self.cvm_partition().tlb_lock_info[self_index][target_vtl];
+        let self_lock = &self.cvm_vp_inner().tlb_lock_info[target_vtl];
         for vp in self.cvm_partition().tlb_locked_vps[target_vtl]
             .clone()
             .iter_ones()
@@ -48,7 +43,7 @@ impl<'a, B: HardwareIsolatedBacking> UhProcessor<'a, B> {
             // Because the wait by the current VP on the target VP is known to
             // be new, this bit should not already be set.
             let other_lock_blocked =
-                &self.cvm_partition().tlb_lock_info[vp][target_vtl].blocked_vps;
+                &self.cvm_partition().vp_inner(vp as u32).tlb_lock_info[target_vtl].blocked_vps;
             let _was_other_lock_blocked = other_lock_blocked.set_aliased(self_index, true);
             debug_assert!(!_was_other_lock_blocked);
 
@@ -113,12 +108,12 @@ impl<'a, B: HardwareIsolatedBacking> UhProcessor<'a, B> {
         // of blocked VPs may be changing, it must be captured locally, since the
         // VP set scan below cannot safely be performed on a VP set that may be
         // changing.
-        for blocked_vp in self.cvm_partition().tlb_lock_info[self_index][target_vtl]
+        for blocked_vp in self.cvm_vp_inner().tlb_lock_info[target_vtl]
             .blocked_vps
             .clone()
             .iter_ones()
         {
-            self.cvm_partition().tlb_lock_info[self_index][target_vtl]
+            self.cvm_vp_inner().tlb_lock_info[target_vtl]
                 .blocked_vps
                 .set_aliased(blocked_vp, false);
 
@@ -126,7 +121,10 @@ impl<'a, B: HardwareIsolatedBacking> UhProcessor<'a, B> {
             // Note that the target VP may have already marked itself as not
            // blocked if is has already noticed that the lock has already
            // been released on the current VP.
-            let other_lock = &self.cvm_partition().tlb_lock_info[blocked_vp][target_vtl];
+            let other_lock = &self
+                .cvm_partition()
+                .vp_inner(blocked_vp as u32)
+                .tlb_lock_info[target_vtl];
             if other_lock.blocking_vps.set_aliased(self_index, false) {
                 let other_old_count =
                     other_lock.blocking_vp_count.fetch_sub(1, Ordering::Relaxed);
@@ -156,12 +154,11 @@ impl<'a, B: HardwareIsolatedBacking> UhProcessor<'a, B> {
 
     /// Returns whether the VP should halt to wait for the TLB lock of the specified VTL.
     pub fn should_halt_for_tlb_unlock(&mut self, target_vtl: GuestVtl) -> bool {
-        let self_index = self.vp_index().index() as usize;
         // No wait is required if this VP is not blocked on the TLB lock.
         if self.backing.cvm_state_mut().vtls_tlb_waiting[target_vtl] {
             // No wait is required unless this VP is blocked on another VP that
             // holds the TLB flush lock.
-            let self_lock = &self.cvm_partition().tlb_lock_info[self_index][target_vtl];
+            let self_lock = &self.cvm_vp_inner().tlb_lock_info[target_vtl];
             if self_lock.blocking_vp_count.load(Ordering::Relaxed) != 0 {
                 self_lock.sleeping.store(true, Ordering::Relaxed);
                 // Now that this VP has been marked as sleeping, check to see
@@ -176,7 +173,7 @@ impl<'a, B: HardwareIsolatedBacking> UhProcessor<'a, B> {
             self.backing.cvm_state_mut().vtls_tlb_waiting[target_vtl] = false;
         } else {
             debug_assert_eq!(
-                self.cvm_partition().tlb_lock_info[self_index][target_vtl]
+                self.cvm_vp_inner().tlb_lock_info[target_vtl]
                     .blocking_vp_count
                     .load(Ordering::Relaxed),
                 0
diff --git a/openhcl/virt_mshv_vtl/src/processor/mod.rs b/openhcl/virt_mshv_vtl/src/processor/mod.rs
index 6057945b0..48dacf47c 100644
--- a/openhcl/virt_mshv_vtl/src/processor/mod.rs
+++ b/openhcl/virt_mshv_vtl/src/processor/mod.rs
@@ -252,6 +252,8 @@ mod private {
         fn untrusted_synic(&self) -> Option<&ProcessorSynic>;
 
         fn untrusted_synic_mut(&mut self) -> Option<&mut ProcessorSynic>;
+
+        fn vtl1_inspectable(this: &UhProcessor<'_, Self>) -> bool;
     }
 }
 
@@ -343,7 +345,6 @@ impl UhVpInner {
             waker: Default::default(),
             cpu_index,
             vp_info,
-            hcvm_vtl1_enabled: Mutex::new(false),
             hv_start_enable_vtl_vp: VtlArray::from_fn(|_| Mutex::new(None)),
             sidecar_exit_reason: Default::default(),
         }
@@ -758,16 +759,7 @@ impl<'p, T: Backing> Processor for UhProcessor<'p, T> {
     fn vtl_inspectable(&self, vtl: Vtl) -> bool {
         match vtl {
             Vtl::Vtl0 => true,
-            Vtl::Vtl1 => {
-                if self.partition.isolation.is_hardware_isolated() {
-                    *self.inner.hcvm_vtl1_enabled.lock()
-                } else {
-                    // TODO: when there's support for returning VTL 1 registers,
-                    // use the VsmVpStatus register to query the hypervisor for
-                    // whether VTL 1 is enabled on the vp (this can be cached).
-                    false
-                }
-            }
+            Vtl::Vtl1 => T::vtl1_inspectable(self),
             Vtl::Vtl2 => false,
         }
     }
diff --git a/openhcl/virt_mshv_vtl/src/processor/mshv/arm64.rs b/openhcl/virt_mshv_vtl/src/processor/mshv/arm64.rs
index 9c2f24c44..ed7c15044 100644
--- a/openhcl/virt_mshv_vtl/src/processor/mshv/arm64.rs
+++ b/openhcl/virt_mshv_vtl/src/processor/mshv/arm64.rs
@@ -229,6 +229,12 @@ impl BackingPrivate for HypervisorBackedArm64 {
     ) -> Result<(), UhRunVpError> {
         unimplemented!()
     }
+
+    fn vtl1_inspectable(_this: &UhProcessor<'_, Self>) -> bool {
+        // TODO: Use the VsmVpStatus register to query the hypervisor for
+        // whether VTL 1 is enabled on the vp (this can be cached).
+        false
+    }
 }
 
 impl UhProcessor<'_, HypervisorBackedArm64> {
diff --git a/openhcl/virt_mshv_vtl/src/processor/mshv/x64.rs b/openhcl/virt_mshv_vtl/src/processor/mshv/x64.rs
index f76260d6e..755da97d1 100644
--- a/openhcl/virt_mshv_vtl/src/processor/mshv/x64.rs
+++ b/openhcl/virt_mshv_vtl/src/processor/mshv/x64.rs
@@ -339,6 +339,12 @@ impl BackingPrivate for HypervisorBackedX86 {
     ) -> Result<(), UhRunVpError> {
         unimplemented!()
     }
+
+    fn vtl1_inspectable(_this: &UhProcessor<'_, Self>) -> bool {
+        // TODO: Use the VsmVpStatus register to query the hypervisor for
+        // whether VTL 1 is enabled on the vp (this can be cached).
+        false
+    }
 }
 
 fn parse_sidecar_exit(message: &hvdef::HvMessage) -> SidecarRemoveExit {
@@ -1995,8 +2001,6 @@ mod save_restore {
                 // Topology information
                 vp_info: _,
                 cpu_index: _,
-                // Only relevant for CVMs
-                hcvm_vtl1_enabled: _,
                 hv_start_enable_vtl_vp: _,
             },
             // Saved
diff --git a/openhcl/virt_mshv_vtl/src/processor/snp/mod.rs b/openhcl/virt_mshv_vtl/src/processor/snp/mod.rs
index b20ce5e5e..ec86f5e96 100644
--- a/openhcl/virt_mshv_vtl/src/processor/snp/mod.rs
+++ b/openhcl/virt_mshv_vtl/src/processor/snp/mod.rs
@@ -460,7 +460,7 @@ impl BackingPrivate for SnpBacked {
 
     fn inspect_extra(this: &mut UhProcessor<'_, Self>, resp: &mut inspect::Response<'_>) {
         let vtl0_vmsa = this.runner.vmsa(GuestVtl::Vtl0);
-        let vtl1_vmsa = if *this.inner.hcvm_vtl1_enabled.lock() {
+        let vtl1_vmsa = if *this.cvm_vp_inner().vtl1_enabled.lock() {
             Some(this.runner.vmsa(GuestVtl::Vtl1))
         } else {
             None
@@ -510,6 +510,10 @@ impl BackingPrivate for SnpBacked {
     ) -> Result<(), UhRunVpError> {
         this.hcvm_handle_vp_start_enable_vtl(vtl)
     }
+
+    fn vtl1_inspectable(this: &UhProcessor<'_, Self>) -> bool {
+        this.hcvm_vtl1_inspectable()
+    }
 }
 
 fn virt_seg_to_snp(val: SegmentRegister) -> SevSelector {
diff --git a/openhcl/virt_mshv_vtl/src/processor/tdx/mod.rs b/openhcl/virt_mshv_vtl/src/processor/tdx/mod.rs
index 245d8aa1b..9abbe728b 100644
--- a/openhcl/virt_mshv_vtl/src/processor/tdx/mod.rs
+++ b/openhcl/virt_mshv_vtl/src/processor/tdx/mod.rs
@@ -902,6 +902,10 @@ impl BackingPrivate for TdxBacked {
     ) -> Result<(), UhRunVpError> {
         this.hcvm_handle_vp_start_enable_vtl(vtl)
     }
+
+    fn vtl1_inspectable(this: &UhProcessor<'_, Self>) -> bool {
+        this.hcvm_vtl1_inspectable()
+    }
 }
 
 impl UhProcessor<'_, TdxBacked> {
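
The patch moves CVM-only per-VP state (the TLB lock info and the former hcvm_vtl1_enabled flag) out of the generic UhVpInner and into a new UhCvmVpInner, owned by UhCvmPartitionState as a Vec indexed by VP and reached through vp_inner()/cvm_vp_inner(). Below is a minimal, self-contained sketch of that ownership shape; the type names are simplified stand-ins for the real structs, and std::sync::Mutex stands in for whichever mutex type the crate actually uses.

use std::sync::Mutex;

/// Simplified stand-in for `UhCvmVpInner`: per-VP state for CVMs.
struct CvmVpInner {
    /// Whether VTL 1 has been enabled on the VP.
    vtl1_enabled: Mutex<bool>,
}

/// Simplified stand-in for `UhCvmPartitionState`: partition-wide CVM state.
struct CvmPartitionState {
    vps: Vec<CvmVpInner>,
}

impl CvmPartitionState {
    fn new(vp_count: usize) -> Self {
        Self {
            vps: (0..vp_count)
                .map(|_| CvmVpInner {
                    vtl1_enabled: Mutex::new(false),
                })
                .collect(),
        }
    }

    /// Mirrors `UhCvmPartitionState::vp_inner`: look up one VP's state by index.
    fn vp_inner(&self, vp_index: u32) -> &CvmVpInner {
        &self.vps[vp_index as usize]
    }
}

fn main() {
    let partition = CvmPartitionState::new(4);

    // One VP's hypercall handler can lock a *remote* VP's state by index,
    // as the EnableVpVtl path in the patch does.
    let mut vtl1_enabled = partition.vp_inner(2).vtl1_enabled.lock().unwrap();
    assert!(!*vtl1_enabled);
    *vtl1_enabled = true;
    drop(vtl1_enabled);

    // Each VP's own hot paths read only their own entry, which is what
    // cvm_vp_inner() resolves to in the patch.
    assert!(*partition.vp_inner(2).vtl1_enabled.lock().unwrap());
    assert!(!*partition.vp_inner(0).vtl1_enabled.lock().unwrap());
}

The other structural change, vtl1_inspectable, replaces the runtime is_hardware_isolated() branch in vtl_inspectable with an associated function on the private backing trait, so each backing answers for itself. A second standalone sketch of that dispatch, again with simplified stand-in names:

/// Simplified stand-in for the private `BackingPrivate` trait.
trait Backing: Sized {
    fn vtl1_inspectable(this: &Processor<Self>) -> bool;
}

struct Processor<B: Backing> {
    /// Stand-in for the per-VP CVM state consulted by hardware-CVM backings.
    vtl1_enabled: bool,
    backing: B,
}

/// Like the mshv backings: VTL 1 state is not yet queryable, so report false.
struct HypervisorBacked;
impl Backing for HypervisorBacked {
    fn vtl1_inspectable(_this: &Processor<Self>) -> bool {
        false
    }
}

/// Like the SNP/TDX backings: consult the per-VP enablement state.
struct SnpBacked;
impl Backing for SnpBacked {
    fn vtl1_inspectable(this: &Processor<Self>) -> bool {
        this.vtl1_enabled
    }
}

impl<B: Backing> Processor<B> {
    fn vtl_inspectable(&self, vtl: u8) -> bool {
        match vtl {
            0 => true,
            // Static dispatch: no isolation check remains on this path.
            1 => B::vtl1_inspectable(self),
            _ => false,
        }
    }
}

fn main() {
    let p = Processor { vtl1_enabled: true, backing: SnpBacked };
    assert!(p.vtl_inspectable(1));
    let q = Processor { vtl1_enabled: true, backing: HypervisorBacked };
    assert!(!q.vtl_inspectable(1));
}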