Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

cvm guest vsm: move vtl1_enabled from UhVpInner to a new UhCvmVpInner #842

Open
wants to merge 1 commit into
base: main
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
36 changes: 25 additions & 11 deletions openhcl/virt_mshv_vtl/src/lib.rs
Original file line number Diff line number Diff line change
Expand Up @@ -365,14 +365,28 @@ pub struct UhCvmPartitionState {
with = "|arr| inspect::iter_by_index(arr.iter()).map_value(|bb| inspect::iter_by_index(bb.iter().map(|v| *v)))"
)]
tlb_locked_vps: VtlArray<BitBox<AtomicU64>, 2>,
/// The current status of TLB locks, per-VP.
#[inspect(
with = "|vec| inspect::iter_by_index(vec.iter().map(|arr| inspect::iter_by_index(arr.iter())))"
)]
tlb_lock_info: Vec<VtlArray<TlbLockInfo, 2>>,
#[inspect(with = "inspect::iter_by_index")]
vps: Vec<UhCvmVpInner>,
shared_memory: GuestMemory,
}

#[cfg(guest_arch = "x86_64")]
impl UhCvmPartitionState {
    /// Returns the per-VP CVM state for the VP with the given index.
    ///
    /// Panics if `vp_index` is out of range for the partition's VP count.
    fn vp_inner(&self, vp_index: u32) -> &UhCvmVpInner {
        // Plain indexing panics on out-of-range exactly like the previous
        // `get(..).unwrap()` did, with less noise.
        &self.vps[vp_index as usize]
    }
}

/// Per-vp state for CVMs.
#[cfg(guest_arch = "x86_64")]
#[derive(Inspect)]
pub struct UhCvmVpInner {
    /// The current status of TLB locks, indexed by VTL.
    tlb_lock_info: VtlArray<TlbLockInfo, 2>,
    /// Whether VTL 1 has been enabled on the vp.
    vtl1_enabled: Mutex<bool>,
}

/// Partition-wide state for CVMs.
#[cfg(guest_arch = "aarch64")]
#[derive(Inspect)]
Expand Down Expand Up @@ -603,9 +617,6 @@ struct UhVpInner {
#[inspect(skip)]
vp_info: TargetVpInfo,
cpu_index: u32,
/// Only modified for hardware CVMs. On other types of VMs, since VTL 2
/// doesn't handle EnableVpVtl, there's no obvious place to set this.
hcvm_vtl1_enabled: Mutex<bool>,
#[cfg_attr(guest_arch = "aarch64", allow(dead_code))]
#[inspect(with = "|arr| inspect::iter_by_index(arr.iter().map(|v| v.lock().is_some()))")]
hv_start_enable_vtl_vp: VtlArray<Mutex<Option<Box<hvdef::hypercall::InitialVpContextX64>>>, 2>,
Expand Down Expand Up @@ -1834,15 +1845,18 @@ impl UhProtoPartition<'_> {
cpuid: cvm_cpuid::CpuidResults,
) -> Result<UhCvmPartitionState, Error> {
let vp_count = params.topology.vp_count() as usize;
let tlb_lock_info = (0..vp_count)
.map(|_| VtlArray::from_fn(|_| TlbLockInfo::new(vp_count)))
let vps = (0..vp_count)
.map(|_vp_index| UhCvmVpInner {
tlb_lock_info: VtlArray::from_fn(|_| TlbLockInfo::new(vp_count)),
vtl1_enabled: Mutex::new(false),
})
.collect();
let tlb_locked_vps =
VtlArray::from_fn(|_| BitVec::repeat(false, vp_count).into_boxed_bitslice());
Ok(UhCvmPartitionState {
cpuid,
tlb_locked_vps,
tlb_lock_info,
vps,
shared_memory: late_params
.shared_memory
.clone()
Expand Down
4 changes: 2 additions & 2 deletions openhcl/virt_mshv_vtl/src/processor/hardware_cvm/apic.rs
Original file line number Diff line number Diff line change
Expand Up @@ -64,15 +64,15 @@ pub(crate) fn poll_apic_core<'b, B: HardwareIsolatedBacking, T: ApicBacking<'b,
// Check VTL enablement inside each block to avoid taking a lock on the hot path,
// INIT and SIPI are quite cold.
if init {
if !*apic_backing.vp().inner.hcvm_vtl1_enabled.lock() {
if !*apic_backing.vp().cvm_vp_inner().vtl1_enabled.lock() {
debug_assert_eq!(vtl, GuestVtl::Vtl0);
apic_backing.handle_init(vtl)?;
}
}

if let Some(vector) = sipi {
if apic_backing.vp().backing.cvm_state_mut().lapics[vtl].activity == MpState::WaitForSipi {
if !*apic_backing.vp().inner.hcvm_vtl1_enabled.lock() {
if !*apic_backing.vp().cvm_vp_inner().vtl1_enabled.lock() {
debug_assert_eq!(vtl, GuestVtl::Vtl0);
let base = (vector as u64) << 12;
let selector = (vector as u16) << 8;
Expand Down
25 changes: 22 additions & 3 deletions openhcl/virt_mshv_vtl/src/processor/hardware_cvm/mod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -158,8 +158,12 @@ impl<T, B: HardwareIsolatedBacking> UhHypercallHandler<'_, '_, T, B> {

// Lock the remote vp state to make sure no other VP is trying to enable
// VTL 1 on it.
let target_vp = &self.vp.partition.vps[vp_index as usize];
let mut vtl1_enabled = target_vp.hcvm_vtl1_enabled.lock();
let mut vtl1_enabled = self
.vp
.cvm_partition()
.vp_inner(vp_index)
.vtl1_enabled
.lock();

if *vtl1_enabled {
return Err(HvError::VtlAlreadyEnabled);
Expand Down Expand Up @@ -880,7 +884,7 @@ impl<T, B: HardwareIsolatedBacking> hv1_hypercall::VtlCall for UhHypercallHandle
self.intercepted_vtl
);
false
} else if !*self.vp.inner.hcvm_vtl1_enabled.lock() {
} else if !*self.vp.cvm_vp_inner().vtl1_enabled.lock() {
// VTL 1 must be enabled on the vp
tracelimit::warn_ratelimited!("vtl call not allowed because vtl 1 is not enabled");
false
Expand Down Expand Up @@ -1132,6 +1136,17 @@ impl<T, B: HardwareIsolatedBacking> hv1_hypercall::TranslateVirtualAddressX64
}

impl<B: HardwareIsolatedBacking> UhProcessor<'_, B> {
/// Returns the partition-wide CVM state.
pub fn cvm_partition(&self) -> &crate::UhCvmPartitionState {
    // `&'_` in return position is redundant; lifetime elision ties the
    // borrow to `&self` either way.
    B::cvm_partition_state(self.shared)
}

/// Returns the per-vp CVM inner state for this vp.
pub fn cvm_vp_inner(&self) -> &crate::UhCvmVpInner {
    // Look up this VP's entry in the partition-wide per-VP table by its
    // own VP index.
    self.cvm_partition()
        .vp_inner(self.inner.vp_info.base.vp_index.index())
}

fn set_vsm_partition_config(
&mut self,
value: HvRegisterVsmPartitionConfig,
Expand Down Expand Up @@ -1286,6 +1301,10 @@ impl<B: HardwareIsolatedBacking> UhProcessor<'_, B> {
Ok(())
}

/// Returns whether this vp's VTL 1 state is available for inspection,
/// i.e. whether VTL 1 has been enabled on the vp.
pub(crate) fn hcvm_vtl1_inspectable(&self) -> bool {
    *self.cvm_vp_inner().vtl1_enabled.lock()
}

fn get_vsm_vp_secure_config_vtl(
&mut self,
requesting_vtl: GuestVtl,
Expand Down
25 changes: 11 additions & 14 deletions openhcl/virt_mshv_vtl/src/processor/hardware_cvm/tlb_lock.rs
Original file line number Diff line number Diff line change
Expand Up @@ -4,17 +4,12 @@
//! TLB lock infrastructure support for hardware-isolated partitions.

use crate::processor::HardwareIsolatedBacking;
use crate::UhCvmPartitionState;
use crate::UhProcessor;
use hcl::GuestVtl;
use hvdef::Vtl;
use std::sync::atomic::Ordering;

impl<'a, B: HardwareIsolatedBacking> UhProcessor<'a, B> {
fn cvm_partition(&self) -> &'a UhCvmPartitionState {
B::cvm_partition_state(self.shared)
}

impl<B: HardwareIsolatedBacking> UhProcessor<'_, B> {
/// Causes the specified VTL on the current VP to wait on all TLB locks.
/// This is typically used to synchronize VTL permission changes with
/// concurrent instruction emulation.
Expand All @@ -24,7 +19,7 @@ impl<'a, B: HardwareIsolatedBacking> UhProcessor<'a, B> {
// any VP that acquires the lock after this point is guaranteed to see
// state that this VP has already flushed.
let self_index = self.vp_index().index() as usize;
let self_lock = &self.cvm_partition().tlb_lock_info[self_index][target_vtl];
let self_lock = &self.cvm_vp_inner().tlb_lock_info[target_vtl];
for vp in self.cvm_partition().tlb_locked_vps[target_vtl]
.clone()
.iter_ones()
Expand All @@ -48,7 +43,7 @@ impl<'a, B: HardwareIsolatedBacking> UhProcessor<'a, B> {
// Because the wait by the current VP on the target VP is known to
// be new, this bit should not already be set.
let other_lock_blocked =
&self.cvm_partition().tlb_lock_info[vp][target_vtl].blocked_vps;
&self.cvm_partition().vp_inner(vp as u32).tlb_lock_info[target_vtl].blocked_vps;
let _was_other_lock_blocked = other_lock_blocked.set_aliased(self_index, true);
debug_assert!(!_was_other_lock_blocked);

Expand Down Expand Up @@ -113,20 +108,23 @@ impl<'a, B: HardwareIsolatedBacking> UhProcessor<'a, B> {
// of blocked VPs may be changing, it must be captured locally, since the
// VP set scan below cannot safely be performed on a VP set that may be
// changing.
for blocked_vp in self.cvm_partition().tlb_lock_info[self_index][target_vtl]
for blocked_vp in self.cvm_vp_inner().tlb_lock_info[target_vtl]
.blocked_vps
.clone()
.iter_ones()
{
self.cvm_partition().tlb_lock_info[self_index][target_vtl]
self.cvm_vp_inner().tlb_lock_info[target_vtl]
.blocked_vps
.set_aliased(blocked_vp, false);

// Mark the target VP as no longer blocked by the current VP.
// Note that the target VP may have already marked itself as not
// blocked if it has already noticed that the lock has
// been released on the current VP.
let other_lock = &self.cvm_partition().tlb_lock_info[blocked_vp][target_vtl];
let other_lock = &self
.cvm_partition()
.vp_inner(blocked_vp as u32)
.tlb_lock_info[target_vtl];
if other_lock.blocking_vps.set_aliased(self_index, false) {
let other_old_count =
other_lock.blocking_vp_count.fetch_sub(1, Ordering::Relaxed);
Expand Down Expand Up @@ -156,12 +154,11 @@ impl<'a, B: HardwareIsolatedBacking> UhProcessor<'a, B> {

/// Returns whether the VP should halt to wait for the TLB lock of the specified VTL.
pub fn should_halt_for_tlb_unlock(&mut self, target_vtl: GuestVtl) -> bool {
let self_index = self.vp_index().index() as usize;
// No wait is required if this VP is not blocked on the TLB lock.
if self.backing.cvm_state_mut().vtls_tlb_waiting[target_vtl] {
// No wait is required unless this VP is blocked on another VP that
// holds the TLB flush lock.
let self_lock = &self.cvm_partition().tlb_lock_info[self_index][target_vtl];
let self_lock = &self.cvm_vp_inner().tlb_lock_info[target_vtl];
if self_lock.blocking_vp_count.load(Ordering::Relaxed) != 0 {
self_lock.sleeping.store(true, Ordering::Relaxed);
// Now that this VP has been marked as sleeping, check to see
Expand All @@ -176,7 +173,7 @@ impl<'a, B: HardwareIsolatedBacking> UhProcessor<'a, B> {
self.backing.cvm_state_mut().vtls_tlb_waiting[target_vtl] = false;
} else {
debug_assert_eq!(
self.cvm_partition().tlb_lock_info[self_index][target_vtl]
self.cvm_vp_inner().tlb_lock_info[target_vtl]
.blocking_vp_count
.load(Ordering::Relaxed),
0
Expand Down
14 changes: 3 additions & 11 deletions openhcl/virt_mshv_vtl/src/processor/mod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -252,6 +252,8 @@ mod private {

fn untrusted_synic(&self) -> Option<&ProcessorSynic>;
fn untrusted_synic_mut(&mut self) -> Option<&mut ProcessorSynic>;

fn vtl1_inspectable(this: &UhProcessor<'_, Self>) -> bool;
}
}

Expand Down Expand Up @@ -343,7 +345,6 @@ impl UhVpInner {
waker: Default::default(),
cpu_index,
vp_info,
hcvm_vtl1_enabled: Mutex::new(false),
hv_start_enable_vtl_vp: VtlArray::from_fn(|_| Mutex::new(None)),
sidecar_exit_reason: Default::default(),
}
Expand Down Expand Up @@ -758,16 +759,7 @@ impl<'p, T: Backing> Processor for UhProcessor<'p, T> {
/// Returns whether the given VTL's state can be inspected on this vp.
fn vtl_inspectable(&self, vtl: Vtl) -> bool {
    match vtl {
        // VTL 0 is always present and inspectable.
        Vtl::Vtl0 => true,
        // VTL 1 inspectability is backing-specific: hardware CVM
        // backings track enablement directly, while hypervisor-backed
        // VPs currently report false.
        Vtl::Vtl1 => T::vtl1_inspectable(self),
        // VTL 2 state is not exposed through this path.
        Vtl::Vtl2 => false,
    }
}
Expand Down
6 changes: 6 additions & 0 deletions openhcl/virt_mshv_vtl/src/processor/mshv/arm64.rs
Original file line number Diff line number Diff line change
Expand Up @@ -229,6 +229,12 @@ impl BackingPrivate for HypervisorBackedArm64 {
) -> Result<(), UhRunVpError> {
unimplemented!()
}

fn vtl1_inspectable(_this: &UhProcessor<'_, Self>) -> bool {
    // TODO: Use the VsmVpStatus register to query the hypervisor for
    // whether VTL 1 is enabled on the vp (this can be cached).
    // Until then, conservatively report VTL 1 as not inspectable.
    false
}
}

impl UhProcessor<'_, HypervisorBackedArm64> {
Expand Down
8 changes: 6 additions & 2 deletions openhcl/virt_mshv_vtl/src/processor/mshv/x64.rs
Original file line number Diff line number Diff line change
Expand Up @@ -339,6 +339,12 @@ impl BackingPrivate for HypervisorBackedX86 {
) -> Result<(), UhRunVpError> {
unimplemented!()
}

fn vtl1_inspectable(_this: &UhProcessor<'_, Self>) -> bool {
    // TODO: Use the VsmVpStatus register to query the hypervisor for
    // whether VTL 1 is enabled on the vp (this can be cached).
    // Until then, conservatively report VTL 1 as not inspectable.
    false
}
}

fn parse_sidecar_exit(message: &hvdef::HvMessage) -> SidecarRemoveExit {
Expand Down Expand Up @@ -1995,8 +2001,6 @@ mod save_restore {
// Topology information
vp_info: _,
cpu_index: _,
// Only relevant for CVMs
hcvm_vtl1_enabled: _,
hv_start_enable_vtl_vp: _,
},
// Saved
Expand Down
6 changes: 5 additions & 1 deletion openhcl/virt_mshv_vtl/src/processor/snp/mod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -460,7 +460,7 @@ impl BackingPrivate for SnpBacked {

fn inspect_extra(this: &mut UhProcessor<'_, Self>, resp: &mut inspect::Response<'_>) {
let vtl0_vmsa = this.runner.vmsa(GuestVtl::Vtl0);
let vtl1_vmsa = if *this.inner.hcvm_vtl1_enabled.lock() {
let vtl1_vmsa = if *this.cvm_vp_inner().vtl1_enabled.lock() {
Some(this.runner.vmsa(GuestVtl::Vtl1))
} else {
None
Expand Down Expand Up @@ -510,6 +510,10 @@ impl BackingPrivate for SnpBacked {
) -> Result<(), UhRunVpError> {
this.hcvm_handle_vp_start_enable_vtl(vtl)
}

fn vtl1_inspectable(this: &UhProcessor<'_, Self>) -> bool {
    // Hardware CVMs track VTL 1 enablement per-vp, so delegate to the
    // common hardware-CVM check.
    this.hcvm_vtl1_inspectable()
}
}

fn virt_seg_to_snp(val: SegmentRegister) -> SevSelector {
Expand Down
4 changes: 4 additions & 0 deletions openhcl/virt_mshv_vtl/src/processor/tdx/mod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -902,6 +902,10 @@ impl BackingPrivate for TdxBacked {
) -> Result<(), UhRunVpError> {
this.hcvm_handle_vp_start_enable_vtl(vtl)
}

fn vtl1_inspectable(this: &UhProcessor<'_, Self>) -> bool {
    // Hardware CVMs track VTL 1 enablement per-vp, so delegate to the
    // common hardware-CVM check.
    this.hcvm_vtl1_inspectable()
}
}

impl UhProcessor<'_, TdxBacked> {
Expand Down