kernel/mm/guestmem: make read() and write*() unsafe
Once a GuestPtr is created, its read(), write() and write_ref() methods
can grant arbitrary read/write access if an attacker (HV or guest) can
control the GuestPtr.ptr value and the write*() parameters.

We need to verify that this is not the case for the current call sites,
and to enforce an unsafe block for future calls, so that callers must
validate the parameters.

Signed-off-by: Thomas Leroy <[email protected]>
p4zuu committed Jun 20, 2024
1 parent e0cbb56 commit 86b58d8
Showing 7 changed files with 105 additions and 26 deletions.
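
Concretely, callers now have to justify each guest-memory dereference next to an explicit unsafe block. A minimal sketch of the new calling convention (the helper is illustrative and not part of this commit; module paths assume the kernel crate layout):

use crate::address::VirtAddr;
use crate::error::SvsmError;
use crate::mm::guestmem::GuestPtr;

/// Illustrative helper: read a guest-supplied u64 once the caller has
/// validated the address.
fn read_guest_u64(vaddr: VirtAddr) -> Result<u64, SvsmError> {
    let ptr = GuestPtr::<u64>::new(vaddr);
    // SAFETY: vaddr has been validated as mapped and as belonging only to
    // the issuing guest's memory space, per read()'s safety contract.
    unsafe { ptr.read() }
}
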
15 changes: 10 additions & 5 deletions kernel/src/cpu/apic.rs
@@ -202,7 +202,8 @@ impl LocalApic {
         if self.lazy_eoi_pending {
             if let Some(virt_addr) = caa_addr {
                 let calling_area = GuestPtr::<SvsmCaa>::new(virt_addr);
-                if let Ok(caa) = calling_area.read() {
+                // SAFETY: HV and guests can't change the VMSA's content, and therefore the CAA address.
+                if let Ok(caa) = unsafe { calling_area.read() } {
                     if caa.no_eoi_required == 0 {
                         assert!(self.isr_stack_index != 0);
                         self.perform_eoi();
@@ -237,8 +238,9 @@ impl LocalApic {
         let virt_addr = caa_addr?;
         let calling_area = GuestPtr::<SvsmCaa>::new(virt_addr);
         // Ignore errors here, since nothing can be done if an error occurs.
-        if let Ok(caa) = calling_area.read() {
-            let _ = calling_area.write(caa.update_no_eoi_required(0));
+        // SAFETY: HV and guests can't change the VMSA's content, and therefore the CAA address.
+        if let Ok(caa) = unsafe { calling_area.read() } {
+            let _ = unsafe { calling_area.write(caa.update_no_eoi_required(0)) };
         }
         Some(calling_area)
     }
@@ -357,8 +359,11 @@ impl LocalApic {
         // delivery of the next interrupt.
         if self.scan_irr() == 0 {
             if let Some(calling_area) = guest_caa {
-                if let Ok(caa) = calling_area.read() {
-                    if calling_area.write(caa.update_no_eoi_required(1)).is_ok() {
+                // SAFETY: HV and guests can't change the VMSA's content, and therefore the CAA address.
+                if let Ok(caa) = unsafe { calling_area.read() } {
+                    if unsafe {
+                        calling_area.write(caa.update_no_eoi_required(1)).is_ok()
+                    } {
                         // Only track a pending lazy EOI if the
                         // calling area page could successfully be
                         // updated.
6 changes: 5 additions & 1 deletion kernel/src/cpu/vc.rs
@@ -272,7 +272,11 @@ fn vc_decode_insn(ctx: &X86ExceptionContext) -> Result<Option<DecodedInsnCtx>, SvsmError> {
     // rip and rip+15 addresses should belong to a mapped page.
     // To ensure this, we rely on GuestPtr::read() that uses the exception table
     // to handle faults while fetching.
-    let insn_raw = rip.read()?;
+    // SAFETY: rip can be any mapped address, including userspace. It is
+    // technically not possible for a user to raise a #VC from a different
+    // context (a different user or kernelspace), so rip can't belong to a
+    // different user's memory or to kernel memory.
+    let insn_raw = unsafe { rip.read()? };
 
     let insn = Instruction::new(insn_raw);
     Ok(Some(insn.decode(ctx)?))
18 changes: 12 additions & 6 deletions kernel/src/igvm_params.rs
@@ -196,29 +196,35 @@ impl IgvmParams<'_> {
                 return Err(SvsmError::Firmware);
             }
 
-            mem_map
-                .offset(i as isize)
-                .write(IGVM_VHS_MEMORY_MAP_ENTRY {
+            let mem_map = mem_map.offset(i as isize);
+            // SAFETY: mem_map_va points to newly mapped memory, whose address
+            // is defined in the config.
+            unsafe {
+                mem_map.write(IGVM_VHS_MEMORY_MAP_ENTRY {
                     starting_gpa_page_number: u64::from(entry.start()) / PAGE_SIZE as u64,
                     number_of_pages: entry.len() as u64 / PAGE_SIZE as u64,
                     entry_type: MemoryMapEntryType::default(),
                     flags: 0,
                     reserved: 0,
                 })?;
+            }
         }
 
         // Write a zero page count into the last entry to terminate the list.
         let index = map.len();
         if index < max_entries {
-            mem_map
-                .offset(index as isize)
-                .write(IGVM_VHS_MEMORY_MAP_ENTRY {
+            let mem_map = mem_map.offset(index as isize);
+            // SAFETY: mem_map_va points to newly mapped memory, whose address
+            // is defined in the config.
+            unsafe {
+                mem_map.write(IGVM_VHS_MEMORY_MAP_ENTRY {
+                    starting_gpa_page_number: 0,
+                    number_of_pages: 0,
+                    entry_type: MemoryMapEntryType::default(),
+                    flags: 0,
+                    reserved: 0,
+                })?;
+            }
         }
 
         Ok(())
46 changes: 40 additions & 6 deletions kernel/src/mm/guestmem.rs
@@ -201,8 +201,19 @@ impl<T: Copy> GuestPtr<T> {
         Self { ptr: p }
     }
 
+    /// # Safety
+    ///
+    /// The caller must verify that the read address (`self.ptr`) is valid
+    /// for reads:
+    /// - it must belong to a mapped memory region
+    /// - it must belong only to the issuer's memory space (a guest must not
+    ///   be able to read the SVSM kernel's memory or another guest's
+    ///   memory)
+    ///
+    /// This function returns an error if it reads from an unmapped address
+    /// and the #PF or #GP handler returns an error.
     #[inline]
-    pub fn read(&self) -> Result<T, SvsmError> {
+    pub unsafe fn read(&self) -> Result<T, SvsmError> {
         let mut buf = MaybeUninit::<T>::uninit();
 
         unsafe {
@@ -211,13 +222,35 @@ impl<T: Copy> GuestPtr<T> {
         }
     }
 
+    /// # Safety
+    ///
+    /// The caller must verify that the write address (`self.ptr`) is valid
+    /// for writes:
+    /// - it must belong to a mapped memory region
+    /// - it must belong only to the issuer's memory space (a guest must not
+    ///   be able to write to the SVSM kernel's memory or another guest's
+    ///   memory)
+    ///
+    /// This function returns an error if it writes to an unmapped address
+    /// and the #PF or #GP handler returns an error.
     #[inline]
-    pub fn write(&self, buf: T) -> Result<(), SvsmError> {
+    pub unsafe fn write(&self, buf: T) -> Result<(), SvsmError> {
         unsafe { do_movsb(&buf, self.ptr) }
     }
 
+    /// # Safety
+    ///
+    /// The caller must verify that the write address (`self.ptr`) is valid
+    /// for writes:
+    /// - it must belong to a mapped memory region
+    /// - it must belong only to the issuer's memory space (a guest must not
+    ///   be able to write to the SVSM kernel's memory or another guest's
+    ///   memory)
+    ///
+    /// This function returns an error if it writes to an unmapped address
+    /// and the #PF or #GP handler returns an error.
     #[inline]
-    pub fn write_ref(&self, buf: &T) -> Result<(), SvsmError> {
+    pub unsafe fn write_ref(&self, buf: &T) -> Result<(), SvsmError> {
         unsafe { do_movsb(buf, self.ptr) }
     }
@@ -270,7 +303,8 @@ mod tests {
         let test_buffer = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14];
         let test_addr = VirtAddr::from(test_buffer.as_ptr());
         let ptr: GuestPtr<[u8; 15]> = GuestPtr::new(test_addr);
-        let result = ptr.read().unwrap();
+        // SAFETY: this is a test.
+        let result = unsafe { ptr.read().unwrap() };
 
         assert_eq!(result, test_buffer);
     }
@@ -280,8 +314,8 @@ mod tests {
     #[cfg_attr(not(test_in_svsm), ignore = "Can only be run inside guest")]
     fn test_read_invalid_address() {
         let ptr: GuestPtr<u8> = GuestPtr::new(VirtAddr::new(0xDEAD_BEEF));
-
-        let err = ptr.read();
+        // SAFETY: this is a test.
+        let err = unsafe { ptr.read() };
         assert!(err.is_err());
     }
 }
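
Read together, these safety comments describe a two-step contract at every call site: validate the address first, then discharge the obligation inside the unsafe block. A sketch under stated assumptions: is_guest_mapped() is a hypothetical validation helper and SvsmError::InvalidAddress a plausible error variant; neither is taken from this commit.

use crate::address::VirtAddr;
use crate::error::SvsmError;
use crate::mm::guestmem::GuestPtr;

/// Hypothetical helper: returns true iff the `len` bytes at `vaddr` are
/// mapped and belong exclusively to the issuing guest.
fn is_guest_mapped(vaddr: VirtAddr, len: usize) -> bool {
    // A real implementation would consult the page tables or the active
    // mapping guards.
    unimplemented!()
}

fn write_guest_u32(vaddr: VirtAddr, value: u32) -> Result<(), SvsmError> {
    if !is_guest_mapped(vaddr, core::mem::size_of::<u32>()) {
        return Err(SvsmError::InvalidAddress);
    }
    let ptr = GuestPtr::<u32>::new(vaddr);
    // SAFETY: the range was checked above to be mapped and to belong only
    // to the issuing guest, satisfying write()'s safety contract.
    unsafe { ptr.write(value) }
}
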
20 changes: 16 additions & 4 deletions kernel/src/protocols/core.rs
@@ -338,7 +338,9 @@ fn core_pvalidate(params: &RequestParams) -> Result<(), SvsmReqError> {
     let start = guard.virt_addr();
 
     let guest_page = GuestPtr::<PValidateRequest>::new(start + offset);
-    let mut request = guest_page.read()?;
+    // SAFETY: start is a newly mapped page address, thus valid. offset can't
+    // exceed a page size, so guest_page belongs to mapped memory.
+    let mut request = unsafe { guest_page.read()? };
 
     let entries = request.entries;
     let next = request.next;
Expand All @@ -356,7 +358,10 @@ fn core_pvalidate(params: &RequestParams) -> Result<(), SvsmReqError> {
let guest_entries = guest_page.offset(1).cast::<u64>();
for i in next..entries {
let index = i as isize;
let entry = match guest_entries.offset(index).read() {
// SAFETY: guest_entries comes from guest_page which is a new mapped
// page. index is between [next, entries) and both values have been
// validated.
let entry = match unsafe { guest_entries.offset(index).read() } {
Ok(v) => v,
Err(e) => {
loop_result = Err(e.into());
@@ -372,7 +377,11 @@ fn core_pvalidate(params: &RequestParams) -> Result<(), SvsmReqError> {
         }
     }
 
-    if let Err(e) = guest_page.write_ref(&request) {
+    // SAFETY: guest_page is obtained from a guest-provided physical address
+    // (untrusted), so it must be valid (i.e. belong to the guest and only to
+    // the guest). The physical address is validated by valid_phys_address(),
+    // called at the beginning of this handler (SVSM_CORE_PVALIDATE).
+    if let Err(e) = unsafe { guest_page.write_ref(&request) } {
         loop_result = Err(e.into());
     }
@@ -398,7 +407,10 @@ fn core_remap_ca(params: &RequestParams) -> Result<(), SvsmReqError> {
     let vaddr = mapping_guard.virt_addr() + offset;
 
     let pending = GuestPtr::<SvsmCaa>::new(vaddr);
-    pending.write(SvsmCaa::zeroed())?;
+    // SAFETY: pending points to a newly allocated page, not attacker-controlled.
+    // TODO(tleroy): verify that vaddr can't point to a different user or
+    // kernelspace?
+    unsafe { pending.write(SvsmCaa::zeroed())? };
 
     // Clear any pending interrupt state before remapping the calling area to
     // ensure that any pending lazy EOI has been processed.
14 changes: 12 additions & 2 deletions kernel/src/protocols/vtpm.rs
@@ -230,7 +230,9 @@ fn vtpm_command_request(params: &RequestParams) -> Result<(), SvsmReqError> {
     // IN: platform command
     // OUT: platform command response size
 
-    let command = GuestPtr::<u32>::new(vaddr).read()?;
+    // SAFETY: vaddr comes from a newly mapped region.
+    let command = unsafe { GuestPtr::<u32>::new(vaddr).read()? };
+
     let cmd = TpmPlatformCommand::try_from(command)?;
 
     if !is_vtpm_platform_command_supported(cmd) {
@@ -243,7 +245,15 @@ fn vtpm_command_request(params: &RequestParams) -> Result<(), SvsmReqError> {
         TpmPlatformCommand::SendCommand => tpm_send_command_request(buffer)?,
     };
 
-    GuestPtr::<u32>::new(vaddr).write(response_size)?;
+    // SAFETY: vaddr points to a newly mapped region. If
+    // paddr + size_of::<u32>() crosses into the following page, that is
+    // not a problem, since the end of the requested region is
+    // (paddr + PAGE_SIZE), which maps in another page. So
+    // write(response_size) can only happen on valid memory, mapped by
+    // PerCPUPageMappingGuard::create().
+    unsafe {
+        GuestPtr::<u32>::new(vaddr).write(response_size)?;
+    }
 
     Ok(())
 }
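
The page-crossing argument in the comment above can be sanity-checked with plain address arithmetic (a standalone sketch; the example address is made up):

const PAGE_SIZE: usize = 4096;

/// Does a u32 written at `addr` stay inside the `len`-byte region starting
/// at `start`?
fn u32_fits(addr: usize, start: usize, len: usize) -> bool {
    addr >= start && addr + core::mem::size_of::<u32>() <= start + len
}

fn main() {
    // The write target sits 2 bytes before a page boundary, so the 4-byte
    // write crosses into the following page...
    let paddr = 0x8000_0FFE;
    // ...but the handler requests the region [paddr, paddr + PAGE_SIZE),
    // so the following page is mapped too and the write stays in bounds.
    assert!(u32_fits(paddr, paddr, PAGE_SIZE));
}
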
12 changes: 10 additions & 2 deletions kernel/src/requests.rs
@@ -120,8 +120,16 @@ fn check_requests() -> Result<bool, SvsmReqError> {
     let vmsa_ref = cpu.guest_vmsa_ref();
     if let Some(caa_addr) = vmsa_ref.caa_addr() {
         let calling_area = GuestPtr::<SvsmCaa>::new(caa_addr);
-        let caa = calling_area.read()?;
-        calling_area.write(caa.serviced())?;
+        // SAFETY: HV and guests can't change the VMSA's content, and therefore the CAA address.
+        let caa = unsafe { calling_area.read()? };
+
+        let caa_serviced = caa.serviced();
+
+        // SAFETY: HV and guests can't change the VMSA's content, and therefore the CAA address.
+        unsafe {
+            calling_area.write(caa_serviced)?;
+        }
+
         Ok(caa.call_pending != 0)
     } else {
         Ok(false)
