kernel/mm/guestmem: make read() and write*() unsafe
Once a GuestPtr is created, its read(), write() and write_ref() methods
could grant an arbitrary read/write primitive if an attacker (HV or guest)
can control the GuestPtr.ptr value and the write*() parameters.

We need to verify that this is not the case for the current call sites, and
to enforce an unsafe block for future calls so that the caller must
validate the parameters.

Signed-off-by: Thomas Leroy <[email protected]>
p4zuu committed Jun 13, 2024
1 parent 1b56366 commit ccf23b5
Showing 5 changed files with 74 additions and 15 deletions.
6 changes: 5 additions & 1 deletion kernel/src/cpu/vc.rs
@@ -272,7 +272,11 @@ fn vc_decode_insn(ctx: &X86ExceptionContext) -> Result<Option<DecodedInsnCtx>, S
// rip and rip+15 addresses should belong to a mapped page.
// To ensure this, we rely on GuestPtr::read() that uses the exception table
// to handle faults while fetching.
-let insn_raw = rip.read()?;
+// SAFETY: rip can be any mapped address, including userspace. It's technically
+// not possible for a user to raise a #VC from a different context (different
+// user or kernelspace), so rip can't belong to a different user's or to kernel
+// memory.
+let insn_raw = unsafe { rip.read()? };

let insn = Instruction::new(insn_raw);
Ok(Some(insn.decode(ctx)?))
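
The exception-table mechanism the comment above relies on is worth spelling out. Conceptually, each fixup entry maps an instruction address that is allowed to fault to a recovery address, and the page-fault handler consults this table so that GuestPtr::read() can return an error for an unmapped rip instead of crashing. A sketch (the names below are illustrative, not the SVSM definitions):

// Illustrative sketch only, not the real SVSM exception table.
struct ExceptionTableEntry {
    fault_ip: usize, // address of the instruction allowed to fault
    fixup_ip: usize, // where execution resumes after a fault
}

fn lookup_fixup(table: &[ExceptionTableEntry], rip: usize) -> Option<usize> {
    // The #PF handler searches for the faulting RIP; a hit means the
    // fault is expected and execution continues at the fixup address.
    table.iter().find(|e| e.fault_ip == rip).map(|e| e.fixup_ip)
}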
32 changes: 26 additions & 6 deletions kernel/src/mm/guestmem.rs
@@ -196,8 +196,13 @@ impl<T: Copy> GuestPtr<T> {
Self { ptr: p }
}

+/// # Safety
+///
+/// Caller should verify that `self.ptr` is valid for reads and that an
+/// attacker can't control where `self.ptr` points to, as that could grant
+/// a potential arbitrary read primitive.
#[inline]
-pub fn read(&self) -> Result<T, SvsmError> {
+pub unsafe fn read(&self) -> Result<T, SvsmError> {
let mut buf = MaybeUninit::<T>::uninit();

unsafe {
@@ -206,13 +211,27 @@ impl<T: Copy> GuestPtr<T> {
}
}

+/// # Safety
+///
+/// Caller should verify that `self.ptr` and `buf` are valid for writes
+/// and that an attacker can't control where `self.ptr` points to, as that
+/// could grant a potential arbitrary write primitive.
+/// The caller should also verify that `self.ptr + size_of::<T>()` still
+/// points to valid memory, and that this memory belongs to the caller.
#[inline]
-pub fn write(&self, buf: T) -> Result<(), SvsmError> {
+pub unsafe fn write(&self, buf: T) -> Result<(), SvsmError> {
unsafe { do_movsb(&buf, self.ptr) }
}

+/// # Safety
+///
+/// Caller should verify that `self.ptr` and `buf` are valid for writes
+/// and that an attacker can't control where `self.ptr` points to, as that
+/// could grant a potential arbitrary write primitive.
+/// The caller should also verify that `self.ptr + size_of::<T>()` still
+/// points to valid memory, and that this memory belongs to the caller.
#[inline]
-pub fn write_ref(&self, buf: &T) -> Result<(), SvsmError> {
+pub unsafe fn write_ref(&self, buf: &T) -> Result<(), SvsmError> {
unsafe { do_movsb(buf, self.ptr) }
}
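
Both write paths funnel into do_movsb, which copies size_of::<T>() bytes with a rep movsb whose faulting address is registered in the exception table, so a bad pointer yields an Err instead of a crash. A simplified stand-in that elides the fault recovery (illustrative only, assuming SvsmError is in scope):

// Sketch: behaves like do_movsb on the happy path, but a fault here
// would not be recovered through the exception table.
unsafe fn do_movsb_sketch<T>(src: *const T, dst: *mut T) -> Result<(), SvsmError> {
    core::ptr::copy_nonoverlapping(src.cast::<u8>(), dst.cast::<u8>(), core::mem::size_of::<T>());
    Ok(())
}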

@@ -265,7 +284,8 @@ mod tests {
let test_buffer = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14];
let test_addr = VirtAddr::from(test_buffer.as_ptr());
let ptr: GuestPtr<[u8; 15]> = GuestPtr::new(test_addr);
-let result = ptr.read().unwrap();
+// SAFETY: this is a test
+let result = unsafe { ptr.read().unwrap() };

assert_eq!(result, test_buffer);
}
@@ -275,8 +295,8 @@
#[cfg_attr(not(test_in_svsm), ignore = "Can only be run inside guest")]
fn test_read_invalid_address() {
let ptr: GuestPtr<u8> = GuestPtr::new(VirtAddr::new(0xDEAD_BEEF));
-
-let err = ptr.read();
+// SAFETY: this is a test
+let err = unsafe { ptr.read() };
assert!(err.is_err());
}
}
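
With these signatures, every call site follows the same shape: validate or own the address, record the argument in a SAFETY comment, then call inside an unsafe block. A minimal sketch of a well-behaved caller (hypothetical function; assumes addr points into memory the caller mapped itself):

fn read_guest_u64(addr: VirtAddr) -> Result<u64, SvsmError> {
    let ptr: GuestPtr<u64> = GuestPtr::new(addr);
    // SAFETY: `addr` points into a page we mapped ourselves, so an
    // attacker can't steer where the read lands.
    unsafe { ptr.read() }
}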
25 changes: 21 additions & 4 deletions kernel/src/protocols/core.rs
@@ -323,7 +323,11 @@ fn core_pvalidate(params: &RequestParams) -> Result<(), SvsmReqError> {
let start = guard.virt_addr();

let guest_page = GuestPtr::<PValidateRequest>::new(start + offset);
-let mut request = guest_page.read()?;
+// SAFETY: start is a newly mapped page address, thus valid.
+// offset can't exceed a page size, so guest_page belongs to mapped memory.
+// TODO(tleroy): verify that guest_page can't point to a different guest,
+// or SVSM kernel memory?
+let mut request = unsafe { guest_page.read()? };

let entries = request.entries;
let next = request.next;
@@ -341,7 +345,10 @@ fn core_pvalidate(params: &RequestParams) -> Result<(), SvsmReqError> {
let guest_entries = guest_page.offset(1).cast::<u64>();
for i in next..entries {
let index = i as isize;
-let entry = match guest_entries.offset(index).read() {
+// SAFETY: guest_entries comes from guest_page, which is a newly mapped
+// page. index is within [next, entries) and both values have been
+// validated.
+let entry = match unsafe { guest_entries.offset(index).read() } {
Ok(v) => v,
Err(e) => {
loop_result = Err(e.into());
@@ -357,7 +364,14 @@ fn core_pvalidate(params: &RequestParams) -> Result<(), SvsmReqError> {
}
}

-if let Err(e) = guest_page.write_ref(&request) {
+// SAFETY: guest_page is obtained from a guest-provided physical address
+// (untrusted), so it needs to be valid (i.e. belong to the guest and only
+// the guest). The physical address is validated by valid_phys_address(),
+// called at the beginning of the SVSM_CORE_PVALIDATE handler (this one).
+// TODO(tleroy): this is not true if a guest can provide any address, such
+// as an SVSM kernel address. An attacker could put SVSM kernel addresses
+// in the pvalidate request entries.
+if let Err(e) = unsafe { guest_page.write_ref(&request) } {
loop_result = Err(e.into());
}
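
For reference, the loop above walks a guest page laid out as a small header followed by packed u64 entries, so guest_page.offset(1).cast::<u64>() lands exactly on entry 0. A sketch of that layout (field names and widths are assumptions for illustration, not the protocol definition):

#[repr(C)]
struct PValidateRequestSketch {
    entries: u16, // total number of u64 entries following the header
    next: u16,    // index of the first entry not yet processed
    _reserved: u32,
}
// guest_page.offset(1) points size_of::<PValidateRequestSketch>() bytes
// past (start + offset), i.e. at entry 0, and offset(index) selects
// entry `index` within the same mapped page.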

@@ -383,7 +397,10 @@ fn core_remap_ca(params: &RequestParams) -> Result<(), SvsmReqError> {
let vaddr = mapping_guard.virt_addr() + offset;

let pending = GuestPtr::<SvsmCaa>::new(vaddr);
-pending.write(SvsmCaa::zeroed())?;
+// SAFETY: pending points to a newly allocated page, not attacker-controlled.
+// TODO(tleroy): verify that vaddr can't point to a different user or
+// kernelspace?
+unsafe { pending.write(SvsmCaa::zeroed())? };

this_cpu_shared().update_guest_caa(gpa);

14 changes: 12 additions & 2 deletions kernel/src/protocols/vtpm.rs
@@ -230,7 +230,9 @@ fn vtpm_command_request(params: &RequestParams) -> Result<(), SvsmReqError> {
// IN: platform command
// OUT: platform command response size

-let command = GuestPtr::<u32>::new(vaddr).read()?;
+// SAFETY: vaddr comes from a newly mapped region.
+let command = unsafe { GuestPtr::<u32>::new(vaddr).read()? };

let cmd = TpmPlatformCommand::try_from(command)?;

if !is_vtpm_platform_command_supported(cmd) {
@@ -243,7 +245,15 @@
TpmPlatformCommand::SendCommand => tpm_send_command_request(buffer)?,
};

-GuestPtr::<u32>::new(vaddr).write(response_size)?;
+// SAFETY: vaddr points to a newly mapped region.
+// If paddr + size_of::<u32>() crosses into the following page, it should
+// not be a problem, since the end of the requested region is
+// (paddr + PAGE_SIZE), which requests another page. So
+// write(response_size) can only happen on valid memory, mapped
+// by PerCPUPageMappingGuard::create().
+unsafe {
+    GuestPtr::<u32>::new(vaddr).write(response_size)?;
+}

Ok(())
}
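
The bounds argument in that SAFETY comment can be made explicit. A hypothetical helper (the PAGE_SIZE value is an assumption for this sketch):

const PAGE_SIZE: usize = 4096; // assumption for this sketch

// True if a u32 written at `offset` stays within one mapped page.
fn u32_fits_in_page(offset: usize) -> bool {
    offset
        .checked_add(core::mem::size_of::<u32>())
        .map_or(false, |end| end <= PAGE_SIZE)
}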
12 changes: 10 additions & 2 deletions kernel/src/requests.rs
@@ -106,8 +106,16 @@ fn check_requests() -> Result<bool, SvsmReqError> {
let vmsa_ref = cpu.guest_vmsa_ref();
if let Some(caa_addr) = vmsa_ref.caa_addr() {
let calling_area = GuestPtr::<SvsmCaa>::new(caa_addr);
-let caa = calling_area.read()?;
-calling_area.write(caa.serviced())?;
+// SAFETY: HV and guests can't change the VMSA's content, and therefore the CAA.
+let caa = unsafe { calling_area.read()? };
+
+let caa_serviced = caa.serviced();
+
+// SAFETY: HV and guests can't change the VMSA's content, and therefore the CAA.
+unsafe {
+    calling_area.write(caa_serviced)?;
+}

Ok(caa.call_pending != 0)
} else {
Ok(false)
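
For context, caa.serviced() produces a copy of the calling area with the pending-call flag cleared, which is what gets written back through the GuestPtr. A sketch of what such a type could look like (the exact field set is an assumption):

#[derive(Clone, Copy)]
#[repr(C, packed)]
struct SvsmCaaSketch {
    call_pending: u8, // set by the guest to request servicing
    mem_available: u8,
    _reserved: [u8; 6],
}

impl SvsmCaaSketch {
    // Copy with the pending-call flag cleared, ready to write back.
    const fn serviced(self) -> Self {
        Self { call_pending: 0, ..self }
    }
}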
