Make read/write methods from GuestPtr unsafe, and copy_{from, to} methods from SecretsPage as well #382

Merged · 3 commits · Jul 1, 2024
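In practice, the change turns every guest-memory access into an explicitly audited operation. A minimal before/after sketch, distilled from the apic.rs hunks below (the framing is illustrative, not part of the diff):

// Before this PR: a safe call; the validity of the guest pointer is implicit.
let caa = calling_area.read()?;

// After this PR: each call site opts in and records why the access is sound.
// SAFETY: the calling area address was validated when it was remapped
// (core_remap_ca()), so it points to mapped guest memory.
let caa = unsafe { calling_area.read()? };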
20 changes: 15 additions & 5 deletions kernel/src/cpu/apic.rs
@@ -205,7 +205,10 @@ impl LocalApic {
if self.lazy_eoi_pending {
if let Some(virt_addr) = caa_addr {
let calling_area = GuestPtr::<SvsmCaa>::new(virt_addr);
if let Ok(caa) = calling_area.read() {
// SAFETY: guest vmsa and ca are always validated before being updated
// (core_remap_ca(), core_create_vcpu() or prepare_fw_launch())
// so they're safe to use.
if let Ok(caa) = unsafe { calling_area.read() } {
if caa.no_eoi_required == 0 {
assert!(self.isr_stack_index != 0);
self.perform_eoi();
@@ -240,8 +243,11 @@ impl LocalApic {
let virt_addr = caa_addr?;
let calling_area = GuestPtr::<SvsmCaa>::new(virt_addr);
// Ignore errors here, since nothing can be done if an error occurs.
if let Ok(caa) = calling_area.read() {
let _ = calling_area.write(caa.update_no_eoi_required(0));
// SAFETY: guest vmsa and ca are always validated before being updated
// (core_remap_ca(), core_create_vcpu() or prepare_fw_launch()) so
// they're safe to use.
if let Ok(caa) = unsafe { calling_area.read() } {
let _ = unsafe { calling_area.write(caa.update_no_eoi_required(0)) };
}
Some(calling_area)
}
@@ -363,8 +369,12 @@ impl LocalApic {
// delivery of the next interrupt.
if self.scan_irr() == 0 {
if let Some(calling_area) = guest_caa {
if let Ok(caa) = calling_area.read() {
if calling_area.write(caa.update_no_eoi_required(1)).is_ok() {
// SAFETY: guest vmsa and ca are always validated before being updated
// (core_remap_ca(), core_create_vcpu() or prepare_fw_launch())
// so they're safe to use.
if let Ok(caa) = unsafe { calling_area.read() } {
if unsafe { calling_area.write(caa.update_no_eoi_required(1)).is_ok() }
{
// Only track a pending lazy EOI if the
// calling area page could successfully be
// updated.
5 changes: 4 additions & 1 deletion kernel/src/cpu/vc.rs
@@ -272,7 +272,10 @@ fn vc_decode_insn(ctx: &X86ExceptionContext) -> Result<Option<DecodedInsnCtx>, S
// rip and rip+15 addresses should belong to a mapped page.
// To ensure this, we rely on GuestPtr::read() that uses the exception table
// to handle faults while fetching.
let insn_raw = rip.read()?;
// SAFETY: we trust the CPU-provided register state to be valid. Thus, RIP
// will point to the instruction that caused #VC to be raised, so it can
// safely be read.
let insn_raw = unsafe { rip.read()? };

let insn = Instruction::new(insn_raw);
Ok(Some(insn.decode(ctx)?))
25 changes: 18 additions & 7 deletions kernel/src/debug/gdbstub.rs
@@ -394,13 +394,22 @@ pub mod svsm_gdbstub {
// mapping

let Ok(phys) = this_cpu().get_pgtable().phys_addr(addr) else {
// The virtual address is not one that SVSM has mapped. Try safely
// writing it to the original virtual address
return write_u8(addr, value);
// The virtual address is not one that SVSM has mapped.
// Try safely writing it to the original virtual address
// SAFETY: it is up to the user to ensure that the address we
// are writing a breakpoint to is valid.
return unsafe { write_u8(addr, value) };
};

let guard = PerCPUPageMappingGuard::create_4k(phys.page_align())?;
write_u8(guard.virt_addr() + phys.page_offset(), value)
let dst = guard
.virt_addr()
.checked_add(phys.page_offset())
.ok_or(SvsmError::InvalidAddress)?;

// SAFETY: guard is a newly mapped page, not controllable by the user.
// We also checked that the destination address didn't overflow.
unsafe { write_u8(dst, value) }
}
}

@@ -532,9 +541,11 @@ pub mod svsm_gdbstub {
) -> gdbstub::target::TargetResult<(), Self> {
let start_addr = VirtAddr::from(start_addr);
for (off, src) in data.iter().enumerate() {
if write_u8(start_addr + off, *src).is_err() {
return Err(TargetError::NonFatal);
}
let dst = start_addr.checked_add(off).ok_or(TargetError::NonFatal)?;

// SAFETY: We trust the caller of this trait method to provide a valid address.
// We only checked that start_addr + off didn't overflow.
unsafe { write_u8(dst, *src).map_err(|_| TargetError::NonFatal)? }
}
Ok(())
}
18 changes: 12 additions & 6 deletions kernel/src/igvm_params.rs
@@ -196,29 +196,35 @@ impl IgvmParams<'_> {
return Err(SvsmError::Firmware);
}

mem_map
.offset(i as isize)
.write(IGVM_VHS_MEMORY_MAP_ENTRY {
// SAFETY: mem_map_va points to newly mapped memory, whose physical
// address is defined in the IGVM config.
unsafe {
mem_map.offset(i as isize).write(IGVM_VHS_MEMORY_MAP_ENTRY {
starting_gpa_page_number: u64::from(entry.start()) / PAGE_SIZE as u64,
number_of_pages: entry.len() as u64 / PAGE_SIZE as u64,
entry_type: MemoryMapEntryType::default(),
flags: 0,
reserved: 0,
})?;
}
}

// Write a zero page count into the last entry to terminate the list.
let index = map.len();
if index < max_entries {
mem_map
.offset(index as isize)
.write(IGVM_VHS_MEMORY_MAP_ENTRY {
// SAFETY: mem_map_va points to newly mapped memory, whose physical
// address is defined in the IGVM config.
unsafe {
mem_map.offset(index as isize).write(IGVM_VHS_MEMORY_MAP_ENTRY {
starting_gpa_page_number: 0,
number_of_pages: 0,
entry_type: MemoryMapEntryType::default(),
flags: 0,
reserved: 0,
})?;
}
}

Ok(())
58 changes: 50 additions & 8 deletions kernel/src/mm/guestmem.rs
@@ -39,9 +39,20 @@ pub fn read_u8(v: VirtAddr) -> Result<u8, SvsmError> {
}
}

/// Writes 1 byte at a virtual address.
///
/// # Safety
///
/// The caller must ensure that the write cannot corrupt arbitrary memory, as
/// this function performs no checks of its own.
///
/// # Returns
///
/// Returns an error if the specified address is not mapped or is not mapped
/// with the appropriate write permissions.
#[allow(dead_code)]
#[inline]
pub fn write_u8(v: VirtAddr, val: u8) -> Result<(), SvsmError> {
pub unsafe fn write_u8(v: VirtAddr, val: u8) -> Result<(), SvsmError> {
let mut rcx: u64;

unsafe {
@@ -189,8 +200,16 @@ impl<T: Copy> GuestPtr<T> {
Self { ptr: p }
}

/// # Safety
///
/// The caller must ensure that the read cannot access arbitrary memory, as
/// this function performs no checks of its own.
///
/// # Returns
///
/// Returns an error if the specified address is not mapped.
#[inline]
pub fn read(&self) -> Result<T, SvsmError> {
pub unsafe fn read(&self) -> Result<T, SvsmError> {
let mut buf = MaybeUninit::<T>::uninit();

unsafe {
@@ -199,13 +218,31 @@
}
}

/// # Safety
///
/// The caller must ensure that the write cannot corrupt arbitrary memory, as
/// this function performs no checks of its own.
///
/// # Returns
///
/// Returns an error if the specified address is not mapped or is not mapped
/// with the appropriate write permissions.
#[inline]
pub fn write(&self, buf: T) -> Result<(), SvsmError> {
pub unsafe fn write(&self, buf: T) -> Result<(), SvsmError> {
unsafe { do_movsb(&buf, self.ptr) }
}

/// # Safety
///
/// The caller must ensure that the write cannot corrupt arbitrary memory, as
/// this function performs no checks of its own.
///
/// # Returns
///
/// Returns an error if the specified address is not mapped or is not mapped
/// with the appropriate write permissions.
#[inline]
pub fn write_ref(&self, buf: &T) -> Result<(), SvsmError> {
pub unsafe fn write_ref(&self, buf: &T) -> Result<(), SvsmError> {
unsafe { do_movsb(buf, self.ptr) }
}

@@ -244,7 +281,10 @@ mod tests {
let test_address = VirtAddr::from(test_buffer.as_mut_ptr());
let data_to_write = 0x42;

write_u8(test_address, data_to_write).unwrap();
// SAFETY: test_address points to the virtual address of test_buffer.
unsafe {
write_u8(test_address, data_to_write).unwrap();
}

assert_eq!(test_buffer[0], data_to_write);
}
@@ -255,7 +295,8 @@
let test_buffer = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14];
let test_addr = VirtAddr::from(test_buffer.as_ptr());
let ptr: GuestPtr<[u8; 15]> = GuestPtr::new(test_addr);
let result = ptr.read().unwrap();
// SAFETY: ptr points to test_buffer's virtual address
let result = unsafe { ptr.read().unwrap() };

assert_eq!(result, test_buffer);
}
@@ -265,8 +306,9 @@
#[cfg_attr(not(test_in_svsm), ignore = "Can only be run inside guest")]
fn test_read_invalid_address() {
let ptr: GuestPtr<u8> = GuestPtr::new(VirtAddr::new(0xDEAD_BEEF));

let err = ptr.read();
// SAFETY: ptr points to an invalid virtual address (0xDEADBEEF is
// unmapped). ptr.read() will return an error but this is expected.
let err = unsafe { ptr.read() };
assert!(err.is_err());
}
}
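Taken together, the guestmem.rs changes move the safety obligation to call sites. A hypothetical helper sketching the pattern new callers must follow (the `use` paths are assumed from the crate layout; `bump_counter` is not part of this PR):

use crate::address::VirtAddr;
use crate::error::SvsmError;
use crate::mm::guestmem::GuestPtr;

/// Hypothetical example: read-modify-write a u64 living in guest memory.
fn bump_counter(addr: VirtAddr) -> Result<u64, SvsmError> {
    let ptr = GuestPtr::<u64>::new(addr);
    // SAFETY: the caller guarantees that `addr` is mapped and writable for
    // at least size_of::<u64>() bytes; a fault on an unmapped address is
    // still reported as Err through the exception-table handling.
    let old = unsafe { ptr.read()? };
    unsafe { ptr.write(old + 1)? };
    Ok(old)
}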
18 changes: 14 additions & 4 deletions kernel/src/protocols/core.rs
@@ -338,7 +338,9 @@ fn core_pvalidate(params: &RequestParams) -> Result<(), SvsmReqError> {
let start = guard.virt_addr();

let guest_page = GuestPtr::<PValidateRequest>::new(start + offset);
let mut request = guest_page.read()?;
// SAFETY: start is the address of a newly mapped page, thus valid.
// offset can't exceed a page size, so guest_page belongs to mapped memory.
let mut request = unsafe { guest_page.read()? };

let entries = request.entries;
let next = request.next;
@@ -356,7 +358,10 @@
let guest_entries = guest_page.offset(1).cast::<u64>();
for i in next..entries {
let index = i as isize;
let entry = match guest_entries.offset(index).read() {
// SAFETY: guest_entries comes from guest_page, which is a newly mapped
// page. index is between [next, entries) and both values have been
// validated.
let entry = match unsafe { guest_entries.offset(index).read() } {
Ok(v) => v,
Err(e) => {
loop_result = Err(e.into());
Expand All @@ -372,7 +377,11 @@ fn core_pvalidate(params: &RequestParams) -> Result<(), SvsmReqError> {
}
}

if let Err(e) = guest_page.write_ref(&request) {
// SAFETY: guest_page is obtained from a guest-provided physical address
// (untrusted), so it needs to be valid (i.e. belongs to the guest and only
// the guest). The physical address is validated by valid_phys_address()
// called at the beginning of SVSM_CORE_PVALIDATE handler (this one).
if let Err(e) = unsafe { guest_page.write_ref(&request) } {
loop_result = Err(e.into());
}

Expand All @@ -398,7 +407,8 @@ fn core_remap_ca(params: &RequestParams) -> Result<(), SvsmReqError> {
let vaddr = mapping_guard.virt_addr() + offset;

let pending = GuestPtr::<SvsmCaa>::new(vaddr);
pending.write(SvsmCaa::zeroed())?;
// SAFETY: pending points to a newly allocated page
unsafe { pending.write(SvsmCaa::zeroed())? };

// Clear any pending interrupt state before remapping the calling area to
// ensure that any pending lazy EOI has been processed.
14 changes: 12 additions & 2 deletions kernel/src/protocols/vtpm.rs
@@ -230,7 +230,9 @@ fn vtpm_command_request(params: &RequestParams) -> Result<(), SvsmReqError> {
// IN: platform command
// OUT: platform command response size

let command = GuestPtr::<u32>::new(vaddr).read()?;
// SAFETY: vaddr comes from a newly mapped region.
let command = unsafe { GuestPtr::<u32>::new(vaddr).read()? };

let cmd = TpmPlatformCommand::try_from(command)?;

if !is_vtpm_platform_command_supported(cmd) {
@@ -243,7 +245,15 @@
TpmPlatformCommand::SendCommand => tpm_send_command_request(buffer)?,
};

GuestPtr::<u32>::new(vaddr).write(response_size)?;
// SAFETY: vaddr points to a newly mapped region.
// If paddr + size_of::<u32>() spills into the following page, it should
// not be a problem since the end of the requested region is
// (paddr + PAGE_SIZE), which requests another page. So
// write(response_size) can only happen on valid memory, mapped
// by PerCPUPageMappingGuard::create().
unsafe {
GuestPtr::<u32>::new(vaddr).write(response_size)?;
}

Ok(())
}
15 changes: 13 additions & 2 deletions kernel/src/requests.rs
@@ -120,8 +120,19 @@ fn check_requests() -> Result<bool, SvsmReqError> {
let vmsa_ref = cpu.guest_vmsa_ref();
if let Some(caa_addr) = vmsa_ref.caa_addr() {
let calling_area = GuestPtr::<SvsmCaa>::new(caa_addr);
let caa = calling_area.read()?;
calling_area.write(caa.serviced())?;
// SAFETY: guest vmsa and ca are always validated before being updated
// (core_remap_ca(), core_create_vcpu() or prepare_fw_launch()) so
// they're safe to use.
let caa = unsafe { calling_area.read()? };

let caa_serviced = caa.serviced();

// SAFETY: guest vmsa is always validated before being updated
// (core_remap_ca() or core_create_vcpu()) so it's safe to use.
unsafe {
calling_area.write(caa_serviced)?;
}

Ok(caa.call_pending != 0)
} else {
Ok(false)
21 changes: 18 additions & 3 deletions kernel/src/sev/secrets_page.rs
@@ -37,7 +37,7 @@ pub struct SecretsPage {

impl SecretsPage {
pub const fn new() -> Self {
SecretsPage {
Self {
version: 0,
gctxt: 0,
fms: 0,
@@ -57,15 +57,30 @@ impl SecretsPage {
}
}

pub fn copy_from(&mut self, source: VirtAddr) {
/// Copy a secrets page's content pointed to by a [`VirtAddr`]
///
/// # Safety
///
/// The caller must ensure that `source` points to mapped memory whose
/// size is at least the size of the [`SecretsPage`] structure.
pub unsafe fn copy_from(&mut self, source: VirtAddr) {
let from = source.as_ptr::<SecretsPage>();

unsafe {
*self = *from;
}
}

pub fn copy_to(&self, target: VirtAddr) {
/// Copy a secrets page's content to memory pointed to by a [`VirtAddr`]
///
/// # Safety
///
/// The caller must ensure that `target` points to mapped memory whose
/// size is at least the size of the [`SecretsPage`] structure.
///
/// The caller must also ensure that the write cannot corrupt arbitrary
/// memory, as this function performs no checks of its own.
pub unsafe fn copy_to(&self, target: VirtAddr) {
let to = target.as_mut_ptr::<SecretsPage>();

unsafe {
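The SecretsPage methods now follow the same convention. A hedged usage sketch (assuming `source` and `target` are valid, suitably sized mappings obtained elsewhere, e.g. via PerCPUPageMappingGuard):

let mut page = SecretsPage::new();
// SAFETY: `source` maps at least size_of::<SecretsPage>() readable bytes.
unsafe { page.copy_from(source) };
// SAFETY: `target` maps a writable region at least as large as a
// SecretsPage, and nothing else aliases it.
unsafe { page.copy_to(target) };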