Skip to content

Commit

Permalink
utils/util: make zero_mem_region unsafe
Browse files Browse the repository at this point in the history
As advised by @Freax13 in coconut-svsm#359, if misused, zero_mem_region() can corrupt valid
memory by writing zeroes to it. Therefore, it should be unsafe so that callers
verify that the given region points to the intended memory.

Signed-off-by: Thomas Leroy <[email protected]>
  • Loading branch information
p4zuu committed Dec 6, 2024
1 parent f481237 commit 44909b6
Show file tree
Hide file tree
Showing 5 changed files with 54 additions and 12 deletions.
16 changes: 14 additions & 2 deletions kernel/src/mm/alloc.rs
Original file line number Diff line number Diff line change
Expand Up @@ -589,7 +589,12 @@ impl MemoryRegion {
/// Allocates a single page and fills it with zeroes before returning it.
///
/// # Errors
///
/// Returns [`AllocError`] if the underlying page allocation fails.
fn allocate_zeroed_page(&mut self) -> Result<VirtAddr, AllocError> {
    let page = self.allocate_page()?;

    // SAFETY: allocate_page() hands back the base address of a valid,
    // mapped page, so the range [page, page + PAGE_SIZE) covers exactly
    // that page and is safe to overwrite.
    unsafe {
        zero_mem_region(page, page + PAGE_SIZE);
    }

    Ok(page)
}
Expand Down Expand Up @@ -1091,7 +1096,14 @@ pub fn allocate_zeroed_page() -> Result<VirtAddr, SvsmError> {
/// `SvsmError` if allocation fails.
/// Allocates a file page from the root memory allocator and zeroes it
/// before handing it to the caller.
///
/// # Errors
///
/// Returns `SvsmError` if allocation fails.
pub fn allocate_file_page() -> Result<VirtAddr, SvsmError> {
    let addr = ROOT_MEM.lock().allocate_file_page()?;

    // SAFETY: the root allocator returned the base of a valid, mapped
    // page, so zeroing [addr, addr + PAGE_SIZE) touches only that page.
    unsafe {
        zero_mem_region(addr, addr + PAGE_SIZE);
    }

    Ok(addr)
}

Expand Down
18 changes: 15 additions & 3 deletions kernel/src/platform/snp_fw.rs
Original file line number Diff line number Diff line change
Expand Up @@ -208,7 +208,11 @@ fn validate_fw_mem_region(
PageSize::Regular,
)?;

zero_mem_region(vaddr, vaddr + PAGE_SIZE);
// SAFETY: we trust PerCPUPageMappingGuard::create_4k() to return a
// valid pointer to a correctly mapped region of size PAGE_SIZE.
unsafe {
zero_mem_region(vaddr, vaddr + PAGE_SIZE);
}
}

Ok(())
Expand Down Expand Up @@ -322,7 +326,11 @@ fn copy_secrets_page_to_fw(
let start = guard.virt_addr();

// Zero target
zero_mem_region(start, start + PAGE_SIZE);
// SAFETY: we trust PerCPUPageMappingGuard::create_4k() to return a
// valid pointer to a correctly mapped region of size PAGE_SIZE.
unsafe {
zero_mem_region(start, start + PAGE_SIZE);
}

// Copy secrets page
let mut fw_secrets_page = secrets_page().copy_for_vmpl(GUEST_VMPL);
Expand All @@ -345,7 +353,11 @@ fn zero_caa_page(fw_addr: PhysAddr) -> Result<(), SvsmError> {
let guard = PerCPUPageMappingGuard::create_4k(fw_addr)?;
let vaddr = guard.virt_addr();

zero_mem_region(vaddr, vaddr + PAGE_SIZE);
// SAFETY: we trust PerCPUPageMappingGuard::create_4k() to return a
// valid pointer to a correctly mapped region of size PAGE_SIZE.
unsafe {
zero_mem_region(vaddr, vaddr + PAGE_SIZE);
}

Ok(())
}
Expand Down
9 changes: 8 additions & 1 deletion kernel/src/protocols/core.rs
Original file line number Diff line number Diff line change
Expand Up @@ -312,7 +312,14 @@ fn core_pvalidate_one(entry: u64, flush: &mut bool) -> Result<(), SvsmReqError>
// FIXME: This check leaves a window open for the attack described
// above. Remove the check once OVMF and Linux have been fixed and
// no longer try to pvalidate MMIO memory.
zero_mem_region(vaddr, vaddr + page_size_bytes);

// SAFETY: paddr is validated at the beginning of the function, and
// we trust PerCPUPageMappingGuard::create() to return a valid
// vaddr pointing to a mapped region of at least page_size_bytes
// size.
unsafe {
zero_mem_region(vaddr, vaddr + page_size_bytes);
}
} else {
log::warn!("Not clearing possible read-only page at PA {:#x}", paddr);
}
Expand Down
3 changes: 1 addition & 2 deletions kernel/src/svsm.rs
Original file line number Diff line number Diff line change
Expand Up @@ -176,10 +176,9 @@ pub extern "C" fn svsm_start(li: &KernelLaunchInfo, vb_addr: usize) {
// We trust stage 2 to give the value provided by IGVM.
unsafe {
secrets_page_mut().copy_from(secrets_page_virt);
zero_mem_region(secrets_page_virt, secrets_page_virt + PAGE_SIZE);
}

zero_mem_region(secrets_page_virt, secrets_page_virt + PAGE_SIZE);

cr0_init();
cr4_init(&*platform);
determine_cet_support();
Expand Down
20 changes: 16 additions & 4 deletions kernel/src/utils/util.rs
Original file line number Diff line number Diff line change
Expand Up @@ -52,14 +52,22 @@ where
x1 <= y2 && y1 <= x2
}

pub fn zero_mem_region(start: VirtAddr, end: VirtAddr) {
let size = end - start;
/// # Safety
///
/// Caller should ensure [`core::ptr::write_bytes`] safety rules.
pub unsafe fn zero_mem_region(start: VirtAddr, end: VirtAddr) {
if start.is_null() {
panic!("Attempted to zero out a NULL pointer");
}

let count = end
.checked_sub(start.as_usize())
.expect("Invalid size calculation")
.as_usize();

// Zero region
unsafe { start.as_mut_ptr::<u8>().write_bytes(0, size) }
// SAFETY: the safety rules must be upheld by the caller.
unsafe { start.as_mut_ptr::<u8>().write_bytes(0, count) }
}

/// Obtain bit for a given position
Expand Down Expand Up @@ -117,7 +125,11 @@ mod tests {
let start = VirtAddr::from(data.as_mut_ptr());
let end = start + core::mem::size_of_val(&data);

zero_mem_region(start, end);
// SAFETY: start and end correctly point respectively to the start and
// end of data.
unsafe {
zero_mem_region(start, end);
}

for byte in &data {
assert_eq!(*byte, 0);
Expand Down

0 comments on commit 44909b6

Please sign in to comment.