diff --git a/kernel/src/mm/alloc.rs b/kernel/src/mm/alloc.rs index 793cbaeff..e389bf4d6 100644 --- a/kernel/src/mm/alloc.rs +++ b/kernel/src/mm/alloc.rs @@ -589,7 +589,12 @@ impl MemoryRegion { fn allocate_zeroed_page(&mut self) -> Result<VirtAddr, AllocError> { let vaddr = self.allocate_page()?; - zero_mem_region(vaddr, vaddr + PAGE_SIZE); + // SAFETY: we trust allocate_page() to return a pointer to a valid + // page. vaddr + PAGE_SIZE also correctly points to the end of the + // page. + unsafe { + zero_mem_region(vaddr, vaddr + PAGE_SIZE); + } Ok(vaddr) } @@ -1091,7 +1096,14 @@ pub fn allocate_zeroed_page() -> Result<VirtAddr, SvsmError> { /// `SvsmError` if allocation fails. pub fn allocate_file_page() -> Result<VirtAddr, SvsmError> { let vaddr = ROOT_MEM.lock().allocate_file_page()?; - zero_mem_region(vaddr, vaddr + PAGE_SIZE); + + // SAFETY: we trust allocate_file_page() to return a pointer to a valid + // page. vaddr + PAGE_SIZE also correctly points to the end of the + // page. + unsafe { + zero_mem_region(vaddr, vaddr + PAGE_SIZE); + } + Ok(vaddr) } diff --git a/kernel/src/platform/snp_fw.rs b/kernel/src/platform/snp_fw.rs index a5adb094e..c44f1cb91 100644 --- a/kernel/src/platform/snp_fw.rs +++ b/kernel/src/platform/snp_fw.rs @@ -208,7 +208,11 @@ fn validate_fw_mem_region( PageSize::Regular, )?; - zero_mem_region(vaddr, vaddr + PAGE_SIZE); + // SAFETY: we trust PerCPUPageMappingGuard::create_4k() to return a + // valid pointer to a correctly mapped region of size PAGE_SIZE. + unsafe { + zero_mem_region(vaddr, vaddr + PAGE_SIZE); + } } Ok(()) @@ -322,7 +326,11 @@ fn copy_secrets_page_to_fw( let start = guard.virt_addr(); // Zero target - zero_mem_region(start, start + PAGE_SIZE); + // SAFETY: we trust PerCPUPageMappingGuard::create_4k() to return a + // valid pointer to a correctly mapped region of size PAGE_SIZE.
+ unsafe { + zero_mem_region(start, start + PAGE_SIZE); + } // Copy secrets page let mut fw_secrets_page = secrets_page().copy_for_vmpl(GUEST_VMPL); @@ -345,7 +353,11 @@ fn zero_caa_page(fw_addr: PhysAddr) -> Result<(), SvsmError> { let guard = PerCPUPageMappingGuard::create_4k(fw_addr)?; let vaddr = guard.virt_addr(); - zero_mem_region(vaddr, vaddr + PAGE_SIZE); + // SAFETY: we trust PerCPUPageMappingGuard::create_4k() to return a + // valid pointer to a correctly mapped region of size PAGE_SIZE. + unsafe { + zero_mem_region(vaddr, vaddr + PAGE_SIZE); + } Ok(()) } diff --git a/kernel/src/protocols/core.rs b/kernel/src/protocols/core.rs index 123b48fe8..b93f7a679 100644 --- a/kernel/src/protocols/core.rs +++ b/kernel/src/protocols/core.rs @@ -312,7 +312,14 @@ fn core_pvalidate_one(entry: u64, flush: &mut bool) -> Result<(), SvsmReqError> // FIXME: This check leaves a window open for the attack described // above. Remove the check once OVMF and Linux have been fixed and // no longer try to pvalidate MMIO memory. - zero_mem_region(vaddr, vaddr + page_size_bytes); + + // SAFETY: paddr is validated at the beginning of the function, and + // we trust PerCPUPageMappingGuard::create() to return a valid + // vaddr pointing to a mapped region of at least page_size_bytes + // size. + unsafe { + zero_mem_region(vaddr, vaddr + page_size_bytes); + } } else { log::warn!("Not clearing possible read-only page at PA {:#x}", paddr); } diff --git a/kernel/src/svsm.rs b/kernel/src/svsm.rs index a25058f36..763e1dd5c 100755 --- a/kernel/src/svsm.rs +++ b/kernel/src/svsm.rs @@ -176,10 +176,9 @@ pub extern "C" fn svsm_start(li: &KernelLaunchInfo, vb_addr: usize) { // We trust stage 2 to give the value provided by IGVM. 
unsafe { secrets_page_mut().copy_from(secrets_page_virt); + zero_mem_region(secrets_page_virt, secrets_page_virt + PAGE_SIZE); } - zero_mem_region(secrets_page_virt, secrets_page_virt + PAGE_SIZE); - cr0_init(); cr4_init(&*platform); determine_cet_support(); diff --git a/kernel/src/utils/util.rs b/kernel/src/utils/util.rs index 45aa5c827..abfc78625 100644 --- a/kernel/src/utils/util.rs +++ b/kernel/src/utils/util.rs @@ -52,14 +52,22 @@ where x1 <= y2 && y1 <= x2 } -pub fn zero_mem_region(start: VirtAddr, end: VirtAddr) { - let size = end - start; +/// # Safety +/// +/// Caller should ensure [`core::ptr::write_bytes`] safety rules. +pub unsafe fn zero_mem_region(start: VirtAddr, end: VirtAddr) { if start.is_null() { panic!("Attempted to zero out a NULL pointer"); } + let count = end + .checked_sub(start.as_usize()) + .expect("Invalid size calculation") + .as_usize(); + // Zero region - unsafe { start.as_mut_ptr::<u8>().write_bytes(0, size) } + // SAFETY: the safety rules must be upheld by the caller. + unsafe { start.as_mut_ptr::<u8>().write_bytes(0, count) } } /// Obtain bit for a given position @@ -117,7 +125,11 @@ mod tests { let start = VirtAddr::from(data.as_mut_ptr()); let end = start + core::mem::size_of_val(&data); - zero_mem_region(start, end); + // SAFETY: start and end correctly point respectively to the start and + // end of data. + unsafe { + zero_mem_region(start, end); + } for byte in &data { assert_eq!(*byte, 0);