Merge pull request #97 from joergroedel/virtual-memory
Implement Virtual Memory Manager
joergroedel authored Oct 16, 2023
2 parents c199984 + 7f1840f · commit dde36a0
Showing 21 changed files with 1,488 additions and 43 deletions.
19 changes: 19 additions & 0 deletions Cargo.lock

(Generated file; diff not rendered.)

1 change: 1 addition & 0 deletions Cargo.toml
@@ -21,6 +21,7 @@ doctest = true
 bitflags = "2.4"
 gdbstub = { version = "0.6.6", default-features = false, optional = true }
 gdbstub_arch = { version = "0.2.4", optional = true }
+intrusive-collections = "0.9.6"
 log = { version = "0.4.17", features = ["max_level_info", "release_max_level_info"] }
 packit = { git = "https://github.com/coconut-svsm/packit", version = "0.1.0" }
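Note: the new intrusive-collections dependency is the only manifest change; it is presumably what the virtual memory manager uses to keep mapping nodes in an intrusive red-black tree. A minimal sketch of that crate's RBTree API, with illustrative names that are not the PR's actual types:

    use intrusive_collections::{intrusive_adapter, KeyAdapter, RBTree, RBTreeLink};

    struct Region {
        link: RBTreeLink, // link lives inside the node; the tree owns no separate allocations
        start: u64,
    }

    intrusive_adapter!(RegionAdapter = Box<Region>: Region { link: RBTreeLink });

    impl<'a> KeyAdapter<'a> for RegionAdapter {
        type Key = u64;
        fn get_key(&self, r: &'a Region) -> u64 {
            r.start
        }
    }

    fn demo() {
        let mut tree = RBTree::new(RegionAdapter::new());
        tree.insert(Box::new(Region { link: RBTreeLink::new(), start: 0x1000 }));
        assert!(tree.find(&0x1000).get().is_some()); // O(log n) lookup by start address
    }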
6 changes: 5 additions & 1 deletion src/address.rs
@@ -4,7 +4,7 @@
 //
 // Author: Carlos López <[email protected]>
 
-use crate::types::PAGE_SIZE;
+use crate::types::{PAGE_SHIFT, PAGE_SIZE};
 use core::fmt;
 use core::ops;

@@ -73,6 +73,10 @@ pub trait Address:
         let x2 = (start + size - 1) / PAGE_SIZE;
         x1 != x2
     }
+
+    fn pfn(&self) -> InnerAddr {
+        self.bits() >> PAGE_SHIFT
+    }
 }
 
 #[derive(Clone, Copy, Debug, Default, PartialEq, Eq, PartialOrd, Ord)]
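The new pfn() helper derives a page frame number from the raw address bits. A quick worked example, assuming 4 KiB pages (PAGE_SHIFT == 12):

    let va = VirtAddr::from(0x1234_5000usize);
    assert_eq!(va.pfn(), 0x12345); // 0x1234_5000 >> 12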
110 changes: 80 additions & 30 deletions src/cpu/percpu.rs
@@ -14,18 +14,19 @@ use crate::cpu::vmsa::init_guest_vmsa;
 use crate::error::SvsmError;
 use crate::locking::{LockGuard, RWLock, SpinLock};
 use crate::mm::alloc::{allocate_page, allocate_zeroed_page};
-use crate::mm::pagetable::{get_init_pgtable_locked, PageTable, PageTableRef};
-use crate::mm::stack::{allocate_stack_addr, stack_base_pointer};
+use crate::mm::pagetable::{get_init_pgtable_locked, PTEntryFlags, PageTable, PageTableRef};
 use crate::mm::virtualrange::VirtualRange;
+use crate::mm::vm::{Mapping, VMKernelStack, VMPhysMem, VMReserved, VMR};
 use crate::mm::{
-    virt_to_phys, SVSM_PERCPU_BASE, SVSM_PERCPU_CAA_BASE, SVSM_PERCPU_TEMP_BASE_2M,
-    SVSM_PERCPU_TEMP_BASE_4K, SVSM_PERCPU_TEMP_END_2M, SVSM_PERCPU_TEMP_END_4K,
-    SVSM_PERCPU_VMSA_BASE, SVSM_STACKS_INIT_TASK, SVSM_STACK_IST_DF_BASE,
+    virt_to_phys, SVSM_PERCPU_BASE, SVSM_PERCPU_CAA_BASE, SVSM_PERCPU_END,
+    SVSM_PERCPU_TEMP_BASE_2M, SVSM_PERCPU_TEMP_BASE_4K, SVSM_PERCPU_TEMP_END_2M,
+    SVSM_PERCPU_TEMP_END_4K, SVSM_PERCPU_VMSA_BASE, SVSM_STACKS_INIT_TASK, SVSM_STACK_IST_DF_BASE,
 };
 use crate::sev::ghcb::GHCB;
 use crate::sev::utils::RMPFlags;
 use crate::sev::vmsa::{allocate_new_vmsa, VMSASegment, VMSA};
 use crate::types::{PAGE_SHIFT, PAGE_SHIFT_2M, PAGE_SIZE, PAGE_SIZE_2M, SVSM_TR_FLAGS, SVSM_TSS};
+use alloc::sync::Arc;
 use alloc::vec::Vec;
 use core::cell::UnsafeCell;
 use core::ptr;

@@ -183,14 +184,23 @@ pub struct PerCpu {
     guest_vmsa: SpinLock<GuestVmsaRef>,
     reset_ip: u64,
 
+    /// PerCpu Virtual Memory Range
+    vm_range: VMR,
+
     /// Address allocator for per-cpu 4k temporary mappings
     pub vrange_4k: VirtualRange,
     /// Address allocator for per-cpu 2m temporary mappings
     pub vrange_2m: VirtualRange,
 }
 
+impl Default for PerCpu {
+    fn default() -> Self {
+        Self::new()
+    }
+}
+
 impl PerCpu {
-    pub const fn new() -> Self {
+    pub fn new() -> Self {
         PerCpu {
             online: AtomicBool::new(false),
             apic_id: 0,

@@ -202,6 +212,7 @@ impl PerCpu {
             svsm_vmsa: None,
             guest_vmsa: SpinLock::new(GuestVmsaRef::new()),
             reset_ip: 0xffff_fff0u64,
+            vm_range: VMR::new(SVSM_PERCPU_BASE, SVSM_PERCPU_END, PTEntryFlags::GLOBAL),
             vrange_4k: VirtualRange::new(),
             vrange_2m: VirtualRange::new(),
         }

@@ -231,8 +242,11 @@ impl PerCpu {
     }
 
     fn allocate_page_table(&mut self) -> Result<(), SvsmError> {
-        let pgtable_ref = get_init_pgtable_locked().clone_shared()?;
+        self.vm_range.initialize()?;
+        let mut pgtable_ref = get_init_pgtable_locked().clone_shared()?;
+        self.vm_range.populate(&mut pgtable_ref);
         self.set_pgtable(pgtable_ref);
+
         Ok(())
     }
 

@@ -241,20 +255,23 @@
         *my_pgtable = pgtable;
     }
 
+    fn allocate_stack(&mut self, base: VirtAddr) -> Result<VirtAddr, SvsmError> {
+        let stack = VMKernelStack::new()?;
+        let top_of_stack = stack.top_of_stack(base);
+        let mapping = Arc::new(Mapping::new(stack));
+
+        self.vm_range.insert_at(base, mapping)?;
+
+        Ok(top_of_stack)
+    }
+
     fn allocate_init_stack(&mut self) -> Result<(), SvsmError> {
-        let addr = SVSM_STACKS_INIT_TASK;
-        allocate_stack_addr(addr, &mut self.get_pgtable())
-            .expect("Failed to allocate per-cpu init stack");
-        self.init_stack = Some(addr);
+        self.init_stack = Some(self.allocate_stack(SVSM_STACKS_INIT_TASK)?);
         Ok(())
     }
 
     fn allocate_ist_stacks(&mut self) -> Result<(), SvsmError> {
-        let addr = SVSM_STACK_IST_DF_BASE;
-        allocate_stack_addr(addr, &mut self.get_pgtable())
-            .expect("Failed to allocate percpu double-fault stack");
-
-        self.ist.double_fault_stack = Some(addr);
+        self.ist.double_fault_stack = Some(self.allocate_stack(SVSM_STACK_IST_DF_BASE)?);
         Ok(())
     }
 

@@ -273,28 +290,63 @@ impl PerCpu {
     }
 
     pub fn get_top_of_stack(&self) -> VirtAddr {
-        stack_base_pointer(self.init_stack.unwrap())
+        self.init_stack.unwrap()
     }
 
+    pub fn get_top_of_df_stack(&self) -> VirtAddr {
+        self.ist.double_fault_stack.unwrap()
+    }
+
     fn setup_tss(&mut self) {
-        self.tss.ist_stacks[IST_DF] = stack_base_pointer(self.ist.double_fault_stack.unwrap());
+        self.tss.ist_stacks[IST_DF] = self.ist.double_fault_stack.unwrap();
     }
 
-    pub fn map_self(&mut self) -> Result<(), SvsmError> {
+    pub fn map_self_stage2(&mut self) -> Result<(), SvsmError> {
         let vaddr = VirtAddr::from(self as *const PerCpu);
         let paddr = virt_to_phys(vaddr);
         let flags = PageTable::data_flags();
 
         self.get_pgtable().map_4k(SVSM_PERCPU_BASE, paddr, flags)
     }
 
+    pub fn map_self(&mut self) -> Result<(), SvsmError> {
+        let vaddr = VirtAddr::from(self as *const PerCpu);
+        let paddr = virt_to_phys(vaddr);
+
+        let self_mapping = Arc::new(VMPhysMem::new_mapping(paddr, PAGE_SIZE, true));
+        self.vm_range.insert_at(SVSM_PERCPU_BASE, self_mapping)?;
+
+        Ok(())
+    }
+
+    fn initialize_vm_ranges(&mut self) -> Result<(), SvsmError> {
+        let size_4k = SVSM_PERCPU_TEMP_END_4K - SVSM_PERCPU_TEMP_BASE_4K;
+        let temp_mapping_4k = Arc::new(VMReserved::new_mapping(size_4k));
+        self.vm_range
+            .insert_at(SVSM_PERCPU_TEMP_BASE_4K, temp_mapping_4k)?;
+
+        let size_2m = SVSM_PERCPU_TEMP_END_2M - SVSM_PERCPU_TEMP_BASE_2M;
+        let temp_mapping_2m = Arc::new(VMReserved::new_mapping(size_2m));
+        self.vm_range
+            .insert_at(SVSM_PERCPU_TEMP_BASE_2M, temp_mapping_2m)?;
+
+        Ok(())
+    }
+
+    pub fn dump_vm_ranges(&self) {
+        self.vm_range.dump_ranges();
+    }
+
     pub fn setup(&mut self) -> Result<(), SvsmError> {
         // Allocate page-table
         self.allocate_page_table()?;
 
         // Map PerCpu data in own page-table
         self.map_self()?;
 
+        // Reserve ranges for temporary mappings
+        self.initialize_vm_ranges()?;
+
         // Setup GHCB
         self.setup_ghcb()?;
 

@@ -377,16 +429,15 @@
 
     pub fn unmap_guest_vmsa(&self) {
         assert!(self.apic_id == this_cpu().get_apic_id());
-        self.get_pgtable().unmap_4k(SVSM_PERCPU_VMSA_BASE);
+        // Ignore errors - the mapping might or might not be there
+        let _ = self.vm_range.remove(SVSM_PERCPU_VMSA_BASE);
     }
 
     pub fn map_guest_vmsa(&self, paddr: PhysAddr) -> Result<(), SvsmError> {
         assert!(self.apic_id == this_cpu().get_apic_id());
-
-        let flags = PageTable::data_flags();
-
-        self.get_pgtable()
-            .map_4k(SVSM_PERCPU_VMSA_BASE, paddr, flags)?;
+        let vmsa_mapping = Arc::new(VMPhysMem::new_mapping(paddr, PAGE_SIZE, true));
+        self.vm_range
+            .insert_at(SVSM_PERCPU_VMSA_BASE, vmsa_mapping)?;
 
         Ok(())
     }

@@ -443,16 +494,15 @@ impl PerCpu {
     }
 
     pub fn unmap_caa(&self) {
-        self.get_pgtable().unmap_4k(SVSM_PERCPU_CAA_BASE);
+        // Ignore errors - the mapping might or might not be there
+        let _ = self.vm_range.remove(SVSM_PERCPU_CAA_BASE);
     }
 
     pub fn map_guest_caa(&self, paddr: PhysAddr) -> Result<(), SvsmError> {
         self.unmap_caa();
 
-        let flags = PageTable::data_flags();
-
-        self.get_pgtable()
-            .map_4k(SVSM_PERCPU_CAA_BASE, paddr.page_align(), flags)?;
+        let caa_mapping = Arc::new(VMPhysMem::new_mapping(paddr, PAGE_SIZE, true));
+        self.vm_range.insert_at(SVSM_PERCPU_CAA_BASE, caa_mapping)?;
 
         Ok(())
     }
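Taken together, the percpu changes replace direct map_4k()/unmap_4k() page-table calls with typed mapping objects owned by a per-cpu VMR. A condensed sketch of the life cycle, with signatures inferred from the call sites above rather than from the vm module itself:

    // 1. Create the range covering the per-cpu virtual region.
    let mut vmr = VMR::new(SVSM_PERCPU_BASE, SVSM_PERCPU_END, PTEntryFlags::GLOBAL);
    vmr.initialize()?;

    // 2. Wire it into a freshly cloned page-table.
    let mut pgtable = get_init_pgtable_locked().clone_shared()?;
    vmr.populate(&mut pgtable);

    // 3. Insert mapping objects at fixed virtual addresses...
    let stack = Arc::new(Mapping::new(VMKernelStack::new()?));
    vmr.insert_at(SVSM_STACKS_INIT_TASK, stack)?;

    // ...and remove them again; the error is ignored when the mapping may be absent.
    let _ = vmr.remove(SVSM_STACKS_INIT_TASK);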
14 changes: 9 additions & 5 deletions src/debug/stacktrace.rs
@@ -8,8 +8,9 @@ use crate::address::{Address, VirtAddr};
 #[cfg(feature = "enable-stacktrace")]
 use crate::cpu::idt::is_exception_handler_return_site;
 use crate::cpu::idt::X86ExceptionContext;
+use crate::cpu::percpu::this_cpu;
 #[cfg(feature = "enable-stacktrace")]
-use crate::mm::address_space::{STACK_SIZE, SVSM_STACKS_INIT_TASK, SVSM_STACK_IST_DF_BASE};
+use crate::mm::address_space::STACK_SIZE;
 #[cfg(feature = "enable-stacktrace")]
 use core::arch::asm;
 #[cfg(feature = "enable-stacktrace")]

@@ -66,14 +67,17 @@ impl StackUnwinder {
             options(att_syntax));
         };
 
+        let top_of_init_stack = this_cpu().get_top_of_stack();
+        let top_of_df_stack = this_cpu().get_top_of_df_stack();
+
         let stacks: StacksBounds = [
             StackBounds {
-                bottom: SVSM_STACKS_INIT_TASK,
-                top: SVSM_STACKS_INIT_TASK + STACK_SIZE,
+                bottom: top_of_init_stack - STACK_SIZE,
+                top: top_of_init_stack,
             },
             StackBounds {
-                bottom: SVSM_STACK_IST_DF_BASE,
-                top: SVSM_STACK_IST_DF_BASE + STACK_SIZE,
+                bottom: top_of_df_stack - STACK_SIZE,
+                top: top_of_df_stack,
             },
         ];
 
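The unwinder now derives stack bounds from the per-cpu top-of-stack values instead of from fixed base addresses, which keeps working now that stacks are mapping objects in the per-cpu virtual range. The arithmetic, assuming the constants from src/mm/address_space.rs below:

    // STACK_SIZE = STACK_PAGES * PAGE_SIZE = 8 * 4 KiB = 32 KiB (64 KiB with "enable-gdb").
    // For a reported top of, e.g., 0xffff_8000_0010_0000:
    //   bottom = top - STACK_SIZE = 0xffff_8000_000f_8000
    // The unwinder presumably accepts a frame only while bottom <= frame pointer < top.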
7 changes: 5 additions & 2 deletions src/mm/address_space.rs
@@ -85,11 +85,11 @@ pub const SIZE_LEVEL0: usize = 1usize << ((9 * 0) + 12);
 // Stack definitions
 // The GDB stub requires a larger stack.
 #[cfg(feature = "enable-gdb")]
-pub const STACK_PAGES_GDB: usize = 12;
+pub const STACK_PAGES_GDB: usize = 8;
 #[cfg(not(feature = "enable-gdb"))]
 pub const STACK_PAGES_GDB: usize = 0;
 
-pub const STACK_PAGES: usize = 4 + STACK_PAGES_GDB;
+pub const STACK_PAGES: usize = 8 + STACK_PAGES_GDB;
 pub const STACK_SIZE: usize = PAGE_SIZE * STACK_PAGES;
 pub const STACK_GUARD_SIZE: usize = STACK_SIZE;
 pub const STACK_TOTAL_SIZE: usize = STACK_SIZE + STACK_GUARD_SIZE;

@@ -114,6 +114,9 @@ pub const PGTABLE_LVL3_IDX_PERCPU: usize = 510;
 /// Base Address of shared memory region
 pub const SVSM_PERCPU_BASE: VirtAddr = virt_from_idx(PGTABLE_LVL3_IDX_PERCPU);
 
+/// End Address of per-cpu memory region
+pub const SVSM_PERCPU_END: VirtAddr = SVSM_PERCPU_BASE.const_add(SIZE_LEVEL3);
+
 /// PerCPU CAA mappings
 pub const SVSM_PERCPU_CAA_BASE: VirtAddr = SVSM_PERCPU_BASE.const_add(2 * SIZE_LEVEL0);
 
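Two consequences of these constants are worth spelling out. SIZE_LEVELn is 1 << (9 * n + 12), so SIZE_LEVEL3 = 1 << 39 = 512 GiB: SVSM_PERCPU_END caps the per-cpu region at exactly one level-3 (PML4) entry, matching PGTABLE_LVL3_IDX_PERCPU = 510. The stack change doubles the default stack while leaving the GDB-enabled total unchanged (old: 4 + 12 = 16 pages; new: 8 + 8 = 16 pages):

    // Without "enable-gdb": STACK_PAGES = 8     -> STACK_SIZE = 32 KiB, STACK_TOTAL_SIZE = 64 KiB
    // With "enable-gdb":    STACK_PAGES = 8 + 8 -> STACK_SIZE = 64 KiB, STACK_TOTAL_SIZE = 128 KiB
    // (STACK_GUARD_SIZE equals STACK_SIZE in both cases.)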
4 changes: 4 additions & 0 deletions src/mm/alloc.rs
@@ -695,6 +695,10 @@ impl PageRef {
     pub fn virt_addr(&self) -> VirtAddr {
         self.virt_addr
     }
+
+    pub fn phys_addr(&self) -> PhysAddr {
+        self.phys_addr
+    }
 }
 
 impl AsRef<[u8; PAGE_SIZE]> for PageRef {
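PageRef previously exposed only its virtual address; the new getter returns the physical address stored in the same struct. A hedged usage sketch (the caller shown is hypothetical, not part of this PR):

    let page = allocate_file_page_ref()?;  // PageRef keeps the page alive
    let va: VirtAddr = page.virt_addr();
    let pa: PhysAddr = page.phys_addr();   // no page-table walk needed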
3 changes: 3 additions & 0 deletions src/mm/mod.rs
@@ -13,12 +13,15 @@ pub mod ptguards;
 pub mod stack;
 pub mod validate;
 pub mod virtualrange;
+pub mod vm;
 
 pub use address_space::*;
 pub use guestmem::GuestPtr;
 pub use memory::{valid_phys_address, writable_phys_addr};
 pub use ptguards::*;
 
+pub use pagetable::PageTablePart;
+
 pub use alloc::{
     allocate_file_page, allocate_file_page_ref, get_file_page, put_file_page, PageRef,
 };