Commit 4926f2a

Merge pull request #136 from roy-hopkins/task_stack
task, percpu: Use VMR to manage the task pagetable and VMKernelStack to allocate the task kernel stack
2 parents 6069ece + ec63843 commit 4926f2a

File tree (5 files changed, +89 -73 lines)

  src/cpu/percpu.rs (+26 -1)
  src/mm/address_space.rs (+7 -16)
  src/mm/vm/mod.rs (+1 -1)
  src/mm/vm/range.rs (+25)
  src/task/tasks.rs (+30 -55)

src/cpu/percpu.rs

+26 -1

@@ -16,7 +16,7 @@ use crate::locking::{LockGuard, RWLock, SpinLock};
 use crate::mm::alloc::{allocate_page, allocate_zeroed_page};
 use crate::mm::pagetable::{get_init_pgtable_locked, PTEntryFlags, PageTable, PageTableRef};
 use crate::mm::virtualrange::VirtualRange;
-use crate::mm::vm::{Mapping, VMKernelStack, VMPhysMem, VMReserved, VMR};
+use crate::mm::vm::{Mapping, VMKernelStack, VMPhysMem, VMRMapping, VMReserved, VMR};
 use crate::mm::{
     virt_to_phys, SVSM_PERCPU_BASE, SVSM_PERCPU_CAA_BASE, SVSM_PERCPU_END,
     SVSM_PERCPU_TEMP_BASE_2M, SVSM_PERCPU_TEMP_BASE_4K, SVSM_PERCPU_TEMP_END_2M,
@@ -537,6 +537,31 @@ impl PerCpu {
         self.vrange_2m
             .init(SVSM_PERCPU_TEMP_BASE_2M, page_count, PAGE_SHIFT_2M);
     }
+
+    /// Create a new virtual memory mapping in the PerCpu VMR
+    ///
+    /// # Arguments
+    ///
+    /// * `mapping` - The mapping to insert into the PerCpu VMR
+    ///
+    /// # Returns
+    ///
+    /// On success, a new [`VMRMapping`] that provides a virtual memory address for
+    /// the mapping which remains valid until the [`VMRMapping`] is dropped.
+    ///
+    /// On error, an [`SvsmError`].
+    pub fn new_mapping(&mut self, mapping: Arc<Mapping>) -> Result<VMRMapping, SvsmError> {
+        VMRMapping::new(&mut self.vm_range, mapping)
+    }
+
+    /// Add the PerCpu virtual range into the provided pagetable
+    ///
+    /// # Arguments
+    ///
+    /// * `pt` - The page table to populate the PerCpu range into
+    pub fn populate_page_table(&self, pt: &mut PageTableRef) {
+        self.vm_range.populate(pt);
+    }
 }
 
 unsafe impl Sync for PerCpu {}
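
The two helpers added here give callers a scoped way to map something into the per-CPU VMR and to copy that VMR into another page table. Below is a minimal usage sketch based only on the calls visible in this commit; the function name is illustrative, and the choice of `VMKernelStack` as the backing mapping simply mirrors `Task::allocate_stack` further down.

```rust
use alloc::sync::Arc;

use crate::cpu::percpu::this_cpu_mut;
use crate::error::SvsmError;
use crate::mm::vm::{Mapping, VMKernelStack};

/// Illustrative helper: map a freshly allocated kernel stack into the
/// current CPU's VMR, use it, and let RAII tear the mapping down.
fn with_temporary_mapping() -> Result<(), SvsmError> {
    // Back the mapping with a VMKernelStack, as Task::allocate_stack does.
    let stack = VMKernelStack::new()?;
    let mapping = Arc::new(Mapping::new(stack));

    // Insert into the per-CPU VMR; the returned guard carries the address.
    let guard = this_cpu_mut().new_mapping(mapping)?;
    let _va = guard.virt_addr();

    // ... write through `_va` while `guard` is alive ...

    Ok(())
    // `guard` is dropped here, which removes the range from the per-CPU VMR.
}
```

Because the guard holds a mutable borrow of the underlying `VMR`, the address it hands out is only meaningful while the mapping is still inserted.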

src/mm/address_space.rs

+7 -16

@@ -148,23 +148,14 @@ pub const SVSM_PERCPU_TEMP_END_4K: VirtAddr = SVSM_PERCPU_TEMP_BASE_4K.const_add
 pub const SVSM_PERCPU_TEMP_BASE_2M: VirtAddr = SVSM_PERCPU_TEMP_BASE.const_add(SIZE_LEVEL1);
 pub const SVSM_PERCPU_TEMP_END_2M: VirtAddr = SVSM_PERCPU_TEMP_BASE.const_add(SIZE_LEVEL2);
 
-/// Per-task memory map
-/// Use the region from 0xfffffe0000000000 - 0xffffff0000000000 for tasks
+/// Task mappings level 3 index
 pub const PGTABLE_LVL3_IDX_PERTASK: usize = 508;
-/// Layout of the per-task memory space is:
-///
-/// +------------------+------------------+------+------------------------------+
-/// | Start            | End              | Size | Description                  |
-/// +------------------+------------------+------+------------------------------+
-/// | fffffeffffff0000 | ffffff0000000000 | 64K  | Task stack                   |
-/// +------------------+------------------+------+------------------------------+
-/// | fffffe0000000000 | fffffe0004000000 | 64M  | Dynamic memory allocation    |
-/// +------------------+------------------+------+------------------------------+
+
+/// Base address of task memory region
 pub const SVSM_PERTASK_BASE: VirtAddr = virt_from_idx(PGTABLE_LVL3_IDX_PERTASK);
 
-/// Virtual addresses for dynamic memory allocation
-pub const SVSM_PERTASK_DYNAMIC_MEMORY: VirtAddr = SVSM_PERTASK_BASE;
+/// End address of task memory region
+pub const SVSM_PERTASK_END: VirtAddr = SVSM_PERTASK_BASE.const_add(SIZE_LEVEL3);
 
-/// Task stack
-pub const SVSM_PERTASK_STACK_BASE: VirtAddr = SVSM_PERTASK_BASE.const_add(0xffffff0000);
-pub const SVSM_PERTASK_STACK_TOP: VirtAddr = SVSM_PERTASK_STACK_BASE.const_add(0x10000);
+/// Kernel stack for a task
+pub const SVSM_PERTASK_STACK_BASE: VirtAddr = SVSM_PERTASK_BASE;
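
For orientation, `SVSM_PERTASK_BASE` is derived from level-3 (PML4) index 508, which matches the start of the region the removed comment documented (0xfffffe0000000000). The following standalone sketch models the arithmetic under two assumptions: `virt_from_idx` places the index at bit 39 and sign-extends from bit 47, and `SIZE_LEVEL3` is the 512 GiB covered by one level-3 entry. The real definitions live in `src/mm/address_space.rs` and use `VirtAddr` rather than `u64`.

```rust
// Standalone model of the address math only; not the crate's implementation.
const SIZE_LEVEL3: u64 = 1 << 39; // assumed: bytes covered by one PML4 entry

const fn virt_from_idx(idx: u64) -> u64 {
    let addr = idx << 39;
    // Sign-extend bit 47 to form a canonical virtual address.
    if addr & (1 << 47) != 0 {
        addr | 0xffff_0000_0000_0000
    } else {
        addr
    }
}

fn main() {
    let base = virt_from_idx(508); // PGTABLE_LVL3_IDX_PERTASK
    assert_eq!(base, 0xffff_fe00_0000_0000); // SVSM_PERTASK_BASE

    // SVSM_PERTASK_END = SVSM_PERTASK_BASE + SIZE_LEVEL3: one PML4 slot.
    let end = base + SIZE_LEVEL3;

    // With this commit the task kernel stack starts at the bottom of the
    // region: SVSM_PERTASK_STACK_BASE == SVSM_PERTASK_BASE.
    println!("per-task region: {base:#x}..{end:#x}");
}
```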

src/mm/vm/mod.rs

+1 -1

@@ -11,4 +11,4 @@ pub use mapping::{
     Mapping, RawAllocMapping, VMKernelStack, VMMAdapter, VMPhysMem, VMReserved, VMalloc,
     VirtualMapping, VMM,
 };
-pub use range::{VMR, VMR_GRANULE};
+pub use range::{VMRMapping, VMR, VMR_GRANULE};

src/mm/vm/range.rs

+25

@@ -374,3 +374,28 @@ impl VMR {
         }
     }
 }
+
+#[derive(Debug)]
+pub struct VMRMapping<'a> {
+    vmr: &'a mut VMR,
+    va: VirtAddr,
+}
+
+impl<'a> VMRMapping<'a> {
+    pub fn new(vmr: &'a mut VMR, mapping: Arc<Mapping>) -> Result<Self, SvsmError> {
+        let va = vmr.insert(mapping)?;
+        Ok(Self { vmr, va })
+    }
+
+    pub fn virt_addr(&self) -> VirtAddr {
+        self.va
+    }
+}
+
+impl<'a> Drop for VMRMapping<'a> {
+    fn drop(&mut self) {
+        self.vmr
+            .remove(self.va)
+            .expect("Error removing VRMapping virtual memory range");
+    }
+}
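
`VMRMapping` is an RAII guard: `new` inserts the mapping and records the address the `VMR` chose, and `Drop` removes it again, so a temporary mapping cannot leak or outlive its user. Here is a self-contained model of the same pattern, with toy `Vmr`/`VmrMapping` types standing in for the real `VMR` and `Arc<Mapping>` machinery.

```rust
// Toy stand-ins for VMR and Arc<Mapping>; they show the guard pattern only.
struct Vmr {
    mapped: Vec<u64>, // virtual addresses currently inserted
}

impl Vmr {
    fn insert(&mut self, size: u64) -> Result<u64, ()> {
        // Pretend allocation: next address after the last mapping.
        let va = self.mapped.last().map_or(0x1000, |last| last + size);
        self.mapped.push(va);
        Ok(va)
    }

    fn remove(&mut self, va: u64) -> Result<(), ()> {
        let idx = self.mapped.iter().position(|&m| m == va).ok_or(())?;
        self.mapped.remove(idx);
        Ok(())
    }
}

// Same shape as VMRMapping: borrow the VMR mutably for the guard's lifetime.
struct VmrMapping<'a> {
    vmr: &'a mut Vmr,
    va: u64,
}

impl<'a> VmrMapping<'a> {
    fn new(vmr: &'a mut Vmr, size: u64) -> Result<Self, ()> {
        let va = vmr.insert(size)?;
        Ok(Self { vmr, va })
    }

    fn virt_addr(&self) -> u64 {
        self.va
    }
}

impl<'a> Drop for VmrMapping<'a> {
    fn drop(&mut self) {
        self.vmr.remove(self.va).expect("failed to remove mapping");
    }
}

fn main() {
    let mut vmr = Vmr { mapped: Vec::new() };
    {
        let m = VmrMapping::new(&mut vmr, 0x1000).unwrap();
        println!("mapped at {:#x}", m.virt_addr());
    } // guard dropped: mapping removed here
    assert!(vmr.mapped.is_empty());
}
```

Holding `&'a mut VMR` inside the guard is what lets `drop` undo the insertion without extra bookkeeping; it also means the VMR cannot be used through that borrow for anything else while the guard is alive.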

src/task/tasks.rs

+30 -55

@@ -6,31 +6,25 @@
 
 extern crate alloc;
 
+use alloc::boxed::Box;
+use alloc::sync::Arc;
 use core::arch::{asm, global_asm};
 use core::fmt;
 use core::mem::size_of;
 use core::sync::atomic::{AtomicU32, Ordering};
 
-use alloc::boxed::Box;
-
-use crate::address::{Address, PhysAddr, VirtAddr};
+use crate::address::{Address, VirtAddr};
 use crate::cpu::msr::{rdtsc, read_flags};
-use crate::cpu::percpu::this_cpu;
+use crate::cpu::percpu::{this_cpu, this_cpu_mut};
 use crate::cpu::X86GeneralRegs;
 use crate::error::SvsmError;
 use crate::locking::SpinLock;
-use crate::mm::alloc::{allocate_pages, get_order};
-use crate::mm::pagetable::{get_init_pgtable_locked, PageTable, PageTableRef};
-use crate::mm::{
-    virt_to_phys, PAGE_SIZE, PGTABLE_LVL3_IDX_PERCPU, SVSM_PERTASK_STACK_BASE,
-    SVSM_PERTASK_STACK_TOP,
-};
-use crate::utils::zero_mem_region;
+use crate::mm::pagetable::{get_init_pgtable_locked, PTEntryFlags, PageTableRef};
+use crate::mm::vm::{Mapping, VMKernelStack, VMR};
+use crate::mm::{SVSM_PERTASK_BASE, SVSM_PERTASK_END, SVSM_PERTASK_STACK_BASE};
 
 pub const INITIAL_TASK_ID: u32 = 1;
 
-const STACK_SIZE: usize = 65536;
-
 #[derive(PartialEq, Debug, Copy, Clone, Default)]
 pub enum TaskState {
     RUNNING,
@@ -39,13 +33,6 @@ pub enum TaskState {
     TERMINATED,
 }
 
-#[derive(Debug, Default)]
-pub struct TaskStack {
-    pub virt_base: VirtAddr,
-    pub virt_top: VirtAddr,
-    pub phys: PhysAddr,
-}
-
 pub const TASK_FLAG_SHARE_PT: u16 = 0x01;
 
 #[derive(Debug, Default)]
@@ -183,12 +170,12 @@ pub struct TaskContext {
 pub struct Task {
     pub rsp: u64,
 
-    /// Information about the task stack
-    pub stack: TaskStack,
-
     /// Page table that is loaded when the task is scheduled
     pub page_table: SpinLock<PageTableRef>,
 
+    /// Task virtual memory range for use at CPL 0
+    vm_kernel_range: VMR,
+
     /// Current state of the task
     pub state: TaskState,
 
@@ -208,7 +195,6 @@ impl fmt::Debug for Task {
     fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
         f.debug_struct("Task")
             .field("rsp", &self.rsp)
-            .field("stack", &self.stack)
            .field("state", &self.state)
             .field("affinity", &self.affinity)
             .field("id", &self.id)
@@ -225,12 +211,18 @@ impl Task {
             Self::allocate_page_table()?
         };
 
-        let (task_stack, rsp) = Self::allocate_stack(entry, &mut pgtable)?;
+        let mut vm_kernel_range = VMR::new(SVSM_PERTASK_BASE, SVSM_PERTASK_END, PTEntryFlags::USER);
+        vm_kernel_range.initialize()?;
+
+        let (stack, rsp_offset) = Self::allocate_stack(entry)?;
+        vm_kernel_range.insert_at(SVSM_PERTASK_STACK_BASE, stack)?;
+
+        vm_kernel_range.populate(&mut pgtable);
 
         let task: Box<Task> = Box::new(Task {
-            rsp: u64::from(rsp),
-            stack: task_stack,
+            rsp: (SVSM_PERTASK_STACK_BASE.bits() + rsp_offset.bits()) as u64,
             page_table: SpinLock::new(pgtable),
+            vm_kernel_range,
             state: TaskState::RUNNING,
             affinity: None,
             id: TASK_ID_ALLOCATOR.next_id(),
@@ -262,35 +254,16 @@ impl Task {
         self.affinity = affinity;
     }
 
-    fn allocate_stack(
-        entry: extern "C" fn(),
-        pgtable: &mut PageTableRef,
-    ) -> Result<(TaskStack, VirtAddr), SvsmError> {
-        let stack_size = SVSM_PERTASK_STACK_TOP - SVSM_PERTASK_STACK_BASE;
-        let num_pages = 1 << get_order(STACK_SIZE);
-        assert!(stack_size == num_pages * PAGE_SIZE);
-        let pages = allocate_pages(get_order(STACK_SIZE))?;
-        zero_mem_region(pages, pages + stack_size);
-
-        let task_stack = TaskStack {
-            virt_base: SVSM_PERTASK_STACK_BASE,
-            virt_top: SVSM_PERTASK_STACK_TOP,
-            phys: virt_to_phys(pages),
-        };
+    fn allocate_stack(entry: extern "C" fn()) -> Result<(Arc<Mapping>, VirtAddr), SvsmError> {
+        let stack = VMKernelStack::new()?;
+        let offset = stack.top_of_stack(VirtAddr::from(0u64));
 
-        // We current have a virtual address in SVSM shared memory for the stack. Configure
-        // the per-task pagetable to map the stack into the task memory map.
-        pgtable.map_region_4k(
-            task_stack.virt_base,
-            task_stack.virt_top,
-            task_stack.phys,
-            PageTable::task_data_flags(),
-        )?;
+        let mapping = Arc::new(Mapping::new(stack));
+        let percpu_mapping = this_cpu_mut().new_mapping(mapping.clone())?;
 
         // We need to setup a context on the stack that matches the stack layout
         // defined in switch_context below.
-        let stack_pos = pages + stack_size;
-        let stack_ptr: *mut u64 = stack_pos.as_mut_ptr();
+        let stack_ptr: *mut u64 = (percpu_mapping.virt_addr().bits() + offset.bits()) as *mut u64;
 
         // 'Push' the task frame onto the stack
         unsafe {
@@ -302,8 +275,10 @@ impl Task {
             stack_ptr.offset(-1).write(task_exit as *const () as u64);
         }
 
-        let initial_rsp = SVSM_PERTASK_STACK_TOP - (size_of::<TaskContext>() + size_of::<u64>());
-        Ok((task_stack, initial_rsp))
+        Ok((
+            mapping,
+            offset - (size_of::<TaskContext>() + size_of::<u64>()),
+        ))
     }
 
     fn allocate_page_table() -> Result<PageTableRef, SvsmError> {
@@ -323,7 +298,7 @@ extern "C" fn task_exit() {
 extern "C" fn apply_new_context(new_task: *mut Task) -> u64 {
     unsafe {
         let mut pt = (*new_task).page_table.lock();
-        pt.copy_entry(&this_cpu().get_pgtable(), PGTABLE_LVL3_IDX_PERCPU);
+        this_cpu().populate_page_table(&mut pt);
         pt.cr3_value().bits() as u64
     }
 }
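
The subtle point in the new `allocate_stack` is that the stack lives at two different virtual addresses: it is written through a temporary per-CPU `VMRMapping`, but the task will run on it at `SVSM_PERTASK_STACK_BASE` once `vm_kernel_range.populate()` has filled the task's page table. The sketch below is a condensed, annotated restatement of the flow above, not a separate implementation; the identifiers are the ones introduced in this diff, and the page-table handling is elided.

```rust
// Condensed from Task::new / Task::allocate_stack in this commit.
fn task_stack_setup_sketch() -> Result<(), SvsmError> {
    // Per-task CPL-0 address range, backed by its own VMR.
    let mut vm_kernel_range = VMR::new(SVSM_PERTASK_BASE, SVSM_PERTASK_END, PTEntryFlags::USER);
    vm_kernel_range.initialize()?;

    // Allocate the stack as a VMKernelStack; `offset` is the distance from
    // the start of the mapping to the usable top of the stack.
    let stack = VMKernelStack::new()?;
    let offset = stack.top_of_stack(VirtAddr::from(0u64));
    let mapping = Arc::new(Mapping::new(stack));

    // 1) Map it temporarily into the current CPU's VMR so the initial
    //    TaskContext can be written; the guard unmaps it when dropped.
    let percpu_mapping = this_cpu_mut().new_mapping(mapping.clone())?;
    let write_top = percpu_mapping.virt_addr().bits() + offset.bits();

    // 2) Insert the same mapping at the task-local stack address; the VMR is
    //    later copied into the task's page table via populate().
    vm_kernel_range.insert_at(SVSM_PERTASK_STACK_BASE, mapping)?;

    // 3) The saved rsp is expressed in task-local terms and only becomes
    //    valid once the task's page table is active.
    let rsp = SVSM_PERTASK_STACK_BASE.bits() + offset.bits()
        - (size_of::<TaskContext>() + size_of::<u64>());

    let _ = (write_top, rsp, vm_kernel_range);
    Ok(())
}
```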
