Commit a5d13a9
cpu: Support TPR when handling SVSM interrupts
The priority scheme inherently associated with interrupts permits masking interrupts of lower priority classes while handling higher-priority interrupts. This makes it possible to enable interrupts while in an interrupt handler, allowing high-priority interrupts to be handled while lower-priority interrupts remain masked. By raising TPR during interrupt handling, it becomes possible to be safely preempted by higher-priority interrupts while an interrupt is being handled.

This change also defines a mechanism by which code unrelated to interrupt handlers can temporarily raise TPR to mask low-priority interrupts while still permitting the delivery of high-priority interrupts. In the future, spin locks or other locks can be associated with TPR so that a lock can be acquired safely, preventing reentrancy by low-priority interrupts while still permitting the handling of high-priority interrupts.

Signed-off-by: Jon Lange <[email protected]>
1 parent 928e8ff commit a5d13a9
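To illustrate the usage pattern the commit message describes (a sketch only, not code from this commit; `CONSOLE_TPR` and `write_console_locked` are hypothetical names), code outside an interrupt handler can hold a `TprGuard` so that low-priority interrupts stay masked for a scope while high-priority interrupts remain deliverable:

use crate::cpu::TprGuard;

// Hypothetical priority class chosen for illustration: with TPR taken from the
// upper four bits of the vector, raising TPR to 2 masks vectors 0x00..=0x2f
// while leaving vectors 0x30 and above deliverable.
const CONSOLE_TPR: usize = 2;

fn write_console_locked(msg: &str) {
    // Raise TPR for the duration of this scope.
    let _tpr_guard = TprGuard::raise(CONSOLE_TPR);

    // ... touch state that is shared with low-priority interrupt handlers ...
    let _ = msg;

    // When `_tpr_guard` drops, TPR is lowered back to the highest level that
    // is still required by any other active raise.
}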

File tree: 6 files changed (+293, -32 lines)


kernel/src/cpu/idt/svsm.rs (+22, -4)
@@ -16,6 +16,7 @@ use super::common::{
     TS_VECTOR, UD_VECTOR, VC_VECTOR, XF_VECTOR,
 };
 use crate::address::VirtAddr;
+use crate::cpu::irq_state::{raw_get_tpr, raw_set_tpr, tpr_from_vector};
 use crate::cpu::registers::RFlags;
 use crate::cpu::shadow_stack::IS_CET_SUPPORTED;
 use crate::cpu::X86ExceptionContext;
@@ -351,11 +352,28 @@ pub extern "C" fn common_isr_handler_entry(vector: usize) {
     cpu.irqs_pop_nesting();
 }
 
-pub fn common_isr_handler(_vector: usize) {
-    // Interrupt injection requests currently require no processing; they occur
-    // simply to ensure an exit from the guest.
+pub fn common_isr_handler(vector: usize) {
+    // Set TPR based on the vector being handled and reenable interrupts to
+    // permit delivery of higher priority interrupts. Because this routine
+    // dispatches interrupts which should only be observable if interrupts
+    // are enabled, the IRQ nesting count must be zero at this point.
+    let previous_tpr = raw_get_tpr();
+    raw_set_tpr(tpr_from_vector(vector));
 
-    // Treat any unhandled interrupt as a spurious interrupt.
+    let cpu = this_cpu();
+    cpu.irqs_enable();
+
+    // Treat any unhandled interrupt as a spurious interrupt. Interrupt
+    // injection requests currently require no processing; they occur simply
+    // to ensure an exit from the guest.
+
+    // Disable interrupts before restoring TPR.
+    cpu.irqs_disable();
+    raw_set_tpr(previous_tpr);
+
+    // Perform the EOI cycle after the interrupt processing state has been
+    // restored so that recurrent interrupts will not introduce recursion at
+    // this point.
     SVSM_PLATFORM.eoi();
 }
 
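For context only (this block is not part of the commit), the handler's TPR computation follows the APIC convention that the upper four bits of the vector select the priority class; a small standalone sketch of that relationship:

// Standalone sketch of the vector-to-priority-class mapping used above.
fn tpr_from_vector(vector: usize) -> usize {
    // TPR is the high four bits of the vector number.
    vector >> 4
}

fn main() {
    // Handling vector 0x65 raises TPR to priority class 6 ...
    assert_eq!(tpr_from_vector(0x65), 6);
    // ... which keeps same-or-lower classes masked (e.g. vector 0x42, class 4) ...
    assert!(tpr_from_vector(0x42) <= 6);
    // ... while a higher-class vector such as 0x81 (class 8) can still preempt.
    assert!(tpr_from_vector(0x81) > 6);
}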

kernel/src/cpu/irq_state.rs (+186, -15)
@@ -5,10 +5,10 @@
 // Author: Joerg Roedel <[email protected]>
 
 use crate::cpu::percpu::this_cpu;
-use crate::cpu::{irqs_disable, irqs_enable};
+use crate::cpu::{irqs_disable, irqs_enable, lower_tpr, raise_tpr};
 use core::arch::asm;
 use core::marker::PhantomData;
-use core::sync::atomic::{AtomicBool, AtomicIsize, Ordering};
+use core::sync::atomic::{AtomicBool, AtomicI32, Ordering};
 
 /// Interrupt flag in RFLAGS register
 pub const EFLAGS_IF: usize = 1 << 9;
@@ -51,9 +51,9 @@ pub fn raw_irqs_enable() {
 #[inline(always)]
 #[must_use = "Unused irqs_enabled() result - meant to be irq_enable()?"]
 pub fn irqs_enabled() -> bool {
+    let state: usize;
     // SAFETY: The inline assembly just reads the processors RFLAGS register
     // and does not change any state.
-    let state: usize;
     unsafe {
         asm!("pushfq",
              "popq {}",
@@ -75,6 +75,50 @@ pub fn irqs_disabled() -> bool {
     !irqs_enabled()
 }
 
+/// Converts an interrupt vector to a TPR value.
+#[inline(always)]
+pub fn tpr_from_vector(vector: usize) -> usize {
+    // TPR is the high four bits of the vector number.
+    vector >> 4
+}
+
+/// Unconditionally set TPR.
+///
+/// Callers need to ensure that the selected TPR is appropriate for the
+/// current context.
+///
+/// * `tpr_value` - the new TPR value.
+#[inline(always)]
+pub fn raw_set_tpr(tpr_value: usize) {
+    // SAFETY: Inline assembly to change TPR, which does not change any state
+    // related to memory safety.
+    unsafe {
+        asm!("mov {tpr}, %cr8",
+             tpr = in(reg) tpr_value,
+             options(att_syntax));
+    }
+}
+
+/// Query IRQ state on current CPU
+///
+/// # Returns
+///
+/// The current TPR.
+#[inline(always)]
+pub fn raw_get_tpr() -> usize {
+    // SAFETY: The inline assembly just reads the TPR register and does not
+    // change any state.
+    unsafe {
+        let mut ret: usize;
+        asm!("movq %cr8, {tpr}",
+             tpr = out(reg) ret,
+             options(att_syntax));
+        ret
+    }
+}
+
+const TPR_LIMIT: usize = 16;
+
 /// This structure keeps track of PerCpu IRQ states. It tracks the original IRQ
 /// state and how deep IRQ-disable calls have been nested. The use of atomics
 /// is necessary for interior mutability and to make state modifications safe
@@ -87,8 +131,10 @@ pub fn irqs_disabled() -> bool {
 pub struct IrqState {
     /// IRQ state when count was `0`
     state: AtomicBool,
-    /// Depth of IRQ-disabled nesting
-    count: AtomicIsize,
+    /// Depth of IRQ-disabled nesting. Index 0 specifies the count of
+    /// IRQ disables and the remaining indices specify the nesting count
+    /// for eached raised TPR level.
+    counts: [AtomicI32; TPR_LIMIT],
     /// Make the type !Send + !Sync
     phantom: PhantomData<*const ()>,
 }
@@ -98,7 +144,7 @@ impl IrqState {
     pub fn new() -> Self {
         Self {
             state: AtomicBool::new(false),
-            count: AtomicIsize::new(0),
+            counts: Default::default(),
             phantom: PhantomData,
         }
     }
@@ -120,7 +166,7 @@ impl IrqState {
     /// The previous nesting level.
     pub fn push_nesting(&self, was_enabled: bool) {
         debug_assert!(irqs_disabled());
-        let val = self.count.fetch_add(1, Ordering::Relaxed);
+        let val = self.counts[0].fetch_add(1, Ordering::Relaxed);
 
         assert!(val >= 0);
 
@@ -151,10 +197,10 @@ impl IrqState {
     /// # Returns
     ///
     /// The new IRQ nesting level.
-    pub fn pop_nesting(&self) -> isize {
+    pub fn pop_nesting(&self) -> i32 {
         debug_assert!(irqs_disabled());
 
-        let val = self.count.fetch_sub(1, Ordering::Relaxed);
+        let val = self.counts[0].fetch_sub(1, Ordering::Relaxed);
 
         assert!(val > 0);
 
@@ -181,8 +227,8 @@ impl IrqState {
     /// # Returns
     ///
     /// Levels of IRQ-disable nesting currently active
-    pub fn count(&self) -> isize {
-        self.count.load(Ordering::Relaxed)
+    pub fn count(&self) -> i32 {
+        self.counts[0].load(Ordering::Relaxed)
     }
 
     /// Changes whether interrupts will be enabled when the nesting count
@@ -192,17 +238,66 @@ impl IrqState {
     /// and must ensure that the specified value is appropriate for the
     /// current environment.
     pub fn set_restore_state(&self, enabled: bool) {
-        assert!(self.count.load(Ordering::Relaxed) != 0);
+        assert!(self.counts[0].load(Ordering::Relaxed) != 0);
         self.state.store(enabled, Ordering::Relaxed);
     }
+
+    /// Increments TPR.
+    ///
+    /// The caller must ensure that a `raise_tpr()` call is followed by a
+    /// matching call to `lower_tpr()`.
+    ///
+    /// * `tpr_value` - The new TPR value. Must be greater than or equal to
+    ///   the current TPR value.
+    #[inline(always)]
+    pub fn raise_tpr(&self, tpr_value: usize) {
+        assert!(tpr_value > 0 && tpr_value >= raw_get_tpr());
+        raw_set_tpr(tpr_value);
+
+        // Increment the count of requests to raise to this TPR to indicate
+        // the number of execution contexts that require this TPR.
+        self.counts[tpr_value].fetch_add(1, Ordering::Relaxed);
+    }
+
+    /// Decrements TPR.
+    ///
+    /// The caller must ensure that a `lower` call balances a preceding
+    /// `raise` call to the indicated level.
+    ///
+    /// * `tpr_value` - The TPR from which the caller would like to lower.
+    ///   Must be less than or equal to the current TPR.
+    #[inline(always)]
+    pub fn lower_tpr(&self, tpr_value: usize) {
+        let current_tpr = raw_get_tpr();
+        debug_assert!(tpr_value <= current_tpr);
+
+        // Decrement the count of execution contexts requiring this raised
+        // TPR.
+        let count = self.counts[tpr_value].fetch_sub(1, Ordering::Relaxed);
+        debug_assert!(count > 0);
+
+        if count == 1 && tpr_value >= current_tpr {
+            // Find the highest TPR that is still required.
+            for new_tpr in (0..tpr_value).rev() {
+                if self.counts[new_tpr].load(Ordering::Relaxed) != 0 {
+                    raw_set_tpr(new_tpr);
+                    return;
+                }
+            }
+
+            // No TPR is still in use, so lower to zero.
+            raw_set_tpr(0);
+        }
+    }
 }
 
 impl Drop for IrqState {
     /// This struct should never be dropped. Add a debug check in case it is
     /// dropped anyway.
     fn drop(&mut self) {
-        let count = self.count.load(Ordering::Relaxed);
-        assert_eq!(count, 0);
+        for count in &self.counts {
+            assert_eq!(count.load(Ordering::Relaxed), 0);
+        }
     }
 }
 
@@ -212,7 +307,7 @@ impl Drop for IrqState {
 ///
 /// The struct implements the `Default` and `Drop` traits for easy use.
 #[derive(Debug)]
-#[must_use = "if unused previous IRQ state will be immediatly restored"]
+#[must_use = "if unused previous IRQ state will be immediately restored"]
 pub struct IrqGuard {
     /// Make the type !Send + !Sync
     phantom: PhantomData<*const ()>,
@@ -244,9 +339,43 @@ impl Drop for IrqGuard {
     }
 }
 
+/// A TPR guard which raises TPR upon creation. When the guard goes out of
+/// scope, TPR is lowered to the highest active TPR.
+///
+/// The struct implements the `Drop` trait for easy use.
+#[derive(Debug, Default)]
+#[must_use = "if unused previous TPR will be immediately restored"]
+pub struct TprGuard {
+    tpr_value: usize,
+
+    /// Make the type !Send + !Sync
+    phantom: PhantomData<*const ()>,
+}
+
+impl TprGuard {
+    pub fn raise(tpr_value: usize) -> Self {
+        // SAFETY: Safe because the struct implements `Drop, which restores
+        // TPR state.
+        raise_tpr(tpr_value);
+
+        Self {
+            tpr_value,
+            phantom: PhantomData,
+        }
+    }
+}
+
+impl Drop for TprGuard {
+    fn drop(&mut self) {
+        // Lower TPR from the value to which it was raised.
+        lower_tpr(self.tpr_value);
+    }
+}
+
 #[cfg(test)]
 mod tests {
     use super::*;
+    use crate::platform::SVSM_PLATFORM;
 
     #[test]
     #[cfg_attr(not(test_in_svsm), ignore = "Can only be run inside guest")]
@@ -293,4 +422,46 @@ mod tests {
             raw_irqs_disable();
         }
     }
+
+    #[test]
+    #[cfg_attr(not(test_in_svsm), ignore = "Can only be run inside guest")]
+    fn tpr_test() {
+        if SVSM_PLATFORM.use_interrupts() {
+            assert_eq!(raw_get_tpr(), 0);
+            raise_tpr(7);
+            assert_eq!(raw_get_tpr(), 7);
+            raise_tpr(8);
+            assert_eq!(raw_get_tpr(), 8);
+            lower_tpr(8);
+            assert_eq!(raw_get_tpr(), 7);
+            lower_tpr(7);
+            assert_eq!(raw_get_tpr(), 0);
+        }
+    }
+
+    #[test]
+    #[cfg_attr(not(test_in_svsm), ignore = "Can only be run inside guest")]
+    fn tpr_guard_test() {
+        if SVSM_PLATFORM.use_interrupts() {
+            assert_eq!(raw_get_tpr(), 0);
+            // Test in-order raise/lower.
+            let g1 = TprGuard::raise(8);
+            assert_eq!(raw_get_tpr(), 8);
+            let g2 = TprGuard::raise(9);
+            assert_eq!(raw_get_tpr(), 9);
+            drop(g2);
+            assert_eq!(raw_get_tpr(), 8);
+            drop(g1);
+            assert_eq!(raw_get_tpr(), 0);
+            // Test out-of-order raise/lower.
+            let g1 = TprGuard::raise(8);
+            assert_eq!(raw_get_tpr(), 8);
+            let g2 = TprGuard::raise(9);
+            assert_eq!(raw_get_tpr(), 9);
+            drop(g1);
+            assert_eq!(raw_get_tpr(), 9);
+            drop(g2);
+            assert_eq!(raw_get_tpr(), 0);
+        }
+    }
 }
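The commit message notes that spin locks or other locks could later be associated with TPR. Purely as a hypothetical sketch of that future direction (nothing below is in this commit; `TprLock`, `TprLockGuard`, and all field names are invented for illustration), such a lock might raise TPR before acquiring so that low-priority interrupt handlers on the same CPU cannot re-enter the critical section:

use core::sync::atomic::{AtomicBool, Ordering};

use crate::cpu::TprGuard;

/// Hypothetical TPR-aware spin lock (illustration only).
struct TprLock {
    locked: AtomicBool,
    /// Priority class that must be masked while the lock is held; must be > 0.
    tpr: usize,
}

struct TprLockGuard<'a> {
    lock: &'a TprLock,
    // Dropped after `Drop::drop` releases the lock, so TPR is lowered last.
    _tpr_guard: TprGuard,
}

impl TprLock {
    fn lock(&self) -> TprLockGuard<'_> {
        // Raise TPR first so that a low-priority interrupt arriving on this
        // CPU cannot run a handler that tries to take the same lock.
        let tpr_guard = TprGuard::raise(self.tpr);
        while self
            .locked
            .compare_exchange(false, true, Ordering::Acquire, Ordering::Relaxed)
            .is_err()
        {
            core::hint::spin_loop();
        }
        TprLockGuard {
            lock: self,
            _tpr_guard: tpr_guard,
        }
    }
}

impl Drop for TprLockGuard<'_> {
    fn drop(&mut self) {
        // Release the lock first; the `TprGuard` field then drops and lowers
        // TPR to the highest level still in use.
        self.lock.locked.store(false, Ordering::Release);
    }
}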

kernel/src/cpu/mod.rs (+2, -2)
@@ -29,7 +29,7 @@ pub mod x86;
 
 pub use apic::LocalApic;
 pub use idt::common::X86ExceptionContext;
-pub use irq_state::{irqs_disabled, irqs_enabled, IrqGuard, IrqState};
-pub use percpu::{irq_nesting_count, irqs_disable, irqs_enable};
+pub use irq_state::{irqs_disabled, irqs_enabled, IrqGuard, IrqState, TprGuard};
+pub use percpu::{irq_nesting_count, irqs_disable, irqs_enable, lower_tpr, raise_tpr};
 pub use registers::{X86GeneralRegs, X86InterruptFrame, X86SegmentRegs};
 pub use tlb::*;
