
Commit 126400b

Merge pull request #386 from msft-jlange/apic_cleanup

cpu/apic: Clean up APIC emulation code

2 parents 0849705 + 90eee39

File tree: 4 files changed (+202 -192 lines)


igvmbuilder/src/igvm_builder.rs (+1 -4)
```diff
@@ -220,10 +220,7 @@ impl IgvmBuilder {
             kernel_size: self.gpa_map.kernel.get_size() as u32,
             kernel_base: self.gpa_map.kernel.get_start(),
             vtom,
-            use_alternate_injection: match self.options.alt_injection {
-                true => 1,
-                false => 0,
-            },
+            use_alternate_injection: u8::from(self.options.alt_injection),
             ..Default::default()
         })
     }
```
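The replaced `match` and the conversion are guaranteed to agree: the standard library defines `From<bool> for u8` as mapping `false` to 0 and `true` to 1. A minimal standalone sketch (not SVSM code) of the equivalence:

```rust
// Standalone sketch (not SVSM code): `From<bool> for u8` maps false -> 0 and
// true -> 1, so the conversion is exactly equivalent to the removed match.
fn main() {
    for alt_injection in [false, true] {
        let old_style: u8 = match alt_injection {
            true => 1,
            false => 0,
        };
        assert_eq!(u8::from(alt_injection), old_style);
    }
}
```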

kernel/src/cpu/apic.rs (+157 -145)
```diff
@@ -102,7 +102,10 @@ pub enum ApicError {
     ApicError,
 }
 
-#[derive(Default, Clone, Copy, Debug)]
+// This structure must never be copied because a silent copy will cause APIC
+// state to be lost.
+#[allow(missing_copy_implementations)]
+#[derive(Default, Debug)]
 pub struct LocalApic {
     irr: [u32; 8],
     allowed_irr: [u32; 8],
```
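Dropping `Copy` matters because `LocalApic` carries mutable interrupt state. A standalone sketch (not SVSM code; the struct and field are illustrative) of the silent-copy hazard the new comment describes:

```rust
// Standalone sketch (not SVSM code): with Copy, a careless binding mutates a
// silent copy and the update to the "real" state is lost.
#[derive(Default, Clone, Copy, Debug)]
struct CopyableApic {
    isr_stack_index: usize,
}

fn main() {
    let original = CopyableApic::default();
    let mut aliased = original; // with Copy this is a silent copy, not a move
    aliased.isr_stack_index += 1; // mutates only the copy
    assert_eq!(original.isr_stack_index, 0); // original state never updated
}
```

Without `Copy`, the same binding would move the value, and the later use of `original` would fail to compile, surfacing the bug at build time.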
```diff
@@ -318,52 +321,54 @@ impl LocalApic {
         // in service. This check does not consider TPR, because an
         // interrupt lower in priority than TPR must be queued for delivery
         // as soon as TPR is lowered.
-        if (irq & 0xF0) > (current_priority & 0xF0) {
-            // Determine whether this interrupt can be injected
-            // immediately. If not, queue it for delivery when possible.
-            let try_lazy_eoi = if self.deliver_interrupt_immediately(irq, cpu_state) {
-                self.interrupt_delivered = true;
-
-                // Use of lazy EOI can safely be attempted, because the
-                // highest priority interrupt in service is unambiguous.
-                true
-            } else {
-                cpu_state.queue_interrupt(irq);
-                self.interrupt_queued = true;
-
-                // A lazy EOI can only be attempted if there is no lower
-                // priority interrupt in service. If a lower priority
-                // interrupt is in service, then the lazy EOI handler
-                // won't know whether the lazy EOI is for the one that
-                // is already in service or the one that is being queued
-                // here.
-                self.isr_stack_index == 0
-            };
-
-            // Mark this interrupt in-service. It will be recalled if
-            // the ISR is examined again before the interrupt is actually
-            // delivered.
-            Self::remove_vector_register(&mut self.irr, irq);
-            self.isr_stack[self.isr_stack_index] = irq;
-            self.isr_stack_index += 1;
-
-            // Configure a lazy EOI if possible. Lazy EOI is not possible
-            // for level-sensitive interrupts, because an explicit EOI
-            // is required to acknowledge the interrupt at the source.
-            if try_lazy_eoi && !Self::test_vector_register(&self.tmr, irq) {
-                // A lazy EOI is possible only if there is no other
-                // interrupt pending. If another interrupt is pending,
-                // then an explicit EOI will be required to prompt
-                // delivery of the next interrupt.
-                if self.scan_irr() == 0 {
-                    if let Some(calling_area) = guest_caa {
-                        if let Ok(caa) = calling_area.read() {
-                            if calling_area.write(caa.update_no_eoi_required(1)).is_ok() {
-                                // Only track a pending lazy EOI if the
-                                // calling area page could successfully be
-                                // updated.
-                                self.lazy_eoi_pending = true;
-                            }
+        if (irq & 0xF0) <= (current_priority & 0xF0) {
+            return;
+        }
+
+        // Determine whether this interrupt can be injected
+        // immediately. If not, queue it for delivery when possible.
+        let try_lazy_eoi = if self.deliver_interrupt_immediately(irq, cpu_state) {
+            self.interrupt_delivered = true;
+
+            // Use of lazy EOI can safely be attempted, because the
+            // highest priority interrupt in service is unambiguous.
+            true
+        } else {
+            cpu_state.queue_interrupt(irq);
+            self.interrupt_queued = true;
+
+            // A lazy EOI can only be attempted if there is no lower
+            // priority interrupt in service. If a lower priority
+            // interrupt is in service, then the lazy EOI handler
+            // won't know whether the lazy EOI is for the one that
+            // is already in service or the one that is being queued
+            // here.
+            self.isr_stack_index == 0
+        };
+
+        // Mark this interrupt in-service. It will be recalled if
+        // the ISR is examined again before the interrupt is actually
+        // delivered.
+        Self::remove_vector_register(&mut self.irr, irq);
+        self.isr_stack[self.isr_stack_index] = irq;
+        self.isr_stack_index += 1;
+
+        // Configure a lazy EOI if possible. Lazy EOI is not possible
+        // for level-sensitive interrupts, because an explicit EOI
+        // is required to acknowledge the interrupt at the source.
+        if try_lazy_eoi && !Self::test_vector_register(&self.tmr, irq) {
+            // A lazy EOI is possible only if there is no other
+            // interrupt pending. If another interrupt is pending,
+            // then an explicit EOI will be required to prompt
+            // delivery of the next interrupt.
+            if self.scan_irr() == 0 {
+                if let Some(calling_area) = guest_caa {
+                    if let Ok(caa) = calling_area.read() {
+                        if calling_area.write(caa.update_no_eoi_required(1)).is_ok() {
+                            // Only track a pending lazy EOI if the
+                            // calling area page could successfully be
+                            // updated.
+                            self.lazy_eoi_pending = true;
                         }
                     }
                 }
```
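The inverted guard compares only the high nibbles because the APIC groups the 256 vectors into 16 priority classes of 16 vectors each, with the class in bits 7:4 of the vector. A standalone sketch (not SVSM code) of the check:

```rust
// Standalone sketch (not SVSM code): the x86 APIC priority class of a vector
// is its high nibble, so the early-return guard masks with 0xF0.
fn can_preempt(irq: u8, current_priority: u8) -> bool {
    (irq & 0xF0) > (current_priority & 0xF0)
}

fn main() {
    assert!(can_preempt(0x41, 0x3F)); // class 4 preempts class 3
    assert!(!can_preempt(0x3F, 0x31)); // same class: must wait for an EOI
    assert!(!can_preempt(0x2F, 0x31)); // lower class: queued for later
}
```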
```diff
@@ -380,24 +385,29 @@ impl LocalApic {
     }
 
     pub fn perform_eoi(&mut self) {
-        // Pop any in-service interrupt from the stack, and schedule the APIC
-        // for reevaluation.
-        if self.isr_stack_index != 0 {
-            self.isr_stack_index -= 1;
-            let vector = self.isr_stack[self.isr_stack_index];
-            if Self::test_vector_register(&self.tmr, vector) {
-                if Self::test_vector_register(&self.host_tmr, vector) {
-                    Self::perform_host_eoi(vector);
-                    Self::remove_vector_register(&mut self.host_tmr, vector);
-                } else {
-                    // FIXME: should do something with locally generated
-                    // level-sensitive interrupts.
-                }
-                Self::remove_vector_register(&mut self.tmr, vector);
+        // Pop any in-service interrupt from the stack. If there is no
+        // interrupt in service, then there is nothing to do.
+        if self.isr_stack_index == 0 {
+            return;
+        }
+
+        self.isr_stack_index -= 1;
+        let vector = self.isr_stack[self.isr_stack_index];
+        if Self::test_vector_register(&self.tmr, vector) {
+            if Self::test_vector_register(&self.host_tmr, vector) {
+                Self::perform_host_eoi(vector);
+                Self::remove_vector_register(&mut self.host_tmr, vector);
+            } else {
+                // FIXME: should do something with locally generated
+                // level-sensitive interrupts.
             }
-            self.update_required = true;
-            self.lazy_eoi_pending = false;
+            Self::remove_vector_register(&mut self.tmr, vector);
         }
+
+        // Schedule the APIC for reevaluation so any additional pending
+        // interrupt can be processed.
+        self.update_required = true;
+        self.lazy_eoi_pending = false;
     }
 
     fn get_isr(&self, index: usize) -> u32 {
```
```diff
@@ -451,7 +461,7 @@ impl LocalApic {
         // requested destination. Skip the current CPU, since it was checked
         // above.
         for cpu_ref in PERCPU_AREAS.iter() {
-            let cpu = cpu_ref.unwrap();
+            let cpu = cpu_ref.as_cpu_ref();
             let this_apic_id = cpu.apic_id();
             if (this_apic_id != apic_id)
                 && Self::logical_destination_match(destination, this_apic_id)
```
```diff
@@ -521,7 +531,7 @@ impl LocalApic {
         // current CPU and indicate that an IPI has been requested.
         let apic_id = this_cpu().get_apic_id();
         for cpu_ref in PERCPU_AREAS.iter() {
-            let cpu = cpu_ref.unwrap();
+            let cpu = cpu_ref.as_cpu_ref();
             if cpu.apic_id() != apic_id {
                 Self::post_ipi_one_target(cpu, icr);
             }
```
```diff
@@ -625,26 +635,26 @@ impl LocalApic {
         match register {
             APIC_REGISTER_TPR => {
                 // TPR must be an 8-bit value.
-                if value > 0xFF {
-                    Err(ApicError::ApicError)
-                } else {
-                    cpu_state.set_tpr((value & 0xFF) as u8);
-                    Ok(())
+                match u8::try_from(value) {
+                    Ok(tpr) => {
+                        cpu_state.set_tpr(tpr);
+                        Ok(())
+                    }
+                    Err(_) => Err(ApicError::ApicError),
                 }
             }
             APIC_REGISTER_EOI => {
                 self.perform_eoi();
                 Ok(())
             }
             APIC_REGISTER_ICR => self.handle_icr_write(value),
-            APIC_REGISTER_SELF_IPI => {
-                if value > 0xFF {
-                    Err(ApicError::ApicError)
-                } else {
-                    self.post_interrupt((value & 0xFF) as u8, false);
+            APIC_REGISTER_SELF_IPI => match u8::try_from(value) {
+                Ok(vector) => {
+                    self.post_interrupt(vector, false);
                     Ok(())
                 }
-            }
+                Err(_) => Err(ApicError::ApicError),
+            },
             _ => Err(ApicError::ApicError),
         }
     }
```
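Both conversions rely on `u8::try_from` failing exactly when the old `value > 0xFF` test would have rejected the write, so the separate mask-and-cast is no longer needed. A standalone sketch (not SVSM code):

```rust
// Standalone sketch (not SVSM code): u8::try_from subsumes the removed range
// check and, when it succeeds, yields the same byte as `(value & 0xFF) as u8`.
fn main() {
    let value: u64 = 0x7F;
    assert_eq!(u8::try_from(value).unwrap(), (value & 0xFF) as u8);
    assert!(u8::try_from(0x100u64).is_err()); // out of range: the error path
}
```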
```diff
@@ -683,82 +693,84 @@ impl LocalApic {
         let hv_doorbell = this_cpu().hv_doorbell().unwrap();
         let vmpl_event_mask = hv_doorbell.per_vmpl_events.swap(0, Ordering::Relaxed);
         // Ignore events other than for the guest VMPL.
-        if vmpl_event_mask & (1 << (GUEST_VMPL - 1)) != 0 {
-            let descriptor = &hv_doorbell.per_vmpl[GUEST_VMPL - 1];
-
-            // First consume any level-sensitive vector that is present.
-            let mut flags = HVExtIntStatus::from(descriptor.status.load(Ordering::Relaxed));
-            if flags.level_sensitive() {
-                let mut vector;
-                // Consume the correct vector atomically.
-                loop {
-                    let mut new_flags = flags;
-                    vector = flags.pending_vector();
-                    new_flags.set_pending_vector(0);
-                    new_flags.set_level_sensitive(false);
-                    if let Err(fail_flags) = descriptor.status.compare_exchange(
-                        flags.into(),
-                        new_flags.into(),
-                        Ordering::Relaxed,
-                        Ordering::Relaxed,
-                    ) {
-                        flags = fail_flags.into();
-                    } else {
-                        flags = new_flags;
-                        break;
-                    }
-                }
+        if vmpl_event_mask & (1 << (GUEST_VMPL - 1)) == 0 {
+            return;
+        }
+
+        let descriptor = &hv_doorbell.per_vmpl[GUEST_VMPL - 1];
 
-            if self.signal_one_host_interrupt(vector, true) {
-                Self::insert_vector_register(&mut self.host_tmr, vector);
+        // First consume any level-sensitive vector that is present.
+        let mut flags = HVExtIntStatus::from(descriptor.status.load(Ordering::Relaxed));
+        if flags.level_sensitive() {
+            let mut vector;
+            // Consume the correct vector atomically.
+            loop {
+                let mut new_flags = flags;
+                vector = flags.pending_vector();
+                new_flags.set_pending_vector(0);
+                new_flags.set_level_sensitive(false);
+                if let Err(fail_flags) = descriptor.status.compare_exchange(
+                    flags.into(),
+                    new_flags.into(),
+                    Ordering::Relaxed,
+                    Ordering::Relaxed,
+                ) {
+                    flags = fail_flags.into();
+                } else {
+                    flags = new_flags;
+                    break;
                 }
             }
 
-            // If a single vector is present, then signal it, otherwise
-            // process the entire IRR.
-            if flags.multiple_vectors() {
-                // Clear the multiple vectors flag first so that additional
-                // interrupts are presented via the 8-bit vector. This must
-                // be done before the IRR is scanned so that if additional
-                // vectors are presented later, the multiple vectors flag
-                // will be set again.
-                let multiple_vectors_mask: u32 =
-                    HVExtIntStatus::new().with_multiple_vectors(true).into();
+            if self.signal_one_host_interrupt(vector, true) {
+                Self::insert_vector_register(&mut self.host_tmr, vector);
+            }
+        }
+
+        // If a single vector is present, then signal it, otherwise
+        // process the entire IRR.
+        if flags.multiple_vectors() {
+            // Clear the multiple vectors flag first so that additional
+            // interrupts are presented via the 8-bit vector. This must
+            // be done before the IRR is scanned so that if additional
+            // vectors are presented later, the multiple vectors flag
+            // will be set again.
+            let multiple_vectors_mask: u32 =
+                HVExtIntStatus::new().with_multiple_vectors(true).into();
+            descriptor
+                .status
+                .fetch_and(!multiple_vectors_mask, Ordering::Relaxed);
+
+            // Handle the special case of vector 31.
+            if flags.vector_31() {
                 descriptor
                     .status
-                    .fetch_and(!multiple_vectors_mask, Ordering::Relaxed);
-
-                // Handle the special case of vector 31.
-                if flags.vector_31() {
-                    descriptor
-                        .status
-                        .fetch_and(!(1u32 << 31), Ordering::Relaxed);
-                    self.signal_one_host_interrupt(31, false);
-                }
+                    .fetch_and(!(1u32 << 31), Ordering::Relaxed);
+                self.signal_one_host_interrupt(31, false);
+            }
 
-            for i in 1..8 {
-                let bits = descriptor.irr[i - 1].swap(0, Ordering::Relaxed);
-                self.signal_several_interrupts(i, bits & self.allowed_irr[i]);
-            }
-        } else if flags.pending_vector() != 0 {
-            // Atomically consume this interrupt. If it cannot be consumed
-            // atomically, then it must be because some other interrupt
-            // has been presented, and that can be consumed in another
-            // pass.
-            let mut new_flags = flags;
-            new_flags.set_pending_vector(0);
-            if descriptor
-                .status
-                .compare_exchange(
-                    flags.into(),
-                    new_flags.into(),
-                    Ordering::Relaxed,
-                    Ordering::Relaxed,
-                )
-                .is_ok()
-            {
-                self.signal_one_host_interrupt(flags.pending_vector(), false);
-            }
+            for i in 1..8 {
+                let bits = descriptor.irr[i - 1].swap(0, Ordering::Relaxed);
+                self.signal_several_interrupts(i, bits & self.allowed_irr[i]);
+            }
+        } else if flags.pending_vector() != 0 {
+            // Atomically consume this interrupt. If it cannot be consumed
+            // atomically, then it must be because some other interrupt
+            // has been presented, and that can be consumed in another
+            // pass.
+            let mut new_flags = flags;
+            new_flags.set_pending_vector(0);
+            if descriptor
+                .status
+                .compare_exchange(
+                    flags.into(),
+                    new_flags.into(),
+                    Ordering::Relaxed,
+                    Ordering::Relaxed,
+                )
+                .is_ok()
+            {
+                self.signal_one_host_interrupt(flags.pending_vector(), false);
             }
         }
     }
```
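The level-sensitive and single-vector paths above share the same lock-free pattern: retry `compare_exchange` with a freshly observed value until the status word is swapped. A standalone sketch (not SVSM code; it assumes, purely for illustration, that the pending vector occupies the low byte, standing in for the `HVExtIntStatus` bitfield):

```rust
// Standalone sketch (not SVSM code) of the compare_exchange retry loop.
use core::sync::atomic::{AtomicU32, Ordering};

fn consume_pending_vector(status: &AtomicU32) -> u32 {
    let mut flags = status.load(Ordering::Relaxed);
    loop {
        let vector = flags & 0xFF; // vector observed in this iteration
        let new_flags = flags & !0xFF; // clear it in the replacement value
        match status.compare_exchange(flags, new_flags, Ordering::Relaxed, Ordering::Relaxed) {
            // No writer raced with us: the vector is now consumed.
            Ok(_) => return vector,
            // The word changed under us: retry with the value we lost to.
            Err(actual) => flags = actual,
        }
    }
}

fn main() {
    let status = AtomicU32::new(0x42);
    assert_eq!(consume_pending_vector(&status), 0x42);
    assert_eq!(status.load(Ordering::Relaxed), 0);
}
```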
