[arm] Ensure context switch doesn't happen from irq #280

Open · wants to merge 1 commit into base: master
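This change removes the __arm_in_handler word that the ARM exception glue used to maintain and tracks IRQ context in C instead: a new arch_irq() wrapper sets a per-CPU flag around platform_irq(), arch_in_int_handler() reports that flag (with a __WEAK default that returns false), and thread_resched() asserts that it is never entered while the flag is set. As a minimal sketch of how callers outside this diff are expected to use the predicate (service_device() and work_pending below are hypothetical, not part of the tree):

#include <arch/ops.h>
#include <stdbool.h>

static volatile bool work_pending;

/* Hypothetical device-service helper: in IRQ context a reschedule
 * would now trip the ASSERT in thread_resched(), so only flag the
 * work; in thread context it is safe to do work that may block. */
static void service_device(void) {
    if (arch_in_int_handler()) {
        work_pending = true;    /* defer to a worker thread */
    } else {
        work_pending = false;
        /* ... perform the actual (possibly blocking) work ... */
    }
}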
14 changes: 13 additions & 1 deletion arch/arch.c
@@ -1 +1,13 @@
// empty file to help build empty arch module
/*
* Copyright (c) 2020 Travis Geiselbrecht
*
* Use of this source code is governed by a MIT-style
* license that can be found in the LICENSE file or at
* https://opensource.org/licenses/MIT
*/

#include <arch/ops.h>

__WEAK bool arch_in_int_handler(void) {
return false;
}
26 changes: 26 additions & 0 deletions arch/arm/arm/arch.c
@@ -47,6 +47,32 @@ static void arm_basic_setup(void);
static void spinlock_test(void);
static void spinlock_test_secondary(void);

static uint32_t cpu_in_irq_ctxt[SMP_MAX_CPUS];

enum handler_return arch_irq(struct arm_iframe *frame) {
enum handler_return ret;
uint32_t cpu = arch_curr_cpu_num();
DEBUG_ASSERT(cpu < SMP_MAX_CPUS);

cpu_in_irq_ctxt[cpu] = 1;
ret = platform_irq(frame);
cpu_in_irq_ctxt[cpu] = 0;
return ret;
}

bool arch_in_int_handler() {
#if ARM_ISA_ARMV7M
uint32_t ipsr;
__asm volatile ("MRS %0, ipsr" : "=r" (ipsr) );
return (ipsr & IPSR_ISR_Msk);
#else
uint32_t cpu = arch_curr_cpu_num();
DEBUG_ASSERT(cpu < SMP_MAX_CPUS);

return (cpu_in_irq_ctxt[cpu] == 1);
#endif
}

#if WITH_SMP
/* smp boot lock */
spin_lock_t arm_boot_cpu_lock = 1;
16 changes: 1 addition & 15 deletions arch/arm/arm/exceptions.S
@@ -204,18 +204,8 @@ FUNCTION(arm_irq)

/* r0 now holds pointer to iframe */

/* track that we're inside an irq handler */
LOADCONST(r2, __arm_in_handler)
mov r1, #1
str r1, [r2]

/* call into higher level code */
bl platform_irq

/* clear the irq handler status */
LOADCONST(r1, __arm_in_handler)
mov r2, #0
str r2, [r1]
bl arch_irq

/* reschedule if the handler returns nonzero */
cmp r0, #0
@@ -237,7 +227,3 @@ FUNCTION(arm_fiq)
DATA(__irq_cycle_count)
.word 0
#endif

.data
DATA(__arm_in_handler)
.word 0
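At the C level the new ARM IRQ path works as sketched below: the flag is set and cleared inside arch_irq(), so it is already clear again by the time the glue acts on a nonzero return value. This is a paraphrase of the assembly above; irq_glue is a made-up name and the header locations are assumptions, not part of the diff.

#include <arch/arch_ops.h>   /* platform_irq(), struct arm_iframe (assumed location) */
#include <kernel/thread.h>   /* thread_preempt(), enum handler_return (assumed location) */

enum handler_return arch_irq(struct arm_iframe *frame);  /* defined in arch/arm/arm/arch.c above */

static void irq_glue(struct arm_iframe *frame) {
    /* arch_irq() sets cpu_in_irq_ctxt[cpu] around platform_irq(), so
     * arch_in_int_handler() is true only while the handler body runs. */
    enum handler_return ret = arch_irq(frame);

    /* The flag is clear again here, so the usual return-value
     * reschedule path does not trip the new ASSERT in thread_resched(). */
    if (ret)
        thread_preempt();
}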
15 changes: 2 additions & 13 deletions arch/arm/include/arch/arch_ops.h
@@ -20,6 +20,8 @@

__BEGIN_CDECLS

enum handler_return platform_irq(struct arm_iframe *frame);

#if ARM_ISA_ARMV7 || (ARM_ISA_ARMV6 && !__thumb__)
#define ENABLE_CYCLE_COUNTER 1

@@ -67,19 +69,6 @@ static inline bool arch_fiqs_disabled(void) {
return !!state;
}

static inline bool arch_in_int_handler(void) {
#if ARM_ISA_ARMV7M
uint32_t ipsr;
__asm volatile ("MRS %0, ipsr" : "=r" (ipsr) );
return (ipsr & IPSR_ISR_Msk);
#else
/* set by the interrupt glue to track that the cpu is inside a handler */
extern bool __arm_in_handler;

return __arm_in_handler;
#endif
}

static inline ulong arch_cycle_count(void) {
#if ARM_ISA_ARMV7M
#if ENABLE_CYCLE_COUNTER
19 changes: 19 additions & 0 deletions arch/arm64/arch.c
@@ -26,6 +26,25 @@
static spin_lock_t arm_boot_cpu_lock = 1;
static volatile int secondaries_to_init = 0;
#endif
static uint32_t cpu_in_irq_ctxt[SMP_MAX_CPUS];

enum handler_return arch_irq(struct arm64_iframe_short *frame) {
enum handler_return ret;
uint32_t cpu = arch_curr_cpu_num();
DEBUG_ASSERT(cpu < SMP_MAX_CPUS);

cpu_in_irq_ctxt[cpu] = 1;
ret = platform_irq(frame);
cpu_in_irq_ctxt[cpu] = 0;
return ret;
}

bool arch_in_int_handler() {
uint32_t cpu = arch_curr_cpu_num();
DEBUG_ASSERT(cpu < SMP_MAX_CPUS);

return (cpu_in_irq_ctxt[cpu] == 1);
}

static void arm64_cpu_early_init(void) {
/* set the vector base */
2 changes: 1 addition & 1 deletion arch/arm64/exceptions.S
@@ -111,7 +111,7 @@ add sp, sp, #32
regsave_short
msr daifclr, #1 /* reenable fiqs once elr and spsr have been saved */
mov x0, sp
bl platform_irq
bl arch_irq
cbz x0, .Lirq_exception_no_preempt\@
bl thread_preempt
.Lirq_exception_no_preempt\@:
1 change: 1 addition & 0 deletions arch/arm64/include/arch/arch_ops.h
@@ -17,6 +17,7 @@
#define ENABLE_CYCLE_COUNTER 1

void arch_stacktrace(uint64_t fp, uint64_t pc);
enum handler_return platform_irq(struct arm64_iframe_short *frame);

// override of some routines
static inline void arch_enable_ints(void) {
3 changes: 2 additions & 1 deletion arch/include/arch/ops.h
@@ -16,11 +16,12 @@

__BEGIN_CDECLS

bool arch_in_int_handler(void);

/* fast routines that most arches will implement inline */
static void arch_enable_ints(void);
static void arch_disable_ints(void);
static bool arch_ints_disabled(void);
static bool arch_in_int_handler(void);

static ulong arch_cycle_count(void);

4 changes: 4 additions & 0 deletions kernel/thread.c
@@ -472,6 +472,10 @@ void thread_resched(void) {
thread_t *current_thread = get_current_thread();
uint cpu = arch_curr_cpu_num();

/* Assert that cpu is not in active irq handling
* context */
ASSERT(!arch_in_int_handler());

DEBUG_ASSERT(arch_ints_disabled());
DEBUG_ASSERT(spin_lock_held(&thread_lock));
DEBUG_ASSERT(current_thread->state != THREAD_RUNNING);
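For reference, a sketch of the situation the new ASSERT is meant to catch, assuming LK's usual int_handler shape and INT_RESCHEDULE return code; my_device_isr is a made-up name and the header location is an assumption:

#include <kernel/thread.h>   /* enum handler_return, thread_preempt() (assumed location) */

/* Hypothetical device ISR registered with the platform interrupt code. */
static enum handler_return my_device_isr(void *arg) {
    (void)arg;
    /* ... acknowledge the device, queue completed work ... */

    /* Wrong: calling thread_preempt() (or anything else that reaches
     * thread_resched()) here would run while cpu_in_irq_ctxt is still
     * set, and the new ASSERT would fire. */

    /* Right: request preemption via the return value; the exception
     * glue reschedules only after arch_irq() has cleared the flag. */
    return INT_RESCHEDULE;
}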