From 54fe69caa09362a7c5247e40e73ec7cf0742e686 Mon Sep 17 00:00:00 2001 From: Travis Geiselbrecht Date: Tue, 3 Sep 2024 21:54:19 -0700 Subject: [PATCH] WIP gicv3 from trusty --- dev/interrupt/arm_gic/arm_gic.c | 649 ++++++++++-------- dev/interrupt/arm_gic/arm_gic_common.h | 312 +++++++++ dev/interrupt/arm_gic/gic_v3.c | 328 +++++++++ dev/interrupt/arm_gic/gic_v3.h | 32 + .../arm_gic/include/dev/interrupt/arm_gic.h | 77 ++- dev/interrupt/arm_gic/rules.mk | 12 + 6 files changed, 1125 insertions(+), 285 deletions(-) create mode 100644 dev/interrupt/arm_gic/arm_gic_common.h create mode 100644 dev/interrupt/arm_gic/gic_v3.c create mode 100644 dev/interrupt/arm_gic/gic_v3.h diff --git a/dev/interrupt/arm_gic/arm_gic.c b/dev/interrupt/arm_gic/arm_gic.c index 7072f6145..54bbe796f 100644 --- a/dev/interrupt/arm_gic/arm_gic.c +++ b/dev/interrupt/arm_gic/arm_gic.c @@ -1,9 +1,24 @@ /* * Copyright (c) 2012-2015 Travis Geiselbrecht * - * Use of this source code is governed by a MIT-style - * license that can be found in the LICENSE file or at - * https://opensource.org/licenses/MIT + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files + * (the "Software"), to deal in the Software without restriction, + * including without limitation the rights to use, copy, modify, merge, + * publish, distribute, sublicense, and/or sell copies of the Software, + * and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY + * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, + * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE + * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
*/ #include #include @@ -11,33 +26,44 @@ #include #include #include +#include #include #include #include +#include #include +//#include #include #include #include #include +#include #if WITH_LIB_SM #include #include #endif +#include "arm_gic_common.h" + +#if GIC_VERSION > 2 +#include "gic_v3.h" +#endif + #define LOCAL_TRACE 0 #if ARCH_ARM -#include #define iframe arm_iframe #define IFRAME_PC(frame) ((frame)->pc) #endif #if ARCH_ARM64 -#include #define iframe arm64_iframe_short #define IFRAME_PC(frame) ((frame)->elr) #endif +void platform_fiq(struct iframe *frame); static status_t arm_gic_set_secure_locked(u_int irq, bool secure); +static void gic_set_enable(uint vector, bool enable); +static void arm_gic_init_hw(void); static spin_lock_t gicd_lock; #if WITH_LIB_SM @@ -46,29 +72,42 @@ static spin_lock_t gicd_lock; #define GICD_LOCK_FLAGS SPIN_LOCK_FLAG_INTERRUPTS #endif #define GIC_MAX_PER_CPU_INT 32 +#define GIC_MAX_SGI_INT 16 + +#if ARM_GIC_USE_DOORBELL_NS_IRQ +static bool doorbell_enabled; +#endif + +struct arm_gic arm_gics[NUM_ARM_GICS]; + +static bool arm_gic_check_init(int irq) +{ + /* check if we have a vaddr for gicd, both gicv2 and gicv3/4 use this */ + if (!arm_gics[0].gicd_vaddr) { + TRACEF("change to interrupt %d ignored before init\n", irq); + return false; + } + return true; +} #if WITH_LIB_SM static bool arm_gic_non_secure_interrupts_frozen; -static bool arm_gic_interrupt_change_allowed(uint irq) { +static bool arm_gic_interrupt_change_allowed(int irq) +{ if (!arm_gic_non_secure_interrupts_frozen) - return true; + return arm_gic_check_init(irq); - TRACEF("change to interrupt %u ignored after booting ns\n", irq); + TRACEF("change to interrupt %d ignored after booting ns\n", irq); return false; } - -static void suspend_resume_fiq(bool resume_gicc, bool resume_gicd); #else -static bool arm_gic_interrupt_change_allowed(uint irq) { - return true; -} - -static void suspend_resume_fiq(bool resume_gicc, bool resume_gicd) { +static bool arm_gic_interrupt_change_allowed(int irq) +{ + return arm_gic_check_init(irq); } #endif - struct int_handler_struct { int_handler handler; void *arg; @@ -77,15 +116,20 @@ struct int_handler_struct { static struct int_handler_struct int_handler_table_per_cpu[GIC_MAX_PER_CPU_INT][SMP_MAX_CPUS]; static struct int_handler_struct int_handler_table_shared[MAX_INT-GIC_MAX_PER_CPU_INT]; -static struct int_handler_struct *get_int_handler(unsigned int vector, uint cpu) { - if (vector < GIC_MAX_PER_CPU_INT) { +static struct int_handler_struct *get_int_handler(unsigned int vector, uint cpu) +{ + if (vector < GIC_MAX_PER_CPU_INT) return &int_handler_table_per_cpu[vector][cpu]; - } else { + else return &int_handler_table_shared[vector - GIC_MAX_PER_CPU_INT]; - } } -void register_int_handler(unsigned int vector, int_handler handler, void *arg) { +#if ARM_GIC_USE_DOORBELL_NS_IRQ +static status_t arm_gic_set_priority_locked(u_int irq, uint8_t priority); +#endif + +void register_int_handler(unsigned int vector, int_handler handler, void *arg) +{ struct int_handler_struct *h; uint cpu = arch_curr_cpu_num(); @@ -97,9 +141,29 @@ void register_int_handler(unsigned int vector, int_handler handler, void *arg) { spin_lock_save(&gicd_lock, &state, GICD_LOCK_FLAGS); if (arm_gic_interrupt_change_allowed(vector)) { +#if GIC_VERSION > 2 + arm_gicv3_configure_irq_locked(cpu, vector); +#endif h = get_int_handler(vector, cpu); h->handler = handler; h->arg = arg; +#if ARM_GIC_USE_DOORBELL_NS_IRQ + /* + * Use lowest priority Linux does not mask to allow masking the entire + * group 
while still allowing other interrupts to be delivered. + */ + arm_gic_set_priority_locked(vector, 0xf7); +#endif + + /* + * For GICv3, SGIs are maskable, and on GICv2, whether they are + * maskable is implementation defined. As a result, the caller cannot + * rely on them being maskable, so we enable all registered SGIs as if + * they were non-maskable. + */ + if (vector < GIC_MAX_SGI_INT) { + gic_set_enable(vector, true); + } } spin_unlock_restore(&gicd_lock, state, GICD_LOCK_FLAGS); @@ -112,47 +176,10 @@ void register_int_handler_msi(unsigned int vector, int_handler handler, void *ar register_int_handler(vector, handler, arg); } -/* main cpu regs */ -#define GICC_CTLR (GICC_OFFSET + 0x0000) -#define GICC_PMR (GICC_OFFSET + 0x0004) -#define GICC_BPR (GICC_OFFSET + 0x0008) -#define GICC_IAR (GICC_OFFSET + 0x000c) -#define GICC_EOIR (GICC_OFFSET + 0x0010) -#define GICC_RPR (GICC_OFFSET + 0x0014) -#define GICC_HPPIR (GICC_OFFSET + 0x0018) -#define GICC_APBR (GICC_OFFSET + 0x001c) -#define GICC_AIAR (GICC_OFFSET + 0x0020) -#define GICC_AEOIR (GICC_OFFSET + 0x0024) -#define GICC_AHPPIR (GICC_OFFSET + 0x0028) -#define GICC_APR(n) (GICC_OFFSET + 0x00d0 + (n) * 4) -#define GICC_NSAPR(n) (GICC_OFFSET + 0x00e0 + (n) * 4) -#define GICC_IIDR (GICC_OFFSET + 0x00fc) -#define GICC_DIR (GICC_OFFSET + 0x1000) - -/* distribution regs */ -#define GICD_CTLR (GICD_OFFSET + 0x000) -#define GICD_TYPER (GICD_OFFSET + 0x004) -#define GICD_IIDR (GICD_OFFSET + 0x008) -#define GICD_IGROUPR(n) (GICD_OFFSET + 0x080 + (n) * 4) -#define GICD_ISENABLER(n) (GICD_OFFSET + 0x100 + (n) * 4) -#define GICD_ICENABLER(n) (GICD_OFFSET + 0x180 + (n) * 4) -#define GICD_ISPENDR(n) (GICD_OFFSET + 0x200 + (n) * 4) -#define GICD_ICPENDR(n) (GICD_OFFSET + 0x280 + (n) * 4) -#define GICD_ISACTIVER(n) (GICD_OFFSET + 0x300 + (n) * 4) -#define GICD_ICACTIVER(n) (GICD_OFFSET + 0x380 + (n) * 4) -#define GICD_IPRIORITYR(n) (GICD_OFFSET + 0x400 + (n) * 4) -#define GICD_ITARGETSR(n) (GICD_OFFSET + 0x800 + (n) * 4) -#define GICD_ICFGR(n) (GICD_OFFSET + 0xc00 + (n) * 4) -#define GICD_NSACR(n) (GICD_OFFSET + 0xe00 + (n) * 4) -#define GICD_SGIR (GICD_OFFSET + 0xf00) -#define GICD_CPENDSGIR(n) (GICD_OFFSET + 0xf10 + (n) * 4) -#define GICD_SPENDSGIR(n) (GICD_OFFSET + 0xf20 + (n) * 4) - -#define DIV_ROUND_UP(n,d) (((n) + (d) - 1) / (d)) #define GIC_REG_COUNT(bit_per_reg) DIV_ROUND_UP(MAX_INT, (bit_per_reg)) #define DEFINE_GIC_SHADOW_REG(name, bit_per_reg, init_val, init_from) \ uint32_t (name)[GIC_REG_COUNT(bit_per_reg)] = { \ - [((init_from) / (bit_per_reg)) ... \ + [(init_from / bit_per_reg) ... 
\ (GIC_REG_COUNT(bit_per_reg) - 1)] = (init_val) \ } @@ -161,122 +188,133 @@ static DEFINE_GIC_SHADOW_REG(gicd_igroupr, 32, ~0U, 0); #endif static DEFINE_GIC_SHADOW_REG(gicd_itargetsr, 4, 0x01010101, 32); -// accessor routines for GIC registers that go through the mmio interface -static inline uint32_t gicreg_read32(uint32_t gic, uint32_t register_offset) { - return mmio_read32((volatile uint32_t *)(GICBASE(gic) + register_offset)); -} - -static inline void gicreg_write32(uint32_t gic, uint32_t register_offset, uint32_t value) { - mmio_write32((volatile uint32_t *)(GICBASE(gic) + register_offset), value); -} - -static void gic_set_enable(uint vector, bool enable) { - uint reg = vector / 32; +static void gic_set_enable(uint vector, bool enable) +{ + int reg = vector / 32; uint32_t mask = 1ULL << (vector % 32); - if (enable) { - gicreg_write32(0, GICD_ISENABLER(reg), mask); - } else { - gicreg_write32(0, GICD_ICENABLER(reg), mask); +#if GIC_VERSION > 2 + if (reg == 0) { + uint32_t cpu = arch_curr_cpu_num(); + + /* On GICv3/v4 these are on GICR */ + if (enable) + GICRREG_WRITE(0, cpu, GICR_ISENABLER0, mask); + else + GICRREG_WRITE(0, cpu, GICR_ICENABLER0, mask); + return; + } +#endif + if (enable) + GICDREG_WRITE(0, GICD_ISENABLER(reg), mask); + else { + GICDREG_WRITE(0, GICD_ICENABLER(reg), mask); + +#if GIC_VERSION > 2 + /* for GIC V3, make sure write is complete */ + arm_gicv3_wait_for_write_complete(); +#endif } } -static void arm_gic_init_percpu(uint level) { +static void arm_gic_init_percpu(uint level) +{ +#if GIC_VERSION > 2 + /* GICv3/v4 */ + arm_gicv3_init_percpu(); +#else + /* GICv2 */ #if WITH_LIB_SM - gicreg_write32(0, GICC_CTLR, 0xb); // enable GIC0 and select fiq mode for secure - gicreg_write32(0, GICD_IGROUPR(0), ~0U); /* GICD_IGROUPR0 is banked */ + GICCREG_WRITE(0, GICC_CTLR, 0xb); // enable GIC0 and select fiq mode for secure + GICDREG_WRITE(0, GICD_IGROUPR(0), ~0U); /* GICD_IGROUPR0 is banked */ #else - gicreg_write32(0, GICC_CTLR, 1); // enable GIC0 + GICCREG_WRITE(0, GICC_CTLR, 1); // enable GIC0 #endif - gicreg_write32(0, GICC_PMR, 0xFF); // unmask interrupts at all priority levels + GICCREG_WRITE(0, GICC_PMR, 0xFF); // unmask interrupts at all priority levels +#endif /* GIC_VERSION > 2 */ } LK_INIT_HOOK_FLAGS(arm_gic_init_percpu, arm_gic_init_percpu, LK_INIT_LEVEL_PLATFORM_EARLY, LK_INIT_FLAG_SECONDARY_CPUS); -static void arm_gic_suspend_cpu(uint level) { - suspend_resume_fiq(false, false); +static void arm_gic_suspend_cpu(uint level) +{ +#if GIC_VERSION > 2 + arm_gicv3_suspend_cpu(arch_curr_cpu_num()); +#endif } LK_INIT_HOOK_FLAGS(arm_gic_suspend_cpu, arm_gic_suspend_cpu, - LK_INIT_LEVEL_PLATFORM, LK_INIT_FLAG_CPU_SUSPEND); + LK_INIT_LEVEL_PLATFORM, LK_INIT_FLAG_CPU_OFF); -static void arm_gic_resume_cpu(uint level) { +static void arm_gic_resume_cpu(uint level) +{ spin_lock_saved_state_t state; - bool resume_gicd = false; + __UNUSED bool resume_gicd = false; spin_lock_save(&gicd_lock, &state, GICD_LOCK_FLAGS); - if (!(gicreg_read32(0, GICD_CTLR) & 1)) { + +#if GIC_VERSION > 2 + if (!(GICDREG_READ(0, GICD_CTLR) & 5)) { +#else + if (!(GICDREG_READ(0, GICD_CTLR) & 1)) { +#endif dprintf(SPEW, "%s: distibutor is off, calling arm_gic_init instead\n", __func__); - arm_gic_init(); + arm_gic_init_hw(); resume_gicd = true; } else { arm_gic_init_percpu(0); } + +#if GIC_VERSION > 2 + { + uint cpu = arch_curr_cpu_num(); + uint max_irq = resume_gicd ? 
MAX_INT : GIC_MAX_PER_CPU_INT; + + for (uint v = 0; v < max_irq; v++) { + struct int_handler_struct *h = get_int_handler(v, cpu); + if (h->handler) { + arm_gicv3_configure_irq_locked(cpu, v); + } + } + arm_gicv3_resume_cpu_locked(cpu, resume_gicd); + } +#endif spin_unlock_restore(&gicd_lock, state, GICD_LOCK_FLAGS); - suspend_resume_fiq(true, resume_gicd); } LK_INIT_HOOK_FLAGS(arm_gic_resume_cpu, arm_gic_resume_cpu, LK_INIT_LEVEL_PLATFORM, LK_INIT_FLAG_CPU_RESUME); -static uint arm_gic_max_cpu(void) { - return (gicreg_read32(0, GICD_TYPER) >> 5) & 0x7; -} - -static status_t gic_configure_interrupt(unsigned int vector, - enum interrupt_trigger_mode tm, - enum interrupt_polarity pol) { - //Only configurable for SPI interrupts - if ((vector >= MAX_INT) || (vector < GIC_BASE_SPI)) { - return ERR_INVALID_ARGS; - } - - if (pol != IRQ_POLARITY_ACTIVE_HIGH) { - // TODO: polarity should actually be configure through a GPIO controller - return ERR_NOT_SUPPORTED; - } - - // type is encoded with two bits, MSB of the two determine type - // 16 irqs encoded per ICFGR register - uint32_t reg_ndx = vector >> 4; - uint32_t bit_shift = ((vector & 0xf) << 1) + 1; - uint32_t reg_val = gicreg_read32(0, GICD_ICFGR(reg_ndx)); - if (tm == IRQ_TRIGGER_MODE_EDGE) { - reg_val |= (1 << bit_shift); - } else { - reg_val &= ~(1 << bit_shift); - } - gicreg_write32(0, GICD_ICFGR(reg_ndx), reg_val); - - return NO_ERROR; +static int arm_gic_max_cpu(void) +{ + return (GICDREG_READ(0, GICD_TYPER) >> 5) & 0x7; } -void arm_gic_init(void) { +static void arm_gic_init_hw(void) +{ +#if GIC_VERSION > 2 + /* GICv3/v4 */ + arm_gicv3_init(); +#else int i; for (i = 0; i < MAX_INT; i+= 32) { - gicreg_write32(0, GICD_ICENABLER(i / 32), ~0); - gicreg_write32(0, GICD_ICPENDR(i / 32), ~0); + GICDREG_WRITE(0, GICD_ICENABLER(i / 32), ~0U); + GICDREG_WRITE(0, GICD_ICPENDR(i / 32), ~0U); } if (arm_gic_max_cpu() > 0) { /* Set external interrupts to target cpu 0 */ for (i = 32; i < MAX_INT; i += 4) { - gicreg_write32(0, GICD_ITARGETSR(i / 4), gicd_itargetsr[i / 4]); + GICDREG_WRITE(0, GICD_ITARGETSR(i / 4), gicd_itargetsr[i / 4]); } } - // Initialize all the SPIs to edge triggered - for (i = 32; i < MAX_INT; i++) { - gic_configure_interrupt(i, IRQ_TRIGGER_MODE_EDGE, IRQ_POLARITY_ACTIVE_HIGH); - } - - - gicreg_write32(0, GICD_CTLR, 1); // enable GIC0 + GICDREG_WRITE(0, GICD_CTLR, 1); // enable GIC0 #if WITH_LIB_SM - gicreg_write32(0, GICD_CTLR, 3); // enable GIC0 ns interrupts + GICDREG_WRITE(0, GICD_CTLR, 3); // enable GIC0 ns interrupts /* * Iterate through all IRQs and set them to non-secure * mode. 
This will allow the non-secure side to handle @@ -284,13 +322,86 @@ void arm_gic_init(void) { */ for (i = 32; i < MAX_INT; i += 32) { u_int reg = i / 32; - gicreg_write32(0, GICD_IGROUPR(reg), gicd_igroupr[reg]); + GICDREG_WRITE(0, GICD_IGROUPR(reg), gicd_igroupr[reg]); } #endif +#endif /* GIC_VERSION > 2 */ arm_gic_init_percpu(0); } -static status_t arm_gic_set_secure_locked(u_int irq, bool secure) { +void arm_gic_init(void) { +#ifdef GICBASE + arm_gics[0].gicd_vaddr = GICBASE(0) + GICD_OFFSET; + arm_gics[0].gicd_size = GICD_MIN_SIZE; +#if GIC_VERSION > 2 + arm_gics[0].gicr_vaddr = GICBASE(0) + GICR_OFFSET; + arm_gics[0].gicr_size = GICR_CPU_OFFSET(SMP_MAX_CPUS - 1) + GICR_MIN_SIZE; +#else /* GIC_VERSION > 2 */ + arm_gics[0].gicc_vaddr = GICBASE(0) + GICC_OFFSET; + arm_gics[0].gicc_size = GICC_MIN_SIZE; +#endif /* GIC_VERSION > 2 */ +#else + /* Platforms should define GICBASE if they want to call this */ + panic("%s: GICBASE not defined\n", __func__); +#endif /* GICBASE */ + + arm_gic_init_hw(); +} + +static void arm_map_regs(const char* name, + vaddr_t* vaddr, + paddr_t paddr, + size_t size) { + status_t ret; + void* vaddrp = (void*)vaddr; + + if (!size) { + return; + } + + ret = vmm_alloc_physical(vmm_get_kernel_aspace(), "gic", size, &vaddrp, 0, + paddr, 0, ARCH_MMU_FLAG_UNCACHED_DEVICE | + ARCH_MMU_FLAG_PERM_NO_EXECUTE); + if (ret) { + panic("%s: failed %d\n", __func__, ret); + } + + *vaddr = (vaddr_t)vaddrp; +} + +void arm_gic_init_map(struct arm_gic_init_info* init_info) +{ + if (init_info->gicd_size < GICD_MIN_SIZE) { + panic("%s: gicd mapping too small %zu\n", __func__, + init_info->gicd_size); + } + arm_map_regs("gicd", &arm_gics[0].gicd_vaddr, init_info->gicd_paddr, + init_info->gicd_size); + arm_gics[0].gicd_size = init_info->gicd_size; + +#if GIC_VERSION > 2 + if (init_info->gicr_size < GICR_CPU_OFFSET(SMP_MAX_CPUS - 1) + GICR_MIN_SIZE) { + panic("%s: gicr mapping too small %zu\n", __func__, + init_info->gicr_size); + } + arm_map_regs("gicr", &arm_gics[0].gicr_vaddr, init_info->gicr_paddr, + init_info->gicr_size); + arm_gics[0].gicr_size = init_info->gicr_size; +#else /* GIC_VERSION > 2 */ + if (init_info->gicc_size < GICC_MIN_SIZE) { + panic("%s: gicc mapping too small %zu\n", __func__, + init_info->gicc_size); + } + arm_map_regs("gicc", &arm_gics[0].gicc_vaddr, init_info->gicc_paddr, + init_info->gicc_size); + arm_gics[0].gicc_size = init_info->gicc_size; +#endif /* GIC_VERSION > 2 */ + + arm_gic_init_hw(); +} + +static status_t arm_gic_set_secure_locked(u_int irq, bool secure) +{ #if WITH_LIB_SM int reg = irq / 32; uint32_t mask = 1ULL << (irq % 32); @@ -299,16 +410,17 @@ static status_t arm_gic_set_secure_locked(u_int irq, bool secure) { return ERR_INVALID_ARGS; if (secure) - gicreg_write32(0, GICD_IGROUPR(reg), (gicd_igroupr[reg] &= ~mask)); + GICDREG_WRITE(0, GICD_IGROUPR(reg), (gicd_igroupr[reg] &= ~mask)); else - gicreg_write32(0, GICD_IGROUPR(reg), (gicd_igroupr[reg] |= mask)); + GICDREG_WRITE(0, GICD_IGROUPR(reg), (gicd_igroupr[reg] |= mask)); LTRACEF("irq %d, secure %d, GICD_IGROUP%d = %x\n", - irq, secure, reg, gicreg_read32(0, GICD_IGROUPR(reg))); + irq, secure, reg, GICDREG_READ(0, GICD_IGROUPR(reg))); #endif return NO_ERROR; } -static status_t arm_gic_set_target_locked(u_int irq, u_int cpu_mask, u_int enable_mask) { +static status_t arm_gic_set_target_locked(u_int irq, u_int cpu_mask, u_int enable_mask) +{ u_int reg = irq / 4; u_int shift = 8 * (irq % 4); u_int old_val; @@ -317,55 +429,91 @@ static status_t arm_gic_set_target_locked(u_int irq, u_int cpu_mask, 
u_int enabl cpu_mask = (cpu_mask & 0xff) << shift; enable_mask = (enable_mask << shift) & cpu_mask; - old_val = gicreg_read32(0, GICD_ITARGETSR(reg)); + old_val = GICDREG_READ(0, GICD_ITARGETSR(reg)); new_val = (gicd_itargetsr[reg] & ~cpu_mask) | enable_mask; - gicreg_write32(0, GICD_ITARGETSR(reg), (gicd_itargetsr[reg] = new_val)); + GICDREG_WRITE(0, GICD_ITARGETSR(reg), (gicd_itargetsr[reg] = new_val)); LTRACEF("irq %i, GICD_ITARGETSR%d %x => %x (got %x)\n", - irq, reg, old_val, new_val, gicreg_read32(0, GICD_ITARGETSR(reg))); + irq, reg, old_val, new_val, GICDREG_READ(0, GICD_ITARGETSR(reg))); return NO_ERROR; } -static uint8_t arm_gic_get_priority(u_int irq) { +static status_t arm_gic_get_priority(u_int irq) +{ u_int reg = irq / 4; u_int shift = 8 * (irq % 4); - return (gicreg_read32(0, GICD_IPRIORITYR(reg)) >> shift) & 0xff; + return (GICDREG_READ(0, GICD_IPRIORITYR(reg)) >> shift) & 0xff; } -static status_t arm_gic_set_priority_locked(u_int irq, uint8_t priority) { +static status_t arm_gic_set_priority_locked(u_int irq, uint8_t priority) +{ u_int reg = irq / 4; u_int shift = 8 * (irq % 4); - u_int mask = 0xff << shift; + u_int mask = 0xffU << shift; uint32_t regval; - regval = gicreg_read32(0, GICD_IPRIORITYR(reg)); +#if GIC_VERSION > 2 + if (irq < 32) { + uint cpu = arch_curr_cpu_num(); + + /* On GICv3 IPRIORITY registers are on redistributor */ + regval = GICRREG_READ(0, cpu, GICR_IPRIORITYR(reg)); + LTRACEF("irq %i, cpu %d: old GICR_IPRIORITYR%d = %x\n", irq, cpu, reg, + regval); + regval = (regval & ~mask) | ((uint32_t)priority << shift); + GICRREG_WRITE(0, cpu, GICR_IPRIORITYR(reg), regval); + LTRACEF("irq %i, cpu %d, new GICD_IPRIORITYR%d = %x, req %x\n", + irq, cpu, reg, GICDREG_READ(0, GICD_IPRIORITYR(reg)), regval); + return 0; + } +#endif + + regval = GICDREG_READ(0, GICD_IPRIORITYR(reg)); LTRACEF("irq %i, old GICD_IPRIORITYR%d = %x\n", irq, reg, regval); regval = (regval & ~mask) | ((uint32_t)priority << shift); - gicreg_write32(0, GICD_IPRIORITYR(reg), regval); + GICDREG_WRITE(0, GICD_IPRIORITYR(reg), regval); LTRACEF("irq %i, new GICD_IPRIORITYR%d = %x, req %x\n", - irq, reg, gicreg_read32(0, GICD_IPRIORITYR(reg)), regval); + irq, reg, GICDREG_READ(0, GICD_IPRIORITYR(reg)), regval); return 0; } -status_t arm_gic_sgi(u_int irq, u_int flags, u_int cpu_mask) { +status_t arm_gic_sgi(u_int irq, u_int flags, u_int cpu_mask) +{ + if (irq >= 16) { + return ERR_INVALID_ARGS; + } + +#if GIC_VERSION > 2 + for (size_t cpu = 0; cpu < SMP_MAX_CPUS; cpu++) { + if (!((cpu_mask >> cpu) & 1)) { + continue; + } + + uint64_t val = arm_gicv3_sgir_val(irq, cpu); + + GICCREG_WRITE(0, GICC_PRIMARY_SGIR, val); + } + +#else /* else GIC_VERSION > 2 */ + u_int val = ((flags & ARM_GIC_SGI_FLAG_TARGET_FILTER_MASK) << 24) | ((cpu_mask & 0xff) << 16) | ((flags & ARM_GIC_SGI_FLAG_NS) ? 
(1U << 15) : 0) | (irq & 0xf); - if (irq >= 16) - return ERR_INVALID_ARGS; - LTRACEF("GICD_SGIR: %x\n", val); - gicreg_write32(0, GICD_SGIR, val); + GICDREG_WRITE(0, GICD_SGIR, val); + +#endif /* else GIC_VERSION > 2 */ return NO_ERROR; } -status_t mask_interrupt(unsigned int vector) { +status_t mask_interrupt(unsigned int vector) +{ if (vector >= MAX_INT) return ERR_INVALID_ARGS; @@ -375,7 +523,8 @@ status_t mask_interrupt(unsigned int vector) { return NO_ERROR; } -status_t unmask_interrupt(unsigned int vector) { +status_t unmask_interrupt(unsigned int vector) +{ if (vector >= MAX_INT) return ERR_INVALID_ARGS; @@ -386,14 +535,20 @@ status_t unmask_interrupt(unsigned int vector) { } static -enum handler_return __platform_irq(struct iframe *frame) { +enum handler_return __platform_irq(struct iframe *frame) +{ // get the current vector - uint32_t iar = gicreg_read32(0, GICC_IAR); + uint32_t iar = GICCREG_READ(0, GICC_PRIMARY_IAR); unsigned int vector = iar & 0x3ff; if (vector >= 0x3fe) { +#if WITH_LIB_SM && ARM_GIC_USE_DOORBELL_NS_IRQ + // spurious or non-secure interrupt + return sm_handle_irq(); +#else // spurious return INT_NO_RESCHEDULE; +#endif } THREAD_STATS_INC(interrupts); @@ -401,7 +556,7 @@ enum handler_return __platform_irq(struct iframe *frame) { uint cpu = arch_curr_cpu_num(); - LTRACEF_LEVEL(2, "iar 0x%x cpu %u currthread %p vector %d pc 0x%lx\n", iar, cpu, + LTRACEF_LEVEL(2, "iar 0x%x cpu %u currthread %p vector %d pc 0x%" PRIxPTR "\n", iar, cpu, get_current_thread(), vector, (uintptr_t)IFRAME_PC(frame)); // deliver the interrupt @@ -412,7 +567,7 @@ enum handler_return __platform_irq(struct iframe *frame) { if (handler->handler) ret = handler->handler(handler->arg); - gicreg_write32(0, GICC_EOIR, iar); + GICCREG_WRITE(0, GICC_PRIMARY_EOIR, iar); LTRACEF_LEVEL(2, "cpu %u exit %d\n", cpu, ret); @@ -421,14 +576,25 @@ enum handler_return __platform_irq(struct iframe *frame) { return ret; } -enum handler_return platform_irq(struct iframe *frame); -enum handler_return platform_irq(struct iframe *frame) { -#if WITH_LIB_SM - uint32_t ahppir = gicreg_read32(0, GICC_AHPPIR); +enum handler_return platform_irq(struct iframe *frame) +{ +#if WITH_LIB_SM && !ARM_GIC_USE_DOORBELL_NS_IRQ + uint32_t ahppir = GICCREG_READ(0, GICC_PRIMARY_HPPIR); uint32_t pending_irq = ahppir & 0x3ff; struct int_handler_struct *h; uint cpu = arch_curr_cpu_num(); +#if ARM_MERGE_FIQ_IRQ + { + uint32_t hppir = GICCREG_READ(0, GICC_HPPIR); + uint32_t pending_fiq = hppir & 0x3ff; + if (pending_fiq < MAX_INT) { + platform_fiq(frame); + return INT_NO_RESCHEDULE; + } + } +#endif + LTRACEF("ahppir %d\n", ahppir); if (pending_irq < MAX_INT && get_int_handler(pending_irq, cpu)->handler) { enum handler_return ret = 0; @@ -445,7 +611,7 @@ enum handler_return platform_irq(struct iframe *frame) { old_priority = arm_gic_get_priority(pending_irq); arm_gic_set_priority_locked(pending_irq, 0); DSB; - irq = gicreg_read32(0, GICC_AIAR) & 0x3ff; + irq = GICCREG_READ(0, GICC_PRIMARY_IAR) & 0x3ff; arm_gic_set_priority_locked(pending_irq, old_priority); spin_unlock_restore(&gicd_lock, state, GICD_LOCK_FLAGS); @@ -455,7 +621,7 @@ enum handler_return platform_irq(struct iframe *frame) { ret = h->handler(h->arg); else TRACEF("unexpected irq %d != %d may get lost\n", irq, pending_irq); - gicreg_write32(0, GICC_AEOIR, irq); + GICCREG_WRITE(0, GICC_PRIMARY_EOIR, irq); return ret; } return sm_handle_irq(); @@ -464,8 +630,8 @@ enum handler_return platform_irq(struct iframe *frame) { #endif } -void platform_fiq(struct iframe *frame); -void 
platform_fiq(struct iframe *frame) { +void platform_fiq(struct iframe *frame) +{ #if WITH_LIB_SM sm_handle_fiq(); #else @@ -474,28 +640,40 @@ void platform_fiq(struct iframe *frame) { } #if WITH_LIB_SM -static status_t arm_gic_get_next_irq_locked(u_int min_irq, bool per_cpu) { +static status_t arm_gic_get_next_irq_locked(u_int min_irq, uint type) +{ +#if ARM_GIC_USE_DOORBELL_NS_IRQ + if (type == TRUSTY_IRQ_TYPE_DOORBELL && min_irq <= ARM_GIC_DOORBELL_IRQ) { + doorbell_enabled = true; + return ARM_GIC_DOORBELL_IRQ; + } +#else u_int irq; - u_int max_irq = per_cpu ? GIC_MAX_PER_CPU_INT : MAX_INT; + u_int max_irq = type == TRUSTY_IRQ_TYPE_PER_CPU ? GIC_MAX_PER_CPU_INT : + type == TRUSTY_IRQ_TYPE_NORMAL ? MAX_INT : 0; uint cpu = arch_curr_cpu_num(); - if (!per_cpu && min_irq < GIC_MAX_PER_CPU_INT) + if (type == TRUSTY_IRQ_TYPE_NORMAL && min_irq < GIC_MAX_PER_CPU_INT) min_irq = GIC_MAX_PER_CPU_INT; for (irq = min_irq; irq < max_irq; irq++) if (get_int_handler(irq, cpu)->handler) return irq; +#endif return SM_ERR_END_OF_INPUT; } -long smc_intc_get_next_irq(smc32_args_t *args) { +long smc_intc_get_next_irq(struct smc32_args *args) +{ status_t ret; spin_lock_saved_state_t state; spin_lock_save(&gicd_lock, &state, GICD_LOCK_FLAGS); +#if !ARM_GIC_USE_DOORBELL_NS_IRQ arm_gic_non_secure_interrupts_frozen = true; +#endif ret = arm_gic_get_next_irq_locked(args->params[0], args->params[1]); LTRACEF("min_irq %d, per_cpu %d, ret %d\n", args->params[0], args->params[1], ret); @@ -505,120 +683,49 @@ long smc_intc_get_next_irq(smc32_args_t *args) { return ret; } -static u_long enabled_fiq_mask[BITMAP_NUM_WORDS(MAX_INT)]; - -static void bitmap_update_locked(u_long *bitmap, u_int bit, bool set) { - u_long mask = 1UL << BITMAP_BIT_IN_WORD(bit); - - bitmap += BITMAP_WORD(bit); - if (set) - *bitmap |= mask; - else - *bitmap &= ~mask; -} - -long smc_intc_request_fiq(smc32_args_t *args) { - u_int fiq = args->params[0]; - bool enable = args->params[1]; - spin_lock_saved_state_t state; - - dprintf(SPEW, "%s: fiq %d, enable %d\n", __func__, fiq, enable); - spin_lock_save(&gicd_lock, &state, GICD_LOCK_FLAGS); - - arm_gic_set_secure_locked(fiq, true); - arm_gic_set_target_locked(fiq, ~0, ~0); - arm_gic_set_priority_locked(fiq, 0); - - gic_set_enable(fiq, enable); - bitmap_update_locked(enabled_fiq_mask, fiq, enable); - - dprintf(SPEW, "%s: fiq %d, enable %d done\n", __func__, fiq, enable); - - spin_unlock_restore(&gicd_lock, state, GICD_LOCK_FLAGS); - - return NO_ERROR; -} - -static u_int current_fiq[8] = { 0x3ff, 0x3ff, 0x3ff, 0x3ff, 0x3ff, 0x3ff, 0x3ff, 0x3ff }; - -static bool update_fiq_targets(u_int cpu, bool enable, u_int triggered_fiq, bool resume_gicd) { - u_int i, j; - u_long mask; - u_int fiq; - bool smp = arm_gic_max_cpu() > 0; - bool ret = false; - - spin_lock(&gicd_lock); /* IRQs and FIQs are already masked */ - for (i = 0; i < BITMAP_NUM_WORDS(MAX_INT); i++) { - mask = enabled_fiq_mask[i]; - while (mask) { - j = _ffz(~mask); - mask &= ~(1UL << j); - fiq = i * BITMAP_BITS_PER_WORD + j; - if (fiq == triggered_fiq) - ret = true; - LTRACEF("cpu %d, irq %i, enable %d\n", cpu, fiq, enable); - if (smp) - arm_gic_set_target_locked(fiq, 1U << cpu, enable ? 
~0 : 0); - if (!smp || resume_gicd) - gic_set_enable(fiq, enable); - } - } - spin_unlock(&gicd_lock); - return ret; -} - -static void suspend_resume_fiq(bool resume_gicc, bool resume_gicd) { - u_int cpu = arch_curr_cpu_num(); - - ASSERT(cpu < 8); - - update_fiq_targets(cpu, resume_gicc, ~0, resume_gicd); +void sm_intc_enable_interrupts(void) +{ +#if ARM_GIC_USE_DOORBELL_NS_IRQ + GICCREG_WRITE(0, icc_igrpen1_el1, 1); /* Enable secure Group 1 */ + DSB; +#endif } -status_t sm_intc_fiq_enter(void) { +status_t sm_intc_fiq_enter(void) +{ u_int cpu = arch_curr_cpu_num(); - u_int irq = gicreg_read32(0, GICC_IAR) & 0x3ff; - bool fiq_enabled; - - ASSERT(cpu < 8); +#if GIC_VERSION > 2 + u_int irq = GICCREG_READ(0, icc_iar0_el1) & 0x3ff; +#else + u_int irq = GICCREG_READ(0, GICC_IAR) & 0x3ff; +#endif LTRACEF("cpu %d, irq %i\n", cpu, irq); if (irq >= 1020) { - LTRACEF("spurious fiq: cpu %d, old %d, new %d\n", cpu, current_fiq[cpu], irq); - return ERR_NO_MSG; - } +#if ARM_GIC_USE_DOORBELL_NS_IRQ + uint64_t val = arm_gicv3_sgir_val(ARM_GIC_DOORBELL_IRQ, cpu); - fiq_enabled = update_fiq_targets(cpu, false, irq, false); - gicreg_write32(0, GICC_EOIR, irq); - - if (current_fiq[cpu] != 0x3ff) { - dprintf(INFO, "more than one fiq active: cpu %d, old %d, new %d\n", cpu, current_fiq[cpu], irq); - return ERR_ALREADY_STARTED; - } + GICCREG_WRITE(0, icc_igrpen1_el1, 0); /* Disable secure Group 1 */ + DSB; - if (!fiq_enabled) { - dprintf(INFO, "got disabled fiq: cpu %d, new %d\n", cpu, irq); - return ERR_NOT_READY; + if (doorbell_enabled) { + LTRACEF("GICD_SGIR: %" PRIx64 "\n", val); + GICCREG_WRITE(0, icc_asgi1r_el1, val); + } +#else + LTRACEF("spurious fiq: cpu %d, new %d\n", cpu, irq); +#endif + return ERR_NO_MSG; } - current_fiq[cpu] = irq; - - return 0; -} - -void sm_intc_fiq_exit(void) { - u_int cpu = arch_curr_cpu_num(); - - ASSERT(cpu < 8); +#if GIC_VERSION > 2 + GICCREG_WRITE(0, icc_eoir0_el1, irq); +#else + GICCREG_WRITE(0, GICC_EOIR, irq); +#endif - LTRACEF("cpu %d, irq %i\n", cpu, current_fiq[cpu]); - if (current_fiq[cpu] == 0x3ff) { - dprintf(INFO, "%s: no fiq active, cpu %d\n", __func__, cpu); - return; - } - update_fiq_targets(cpu, true, current_fiq[cpu], false); - current_fiq[cpu] = 0x3ff; + dprintf(INFO, "got disabled fiq: cpu %d, new %d\n", cpu, irq); + return ERR_NOT_READY; } #endif diff --git a/dev/interrupt/arm_gic/arm_gic_common.h b/dev/interrupt/arm_gic/arm_gic_common.h new file mode 100644 index 000000000..519dd6df2 --- /dev/null +++ b/dev/interrupt/arm_gic/arm_gic_common.h @@ -0,0 +1,312 @@ +/* + * Copyright (c) 2012-2019 LK Trusty Authors. All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files + * (the "Software"), to deal in the Software without restriction, + * including without limitation the rights to use, copy, modify, merge, + * publish, distribute, sublicense, and/or sell copies of the Software, + * and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY + * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, + * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE + * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + */ +#pragma once + +#include +#include +#include + +#if ARCH_ARM +#include +#endif +#if ARCH_ARM64 +#include +#endif + +#ifdef ARCH_ARM +/* + * AArch32 does not have 64 bit mmio support, but the gic spec allows 32 bit + * upper and lower access to _most_ 64 bit gic registers (not GICR_VSGIPENDR, + * GICR_VSGIR or GITS_SGIR). + */ +/* TODO: add mmio_read32 when needed */ +static inline void mmio_write64(volatile uint64_t *ptr64, uint64_t val) { + volatile uint32_t *ptr = (volatile uint32_t *)ptr64; + mmio_write32(ptr, (uint32_t)val); + mmio_write32(ptr + 1, val >> 32); +} +#endif + +struct arm_gic { + vaddr_t gicc_vaddr; + size_t gicc_size; + vaddr_t gicd_vaddr; + size_t gicd_size; + vaddr_t gicr_vaddr; + size_t gicr_size; +}; + +#define NUM_ARM_GICS 1 + +extern struct arm_gic arm_gics[NUM_ARM_GICS]; + +#if GIC_VERSION > 2 + +#if WITH_LIB_SM +#define ARM_GIC_USE_DOORBELL_NS_IRQ 1 +#define ARM_GIC_DOORBELL_IRQ 13 +#endif + + /* GICv3/v4 */ + +#define GICV3_IRQ_GROUP_GRP0S 0 +#define GICV3_IRQ_GROUP_GRP1NS 1 +#define GICV3_IRQ_GROUP_GRP1S 2 + +#ifndef ARM_GIC_SELECTED_IRQ_GROUP +#define ARM_GIC_SELECTED_IRQ_GROUP GRP1S +#endif + +#define COMBINE2(a, b) a ## b +#define XCOMBINE2(a, b) COMBINE2(a,b) +#define GICV3_IRQ_GROUP XCOMBINE2(GICV3_IRQ_GROUP_, ARM_GIC_SELECTED_IRQ_GROUP) + +/* + * In ARMv8 for GICv3/v4, ARM suggest to use system register + * to access GICC instead of memory map. + */ +#ifdef ARCH_ARM64 + +#define GICCREG_READ(gic, reg) ARM64_READ_SYSREG(reg) +#define GICCREG_WRITE(gic, reg, val) ARM64_WRITE_SYSREG(reg, (uint64_t)val) + +#else /* ARCH_ARM64 */ + +/* For 32bit mode, use different way to access registers */ +#define GICCREG_READ(gic, reg) COMBINE2(arm_read_,reg)() +#define GICCREG_WRITE(gic, reg, val) COMBINE2(arm_write_,reg)(val) + +GEN_CP15_REG_FUNCS(icc_ctlr_el1, 0, c12, c12, 4); +GEN_CP15_REG_FUNCS(icc_pmr_el1, 0, c4, c6, 0); +GEN_CP15_REG_FUNCS(icc_bpr0_el1, 0, c12, c8, 3); +GEN_CP15_REG_FUNCS(icc_iar0_el1, 0, c12, c8, 0); +GEN_CP15_REG_FUNCS(icc_eoir0_el1, 0, c12, c8, 1); +GEN_CP15_REG_FUNCS(icc_rpr_el1, 0, c12, c11, 3); +GEN_CP15_REG_FUNCS(icc_hppir0_el1, 0, c12, c8, 2); +GEN_CP15_REG_FUNCS(icc_bpr1_el1, 0, c12, c12, 3); +GEN_CP15_REG_FUNCS(icc_iar1_el1, 0, c12, c12, 0); +GEN_CP15_REG_FUNCS(icc_eoir1_el1, 0, c12, c12, 1); +GEN_CP15_REG_FUNCS(icc_hppir1_el1, 0, c12, c12, 2); +GEN_CP15_REG_FUNCS(icc_dir_el1, 0, c12, c11, 1); +GEN_CP15_REG_FUNCS(icc_sre_el1, 0, c12, c12, 5); +GEN_CP15_REG_FUNCS(icc_igrpen0_el1, 0, c12, c12, 6); +GEN_CP15_REG_FUNCS(icc_igrpen1_el1, 0, c12, c12, 7); +GEN_CP15_REG_FUNCS(icc_ap0r0_el1, 0, c12, c8, 4); +GEN_CP15_REG_FUNCS(icc_ap0r1_el1, 0, c12, c8, 5); +GEN_CP15_REG_FUNCS(icc_ap0r2_el1, 0, c12, c8, 6); +GEN_CP15_REG_FUNCS(icc_ap0r3_el1, 0, c12, c8, 7); +GEN_CP15_REG_FUNCS(icc_ap1r0_el1, 0, c12, c9, 0); +GEN_CP15_REG_FUNCS(icc_ap1r1_el1, 0, c12, c9, 1); +GEN_CP15_REG_FUNCS(icc_ap1r2_el1, 0, c12, c9, 2); +GEN_CP15_REG_FUNCS(icc_ap1r3_el1, 0, c12, c9, 3); +GEN_CP15_REG64_FUNCS(icc_sgi1r_el1, 0, c12); +GEN_CP15_REG64_FUNCS(icc_asgi1r_el1, 1, c12); +GEN_CP15_REG64_FUNCS(icc_sgi0r_el1, 2, c12); + +#endif /* ARCH_ARM64 */ + +#if GICV3_IRQ_GROUP == GICV3_IRQ_GROUP_GRP0S +#define GICC_PRIMARY_HPPIR icc_hppir0_el1 +#define GICC_PRIMARY_IAR icc_iar0_el1 +#define GICC_PRIMARY_EOIR 
icc_eoir0_el1 +#define GICC_PRIMARY_SGIR icc_sgi0r_el1 +#else +#define GICC_PRIMARY_HPPIR icc_hppir1_el1 +#define GICC_PRIMARY_IAR icc_iar1_el1 +#define GICC_PRIMARY_EOIR icc_eoir1_el1 +#define GICC_PRIMARY_SGIR icc_sgi1r_el1 +#endif + +#define GICC_LIMIT (0x0000) + +#else /* GIC_VERSION > 2 */ + +#ifndef GICC_OFFSET +#define GICC_OFFSET (0x0000) +#endif + +#define GICCREG_READ(gic, reg) ({ \ + ASSERT(gic < NUM_ARM_GICS); \ + ASSERT(reg >= GICC_OFFSET); \ + ASSERT(reg < GICC_LIMIT); \ + mmio_read32((volatile uint32_t *)(arm_gics[(gic)].gicc_vaddr + ((reg) - GICC_OFFSET))); \ + }) +#define GICCREG_WRITE(gic, reg, val) ({ \ + ASSERT(gic < NUM_ARM_GICS); \ + ASSERT(reg >= GICC_OFFSET); \ + ASSERT(reg < GICC_LIMIT); \ + mmio_write32((volatile uint32_t *)(arm_gics[(gic)].gicc_vaddr + ((reg) - GICC_OFFSET)), (val)); \ + }) +/* main cpu regs */ +#define GICC_CTLR (GICC_OFFSET + 0x0000) +#define GICC_PMR (GICC_OFFSET + 0x0004) +#define GICC_BPR (GICC_OFFSET + 0x0008) +#define GICC_IAR (GICC_OFFSET + 0x000c) +#define GICC_EOIR (GICC_OFFSET + 0x0010) +#define GICC_RPR (GICC_OFFSET + 0x0014) +#define GICC_HPPIR (GICC_OFFSET + 0x0018) +#define GICC_ABPR (GICC_OFFSET + 0x001c) +#define GICC_AIAR (GICC_OFFSET + 0x0020) +#define GICC_AEOIR (GICC_OFFSET + 0x0024) +#define GICC_AHPPIR (GICC_OFFSET + 0x0028) +#define GICC_APR(n) (GICC_OFFSET + 0x00d0 + (n) * 4) +#define GICC_NSAPR(n) (GICC_OFFSET + 0x00e0 + (n) * 4) +#define GICC_IIDR (GICC_OFFSET + 0x00fc) +#if 0 /* GICC_DIR is not currently used by anything */ +#define GICC_DIR (GICC_OFFSET + 0x1000) +#endif +#define GICC_LIMIT (GICC_OFFSET + 0x1000) +#define GICC_MIN_SIZE (GICC_LIMIT - GICC_OFFSET) + +#if WITH_LIB_SM +#define GICC_PRIMARY_HPPIR GICC_AHPPIR +#define GICC_PRIMARY_IAR GICC_AIAR +#define GICC_PRIMARY_EOIR GICC_AEOIR +#else +#define GICC_PRIMARY_HPPIR GICC_HPPIR +#define GICC_PRIMARY_IAR GICC_IAR +#define GICC_PRIMARY_EOIR GICC_EOIR +#endif + +#endif /* GIC_VERSION > 2 */ + +#ifndef GICD_OFFSET +#define GICD_OFFSET (GICC_LIMIT) +#endif + +#define GICDREG_READ(gic, reg) ({ \ + ASSERT(gic < NUM_ARM_GICS); \ + ASSERT(reg >= GICD_OFFSET); \ + ASSERT(reg < GICD_LIMIT); \ + mmio_read32((volatile uint32_t *)(arm_gics[(gic)].gicd_vaddr + ((reg) - GICD_OFFSET))); \ + }) +#define GICDREG_WRITE(gic, reg, val) ({ \ + ASSERT(gic < NUM_ARM_GICS); \ + ASSERT(reg >= GICD_OFFSET); \ + ASSERT(reg < GICD_LIMIT); \ + mmio_write32((volatile uint32_t *)(arm_gics[(gic)].gicd_vaddr + ((reg) - GICD_OFFSET)), (val)); \ + }) +/* distribution regs */ +#define GICD_CTLR (GICD_OFFSET + 0x000) +#define GICD_TYPER (GICD_OFFSET + 0x004) +#define GICD_IIDR (GICD_OFFSET + 0x008) +#define GICD_IGROUPR(n) (GICD_OFFSET + 0x080 + (n) * 4) +#define GICD_ISENABLER(n) (GICD_OFFSET + 0x100 + (n) * 4) +#define GICD_ICENABLER(n) (GICD_OFFSET + 0x180 + (n) * 4) +#define GICD_ISPENDR(n) (GICD_OFFSET + 0x200 + (n) * 4) +#define GICD_ICPENDR(n) (GICD_OFFSET + 0x280 + (n) * 4) +#define GICD_ISACTIVER(n) (GICD_OFFSET + 0x300 + (n) * 4) +#define GICD_ICACTIVER(n) (GICD_OFFSET + 0x380 + (n) * 4) +#define GICD_IPRIORITYR(n) (GICD_OFFSET + 0x400 + (n) * 4) +#define GICD_ITARGETSR(n) (GICD_OFFSET + 0x800 + (n) * 4) +#define GICD_ICFGR(n) (GICD_OFFSET + 0xc00 + (n) * 4) +#define GICD_NSACR(n) (GICD_OFFSET + 0xe00 + (n) * 4) +#define GICD_SGIR (GICD_OFFSET + 0xf00) +#define GICD_CPENDSGIR(n) (GICD_OFFSET + 0xf10 + (n) * 4) +#define GICD_SPENDSGIR(n) (GICD_OFFSET + 0xf20 + (n) * 4) +#if GIC_VERSION <= 2 +/* for v3 and higher, these are defined later */ +#define GICD_LIMIT (GICD_OFFSET + 0x1000) 
+#define GICD_MIN_SIZE (GICD_LIMIT - GICD_OFFSET) +#endif /* GIC_VERSION <= 2 */ + +#if GIC_VERSION > 2 +/* some registers of GICD are 64 bit */ +#define GICDREG_READ64(gic, reg) ({ \ + ASSERT(gic < NUM_ARM_GICS); \ + ASSERT(reg >= GICD_OFFSET); \ + ASSERT(reg < GICD_LIMIT); \ + mmio_read64((volatile uint64_t *)(arm_gics[(gic)].gicd_vaddr + ((reg) - GICD_OFFSET))); \ + }) +#define GICDREG_WRITE64(gic, reg, val) ({ \ + ASSERT(gic < NUM_ARM_GICS); \ + ASSERT(reg >= GICD_OFFSET); \ + ASSERT(reg < GICD_LIMIT); \ + mmio_write64((volatile uint64_t *)(arm_gics[(gic)].gicd_vaddr + ((reg) - GICD_OFFSET)), (val)); \ + }) + +/* GICv3/v4 Distributor interface */ +#define GICD_STATUSR (GICD_OFFSET + 0x0010) +#define GICD_SETSPI_NSR (GICD_OFFSET + 0x0040) +#define GICD_CLRSPI_NSR (GICD_OFFSET + 0x0048) +#define GICD_SETSPI_SR (GICD_OFFSET + 0x0050) +#define GICD_CLRSPI_SR (GICD_OFFSET + 0x0058) +#define GICD_IGRPMODR(n) (GICD_OFFSET + 0x0D00 + (n) * 4) +#define GICD_IROUTER(n) (GICD_OFFSET + 0x6000 + (n) * 8) +#define GICD_LIMIT (GICD_OFFSET + 0x10000) +#define GICD_MIN_SIZE (GICD_LIMIT - GICD_OFFSET) + +/* GICv3/v4 Redistrubutor interface */ +#if GIC_VERSION == 3 +#define GICR_CPU_OFFSET(cpu) ((cpu) * 0x20000) +#endif +#if GIC_VERSION == 4 +#define GICR_CPU_OFFSET(cpu) ((cpu) * 0x40000) +#endif + +#ifndef GICR_OFFSET +#define GICR_OFFSET (GICD_LIMIT) +#endif + +#define GICRREG_READ(gic, cpu, reg) ({ \ + ASSERT(gic < NUM_ARM_GICS); \ + ASSERT(cpu < SMP_MAX_CPUS); \ + ASSERT(reg >= GICR_OFFSET); \ + ASSERT(reg < GICR_LIMIT); \ + mmio_read32((volatile uint32_t *)(arm_gics[(gic)].gicr_vaddr + GICR_CPU_OFFSET(cpu) + ((reg) - GICR_OFFSET))); \ + }) +#define GICRREG_WRITE(gic, cpu, reg, val) ({ \ + ASSERT(gic < NUM_ARM_GICS); \ + ASSERT(cpu < SMP_MAX_CPUS); \ + ASSERT(reg >= GICR_OFFSET); \ + ASSERT(reg < GICR_LIMIT); \ + mmio_write32((volatile uint32_t *)(arm_gics[(gic)].gicr_vaddr + GICR_CPU_OFFSET(cpu) + ((reg) - GICR_OFFSET)), (val)); \ + }) + +#define GICR_CTRL (GICR_OFFSET + 0x0000) +#define GICR_IIDR (GICR_OFFSET + 0x0004) +#define GICR_TYPER (GICR_OFFSET + 0x0008) +#define GICR_STATUSR (GICR_OFFSET + 0x0010) +#define GICR_WAKER (GICR_OFFSET + 0x0014) + +/* The following GICR registers are on separate 64KB page */ +#define GICR_SGI_OFFSET (GICR_OFFSET + 0x10000) +#define GICR_IGROUPR0 (GICR_SGI_OFFSET + 0x0080) +#define GICR_ISENABLER0 (GICR_SGI_OFFSET + 0x0100) +#define GICR_ICENABLER0 (GICR_SGI_OFFSET + 0x0180) +#define GICR_ISPENDR0 (GICR_SGI_OFFSET + 0x0200) +#define GICR_ICPENDR0 (GICR_SGI_OFFSET + 0x0280) +#define GICR_ISACTIVER0 (GICR_SGI_OFFSET + 0x0300) +#define GICR_ICACTIVER0 (GICR_SGI_OFFSET + 0x0380) +#define GICR_IPRIORITYR(n) (GICR_SGI_OFFSET + 0x0400 + (n) * 4) +#define GICR_ICFGR(n) (GICR_SGI_OFFSET + 0x0C00 + (n) * 4) +#define GICR_IGRPMODR0 (GICR_SGI_OFFSET + 0x0D00) +#define GICR_NSACR (GICR_SGI_OFFSET + 0x0E00) +#define GICR_LIMIT (GICR_SGI_OFFSET + 0x1000) +#define GICR_MIN_SIZE (GICR_LIMIT - GICR_OFFSET) +#endif /* GIC_VERSION > 2 */ + +// XXX: from trusty macros.h + +#define ROUND_UP(n, d) (((n) + (size_t)(d) - 1) & ~((size_t)(d) - 1)) +#define DIV_ROUND_UP(n,d) (((n) + (d) - 1) / (d)) diff --git a/dev/interrupt/arm_gic/gic_v3.c b/dev/interrupt/arm_gic/gic_v3.c new file mode 100644 index 000000000..d207a252f --- /dev/null +++ b/dev/interrupt/arm_gic/gic_v3.c @@ -0,0 +1,328 @@ +/* + * Copyright (c) 2019 LK Trusty Authors. All Rights Reserved. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files + * (the "Software"), to deal in the Software without restriction, + * including without limitation the rights to use, copy, modify, merge, + * publish, distribute, sublicense, and/or sell copies of the Software, + * and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY + * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, + * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE + * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + */ + +#include +#include +#include +#include +#include + +#include + +#include "arm_gic_common.h" +#include "gic_v3.h" + +#define WAKER_QSC_BIT (0x1u << 31) +#define WAKER_CA_BIT (0x1u << 2) +#define WAKER_PS_BIT (0x1u << 1) +#define WAKER_SL_BIT (0x1u << 0) + +static void gicv3_gicr_exit_sleep(uint32_t cpu) { + uint32_t val = GICRREG_READ(0, cpu, GICR_WAKER); + + if (val & WAKER_QSC_BIT) { + /* clear sleep bit */ + GICRREG_WRITE(0, cpu, GICR_WAKER, val & ~WAKER_SL_BIT); + while (GICRREG_READ(0, cpu, GICR_WAKER) & WAKER_QSC_BIT) { + } + } +} + +static void gicv3_gicr_mark_awake(uint32_t cpu) { + uint32_t val = GICRREG_READ(0, cpu, GICR_WAKER); + + if (val & WAKER_CA_BIT) { + /* mark CPU as awake */ + GICRREG_WRITE(0, cpu, GICR_WAKER, val & ~WAKER_PS_BIT); + while (GICRREG_READ(0, cpu, GICR_WAKER) & WAKER_CA_BIT) { + } + } +} + +#if GIC600 +/* + * GIC-600 implements an additional GICR power control register + */ +#define GICR_PWRR (GICR_OFFSET + 0x0024) + +#define PWRR_ON (0x0u << 0) +#define PWRR_OFF (0x1u << 0) +#define PWRR_RDGPD (0x1u << 2) +#define PWRR_RDGPO (0x1u << 3) +#define PWRR_RDGP_MASK (PWRR_RDGPD | PWRR_RDGPO) + +static void gicv3_gicr_power_on(uint32_t cpu) { + /* Initiate power up */ + GICRREG_WRITE(0, cpu, GICR_PWRR, PWRR_ON); + + /* wait until it is complete (both bits are clear) */ + while (GICRREG_READ(0, cpu, GICR_PWRR) & PWRR_RDGP_MASK) { + } +} + +static void gicv3_gicr_off(uint32_t cpu) { + /* initiate power down */ + GICRREG_WRITE(0, cpu, GICR_PWRR, PWRR_OFF); + + /* wait until it is complete (both bits are set) */ + while ((GICRREG_READ(0, cpu, GICR_PWRR) & PWRR_RDGP_MASK) != + PWRR_RDGP_MASK) { + } +} +#else /* GIC600 */ + +static void gicv3_gicr_power_on(uint32_t cpu) {} +static void gicv3_gicr_power_off(uint32_t cpu) {} + +#endif /* GIC600 */ + +static void gicv3_gicr_init(void) { + uint32_t cpu = arch_curr_cpu_num(); + + gicv3_gicr_exit_sleep(cpu); + gicv3_gicr_power_on(cpu); + gicv3_gicr_mark_awake(cpu); +} + + +/* GICD_CTRL Register write pending bit */ +#define GICD_CTLR_RWP (0x1U << 31) + +void arm_gicv3_wait_for_write_complete(void) { + /* wait until write complete */ + while (GICDREG_READ(0, GICD_CTLR) & GICD_CTLR_RWP) { + } +} + +static void gicv3_gicd_ctrl_write(uint32_t val) { + /* write CTRL register */ + GICDREG_WRITE(0, GICD_CTLR, val); + + /* wait until write complete */ + arm_gicv3_wait_for_write_complete(); +} + +static void gicv3_gicd_setup_irq_group(uint32_t vector, 
uint32_t grp) { + uint32_t val; + uint32_t mask; + + ASSERT((vector >= 32) && (vector < MAX_INT)); + + mask = (0x1u << (vector % 32)); + + val = GICDREG_READ(0, GICD_IGROUPR(vector / 32)); + if (grp & 0x1u) { + val |= mask; + } else { + val &= ~mask; + } + GICDREG_WRITE(0, GICD_IGROUPR(vector / 32), val); + + val = GICDREG_READ(0, GICD_IGRPMODR(vector / 32)); + if (grp & 0x2u) { + val |= mask; + } else { + val &= ~mask; + } + GICDREG_WRITE(0, GICD_IGRPMODR(vector / 32), val); +} + +static void gicv3_gicd_setup_default_group(uint32_t grp) { + uint32_t i; + + /* Assign all interrupts to selected group */ + for (i = 32; i < MAX_INT; i += 32) { + GICDREG_WRITE(0, GICD_IGROUPR(i / 32), (grp & 0x1u) ? ~0U : 0); + GICDREG_WRITE(0, GICD_IGRPMODR(i / 32), (grp & 0x2u) ? ~0U : 0); + } +} + +static void gicv3_gicr_setup_irq_group(uint32_t vector, uint32_t grp) { + uint32_t val; + uint32_t mask; + uint32_t cpu = arch_curr_cpu_num(); + + ASSERT(vector < 32); + + mask = (0x1u << vector); + + val = GICRREG_READ(0, cpu, GICR_IGROUPR0); + if (grp & 0x1u) { + val |= mask; + } else { + val &= ~mask; + } + GICRREG_WRITE(0, cpu, GICR_IGROUPR0, val); + + val = GICRREG_READ(0, cpu, GICR_IGRPMODR0); + if (grp & 0x2u) { + val |= mask; + } else { + val &= ~mask; + } + GICRREG_WRITE(0, cpu, GICR_IGRPMODR0, val); +} + +static void gicv3_gicr_setup_default_group(uint32_t grp) { + uint32_t cpu = arch_curr_cpu_num(); + + GICRREG_WRITE(0, cpu, GICR_IGROUPR0, (grp & 0x1u) ? ~0U : 0); + GICRREG_WRITE(0, cpu, GICR_IGRPMODR0, (grp & 0x2u) ? ~0U : 0); +} + +void arm_gicv3_init(void) { + uint32_t grp_mask = (0x1u << GICV3_IRQ_GROUP); + +#if !WITH_LIB_SM + /* non-TZ */ + int i; + + /* Disable all groups before making changes */ + gicv3_gicd_ctrl_write(GICDREG_READ(0, GICD_CTLR) & ~0x7U); + + for (i = 0; i < MAX_INT; i += 32) { + GICDREG_WRITE(0, GICD_ICENABLER(i / 32), ~0U); + GICDREG_WRITE(0, GICD_ICPENDR(i / 32), ~0U); + } + + /* Direct SPI interrupts to any core */ + for (i = 32; i < MAX_INT; i++) { + GICDREG_WRITE64(0, GICD_IROUTER(i), 0x80000000); + } +#endif + + /* Enable selected group */ + gicv3_gicd_ctrl_write(GICDREG_READ(0, GICD_CTLR) | grp_mask); +} + +void arm_gicv3_init_percpu(void) { +#if WITH_LIB_SM + /* TZ */ + /* Initialized by ATF */ +#if ARM_GIC_USE_DOORBELL_NS_IRQ + gicv3_gicr_setup_irq_group(ARM_GIC_DOORBELL_IRQ, GICV3_IRQ_GROUP_GRP1NS); +#endif +#else + /* non-TZ */ + + /* Init registributor interface */ + gicv3_gicr_init(); + + /* Enable CPU interface access */ + GICCREG_WRITE(0, icc_sre_el1, (GICCREG_READ(0, icc_sre_el1) | 0x7)); +#endif + + /* enable selected percpu group */ + if (GICV3_IRQ_GROUP == 0) { + GICCREG_WRITE(0, icc_igrpen0_el1, 1); + } else { + GICCREG_WRITE(0, icc_igrpen1_el1, 1); + } + + /* Unmask interrupts at all priority levels */ + GICCREG_WRITE(0, icc_pmr_el1, 0xFF); +} + +void arm_gicv3_configure_irq_locked(unsigned int cpu, unsigned int vector) { + uint32_t grp = GICV3_IRQ_GROUP; + + ASSERT(vector < MAX_INT); + + if (vector < 32) { + /* PPIs */ + gicv3_gicr_setup_irq_group(vector, grp); + } else { + /* SPIs */ + gicv3_gicd_setup_irq_group(vector, grp); + } +} + +static uint32_t enabled_spi_mask[DIV_ROUND_UP(MAX_INT, 32)]; +static uint32_t enabled_ppi_mask[SMP_MAX_CPUS]; + +void arm_gicv3_suspend_cpu(unsigned int cpu) { + uint32_t i; + ASSERT(cpu < SMP_MAX_CPUS); + + if (cpu == 0) { + /* also save gicd */ + for (i = 32; i < MAX_INT; i += 32) { + enabled_spi_mask[i / 32] = GICDREG_READ(0, GICD_ISENABLER(i / 32)); + } + } + enabled_ppi_mask[cpu] = GICRREG_READ(0, cpu, 
GICR_ISENABLER0); +} + +void arm_gicv3_resume_cpu_locked(unsigned int cpu, bool gicd) { + uint32_t i; + ASSERT(cpu < SMP_MAX_CPUS); + + GICRREG_WRITE(0, cpu, GICR_ISENABLER0, enabled_ppi_mask[cpu]); + + if (gicd) { + /* also resume gicd */ + for (i = 32; i < MAX_INT; i += 32) { + GICDREG_WRITE(0, GICD_ISENABLER(i / 32), enabled_spi_mask[i / 32]); + } + } +} + +#if WITH_SMP +STATIC_ASSERT(SMP_CPU_CLUSTER_SHIFT <= 8); +/* SMP_MAX_CPUs needs to be addressable with only two affinities */ +STATIC_ASSERT((SMP_MAX_CPUS >> SMP_CPU_CLUSTER_SHIFT) <= 0x100U); + +__WEAK struct arm_gic_affinities arch_cpu_num_to_gic_affinities(size_t cpu_num) { + const size_t max_cluster_size = 1U << SMP_CPU_CLUSTER_SHIFT; + const uint8_t cluster_mask = max_cluster_size - 1; + struct arm_gic_affinities out = { + .aff0 = cpu_num & cluster_mask, + .aff1 = cpu_num >> SMP_CPU_CLUSTER_SHIFT, + .aff2 = 0, + .aff3 = 0, + }; + return out; +} +#endif + +#define SGIR_AFF1_SHIFT (16) +#define SGIR_AFF2_SHIFT (32) +#define SGIR_AFF3_SHIFT (48) +#define SGIR_IRQ_SHIFT (24) +#define SGIR_RS_SHIFT (44) +#define SGIR_TARGET_LIST_SHIFT (0) +#define SGIR_ASSEMBLE(val, shift) ((uint64_t)val << shift) + +uint64_t arm_gicv3_sgir_val(u_int irq, size_t cpu_num) { + struct arm_gic_affinities affs = arch_cpu_num_to_gic_affinities(cpu_num); + DEBUG_ASSERT(irq < 16); + + uint8_t range_selector = affs.aff0 >> 4; + uint16_t target_list = 1U << (affs.aff0 & 0xf); + return SGIR_ASSEMBLE(irq, SGIR_IRQ_SHIFT) | + SGIR_ASSEMBLE(affs.aff3, SGIR_AFF3_SHIFT) | + SGIR_ASSEMBLE(affs.aff2, SGIR_AFF2_SHIFT) | + SGIR_ASSEMBLE(affs.aff1, SGIR_AFF1_SHIFT) | + SGIR_ASSEMBLE(range_selector, SGIR_RS_SHIFT) | + SGIR_ASSEMBLE(target_list, SGIR_TARGET_LIST_SHIFT); +} diff --git a/dev/interrupt/arm_gic/gic_v3.h b/dev/interrupt/arm_gic/gic_v3.h new file mode 100644 index 000000000..0a327f7f5 --- /dev/null +++ b/dev/interrupt/arm_gic/gic_v3.h @@ -0,0 +1,32 @@ +/* + * Copyright (c) 2019 LK Trusty Authors. All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files + * (the "Software"), to deal in the Software without restriction, + * including without limitation the rights to use, copy, modify, merge, + * publish, distribute, sublicense, and/or sell copies of the Software, + * and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY + * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, + * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE + * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ */ + +#pragma once + +void arm_gicv3_init(void); +void arm_gicv3_init_percpu(void); +void arm_gicv3_configure_irq_locked(unsigned int cpu, unsigned int vector); +void arm_gicv3_suspend_cpu(unsigned int cpu); +void arm_gicv3_resume_cpu_locked(unsigned int cpu, bool gicd); +uint64_t arm_gicv3_sgir_val(u_int irq, size_t cpu_num); +void arm_gicv3_wait_for_write_complete(void); diff --git a/dev/interrupt/arm_gic/include/dev/interrupt/arm_gic.h b/dev/interrupt/arm_gic/include/dev/interrupt/arm_gic.h index a0dd643ca..a371809f3 100644 --- a/dev/interrupt/arm_gic/include/dev/interrupt/arm_gic.h +++ b/dev/interrupt/arm_gic/include/dev/interrupt/arm_gic.h @@ -1,30 +1,70 @@ /* * Copyright (c) 2013, Google Inc. All rights reserved. * - * Use of this source code is governed by a MIT-style - * license that can be found in the LICENSE file or at - * https://opensource.org/licenses/MIT + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files + * (the "Software"), to deal in the Software without restriction, + * including without limitation the rights to use, copy, modify, merge, + * publish, distribute, sublicense, and/or sell copies of the Software, + * and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY + * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, + * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE + * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ #ifndef __DEV_INTERRUPT_ARM_GIC_H #define __DEV_INTERRUPT_ARM_GIC_H #include +/** + * arm_gic_init() - Legacy GIC initialization routine. + * + * This initializes the GIC using the %GICBASE and %GICx_OFFSET + * macros as the virtual addresses of the GIC banks, and assumes + * that the platform code has already mapped them into the + * address space. + */ void arm_gic_init(void); -#define GIC_BASE_SGI 0 -#define GIC_BASE_PPI 16 -#define GIC_BASE_SPI 32 - -enum interrupt_trigger_mode { - IRQ_TRIGGER_MODE_EDGE = 0, - IRQ_TRIGGER_MODE_LEVEL = 1, +/** + * struct arm_gic_init_info - Initialization information for the GIC. + * @gicc_paddr: Physical address of GIC CPU interface registers. + * @gicc_size: Total size of GIC CPU interface registers. + * @gicd_paddr: Physical address of GIC Distributor registers. + * @gicd_size: Total size of GIC Distributor registers. + * @gicr_paddr: Physical address of GIC Redistributor registers. + * @gicr_size: Total size of GIC Redistributor registers. + */ +struct arm_gic_init_info { + paddr_t gicc_paddr; + size_t gicc_size; + paddr_t gicd_paddr; + size_t gicd_size; + paddr_t gicr_paddr; + size_t gicr_size; }; -enum interrupt_polarity { - IRQ_POLARITY_ACTIVE_HIGH = 0, - IRQ_POLARITY_ACTIVE_LOW = 1, -}; +/** + * arm_gic_init_map() - Map the GIC into the virtual address space and + * initialize it. + * @init_info: Pointer to a &struct arm_gic_init_info structure with the extra + * initialization information, e.g., the physical addresses and + * sizes of the GIC registers. 
+ * + * This function maps the registers of the GIC and then initializes it. + * If ASLR is enabled, the virtual addresses are randomized. + * + */ +void arm_gic_init_map(struct arm_gic_init_info* init_info); enum { /* Ignore cpu_mask and forward interrupt to all CPUs other than the current cpu */ @@ -38,5 +78,14 @@ enum { }; status_t arm_gic_sgi(u_int irq, u_int flags, u_int cpu_mask); +struct arm_gic_affinities { + uint8_t aff0; + uint8_t aff1; + uint8_t aff2; + uint8_t aff3; +}; + +struct arm_gic_affinities arch_cpu_num_to_gic_affinities(size_t cpu_num); + #endif diff --git a/dev/interrupt/arm_gic/rules.mk b/dev/interrupt/arm_gic/rules.mk index d77ca24a9..67578abc2 100644 --- a/dev/interrupt/arm_gic/rules.mk +++ b/dev/interrupt/arm_gic/rules.mk @@ -2,7 +2,19 @@ LOCAL_DIR := $(GET_LOCAL_DIR) MODULE := $(LOCAL_DIR) +GIC_VERSION ?= 2 + +MODULE_DEFINES += \ + GIC_VERSION=$(GIC_VERSION) \ + MODULE_SRCS += \ $(LOCAL_DIR)/arm_gic.c +MODULE_COMPILEFLAGS += -Wno-type-limits + +# Build gic_v3 for versions 3 and 4 +ifeq (,$(filter-out 3 4,$(GIC_VERSION))) +MODULE_SRCS += $(LOCAL_DIR)/gic_v3.c +endif + include make/module.mk
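
Usage sketch (not part of the diff): how platform init code might drive the new arm_gic_init_map() path on a GICv3 build. The physical addresses, the redistributor sizing, and the platform_init_interrupts() hook name are hypothetical placeholders, not values defined by this change.

#include <dev/interrupt/arm_gic.h>

/* Hypothetical SoC addresses; a real platform substitutes its own. */
#define PLAT_GICD_PADDR 0x08000000UL
#define PLAT_GICR_PADDR 0x080a0000UL

static void platform_init_interrupts(void) {
    struct arm_gic_init_info info = {
        .gicd_paddr = PLAT_GICD_PADDR,
        .gicd_size  = 0x10000,                 /* must be >= GICD_MIN_SIZE */
        .gicr_paddr = PLAT_GICR_PADDR,
        /* One RD_base + SGI_base frame pair (0x20000) per CPU on GICv3. */
        .gicr_size  = SMP_MAX_CPUS * 0x20000,
    };

    /* Maps the regions via vmm_alloc_physical() and then initializes the GIC. */
    arm_gic_init_map(&info);
}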
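
A second sketch, also not part of the diff, exercising the SGI path the patch reworks: registering a handler for an SGI vector and raising it toward another CPU. The vector number, handler, and target CPU are arbitrary examples; per-CPU vectors are registered on the calling CPU, so each receiving CPU has to call register_int_handler() for the same vector itself.

#include <dev/interrupt/arm_gic.h>
#include <platform/interrupts.h>

#define EXAMPLE_IPI_VECTOR 8   /* SGIs occupy vectors 0-15 */

static enum handler_return example_ipi_handler(void *arg) {
    /* Ask the scheduler to reschedule on the interrupted CPU. */
    return INT_RESCHEDULE;
}

static void example_send_ipi(uint target_cpu) {
    /*
     * SGIs are enabled at registration time (see register_int_handler()),
     * since their maskability is implementation defined on GICv2.
     */
    register_int_handler(EXAMPLE_IPI_VECTOR, example_ipi_handler, NULL);

    /*
     * With flags == 0 the cpu_mask selects the targets: GICv2 writes it to
     * GICD_SGIR, GICv3 walks the mask and writes GICC_PRIMARY_SGIR
     * (ICC_SGI1R_EL1 for the default group) once per target CPU.
     */
    arm_gic_sgi(EXAMPLE_IPI_VECTOR, 0, 1U << target_cpu);
}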