Skip to content

Commit

Permalink
arm: Support the safe WFE sequence for Krait CPUs
Browse files Browse the repository at this point in the history
Certain versions of the Krait processor require a specific
code sequence to be executed prior to executing a WFE
instruction to permit that instruction to place the
processor into a low-power state.

Change-Id: I614f8ce24936c793c91d5c43c7a7931a04f11dda
Signed-off-by: Shruthi Krishna <skrish@codeaurora.org>
Stepan Moskovchenko authored and Shruthi Krishna committed Aug 3, 2012
1 parent 0b8ece3 commit fc4df71
Showing 3 changed files with 67 additions and 14 deletions.
58 changes: 44 additions & 14 deletions arch/arm/include/asm/spinlock.h
Original file line number Diff line number Diff line change
@@ -7,6 +7,8 @@

#include <asm/processor.h>

extern int msm_krait_need_wfe_fixup;

/*
* sev and wfe are ARMv6K extensions. Uniprocessor ARMv6 may not have the K
* extensions, so when running on UP, we have to patch these instructions away.
@@ -34,6 +36,31 @@
#define WFE() ALT_SMP("wfe", "nop")
#endif

/*
* The fixup involves disabling interrupts during execution of the WFE
* instruction. This could potentially lead to deadlock if a thread is trying
* to acquire a spinlock which is being released from an interrupt context.
*/
#ifdef CONFIG_MSM_KRAIT_WFE_FIXUP
#define WFE_SAFE(fixup, tmp) \
" mrs " tmp ", cpsr\n" \
" cmp " fixup ", #0\n" \
" wfeeq\n" \
" beq 10f\n" \
" cpsid if\n" \
" mrc p15, 7, " fixup ", c15, c0, 5\n" \
" bic " fixup ", " fixup ", #0x10000\n" \
" mcr p15, 7, " fixup ", c15, c0, 5\n" \
" isb\n" \
" wfe\n" \
" orr " fixup ", " fixup ", #0x10000\n" \
" mcr p15, 7, " fixup ", c15, c0, 5\n" \
" isb\n" \
"10: msr cpsr_cf, " tmp "\n"
#else
#define WFE_SAFE(fixup, tmp) " wfe\n"
#endif

static inline void dsb_sev(void)
{
#if __LINUX_ARM_ARCH__ >= 7
@@ -71,18 +98,18 @@ static inline void dsb_sev(void)

static inline void arch_spin_lock(arch_spinlock_t *lock)
{
unsigned long tmp;
unsigned long tmp, fixup = msm_krait_need_wfe_fixup;

__asm__ __volatile__(
"1: ldrex %[tmp], [%[lock]]\n"
" teq %[tmp], #0\n"
" beq 2f\n"
WFE()
WFE_SAFE("%[fixup]", "%[tmp]")
"2:\n"
" strexeq %[tmp], %[bit0], [%[lock]]\n"
" teqeq %[tmp], #0\n"
" bne 1b"
: [tmp] "=&r" (tmp)
: [tmp] "=&r" (tmp), [fixup] "+r" (fixup)
: [lock] "r" (&lock->lock), [bit0] "r" (1)
: "cc");

@@ -149,6 +176,7 @@ static inline void arch_spin_unlock(arch_spinlock_t *lock)
static inline void arch_spin_lock(arch_spinlock_t *lock)
{
unsigned long tmp, ticket, next_ticket;
unsigned long fixup = msm_krait_need_wfe_fixup;

/* Grab the next ticket and wait for it to be "served" */
__asm__ __volatile__(
@@ -161,13 +189,14 @@ static inline void arch_spin_lock(arch_spinlock_t *lock)
"2:\n"
#ifdef CONFIG_CPU_32v6K
" beq 3f\n"
WFE()
WFE_SAFE("%[fixup]", "%[tmp]")
"3:\n"
#endif
" ldr %[tmp], [%[lockaddr]]\n"
" cmp %[ticket], %[tmp], lsr #16\n"
" bne 2b"
: [ticket]"=&r" (ticket), [tmp]"=&r" (tmp), [next_ticket]"=&r" (next_ticket)
: [ticket]"=&r" (ticket), [tmp]"=&r" (tmp),
[next_ticket]"=&r" (next_ticket), [fixup]"+r" (fixup)
: [lockaddr]"r" (&lock->lock), [val1]"r" (1)
: "cc");
smp_mb();
@@ -216,15 +245,15 @@ static inline void arch_spin_unlock(arch_spinlock_t *lock)

static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
{
unsigned long ticket;
unsigned long ticket, tmp, fixup = msm_krait_need_wfe_fixup;

/* Wait for now_serving == next_ticket */
__asm__ __volatile__(
#ifdef CONFIG_CPU_32v6K
" cmpne %[lockaddr], %[lockaddr]\n"
"1:\n"
" beq 2f\n"
WFE()
WFE_SAFE("%[fixup]", "%[tmp]")
"2:\n"
#else
"1:\n"
@@ -234,7 +263,8 @@ static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
" uxth %[ticket], %[ticket]\n"
" cmp %[ticket], #0\n"
" bne 1b"
: [ticket]"=&r" (ticket)
: [ticket]"=&r" (ticket), [tmp]"=&r" (tmp),
[fixup]"+r" (fixup)
: [lockaddr]"r" (&lock->lock)
: "cc");
}
@@ -262,18 +292,18 @@ static inline int arch_spin_is_contended(arch_spinlock_t *lock)

static inline void arch_write_lock(arch_rwlock_t *rw)
{
unsigned long tmp;
unsigned long tmp, fixup = msm_krait_need_wfe_fixup;

__asm__ __volatile__(
"1: ldrex %[tmp], [%[lock]]\n"
" teq %[tmp], #0\n"
" beq 2f\n"
WFE()
WFE_SAFE("%[fixup]", "%[tmp]")
"2:\n"
" strexeq %[tmp], %[bit31], [%[lock]]\n"
" teq %[tmp], #0\n"
" bne 1b"
: [tmp] "=&r" (tmp)
: [tmp] "=&r" (tmp), [fixup] "+r" (fixup)
: [lock] "r" (&rw->lock), [bit31] "r" (0x80000000)
: "cc");

@@ -330,18 +360,18 @@ static inline void arch_write_unlock(arch_rwlock_t *rw)
*/
static inline void arch_read_lock(arch_rwlock_t *rw)
{
unsigned long tmp, tmp2;
unsigned long tmp, tmp2, fixup = msm_krait_need_wfe_fixup;

__asm__ __volatile__(
"1: ldrex %[tmp], [%[lock]]\n"
" adds %[tmp], %[tmp], #1\n"
" strexpl %[tmp2], %[tmp], [%[lock]]\n"
" bpl 2f\n"
WFE()
WFE_SAFE("%[fixup]", "%[tmp]")
"2:\n"
" rsbpls %[tmp], %[tmp2], #0\n"
" bmi 1b"
: [tmp] "=&r" (tmp), [tmp2] "=&r" (tmp2)
: [tmp] "=&r" (tmp), [tmp2] "=&r" (tmp2), [fixup] "+r" (fixup)
: [lock] "r" (&rw->lock)
: "cc");

6 changes: 6 additions & 0 deletions arch/arm/mach-msm/Kconfig
Original file line number Diff line number Diff line change
@@ -168,6 +168,7 @@ config ARCH_MSM8960
select MULTI_IRQ_HANDLER
select MSM_PM8X60 if PM
select HOLES_IN_ZONE if SPARSEMEM
select MSM_KRAIT_WFE_FIXUP

config ARCH_MSM8930
bool "MSM8930"
@@ -205,6 +206,7 @@ config ARCH_MSM8930
select MULTI_IRQ_HANDLER
select MSM_PM8X60 if PM
select HOLES_IN_ZONE if SPARSEMEM
select MSM_KRAIT_WFE_FIXUP

config ARCH_APQ8064
bool "APQ8064"
@@ -222,6 +224,7 @@ config ARCH_APQ8064
select MULTI_IRQ_HANDLER
select MSM_PM8X60 if PM
select HOLES_IN_ZONE if SPARSEMEM
select MSM_KRAIT_WFE_FIXUP

config ARCH_MSMCOPPER
bool "MSM Copper"
@@ -308,6 +311,9 @@ config ARCH_MSM_KRAITMP
select MSM_SMP
bool

config MSM_KRAIT_WFE_FIXUP
bool

config ARCH_MSM_CORTEX_A5
bool
select HAVE_HW_BRKPT_RESERVED_RW_ACCESS
17 changes: 17 additions & 0 deletions arch/arm/mm/init.c
Original file line number Diff line number Diff line change
@@ -28,6 +28,7 @@
#include <asm/sizes.h>
#include <asm/tlb.h>
#include <asm/fixmap.h>
#include <asm/cputype.h>

#include <asm/mach/arch.h>
#include <asm/mach/map.h>
@@ -36,6 +37,8 @@

static unsigned long phys_initrd_start __initdata = 0;
static unsigned long phys_initrd_size __initdata = 0;
int msm_krait_need_wfe_fixup;
EXPORT_SYMBOL(msm_krait_need_wfe_fixup);

static int __init early_initrd(char *p)
{
@@ -891,3 +894,17 @@ static int __init keepinitrd_setup(char *__unused)

__setup("keepinitrd", keepinitrd_setup);
#endif

#ifdef CONFIG_MSM_KRAIT_WFE_FIXUP
static int __init msm_krait_wfe_init(void)
{
unsigned int val, midr;
midr = read_cpuid_id() & 0xffffff00;
if ((midr == 0x511f0400) || (midr == 0x510f0600)) {
asm volatile("mrc p15, 7, %0, c15, c0, 5" : "=r" (val));
msm_krait_need_wfe_fixup = (val & 0x10000) ? 1 : 0;
}
return 0;
}
pure_initcall(msm_krait_wfe_init);
#endif

0 comments on commit fc4df71

Please sign in to comment.