
Commit 08e875c

arm64: SMP support
This patch adds SMP initialisation and spinlocks implementation for
AArch64. The spinlock support uses the new load-acquire/store-release
instructions to avoid explicit barriers. The architecture also specifies
that an event is automatically generated when clearing the exclusive
monitor state to wake up processors in WFE, so there is no need for an
explicit DSB/SEV instruction sequence. The SEVL instruction is used to
set the exclusive monitor locally as there is no conditional WFE and a
branch is more expensive.

For the SMP booting protocol, see Documentation/arm64/booting.txt.

Signed-off-by: Will Deacon <[email protected]>
Signed-off-by: Marc Zyngier <[email protected]>
Signed-off-by: Catalin Marinas <[email protected]>
Acked-by: Arnd Bergmann <[email protected]>
Acked-by: Tony Lindgren <[email protected]>
Acked-by: Nicolas Pitre <[email protected]>
Acked-by: Olof Johansson <[email protected]>
Acked-by: Santosh Shilimkar <[email protected]>
1 parent 09b5541 commit 08e875c
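The locking scheme is easiest to read alongside a generic equivalent. The sketch below is not part of the commit; it is a user-space analogue of the arch_spinlock added here, written with C11 acquire/release atomics, which compilers typically lower on AArch64 to the same LDAXR/STLR pattern used in the assembly further down. It illustrates why no explicit DMB barriers are needed.

/*
 * Illustrative only -- not part of this commit.  A user-space analogue of
 * the 0/1 lock protocol below: an acquire on lock and a release on unlock
 * provide the ordering that LDAXR/STLR give the kernel implementation
 * without explicit barriers.
 */
#include <stdatomic.h>

typedef struct {
	atomic_uint lock;		/* 0 = unlocked, 1 = locked */
} demo_spinlock_t;

static inline void demo_spin_lock(demo_spinlock_t *l)
{
	unsigned int expected;

	do {
		expected = 0;	/* only take the lock if it currently reads 0 */
	} while (!atomic_compare_exchange_weak_explicit(&l->lock, &expected, 1,
							memory_order_acquire,
							memory_order_relaxed));
}

static inline void demo_spin_unlock(demo_spinlock_t *l)
{
	/* release store pairs with the acquire in demo_spin_lock() */
	atomic_store_explicit(&l->lock, 0, memory_order_release);
}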

File tree

5 files changed: +783 -0 lines changed


Diff for: arch/arm64/include/asm/hardirq.h

+5

@@ -20,8 +20,13 @@
 #include <linux/threads.h>
 #include <asm/irq.h>
 
+#define NR_IPI	4
+
 typedef struct {
 	unsigned int __softirq_pending;
+#ifdef CONFIG_SMP
+	unsigned int ipi_irqs[NR_IPI];
+#endif
 } ____cacheline_aligned irq_cpustat_t;
 
 #include <linux/irq_cpustat.h>	/* Standard mappings for irq_cpustat_t above */

Diff for: arch/arm64/include/asm/smp.h

+69

@@ -0,0 +1,69 @@
+/*
+ * Copyright (C) 2012 ARM Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+#ifndef __ASM_SMP_H
+#define __ASM_SMP_H
+
+#include <linux/threads.h>
+#include <linux/cpumask.h>
+#include <linux/thread_info.h>
+
+#ifndef CONFIG_SMP
+# error "<asm/smp.h> included in non-SMP build"
+#endif
+
+#define raw_smp_processor_id() (current_thread_info()->cpu)
+
+struct seq_file;
+
+/*
+ * generate IPI list text
+ */
+extern void show_ipi_list(struct seq_file *p, int prec);
+
+/*
+ * Called from C code, this handles an IPI.
+ */
+extern void handle_IPI(int ipinr, struct pt_regs *regs);
+
+/*
+ * Setup the set of possible CPUs (via set_cpu_possible)
+ */
+extern void smp_init_cpus(void);
+
+/*
+ * Provide a function to raise an IPI cross call on CPUs in callmap.
+ */
+extern void set_smp_cross_call(void (*)(const struct cpumask *, unsigned int));
+
+/*
+ * Called from the secondary holding pen, this is the secondary CPU entry point.
+ */
+asmlinkage void secondary_start_kernel(void);
+
+/*
+ * Initial data for bringing up a secondary CPU.
+ */
+struct secondary_data {
+	void *stack;
+};
+extern struct secondary_data secondary_data;
+extern void secondary_holding_pen(void);
+extern volatile unsigned long secondary_holding_pen_release;
+
+extern void arch_send_call_function_single_ipi(int cpu);
+extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
+
+#endif /* ifndef __ASM_SMP_H */
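The secondary_holding_pen declarations above tie into the boot protocol documented in Documentation/arm64/booting.txt. A rough sketch of that handshake follows; the real code lives in head.S and arch/arm64/kernel/smp.c, which are not shown on this page, so the release value, marker and wake-up sequence here are assumptions, not the commit's implementation.

/*
 * Assumed sketch of the holding-pen handshake implied by
 * secondary_holding_pen_release.  Details (marker value, cache
 * maintenance) are simplified and hypothetical.
 */
extern volatile unsigned long secondary_holding_pen_release;

#define DEMO_INVALID_CPU	(~0UL)	/* assumed "no CPU released" marker */

/* Boot CPU: publish the CPU to release, then wake everyone parked in WFE. */
static void demo_release_secondary(unsigned long cpu)
{
	secondary_holding_pen_release = cpu;
	asm volatile("dsb sy\n\tsev" ::: "memory");
}

/* Secondary CPU: sleep in WFE until our number appears, then enter the kernel. */
static void demo_secondary_pen_wait(unsigned long cpu)
{
	while (secondary_holding_pen_release != cpu)
		asm volatile("wfe" ::: "memory");
	secondary_start_kernel();	/* declared in the header above */
}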

Diff for: arch/arm64/include/asm/spinlock.h

+202

@@ -0,0 +1,202 @@
+/*
+ * Copyright (C) 2012 ARM Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+#ifndef __ASM_SPINLOCK_H
+#define __ASM_SPINLOCK_H
+
+#include <asm/spinlock_types.h>
+#include <asm/processor.h>
+
+/*
+ * Spinlock implementation.
+ *
+ * The old value is read exclusively and the new one, if unlocked, is written
+ * exclusively. In case of failure, the loop is restarted.
+ *
+ * The memory barriers are implicit with the load-acquire and store-release
+ * instructions.
+ *
+ * Unlocked value: 0
+ * Locked value: 1
+ */
+
+#define arch_spin_is_locked(x)		((x)->lock != 0)
+#define arch_spin_unlock_wait(lock) \
+	do { while (arch_spin_is_locked(lock)) cpu_relax(); } while (0)
+
+#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
+
+static inline void arch_spin_lock(arch_spinlock_t *lock)
+{
+	unsigned int tmp;
+
+	asm volatile(
+	"	sevl\n"
+	"1:	wfe\n"
+	"2:	ldaxr	%w0, [%1]\n"
+	"	cbnz	%w0, 1b\n"
+	"	stxr	%w0, %w2, [%1]\n"
+	"	cbnz	%w0, 2b\n"
+	: "=&r" (tmp)
+	: "r" (&lock->lock), "r" (1)
+	: "memory");
+}
+
+static inline int arch_spin_trylock(arch_spinlock_t *lock)
+{
+	unsigned int tmp;
+
+	asm volatile(
+	"	ldaxr	%w0, [%1]\n"
+	"	cbnz	%w0, 1f\n"
+	"	stxr	%w0, %w2, [%1]\n"
+	"1:\n"
+	: "=&r" (tmp)
+	: "r" (&lock->lock), "r" (1)
+	: "memory");
+
+	return !tmp;
+}
+
+static inline void arch_spin_unlock(arch_spinlock_t *lock)
+{
+	asm volatile(
+	"	stlr	%w1, [%0]\n"
+	: : "r" (&lock->lock), "r" (0) : "memory");
+}
+
+/*
+ * Write lock implementation.
+ *
+ * Write locks set bit 31. Unlocking, is done by writing 0 since the lock is
+ * exclusively held.
+ *
+ * The memory barriers are implicit with the load-acquire and store-release
+ * instructions.
+ */
+
+static inline void arch_write_lock(arch_rwlock_t *rw)
+{
+	unsigned int tmp;
+
+	asm volatile(
+	"	sevl\n"
+	"1:	wfe\n"
+	"2:	ldaxr	%w0, [%1]\n"
+	"	cbnz	%w0, 1b\n"
+	"	stxr	%w0, %w2, [%1]\n"
+	"	cbnz	%w0, 2b\n"
+	: "=&r" (tmp)
+	: "r" (&rw->lock), "r" (0x80000000)
+	: "memory");
+}
+
+static inline int arch_write_trylock(arch_rwlock_t *rw)
+{
+	unsigned int tmp;
+
+	asm volatile(
+	"	ldaxr	%w0, [%1]\n"
+	"	cbnz	%w0, 1f\n"
+	"	stxr	%w0, %w2, [%1]\n"
+	"1:\n"
+	: "=&r" (tmp)
+	: "r" (&rw->lock), "r" (0x80000000)
+	: "memory");
+
+	return !tmp;
+}
+
+static inline void arch_write_unlock(arch_rwlock_t *rw)
+{
+	asm volatile(
+	"	stlr	%w1, [%0]\n"
+	: : "r" (&rw->lock), "r" (0) : "memory");
+}
+
+/* write_can_lock - would write_trylock() succeed? */
+#define arch_write_can_lock(x)		((x)->lock == 0)
+
+/*
+ * Read lock implementation.
+ *
+ * It exclusively loads the lock value, increments it and stores the new value
+ * back if positive and the CPU still exclusively owns the location. If the
+ * value is negative, the lock is already held.
+ *
+ * During unlocking there may be multiple active read locks but no write lock.
+ *
+ * The memory barriers are implicit with the load-acquire and store-release
+ * instructions.
+ */
+static inline void arch_read_lock(arch_rwlock_t *rw)
+{
+	unsigned int tmp, tmp2;
+
+	asm volatile(
+	"	sevl\n"
+	"1:	wfe\n"
+	"2:	ldaxr	%w0, [%2]\n"
+	"	add	%w0, %w0, #1\n"
+	"	tbnz	%w0, #31, 1b\n"
+	"	stxr	%w1, %w0, [%2]\n"
+	"	cbnz	%w1, 2b\n"
+	: "=&r" (tmp), "=&r" (tmp2)
+	: "r" (&rw->lock)
+	: "memory");
+}
+
+static inline void arch_read_unlock(arch_rwlock_t *rw)
+{
+	unsigned int tmp, tmp2;
+
+	asm volatile(
+	"1:	ldxr	%w0, [%2]\n"
+	"	sub	%w0, %w0, #1\n"
+	"	stlxr	%w1, %w0, [%2]\n"
+	"	cbnz	%w1, 1b\n"
+	: "=&r" (tmp), "=&r" (tmp2)
+	: "r" (&rw->lock)
+	: "memory");
+}
+
+static inline int arch_read_trylock(arch_rwlock_t *rw)
+{
+	unsigned int tmp, tmp2 = 1;
+
+	asm volatile(
+	"	ldaxr	%w0, [%2]\n"
+	"	add	%w0, %w0, #1\n"
+	"	tbnz	%w0, #31, 1f\n"
+	"	stxr	%w1, %w0, [%2]\n"
+	"1:\n"
+	: "=&r" (tmp), "+r" (tmp2)
+	: "r" (&rw->lock)
+	: "memory");
+
+	return !tmp2;
+}
+
+/* read_can_lock - would read_trylock() succeed? */
+#define arch_read_can_lock(x)		((x)->lock < 0x80000000)
+
+#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
+#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
+
+#define arch_spin_relax(lock)	cpu_relax()
+#define arch_read_relax(lock)	cpu_relax()
+#define arch_write_relax(lock)	cpu_relax()
+
+#endif /* __ASM_SPINLOCK_H */
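The rwlock word encoding used above (readers count up from 0, a writer claims bit 31, so any value with bit 31 set means "write-locked") can also be mirrored in portable C11 atomics. The sketch below is illustrative only and is not how the kernel builds its rwlocks; it simply makes the bit-31 convention of the assembly explicit.

/*
 * Illustrative only -- not part of this commit.  The same reader-count /
 * writer-bit encoding as the arch_rwlock above, in C11 atomics.
 */
#include <stdatomic.h>
#include <stdbool.h>

typedef struct { atomic_uint lock; } demo_rwlock_t;

#define DEMO_WRITER_BIT	0x80000000u

static bool demo_read_trylock(demo_rwlock_t *rw)
{
	unsigned int old = atomic_load_explicit(&rw->lock, memory_order_relaxed);

	do {
		if (old & DEMO_WRITER_BIT)	/* bit 31 set: write-locked */
			return false;
	} while (!atomic_compare_exchange_weak_explicit(&rw->lock, &old, old + 1,
							memory_order_acquire,
							memory_order_relaxed));
	return true;
}

static bool demo_write_trylock(demo_rwlock_t *rw)
{
	unsigned int expected = 0;

	/* a writer only gets in when no readers or writers hold the lock */
	return atomic_compare_exchange_strong_explicit(&rw->lock, &expected,
						       DEMO_WRITER_BIT,
						       memory_order_acquire,
						       memory_order_relaxed);
}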

Diff for: arch/arm64/include/asm/spinlock_types.h

+38

@@ -0,0 +1,38 @@
+/*
+ * Copyright (C) 2012 ARM Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+#ifndef __ASM_SPINLOCK_TYPES_H
+#define __ASM_SPINLOCK_TYPES_H
+
+#if !defined(__LINUX_SPINLOCK_TYPES_H) && !defined(__ASM_SPINLOCK_H)
+# error "please don't include this file directly"
+#endif
+
+/* We only require natural alignment for exclusive accesses. */
+#define __lock_aligned
+
+typedef struct {
+	volatile unsigned int lock;
+} arch_spinlock_t;
+
+#define __ARCH_SPIN_LOCK_UNLOCKED	{ 0 }
+
+typedef struct {
+	volatile unsigned int lock;
+} arch_rwlock_t;
+
+#define __ARCH_RW_LOCK_UNLOCKED		{ 0 }
+
+#endif
