
Commit 88ecd15

melver authored and paulmckrcu committed
seqlock, kcsan: Add annotations for KCSAN
Since seqlocks in the Linux kernel do not require the use of marked atomic accesses in critical sections, we teach KCSAN to assume such accesses are atomic. KCSAN currently also pretends that writes to `sequence` are atomic, although currently plain writes are used (their corresponding reads are READ_ONCE).

Further, to avoid false positives in the absence of a clear ending of a seqlock reader critical section (only when using the raw interface), KCSAN assumes a fixed number of accesses after the start of a seqlock critical section are atomic.

=== Commentary on design around absence of clear begin/end markings ===

Seqlock usage via seqlock_t follows a predictable usage pattern, where clear critical section begin/end is enforced. There are subtle special cases for readers that need to be flat atomic regions, e.g. because of usage such as in:

  - fs/namespace.c:__legitimize_mnt - unbalanced read_seqretry
  - fs/dcache.c:d_walk - unbalanced need_seqretry

But anything directly accessing seqcount_t seems to be unpredictable. Filtering for usage of read_seqcount_retry not following the 'do { .. } while (read_seqcount_retry(..));' pattern:

  $ git grep 'read_seqcount_retry' | grep -Ev 'while \(|seqlock.h|Doc|\* '
  => about 1/3 of the total read_seqcount_retry usage.

Just looking at fs/namei.c, we conclude that it is non-trivial to prescribe and migrate to an interface that would force clear begin/end seqlock markings for critical sections. As such, we concluded that the best design currently is to simply ensure that KCSAN works well with the existing code.

Signed-off-by: Marco Elver <[email protected]>
Acked-by: Paul E. McKenney <[email protected]>
Signed-off-by: Paul E. McKenney <[email protected]>
1 parent 0ebba71 commit 88ecd15
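For reference, the predictable seqlock_t usage pattern described above looks as follows. This is a minimal sketch for illustration only; the foo_* identifiers are hypothetical, not taken from this commit:

/* Assumes <linux/seqlock.h>; all foo_* names are hypothetical. */
static DEFINE_SEQLOCK(foo_lock);
static u64 foo_value;

/* Reader: retries until it observes a stable sequence. */
static u64 foo_read(void)
{
        unsigned seq;
        u64 val;

        do {
                seq = read_seqbegin(&foo_lock);  /* opens KCSAN's flat atomic region */
                val = foo_value;                 /* plain read, assumed atomic */
        } while (read_seqretry(&foo_lock, seq)); /* closes the flat region */

        return val;
}

/* Writer: the critical section becomes a nestable KCSAN atomic region. */
static void foo_write(u64 v)
{
        write_seqlock(&foo_lock);   /* via raw_write_seqcount_begin(): kcsan_nestable_atomic_begin() */
        foo_value = v;              /* plain write, assumed atomic */
        write_sequnlock(&foo_lock); /* via raw_write_seqcount_end(): kcsan_nestable_atomic_end() */
}

The reader-side region is flat rather than nestable precisely so that the unbalanced read_seqretry() callers listed above stay safe: per the comment added to read_seqretry() in the diff below, it may be called multiple times when completing a read critical section, which a flat (non-nested) region tolerates.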

File tree

1 file changed (+38 -2 lines)

Diff for: include/linux/seqlock.h

@@ -37,8 +37,24 @@
 #include <linux/preempt.h>
 #include <linux/lockdep.h>
 #include <linux/compiler.h>
+#include <linux/kcsan.h>
 #include <asm/processor.h>
 
+/*
+ * The seqlock interface does not prescribe a precise sequence of read
+ * begin/retry/end. For readers, typically there is a call to
+ * read_seqcount_begin() and read_seqcount_retry(), however, there are more
+ * esoteric cases which do not follow this pattern.
+ *
+ * As a consequence, we take the following best-effort approach for raw usage
+ * via seqcount_t under KCSAN: upon beginning a seq-reader critical section,
+ * pessimistically mark the next KCSAN_SEQLOCK_REGION_MAX memory accesses as
+ * atomics; if there is a matching read_seqcount_retry() call, no following
+ * memory operations are considered atomic. Usage of seqlocks via the
+ * seqlock_t interface is not affected.
+ */
+#define KCSAN_SEQLOCK_REGION_MAX 1000
+
 /*
  * Version using sequence counter only.
  * This can be used when code has its own mutex protecting the
@@ -115,6 +131,7 @@ static inline unsigned __read_seqcount_begin(const seqcount_t *s)
                 cpu_relax();
                 goto repeat;
         }
+        kcsan_atomic_next(KCSAN_SEQLOCK_REGION_MAX);
         return ret;
 }
 
@@ -131,6 +148,7 @@ static inline unsigned raw_read_seqcount(const seqcount_t *s)
 {
         unsigned ret = READ_ONCE(s->sequence);
         smp_rmb();
+        kcsan_atomic_next(KCSAN_SEQLOCK_REGION_MAX);
         return ret;
 }
 
@@ -183,6 +201,7 @@ static inline unsigned raw_seqcount_begin(const seqcount_t *s)
 {
         unsigned ret = READ_ONCE(s->sequence);
         smp_rmb();
+        kcsan_atomic_next(KCSAN_SEQLOCK_REGION_MAX);
         return ret & ~1;
 }
 
@@ -202,7 +221,8 @@ static inline unsigned raw_seqcount_begin(const seqcount_t *s)
  */
 static inline int __read_seqcount_retry(const seqcount_t *s, unsigned start)
 {
-        return unlikely(s->sequence != start);
+        kcsan_atomic_next(0);
+        return unlikely(READ_ONCE(s->sequence) != start);
 }
 
 /**
@@ -225,6 +245,7 @@ static inline int read_seqcount_retry(const seqcount_t *s, unsigned start)
 
 static inline void raw_write_seqcount_begin(seqcount_t *s)
 {
+        kcsan_nestable_atomic_begin();
         s->sequence++;
         smp_wmb();
 }
@@ -233,6 +254,7 @@ static inline void raw_write_seqcount_end(seqcount_t *s)
 {
         smp_wmb();
         s->sequence++;
+        kcsan_nestable_atomic_end();
 }
 
 /**
@@ -271,9 +293,11 @@ static inline void raw_write_seqcount_end(seqcount_t *s)
  */
 static inline void raw_write_seqcount_barrier(seqcount_t *s)
 {
+        kcsan_nestable_atomic_begin();
         s->sequence++;
         smp_wmb();
         s->sequence++;
+        kcsan_nestable_atomic_end();
 }
 
 static inline int raw_read_seqcount_latch(seqcount_t *s)
@@ -398,7 +422,9 @@ static inline void write_seqcount_end(seqcount_t *s)
 static inline void write_seqcount_invalidate(seqcount_t *s)
 {
         smp_wmb();
+        kcsan_nestable_atomic_begin();
         s->sequence+=2;
+        kcsan_nestable_atomic_end();
 }
 
 typedef struct {
@@ -430,11 +456,21 @@ typedef struct {
  */
 static inline unsigned read_seqbegin(const seqlock_t *sl)
 {
-        return read_seqcount_begin(&sl->seqcount);
+        unsigned ret = read_seqcount_begin(&sl->seqcount);
+
+        kcsan_atomic_next(0);  /* non-raw usage, assume closing read_seqretry */
+        kcsan_flat_atomic_begin();
+        return ret;
 }
 
 static inline unsigned read_seqretry(const seqlock_t *sl, unsigned start)
 {
+        /*
+         * Assume not nested: read_seqretry may be called multiple times when
+         * completing read critical section.
+         */
+        kcsan_flat_atomic_end();
+
         return read_seqcount_retry(&sl->seqcount, start);
 }
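To make the bounded-region heuristic concrete, consider a raw seqcount_t reader that can bail out instead of looping. This is a hedged sketch under stated assumptions, not code from the kernel tree: bar_seq, bar_state, and bar_try_peek() are hypothetical names. Nothing on the bail-out path tells KCSAN that the reader section has ended, which is why the begin primitives mark only a bounded number of accesses (KCSAN_SEQLOCK_REGION_MAX) as atomic rather than leaving the region open indefinitely:

/* Assumes <linux/seqlock.h>; all bar_* names are hypothetical. */
static seqcount_t bar_seq = SEQCNT_ZERO(bar_seq);
static int bar_state;

static bool bar_try_peek(int *out)
{
        /* Marks the next KCSAN_SEQLOCK_REGION_MAX accesses as atomic. */
        unsigned seq = raw_read_seqcount(&bar_seq);

        if (seq & 1) {
                /*
                 * Writer active: give up without ever calling
                 * read_seqcount_retry(). Only the access bound
                 * eventually ends the KCSAN region on this path.
                 */
                return false;
        }

        *out = bar_state; /* plain read, assumed atomic inside the region */

        /* On this path, the retry closes the region via kcsan_atomic_next(0). */
        return !read_seqcount_retry(&bar_seq, seq);
}

Readers like this mirror the roughly one third of read_seqcount_retry() callers noted in the commit message that do not follow the canonical do/while pattern.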
