/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_SEQLOCK_H
#define __LINUX_SEQLOCK_H

/*
 * seqcount_t / seqlock_t - a reader-writer consistency mechanism with
 * lockless readers (read-only retry loops), and no writer starvation.
 *
 * See Documentation/locking/seqlock.rst
 *
 * Copyrights:
 * - Based on x86_64 vsyscall gettimeofday: Keith Owens, Andrea Arcangeli
 * - Sequence counters with associated locks, (C) 2020 Linutronix GmbH
 */

#include <linux/compiler.h>
#include <linux/kcsan-checks.h>
#include <linux/lockdep.h>
#include <linux/mutex.h>
#include <linux/preempt.h>
#include <linux/spinlock.h>

#include <asm/processor.h>

/*
 * The seqlock seqcount_t interface does not prescribe a precise sequence of
 * read begin/retry/end. For readers, typically there is a call to
 * read_seqcount_begin() and read_seqcount_retry(), however, there are more
 * esoteric cases which do not follow this pattern.
 *
 * As a consequence, we take the following best-effort approach for raw usage
 * via seqcount_t under KCSAN: upon beginning a seq-reader critical section,
 * pessimistically mark the next KCSAN_SEQLOCK_REGION_MAX memory accesses as
 * atomics; if there is a matching read_seqcount_retry() call, no following
 * memory operations are considered atomic. Usage of the seqlock_t interface
 * is not affected.
 */
#define KCSAN_SEQLOCK_REGION_MAX 1000

/*
 * Sequence counters (seqcount_t)
 *
 * This is the raw counting mechanism, without any writer protection.
 *
 * Write side critical sections must be serialized and non-preemptible.
 *
 * If readers can be invoked from hardirq or softirq contexts,
 * interrupts or bottom halves must also be respectively disabled before
 * entering the write section.
 *
 * This mechanism can't be used if the protected data contains pointers,
 * as the writer can invalidate a pointer that a reader is following.
 *
 * If the write serialization mechanism is one of the common kernel
 * locking primitives, use a sequence counter with associated lock
 * (seqcount_LOCKNAME_t) instead.
 *
 * If it's desired to automatically handle the sequence counter writer
 * serialization and non-preemptibility requirements, use a sequential
 * lock (seqlock_t) instead.
 *
 * See Documentation/locking/seqlock.rst
 */
typedef struct seqcount {
        unsigned sequence;
#ifdef CONFIG_DEBUG_LOCK_ALLOC
        struct lockdep_map dep_map;
#endif
} seqcount_t;

static inline void __seqcount_init(seqcount_t *s, const char *name,
                                          struct lock_class_key *key)
{
        /*
         * Make sure we are not reinitializing a held lock:
         */
        lockdep_init_map(&s->dep_map, name, key, 0);
        s->sequence = 0;
}

#ifdef CONFIG_DEBUG_LOCK_ALLOC

# define SEQCOUNT_DEP_MAP_INIT(lockname)                                \
                .dep_map = { .name = #lockname }

/**
 * seqcount_init() - runtime initializer for seqcount_t
 * @s: Pointer to the seqcount_t instance
 */
# define seqcount_init(s)                                               \
        do {                                                            \
                static struct lock_class_key __key;                     \
                __seqcount_init((s), #s, &__key);                       \
        } while (0)

static inline void seqcount_lockdep_reader_access(const seqcount_t *s)
{
        seqcount_t *l = (seqcount_t *)s;
        unsigned long flags;

        local_irq_save(flags);
        seqcount_acquire_read(&l->dep_map, 0, 0, _RET_IP_);
        seqcount_release(&l->dep_map, _RET_IP_);
        local_irq_restore(flags);
}

#else
# define SEQCOUNT_DEP_MAP_INIT(lockname)
# define seqcount_init(s) __seqcount_init(s, NULL, NULL)
# define seqcount_lockdep_reader_access(x)
#endif

/**
 * SEQCNT_ZERO() - static initializer for seqcount_t
 * @name: Name of the seqcount_t instance
 */
#define SEQCNT_ZERO(name) { .sequence = 0, SEQCOUNT_DEP_MAP_INIT(name) }
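
/*
 * Example (an illustrative sketch, not part of this header): static and
 * runtime initialization of a plain seqcount_t. "foo_seq" and "struct bar"
 * are hypothetical names.
 *
 *      static seqcount_t foo_seq = SEQCNT_ZERO(foo_seq);
 *
 *      void bar_init(struct bar *b)
 *      {
 *              seqcount_init(&b->seq);
 *      }
 */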

/*
 * Sequence counters with associated locks (seqcount_LOCKNAME_t)
 *
 * A sequence counter which associates the lock used for writer
 * serialization at initialization time. This enables lockdep to validate
 * that the write side critical section is properly serialized.
 *
 * For associated locks which do not implicitly disable preemption,
 * preemption protection is enforced in the write side function.
 *
 * Lockdep is never used in any of the raw write variants.
 *
 * See Documentation/locking/seqlock.rst
 */

/*
 * For PREEMPT_RT, seqcount_LOCKNAME_t write side critical sections cannot
 * disable preemption. It can lead to higher latencies, and the write side
 * sections will not be able to acquire locks which become sleeping locks
 * (e.g. spinlock_t).
 *
 * To remain preemptible while avoiding a possible livelock caused by the
 * reader preempting the writer, use a different technique: let the reader
 * detect if a seqcount_LOCKNAME_t writer is in progress. If that is the
 * case, acquire then release the associated LOCKNAME writer serialization
 * lock. This will allow any possibly-preempted writer to make progress
 * until the end of its writer serialization lock critical section.
 *
 * This lock-unlock technique must be implemented for all of PREEMPT_RT
 * sleeping locks.  See Documentation/locking/locktypes.rst
 */
#if defined(CONFIG_LOCKDEP) || defined(CONFIG_PREEMPT_RT)
#define __SEQ_LOCK(expr)        expr
#else
#define __SEQ_LOCK(expr)
#endif

/*
 * typedef seqcount_LOCKNAME_t - sequence counter with LOCKNAME associated
 * @seqcount:   The real sequence counter
 * @lock:       Pointer to the associated lock
 *
 * A plain sequence counter with external writer synchronization by
 * LOCKNAME @lock. The lock is associated to the sequence counter in the
 * static initializer or init function. This enables lockdep to validate
 * that the write side critical section is properly serialized.
 *
 * LOCKNAME:    raw_spinlock, spinlock, rwlock or mutex
 */

/*
 * seqcount_LOCKNAME_init() - runtime initializer for seqcount_LOCKNAME_t
 * @s:          Pointer to the seqcount_LOCKNAME_t instance
 * @lock:       Pointer to the associated lock
 */

#define seqcount_LOCKNAME_init(s, _lock, lockname)                      \
        do {                                                            \
                seqcount_##lockname##_t *____s = (s);                   \
                seqcount_init(&____s->seqcount);                        \
                __SEQ_LOCK(____s->lock = (_lock));                      \
        } while (0)

#define seqcount_raw_spinlock_init(s, lock)     seqcount_LOCKNAME_init(s, lock, raw_spinlock)
#define seqcount_spinlock_init(s, lock)         seqcount_LOCKNAME_init(s, lock, spinlock)
#define seqcount_rwlock_init(s, lock)           seqcount_LOCKNAME_init(s, lock, rwlock)
#define seqcount_mutex_init(s, lock)            seqcount_LOCKNAME_init(s, lock, mutex)
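
/*
 * Example (an illustrative sketch, not part of this header): associating a
 * spinlock_t with a sequence counter at init time. "struct foo" and its
 * members are hypothetical.
 *
 *      struct foo {
 *              spinlock_t              lock;
 *              seqcount_spinlock_t     seq;
 *      };
 *
 *      void foo_init(struct foo *f)
 *      {
 *              spin_lock_init(&f->lock);
 *              seqcount_spinlock_init(&f->seq, &f->lock);
 *      }
 *
 * With this association, lockdep can verify that f->lock is held across
 * the f->seq write sections, and PREEMPT_RT readers can let a preempted
 * writer make progress through the lock. See __seqprop below.
 */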

/*
 * SEQCOUNT_LOCKNAME()  - Instantiate seqcount_LOCKNAME_t and helpers
 * seqprop_LOCKNAME_*() - Property accessors for seqcount_LOCKNAME_t
 *
 * @lockname:           "LOCKNAME" part of seqcount_LOCKNAME_t
 * @locktype:           LOCKNAME canonical C data type
 * @preemptible:        preemptibility of above locktype
 * @lockmember:         argument for lockdep_assert_held()
 * @lockbase:           associated lock release function (prefix only)
 * @lock_acquire:       associated lock acquisition function (full call)
 */
#define SEQCOUNT_LOCKNAME(lockname, locktype, preemptible, lockmember, lockbase, lock_acquire) \
typedef struct seqcount_##lockname {                                    \
        seqcount_t              seqcount;                               \
        __SEQ_LOCK(locktype     *lock);                                 \
} seqcount_##lockname##_t;                                              \
                                                                        \
static __always_inline seqcount_t *                                     \
__seqprop_##lockname##_ptr(seqcount_##lockname##_t *s)                  \
{                                                                       \
        return &s->seqcount;                                            \
}                                                                       \
                                                                        \
static __always_inline unsigned                                         \
__seqprop_##lockname##_sequence(const seqcount_##lockname##_t *s)       \
{                                                                       \
        unsigned seq = READ_ONCE(s->seqcount.sequence);                 \
                                                                        \
        if (!IS_ENABLED(CONFIG_PREEMPT_RT))                             \
                return seq;                                             \
                                                                        \
        if (preemptible && unlikely(seq & 1)) {                         \
                __SEQ_LOCK(lock_acquire);                               \
                __SEQ_LOCK(lockbase##_unlock(s->lock));                 \
                                                                        \
                /*                                                      \
                 * Re-read the sequence counter since the (possibly     \
                 * preempted) writer made progress.                     \
                 */                                                     \
                seq = READ_ONCE(s->seqcount.sequence);                  \
        }                                                               \
                                                                        \
        return seq;                                                     \
}                                                                       \
                                                                        \
static __always_inline bool                                             \
__seqprop_##lockname##_preemptible(const seqcount_##lockname##_t *s)    \
{                                                                       \
        if (!IS_ENABLED(CONFIG_PREEMPT_RT))                             \
                return preemptible;                                     \
                                                                        \
        /* PREEMPT_RT relies on the above LOCK+UNLOCK */                \
        return false;                                                   \
}                                                                       \
                                                                        \
static __always_inline void                                             \
__seqprop_##lockname##_assert(const seqcount_##lockname##_t *s)         \
{                                                                       \
        __SEQ_LOCK(lockdep_assert_held(lockmember));                    \
}

/*
 * __seqprop() for seqcount_t
 */

static inline seqcount_t *__seqprop_ptr(seqcount_t *s)
{
        return s;
}

static inline unsigned __seqprop_sequence(const seqcount_t *s)
{
        return READ_ONCE(s->sequence);
}

static inline bool __seqprop_preemptible(const seqcount_t *s)
{
        return false;
}

static inline void __seqprop_assert(const seqcount_t *s)
{
        lockdep_assert_preemption_disabled();
}

#define __SEQ_RT        IS_ENABLED(CONFIG_PREEMPT_RT)

SEQCOUNT_LOCKNAME(raw_spinlock, raw_spinlock_t,  false,    s->lock,        raw_spin, raw_spin_lock(s->lock))
SEQCOUNT_LOCKNAME(spinlock,     spinlock_t,      __SEQ_RT, s->lock,        spin,     spin_lock(s->lock))
SEQCOUNT_LOCKNAME(rwlock,       rwlock_t,        __SEQ_RT, s->lock,        read,     read_lock(s->lock))
SEQCOUNT_LOCKNAME(mutex,        struct mutex,    true,     s->lock,        mutex,    mutex_lock(s->lock))
/*
 * SEQCOUNT_LOCKNAME_ZERO - static initializer for seqcount_LOCKNAME_t
 * @name:       Name of the seqcount_LOCKNAME_t instance
 * @lock:       Pointer to the associated LOCKNAME
 */

#define SEQCOUNT_LOCKNAME_ZERO(seq_name, assoc_lock) {                  \
        .seqcount               = SEQCNT_ZERO(seq_name.seqcount),       \
        __SEQ_LOCK(.lock        = (assoc_lock))                         \
}

#define SEQCNT_RAW_SPINLOCK_ZERO(name, lock)    SEQCOUNT_LOCKNAME_ZERO(name, lock)
#define SEQCNT_SPINLOCK_ZERO(name, lock)        SEQCOUNT_LOCKNAME_ZERO(name, lock)
#define SEQCNT_RWLOCK_ZERO(name, lock)          SEQCOUNT_LOCKNAME_ZERO(name, lock)
#define SEQCNT_MUTEX_ZERO(name, lock)           SEQCOUNT_LOCKNAME_ZERO(name, lock)
#define __seqprop_case(s, lockname, prop)                               \
        seqcount_##lockname##_t: __seqprop_##lockname##_##prop((void *)(s))

#define __seqprop(s, prop) _Generic(*(s),                               \
        seqcount_t:             __seqprop_##prop((void *)(s)),          \
        __seqprop_case((s),     raw_spinlock,   prop),                  \
        __seqprop_case((s),     spinlock,       prop),                  \
        __seqprop_case((s),     rwlock,         prop),                  \
        __seqprop_case((s),     mutex,          prop))

#define seqprop_ptr(s)                  __seqprop(s, ptr)
#define seqprop_sequence(s)             __seqprop(s, sequence)
#define seqprop_preemptible(s)          __seqprop(s, preemptible)
#define seqprop_assert(s)               __seqprop(s, assert)

/**
 * __read_seqcount_begin() - begin a seqcount_t read section w/o barrier
 * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
 *
 * __read_seqcount_begin is like read_seqcount_begin, but has no smp_rmb()
 * barrier. Callers should ensure that smp_rmb() or equivalent ordering is
 * provided before actually loading any of the variables that are to be
 * protected in this critical section.
 *
 * Use carefully, only in critical code, and comment how the barrier is
 * provided.
 *
 * Return: count to be passed to read_seqcount_retry()
 */
#define __read_seqcount_begin(s)                                        \
({                                                                      \
        unsigned __seq;                                                 \
                                                                        \
        while ((__seq = seqprop_sequence(s)) & 1)                       \
                cpu_relax();                                            \
                                                                        \
        kcsan_atomic_next(KCSAN_SEQLOCK_REGION_MAX);                    \
        __seq;                                                          \
})
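
/*
 * Example (an illustrative sketch, not part of this header): a reader that
 * supplies the required ordering itself; with the explicit smp_rmb() this
 * is equivalent to open-coding raw_read_seqcount_begin(). "foo_seq",
 * "foo_a" and "foo_b" are hypothetical.
 *
 *      do {
 *              seq = __read_seqcount_begin(&foo_seq);
 *
 *              smp_rmb();      // order the sequence load before the data loads
 *              a = foo_a;
 *              b = foo_b;
 *      } while (read_seqcount_retry(&foo_seq, seq));
 */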

/**
 * raw_read_seqcount_begin() - begin a seqcount_t read section w/o lockdep
 * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
 *
 * Return: count to be passed to read_seqcount_retry()
 */
#define raw_read_seqcount_begin(s)                                      \
({                                                                      \
        unsigned _seq = __read_seqcount_begin(s);                       \
                                                                        \
        smp_rmb();                                                      \
        _seq;                                                           \
})

/**
 * read_seqcount_begin() - begin a seqcount_t read critical section
 * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
 *
 * Return: count to be passed to read_seqcount_retry()
 */
#define read_seqcount_begin(s)                                          \
({                                                                      \
        seqcount_lockdep_reader_access(seqprop_ptr(s));                 \
        raw_read_seqcount_begin(s);                                     \
})

/**
 * raw_read_seqcount() - read the raw seqcount_t counter value
 * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
 *
 * raw_read_seqcount opens a read critical section of the given
 * seqcount_t, without any lockdep checking, and without checking or
 * masking the sequence counter LSB. Calling code is responsible for
 * handling that.
 *
 * Return: count to be passed to read_seqcount_retry()
 */
#define raw_read_seqcount(s)                                            \
({                                                                      \
        unsigned __seq = seqprop_sequence(s);                           \
                                                                        \
        smp_rmb();                                                      \
        kcsan_atomic_next(KCSAN_SEQLOCK_REGION_MAX);                    \
        __seq;                                                          \
})

/**
 * raw_seqcount_begin() - begin a seqcount_t read critical section w/o
 *                        lockdep and w/o counter stabilization
 * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
 *
 * raw_seqcount_begin opens a read critical section of the given
 * seqcount_t. Unlike read_seqcount_begin(), this function will not wait
 * for the count to stabilize. If a writer is active when it begins, it
 * will fail the read_seqcount_retry() at the end of the read critical
 * section instead of stabilizing at the beginning of it.
 *
 * Use this only in special kernel hot paths where the read section is
 * small and has a high probability of success through other external
 * means. It will save a single branching instruction.
 *
 * Return: count to be passed to read_seqcount_retry()
 */
#define raw_seqcount_begin(s)                                           \
({                                                                      \
        /*                                                              \
         * If the counter is odd, let read_seqcount_retry() fail        \
         * by decrementing the counter.                                 \
         */                                                             \
        raw_read_seqcount(s) & ~1;                                      \
})

/**
 * __read_seqcount_retry() - end a seqcount_t read section w/o barrier
 * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
 * @start: count, from read_seqcount_begin()
 *
 * __read_seqcount_retry is like read_seqcount_retry, but has no smp_rmb()
 * barrier. Callers should ensure that smp_rmb() or equivalent ordering is
 * provided before actually loading any of the variables that are to be
 * protected in this critical section.
 *
 * Use carefully, only in critical code, and comment how the barrier is
 * provided.
 *
 * Return: true if a read section retry is required, else false
 */
#define __read_seqcount_retry(s, start)                                 \
        do___read_seqcount_retry(seqprop_ptr(s), start)

static inline int do___read_seqcount_retry(const seqcount_t *s, unsigned start)
{
        kcsan_atomic_next(0);
        return unlikely(READ_ONCE(s->sequence) != start);
}

/**
 * read_seqcount_retry() - end a seqcount_t read critical section
 * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
 * @start: count, from read_seqcount_begin()
 *
 * read_seqcount_retry closes the read critical section of given
 * seqcount_t.  If the critical section was invalid, it must be ignored
 * (and typically retried).
 *
 * Return: true if a read section retry is required, else false
 */
#define read_seqcount_retry(s, start)                                   \
        do_read_seqcount_retry(seqprop_ptr(s), start)

static inline int do_read_seqcount_retry(const seqcount_t *s, unsigned start)
{
        smp_rmb();
        return do___read_seqcount_retry(s, start);
}
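
/*
 * Example (an illustrative sketch, not part of this header): the canonical
 * lockless reader loop, pairing read_seqcount_begin() with
 * read_seqcount_retry(). "foo_seq", "foo_a" and "foo_b" are hypothetical.
 *
 *      unsigned seq;
 *      int a, b;
 *
 *      do {
 *              seq = read_seqcount_begin(&foo_seq);
 *              a = foo_a;
 *              b = foo_b;
 *      } while (read_seqcount_retry(&foo_seq, seq));
 *
 * On loop exit, "a" and "b" form a consistent snapshot: no writer was
 * active between the begin and retry reads of the sequence counter.
 */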

/**
 * raw_write_seqcount_begin() - start a seqcount_t write section w/o lockdep
 * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
 *
 * Context: check write_seqcount_begin()
 */
#define raw_write_seqcount_begin(s)                                     \
do {                                                                    \
        if (seqprop_preemptible(s))                                     \
                preempt_disable();                                      \
                                                                        \
        do_raw_write_seqcount_begin(seqprop_ptr(s));                    \
} while (0)

static inline void do_raw_write_seqcount_begin(seqcount_t *s)
{
        kcsan_nestable_atomic_begin();
        s->sequence++;
        smp_wmb();
}

/**
 * raw_write_seqcount_end() - end a seqcount_t write section w/o lockdep
 * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
 *
 * Context: check write_seqcount_end()
 */
#define raw_write_seqcount_end(s)                                       \
do {                                                                    \
        do_raw_write_seqcount_end(seqprop_ptr(s));                      \
                                                                        \
        if (seqprop_preemptible(s))                                     \
                preempt_enable();                                       \
} while (0)

static inline void do_raw_write_seqcount_end(seqcount_t *s)
{
        smp_wmb();
        s->sequence++;
        kcsan_nestable_atomic_end();
}

/**
 * write_seqcount_begin_nested() - start a seqcount_t write section with
 *                                 custom lockdep nesting level
 * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
 * @subclass: lockdep nesting level
 *
 * See Documentation/locking/lockdep-design.rst
 * Context: check write_seqcount_begin()
 */
#define write_seqcount_begin_nested(s, subclass)                        \
do {                                                                    \
        seqprop_assert(s);                                              \
                                                                        \
        if (seqprop_preemptible(s))                                     \
                preempt_disable();                                      \
                                                                        \
        do_write_seqcount_begin_nested(seqprop_ptr(s), subclass);       \
} while (0)

static inline void do_write_seqcount_begin_nested(seqcount_t *s, int subclass)
{
        do_raw_write_seqcount_begin(s);
        seqcount_acquire(&s->dep_map, subclass, 0, _RET_IP_);
}

/**
 * write_seqcount_begin() - start a seqcount_t write side critical section
 * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
 *
 * Context: sequence counter write side sections must be serialized and
 * non-preemptible. Preemption will be automatically disabled if and
 * only if the seqcount write serialization lock is associated, and
 * preemptible.  If readers can be invoked from hardirq or softirq
 * context, interrupts or bottom halves must be respectively disabled.
 */
#define write_seqcount_begin(s)                                         \
do {                                                                    \
        seqprop_assert(s);                                              \
                                                                        \
        if (seqprop_preemptible(s))                                     \
                preempt_disable();                                      \
                                                                        \
        do_write_seqcount_begin(seqprop_ptr(s));                        \
} while (0)

static inline void do_write_seqcount_begin(seqcount_t *s)
{
        do_write_seqcount_begin_nested(s, 0);
}

/**
 * write_seqcount_end() - end a seqcount_t write side critical section
 * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
 *
 * Context: Preemption will be automatically re-enabled if and only if
 * the seqcount write serialization lock is associated, and preemptible.
 */
#define write_seqcount_end(s)                                           \
do {                                                                    \
        do_write_seqcount_end(seqprop_ptr(s));                          \
                                                                        \
        if (seqprop_preemptible(s))                                     \
                preempt_enable();                                       \
} while (0)

static inline void do_write_seqcount_end(seqcount_t *s)
{
        seqcount_release(&s->dep_map, _RET_IP_);
        do_raw_write_seqcount_end(s);
}
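
/*
 * Example (an illustrative sketch, not part of this header): a writer
 * section for a seqcount_spinlock_t. The associated "f->lock" must be
 * held; write_seqcount_begin() asserts this via lockdep. "struct foo"
 * and its members are hypothetical.
 *
 *      void foo_update(struct foo *f, int a, int b)
 *      {
 *              spin_lock(&f->lock);
 *              write_seqcount_begin(&f->seq);
 *              f->a = a;
 *              f->b = b;
 *              write_seqcount_end(&f->seq);
 *              spin_unlock(&f->lock);
 *      }
 */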

/**
 * raw_write_seqcount_barrier() - do a seqcount_t write barrier
 * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
 *
 * This can be used to provide an ordering guarantee instead of the usual
 * consistency guarantee. It is one wmb cheaper, because it can collapse
 * the two back-to-back wmb()s.
 *
 * Note that writes surrounding the barrier should be declared atomic (e.g.
 * via WRITE_ONCE): a) to ensure the writes become visible to other threads
 * atomically, avoiding compiler optimizations; b) to document which writes are
 * meant to propagate to the reader critical section. This is necessary because
 * neither the writes before nor the writes after the barrier are enclosed in
 * a seq-writer critical section that would ensure readers are aware of
 * ongoing writes::
 *
 *      seqcount_t seq;
 *      bool X = true, Y = false;
 *
 *      void read(void)
 *      {
 *              bool x, y;
 *              int s;
 *
 *              do {
 *                      s = read_seqcount_begin(&seq);
 *
 *                      x = X; y = Y;
 *
 *              } while (read_seqcount_retry(&seq, s));
 *
 *              BUG_ON(!x && !y);
 *      }
 *
 *      void write(void)
 *      {
 *              WRITE_ONCE(Y, true);
 *
 *              raw_write_seqcount_barrier(&seq);
 *
 *              WRITE_ONCE(X, false);
 *      }
 */
#define raw_write_seqcount_barrier(s)                                   \
        do_raw_write_seqcount_barrier(seqprop_ptr(s))

static inline void do_raw_write_seqcount_barrier(seqcount_t *s)
{
        kcsan_nestable_atomic_begin();
        s->sequence++;
        smp_wmb();
        s->sequence++;
        kcsan_nestable_atomic_end();
}

/**
 * write_seqcount_invalidate() - invalidate in-progress seqcount_t read
 *                               side operations
 * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
 *
 * After write_seqcount_invalidate, no seqcount_t read side operations
 * will complete successfully and see data older than this.
 */
#define write_seqcount_invalidate(s)                                    \
        do_write_seqcount_invalidate(seqprop_ptr(s))

static inline void do_write_seqcount_invalidate(seqcount_t *s)
{
        smp_wmb();
        kcsan_nestable_atomic_begin();
        s->sequence += 2;
        kcsan_nestable_atomic_end();
}

/*
 * Latch sequence counters (seqcount_latch_t)
 *
 * A sequence counter variant where the counter even/odd value is used to
 * switch between two copies of protected data. This allows the read path,
 * typically NMIs, to safely interrupt the write side critical section.
 *
 * As the write sections are fully preemptible, no special handling for
 * PREEMPT_RT is needed.
 */
typedef struct {
        seqcount_t seqcount;
} seqcount_latch_t;

/**
 * SEQCNT_LATCH_ZERO() - static initializer for seqcount_latch_t
 * @seq_name: Name of the seqcount_latch_t instance
 */
#define SEQCNT_LATCH_ZERO(seq_name) {                                   \
        .seqcount               = SEQCNT_ZERO(seq_name.seqcount),       \
}

/**
 * seqcount_latch_init() - runtime initializer for seqcount_latch_t
 * @s: Pointer to the seqcount_latch_t instance
 */
#define seqcount_latch_init(s) seqcount_init(&(s)->seqcount)

/**
 * raw_read_seqcount_latch() - pick even/odd latch data copy
 * @s: Pointer to seqcount_latch_t
 *
 * See raw_write_seqcount_latch() for details and a full reader/writer
 * usage example.
 *
 * Return: sequence counter raw value. Use the lowest bit as an index for
 * picking which data copy to read. The full counter must then be checked
 * with raw_read_seqcount_latch_retry().
 */
static __always_inline unsigned raw_read_seqcount_latch(const seqcount_latch_t *s)
{
        /*
         * Pairs with the first smp_wmb() in raw_write_seqcount_latch().
         * Due to the dependent load, a full smp_rmb() is not needed.
         */
        return READ_ONCE(s->seqcount.sequence);
}

/**
 * raw_read_seqcount_latch_retry() - end a seqcount_latch_t read section
 * @s:          Pointer to seqcount_latch_t
 * @start:      count, from raw_read_seqcount_latch()
 *
 * Return: true if a read section retry is required, else false
 */
static __always_inline int
raw_read_seqcount_latch_retry(const seqcount_latch_t *s, unsigned start)
{
        smp_rmb();
        return unlikely(READ_ONCE(s->seqcount.sequence) != start);
}

/**
 * raw_write_seqcount_latch() - redirect latch readers to even/odd copy
 * @s: Pointer to seqcount_latch_t
 *
 * The latch technique is a multiversion concurrency control method that allows
 * queries during non-atomic modifications. If you can guarantee queries never
 * interrupt the modification -- e.g. the concurrency is strictly between CPUs
 * -- you most likely do not need this.
 *
 * Where the traditional RCU/lockless data structures rely on atomic
 * modifications to ensure queries observe either the old or the new state the
 * latch allows the same for non-atomic updates. The trade-off is doubling the
 * cost of storage; we have to maintain two copies of the entire data
 * structure.
 *
 * Very simply put: we first modify one copy and then the other. This ensures
 * there is always one copy in a stable state, ready to give us an answer.
 *
 * The basic form is a data structure like::
 *
 *      struct latch_struct {
 *              seqcount_latch_t        seq;
 *              struct data_struct      data[2];
 *      };
 *
 * Where a modification, which is assumed to be externally serialized, does the
 * following::
 *
 *      void latch_modify(struct latch_struct *latch, ...)
 *      {
 *              smp_wmb();      // Ensure that the last data[1] update is visible
 *              latch->seq.sequence++;
 *              smp_wmb();      // Ensure that the seqcount update is visible
 *
 *              modify(latch->data[0], ...);
 *
 *              smp_wmb();      // Ensure that the data[0] update is visible
 *              latch->seq.sequence++;
 *              smp_wmb();      // Ensure that the seqcount update is visible
 *
 *              modify(latch->data[1], ...);
 *      }
 *
 * The query will have a form like::
 *
 *      struct entry *latch_query(struct latch_struct *latch, ...)
 *      {
 *              struct entry *entry;
 *              unsigned seq, idx;
 *
 *              do {
 *                      seq = raw_read_seqcount_latch(&latch->seq);
 *
 *                      idx = seq & 0x01;
 *                      entry = data_query(latch->data[idx], ...);
 *
 *              // This includes needed smp_rmb()
 *              } while (raw_read_seqcount_latch_retry(&latch->seq, seq));
 *
 *              return entry;
 *      }
 *
 * So during the modification, queries are first redirected to data[1]. Then we
 * modify data[0]. When that is complete, we redirect queries back to data[0]
 * and we can modify data[1].
 *
 * NOTE:
 *
 *      The non-requirement for atomic modifications does _NOT_ include
 *      the publishing of new entries in the case where data is a dynamic
 *      data structure.
 *
 *      An iteration might start in data[0] and get suspended long enough
 *      to miss an entire modification sequence, once it resumes it might
 *      observe the new entry.
 *
 * NOTE2:
 *
 *      When data is a dynamic data structure, one should use regular RCU
 *      patterns to manage the lifetimes of the objects within.
 */
static inline void raw_write_seqcount_latch(seqcount_latch_t *s)
{
        smp_wmb();      /* prior stores before incrementing "sequence" */
        s->seqcount.sequence++;
        smp_wmb();      /* increment "sequence" before following stores */
}
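
/*
 * Example (an illustrative sketch, not part of this header): the
 * latch_modify() sequence from the comment above, rewritten with
 * raw_write_seqcount_latch(). Each call folds one wmb/increment/wmb step:
 *
 *      void latch_modify(struct latch_struct *latch, ...)
 *      {
 *              raw_write_seqcount_latch(&latch->seq);
 *              modify(latch->data[0], ...);
 *              raw_write_seqcount_latch(&latch->seq);
 *              modify(latch->data[1], ...);
 *      }
 */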

/*
 * Sequential locks (seqlock_t)
 *
 * Sequence counters with an embedded spinlock for writer serialization
 * and non-preemptibility.
 *
 * For more info, see:
 *    - Comments on top of seqcount_t
 *    - Documentation/locking/seqlock.rst
 */
typedef struct {
        /*
         * Make sure that readers don't starve writers on PREEMPT_RT: use
         * seqcount_spinlock_t instead of seqcount_t. Check __SEQ_LOCK().
         */
        seqcount_spinlock_t seqcount;
        spinlock_t lock;
} seqlock_t;

#define __SEQLOCK_UNLOCKED(lockname)                                    \
        {                                                               \
                .seqcount = SEQCNT_SPINLOCK_ZERO(lockname, &(lockname).lock), \
                .lock = __SPIN_LOCK_UNLOCKED(lockname)                  \
        }

/**
 * seqlock_init() - dynamic initializer for seqlock_t
 * @sl: Pointer to the seqlock_t instance
 */
#define seqlock_init(sl)                                                \
        do {                                                            \
                spin_lock_init(&(sl)->lock);                            \
                seqcount_spinlock_init(&(sl)->seqcount, &(sl)->lock);   \
        } while (0)

/**
 * DEFINE_SEQLOCK(sl) - Define a statically allocated seqlock_t
 * @sl: Name of the seqlock_t instance
 */
#define DEFINE_SEQLOCK(sl) \
                seqlock_t sl = __SEQLOCK_UNLOCKED(sl)

/**
 * read_seqbegin() - start a seqlock_t read side critical section
 * @sl: Pointer to seqlock_t
 *
 * Return: count, to be passed to read_seqretry()
 */
static inline unsigned read_seqbegin(const seqlock_t *sl)
{
        unsigned ret = read_seqcount_begin(&sl->seqcount);

        kcsan_atomic_next(0);  /* non-raw usage, assume closing read_seqretry() */
        kcsan_flat_atomic_begin();
        return ret;
}

/**
 * read_seqretry() - end a seqlock_t read side section
 * @sl: Pointer to seqlock_t
 * @start: count, from read_seqbegin()
 *
 * read_seqretry closes the read side critical section of given seqlock_t.
 * If the critical section was invalid, it must be ignored (and typically
 * retried).
 *
 * Return: true if a read section retry is required, else false
 */
static inline unsigned read_seqretry(const seqlock_t *sl, unsigned start)
{
        /*
         * Assume not nested: read_seqretry() may be called multiple times when
         * completing a read critical section.
         */
        kcsan_flat_atomic_end();

        return read_seqcount_retry(&sl->seqcount, start);
}
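
/*
 * Example (an illustrative sketch, not part of this header): a seqlock_t
 * protecting two hypothetical fields, with a lockless reader and a
 * self-serializing writer. "foo_lock", "foo_a" and "foo_b" are made up:
 *
 *      static DEFINE_SEQLOCK(foo_lock);
 *      static int foo_a, foo_b;
 *
 *      void foo_read(int *a, int *b)
 *      {
 *              unsigned seq;
 *
 *              do {
 *                      seq = read_seqbegin(&foo_lock);
 *                      *a = foo_a;
 *                      *b = foo_b;
 *              } while (read_seqretry(&foo_lock, seq));
 *      }
 *
 *      void foo_write(int a, int b)
 *      {
 *              write_seqlock(&foo_lock);
 *              foo_a = a;
 *              foo_b = b;
 *              write_sequnlock(&foo_lock);
 *      }
 */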

/*
 * For all seqlock_t write side functions, use the internal
 * do_write_seqcount_begin() instead of the generic write_seqcount_begin().
 * This way, no redundant lockdep_assert_held() checks are added.
 */

/**
 * write_seqlock() - start a seqlock_t write side critical section
 * @sl: Pointer to seqlock_t
 *
 * write_seqlock opens a write side critical section for the given
 * seqlock_t.  It also implicitly acquires the spinlock_t embedded inside
 * that sequential lock. All seqlock_t write side sections are thus
 * automatically serialized and non-preemptible.
 *
 * Context: if the seqlock_t read section, or other write side critical
 * sections, can be invoked from hardirq or softirq contexts, use the
 * _irqsave or _bh variants of this function instead.
 */
static inline void write_seqlock(seqlock_t *sl)
{
        spin_lock(&sl->lock);
        do_write_seqcount_begin(&sl->seqcount.seqcount);
}

/**
 * write_sequnlock() - end a seqlock_t write side critical section
 * @sl: Pointer to seqlock_t
 *
 * write_sequnlock closes the (serialized and non-preemptible) write side
 * critical section of given seqlock_t.
 */
static inline void write_sequnlock(seqlock_t *sl)
{
        do_write_seqcount_end(&sl->seqcount.seqcount);
        spin_unlock(&sl->lock);
}

/**
 * write_seqlock_bh() - start a softirqs-disabled seqlock_t write section
 * @sl: Pointer to seqlock_t
 *
 * _bh variant of write_seqlock(). Use only if the read side section, or
 * other write side sections, can be invoked from softirq contexts.
 */
static inline void write_seqlock_bh(seqlock_t *sl)
{
        spin_lock_bh(&sl->lock);
        do_write_seqcount_begin(&sl->seqcount.seqcount);
}

/**
 * write_sequnlock_bh() - end a softirqs-disabled seqlock_t write section
 * @sl: Pointer to seqlock_t
 *
 * write_sequnlock_bh closes the serialized, non-preemptible, and
 * softirqs-disabled, seqlock_t write side critical section opened with
 * write_seqlock_bh().
 */
static inline void write_sequnlock_bh(seqlock_t *sl)
{
        do_write_seqcount_end(&sl->seqcount.seqcount);
        spin_unlock_bh(&sl->lock);
}

/**
 * write_seqlock_irq() - start a non-interruptible seqlock_t write section
 * @sl: Pointer to seqlock_t
 *
 * _irq variant of write_seqlock(). Use only if the read side section, or
 * other write sections, can be invoked from hardirq contexts.
 */
static inline void write_seqlock_irq(seqlock_t *sl)
{
        spin_lock_irq(&sl->lock);
        do_write_seqcount_begin(&sl->seqcount.seqcount);
}

/**
 * write_sequnlock_irq() - end a non-interruptible seqlock_t write section
 * @sl: Pointer to seqlock_t
 *
 * write_sequnlock_irq closes the serialized and non-interruptible
 * seqlock_t write side section opened with write_seqlock_irq().
 */
static inline void write_sequnlock_irq(seqlock_t *sl)
{
        do_write_seqcount_end(&sl->seqcount.seqcount);
        spin_unlock_irq(&sl->lock);
}

static inline unsigned long __write_seqlock_irqsave(seqlock_t *sl)
{
        unsigned long flags;

        spin_lock_irqsave(&sl->lock, flags);
        do_write_seqcount_begin(&sl->seqcount.seqcount);
        return flags;
}

/**
 * write_seqlock_irqsave() - start a non-interruptible seqlock_t write
 *                           section
 * @lock:  Pointer to seqlock_t
 * @flags: Stack-allocated storage for saving caller's local interrupt
 *         state, to be passed to write_sequnlock_irqrestore().
 *
 * _irqsave variant of write_seqlock(). Use it only if the read side
 * section, or other write sections, can be invoked from hardirq context.
 */
#define write_seqlock_irqsave(lock, flags)                              \
        do { flags = __write_seqlock_irqsave(lock); } while (0)

/**
 * write_sequnlock_irqrestore() - end non-interruptible seqlock_t write
 *                                section
 * @sl:    Pointer to seqlock_t
 * @flags: Caller's saved interrupt state, from write_seqlock_irqsave()
 *
 * write_sequnlock_irqrestore closes the serialized and non-interruptible
 * seqlock_t write section previously opened with write_seqlock_irqsave().
 */
static inline void
write_sequnlock_irqrestore(seqlock_t *sl, unsigned long flags)
{
        do_write_seqcount_end(&sl->seqcount.seqcount);
        spin_unlock_irqrestore(&sl->lock, flags);
}
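
/*
 * Example (an illustrative sketch, not part of this header): a writer that
 * can race with readers running in hardirq context, using the _irqsave
 * variant. "foo_lock" and "foo_a" are hypothetical:
 *
 *      unsigned long flags;
 *
 *      write_seqlock_irqsave(&foo_lock, flags);
 *      foo_a = 1;
 *      write_sequnlock_irqrestore(&foo_lock, flags);
 */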

/**
 * read_seqlock_excl() - begin a seqlock_t locking reader section
 * @sl: Pointer to seqlock_t
 *
 * read_seqlock_excl opens a seqlock_t locking reader critical section.  A
 * locking reader exclusively locks out *both* other writers *and* other
 * locking readers, but it does not update the embedded sequence number.
 *
 * Locking readers act like a normal spin_lock()/spin_unlock().
 *
 * Context: if the seqlock_t write section, *or other read sections*, can
 * be invoked from hardirq or softirq contexts, use the _irqsave or _bh
 * variant of this function instead.
 *
 * The opened read section must be closed with read_sequnlock_excl().
 */
static inline void read_seqlock_excl(seqlock_t *sl)
{
        spin_lock(&sl->lock);
}

/**
 * read_sequnlock_excl() - end a seqlock_t locking reader critical section
 * @sl: Pointer to seqlock_t
 */
static inline void read_sequnlock_excl(seqlock_t *sl)
{
        spin_unlock(&sl->lock);
}

/**
 * read_seqlock_excl_bh() - start a seqlock_t locking reader section with
 *                          softirqs disabled
 * @sl: Pointer to seqlock_t
 *
 * _bh variant of read_seqlock_excl(). Use this variant only if the
 * seqlock_t write side section, *or other read sections*, can be invoked
 * from softirq contexts.
 */
static inline void read_seqlock_excl_bh(seqlock_t *sl)
{
        spin_lock_bh(&sl->lock);
}

/**
 * read_sequnlock_excl_bh() - stop a seqlock_t softirq-disabled locking
 *                            reader section
 * @sl: Pointer to seqlock_t
 */
static inline void read_sequnlock_excl_bh(seqlock_t *sl)
{
        spin_unlock_bh(&sl->lock);
}

/**
 * read_seqlock_excl_irq() - start a non-interruptible seqlock_t locking
 *                           reader section
 * @sl: Pointer to seqlock_t
 *
 * _irq variant of read_seqlock_excl(). Use this only if the seqlock_t
 * write side section, *or other read sections*, can be invoked from a
 * hardirq context.
 */
static inline void read_seqlock_excl_irq(seqlock_t *sl)
{
        spin_lock_irq(&sl->lock);
}

/**
 * read_sequnlock_excl_irq() - end an interrupts-disabled seqlock_t
 *                             locking reader section
 * @sl: Pointer to seqlock_t
 */
static inline void read_sequnlock_excl_irq(seqlock_t *sl)
{
        spin_unlock_irq(&sl->lock);
}

static inline unsigned long __read_seqlock_excl_irqsave(seqlock_t *sl)
{
        unsigned long flags;

        spin_lock_irqsave(&sl->lock, flags);
        return flags;
}

/**
 * read_seqlock_excl_irqsave() - start a non-interruptible seqlock_t
 *                               locking reader section
 * @lock:  Pointer to seqlock_t
 * @flags: Stack-allocated storage for saving caller's local interrupt
 *         state, to be passed to read_sequnlock_excl_irqrestore().
 *
 * _irqsave variant of read_seqlock_excl(). Use this only if the seqlock_t
 * write side section, *or other read sections*, can be invoked from a
 * hardirq context.
 */
#define read_seqlock_excl_irqsave(lock, flags)                          \
        do { flags = __read_seqlock_excl_irqsave(lock); } while (0)

/**
 * read_sequnlock_excl_irqrestore() - end non-interruptible seqlock_t
 *                                    locking reader section
 * @sl:    Pointer to seqlock_t
 * @flags: Caller saved interrupt state, from read_seqlock_excl_irqsave()
 */
static inline void
read_sequnlock_excl_irqrestore(seqlock_t *sl, unsigned long flags)
{
        spin_unlock_irqrestore(&sl->lock, flags);
}

/**
 * read_seqbegin_or_lock() - begin a seqlock_t lockless or locking reader
 * @lock: Pointer to seqlock_t
 * @seq : Marker and return parameter. If the passed value is even, the
 * reader will become a *lockless* seqlock_t reader as in read_seqbegin().
 * If the passed value is odd, the reader will become a *locking* reader
 * as in read_seqlock_excl().  In the first call to this function, the
 * caller *must* initialize and pass an even value to @seq; this way, a
 * lockless read can be optimistically tried first.
 *
 * read_seqbegin_or_lock is an API designed to optimistically try a normal
 * lockless seqlock_t read section first.  If an odd counter is found, the
 * lockless read trial has failed, and the next read iteration transforms
 * itself into a full seqlock_t locking reader.
 *
 * This is typically used to avoid seqlock_t lockless reader starvation
 * (too many retry loops) in the case of a sharp spike in write side
 * activity.
 *
 * Context: if the seqlock_t write section, *or other read sections*, can
 * be invoked from hardirq or softirq contexts, use the _irqsave or _bh
 * variant of this function instead.
 *
 * Check Documentation/locking/seqlock.rst for template example code.
 *
 * Return: the encountered sequence counter value, through the @seq
 * parameter, which is overloaded as a return parameter. This returned
 * value must be checked with need_seqretry(). If the read section needs
 * to be retried, this returned value must also be passed as the @seq
 * parameter of the next read_seqbegin_or_lock() iteration.
 */
static inline void read_seqbegin_or_lock(seqlock_t *lock, int *seq)
{
        if (!(*seq & 1))        /* Even */
                *seq = read_seqbegin(lock);
        else                    /* Odd */
                read_seqlock_excl(lock);
}

/**
 * need_seqretry() - validate seqlock_t "locking or lockless" read section
 * @lock: Pointer to seqlock_t
 * @seq: sequence count, from read_seqbegin_or_lock()
 *
 * Return: true if a read section retry is required, false otherwise
 */
static inline int need_seqretry(seqlock_t *lock, int seq)
{
        return !(seq & 1) && read_seqretry(lock, seq);
}

/**
 * done_seqretry() - end seqlock_t "locking or lockless" reader section
 * @lock: Pointer to seqlock_t
 * @seq: count, from read_seqbegin_or_lock()
 *
 * done_seqretry finishes the seqlock_t read side critical section started
 * with read_seqbegin_or_lock() and validated by need_seqretry().
 */
static inline void done_seqretry(seqlock_t *lock, int seq)
{
        if (seq & 1)
                read_sequnlock_excl(lock);
}
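
/*
 * Example (an illustrative sketch, not part of this header): the
 * "lockless first, then locking" reader template, as in
 * Documentation/locking/seqlock.rst. The marker must start even so the
 * first pass is lockless; "foo_lock" is hypothetical:
 *
 *      int seq = 0;    // even: try a lockless pass first
 *
 *      do {
 *              read_seqbegin_or_lock(&foo_lock, &seq);
 *
 *              // ... read the protected data ...
 *
 *      } while (need_seqretry(&foo_lock, seq));
 *      done_seqretry(&foo_lock, seq);
 *
 * Callers that want to force the locking-reader mode on a retry pass
 * set the marker odd before looping again (see e.g. fs/dcache.c).
 */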

/**
 * read_seqbegin_or_lock_irqsave() - begin a seqlock_t lockless reader, or
 *                                   a non-interruptible locking reader
 * @lock: Pointer to seqlock_t
 * @seq:  Marker and return parameter. Check read_seqbegin_or_lock().
 *
 * This is the _irqsave variant of read_seqbegin_or_lock(). Use it only if
 * the seqlock_t write section, *or other read sections*, can be invoked
 * from hardirq context.
 *
 * Note: Interrupts will be disabled only for "locking reader" mode.
 *
 * Return:
 *
 *   1. The saved local interrupts state in case of a locking reader, to
 *      be passed to done_seqretry_irqrestore().
 *
 *   2. The encountered sequence counter value, returned through @seq
 *      overloaded as a return parameter. Check read_seqbegin_or_lock().
 */
static inline unsigned long
read_seqbegin_or_lock_irqsave(seqlock_t *lock, int *seq)
{
        unsigned long flags = 0;

        if (!(*seq & 1))        /* Even */
                *seq = read_seqbegin(lock);
        else                    /* Odd */
                read_seqlock_excl_irqsave(lock, flags);

        return flags;
}

/**
 * done_seqretry_irqrestore() - end a seqlock_t lockless reader, or a
 *                              non-interruptible locking reader section
 * @lock:  Pointer to seqlock_t
 * @seq:   Count, from read_seqbegin_or_lock_irqsave()
 * @flags: Caller's saved local interrupt state in case of a locking
 *         reader, also from read_seqbegin_or_lock_irqsave()
 *
 * This is the _irqrestore variant of done_seqretry(). The read section
 * must've been opened with read_seqbegin_or_lock_irqsave(), and validated
 * by need_seqretry().
 */
static inline void
done_seqretry_irqrestore(seqlock_t *lock, int seq, unsigned long flags)
{
        if (seq & 1)
                read_sequnlock_excl_irqrestore(lock, flags);
}
#endif /* __LINUX_SEQLOCK_H */