/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_U64_STATS_SYNC_H
#define _LINUX_U64_STATS_SYNC_H

/*
 * To properly implement 64-bit network statistics on 32-bit and 64-bit hosts,
 * we provide a synchronization point, that is a no-op on 64-bit or UP kernels.
 *
 * Key points :
 *
 * 1) Use a seqcount on SMP 32-bit, with low overhead.
 *
 * 2) The whole thing is a no-op on 64-bit arches or UP kernels.
 *
 * 3) The write side must ensure mutual exclusion, or one seqcount update could
 *    be lost, thus blocking readers forever.
 *    If this synchronization point is not a mutex, but a spinlock,
 *    spin_lock_bh() or local_bh_disable() :
 *    3.1) The write side should not sleep.
 *    3.2) The write side should not allow preemption.
 *    3.3) If applicable, interrupts should be disabled.
 *
 * 4) If a reader fetches several counters, there is no guarantee the whole
 *    values are consistent (remember point #1: this is a no-op on 64-bit
 *    arches anyway).
 *
 * 5) Readers are allowed to sleep or be preempted/interrupted: they perform
 *    pure reads. But if they have to fetch many values, it is better to not
 *    allow preemption/interruption, to avoid many retries.
 *
 * 6) If the counter might be written by an interrupt, readers should block
 *    interrupts. (On UP, there is no seqcount_t protection, so a reader
 *    allowing interrupts could read partial values.)
 *
 * 7) For irq and softirq uses, readers can use the u64_stats_fetch_begin_irq()
 *    and u64_stats_fetch_retry_irq() helpers.
 *
 * Usage :
 *
 * A stats producer (writer) should use the following template, provided it
 * already has exclusive access to the counters (a lock is already taken, or
 * per-cpu data is used [in a non-preemptible context]):
 *
 *   spin_lock_bh(...) or other synchronization to get exclusive access
 *   ...
 *   u64_stats_update_begin(&stats->syncp);
 *   u64_stats_add(&stats->bytes64, len);   // non atomic operation
 *   u64_stats_inc(&stats->packets64);      // non atomic operation
 *   u64_stats_update_end(&stats->syncp);
 *
 * While a consumer (reader) should use the following template to get a
 * consistent snapshot of each variable (but no guarantee across several ones):
 *
 * u64 tbytes, tpackets;
 * unsigned int start;
 *
 * do {
 *         start = u64_stats_fetch_begin(&stats->syncp);
 *         tbytes = u64_stats_read(&stats->bytes64);     // non atomic operation
 *         tpackets = u64_stats_read(&stats->packets64); // non atomic operation
 * } while (u64_stats_fetch_retry(&stats->syncp, start));
 *
 * Example of use in drivers/net/loopback.c, using per_cpu containers,
 * in BH disabled context.
 */
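
/*
 * A minimal sketch of how the pieces fit together, using a hypothetical
 * per-cpu container (struct and field names are illustrative; loopback.c
 * differs in detail):
 *
 *	struct pcpu_lstats {
 *		u64_stats_t		packets64;
 *		u64_stats_t		bytes64;
 *		struct u64_stats_sync	syncp;
 *	};
 *
 *	struct pcpu_lstats __percpu *lstats;
 *	int cpu;
 *
 *	lstats = alloc_percpu(struct pcpu_lstats);
 *	for_each_possible_cpu(cpu)
 *		u64_stats_init(&per_cpu_ptr(lstats, cpu)->syncp);
 */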

#include <linux/seqlock.h>

struct u64_stats_sync {
#if BITS_PER_LONG == 32 && defined(CONFIG_SMP)
	seqcount_t	seq;
#endif
};

#if BITS_PER_LONG == 64
#include <asm/local64.h>

/* 64-bit: u64 reads and updates are atomic by themselves, no seqcount needed. */
typedef struct {
	local64_t	v;
} u64_stats_t;

static inline u64 u64_stats_read(const u64_stats_t *p)
{
	return local64_read(&p->v);
}

static inline void u64_stats_add(u64_stats_t *p, unsigned long val)
{
	local64_add(val, &p->v);
}

static inline void u64_stats_inc(u64_stats_t *p)
{
	local64_inc(&p->v);
}

#else

/* 32-bit: plain u64, torn reads prevented by the syncp seqcount on SMP. */
typedef struct {
	u64		v;
} u64_stats_t;

static inline u64 u64_stats_read(const u64_stats_t *p)
{
	return p->v;
}

static inline void u64_stats_add(u64_stats_t *p, unsigned long val)
{
	p->v += val;
}

static inline void u64_stats_inc(u64_stats_t *p)
{
	p->v++;
}

#endif
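
/*
 * Whatever the word size, the accessors above are used identically; only
 * their implementation differs. A tiny illustrative sketch (the counter
 * name is hypothetical):
 *
 *	u64_stats_t dropped;
 *
 *	u64_stats_inc(&dropped);	// writer, inside update_begin/end
 *	u64_stats_add(&dropped, 16);	// writer, inside update_begin/end
 *	pr_info("dropped=%llu\n",
 *		(unsigned long long)u64_stats_read(&dropped));	// reader
 */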

static inline void u64_stats_init(struct u64_stats_sync *syncp)
{
#if BITS_PER_LONG == 32 && defined(CONFIG_SMP)
	seqcount_init(&syncp->seq);
#endif
}

static inline void u64_stats_update_begin(struct u64_stats_sync *syncp)
{
#if BITS_PER_LONG == 32 && defined(CONFIG_SMP)
	write_seqcount_begin(&syncp->seq);
#endif
}

static inline void u64_stats_update_end(struct u64_stats_sync *syncp)
{
#if BITS_PER_LONG == 32 && defined(CONFIG_SMP)
	write_seqcount_end(&syncp->seq);
#endif
}

static inline unsigned long
u64_stats_update_begin_irqsave(struct u64_stats_sync *syncp)
{
	unsigned long flags = 0;

#if BITS_PER_LONG == 32 && defined(CONFIG_SMP)
	local_irq_save(flags);
	write_seqcount_begin(&syncp->seq);
#endif
	return flags;
}

static inline void
u64_stats_update_end_irqrestore(struct u64_stats_sync *syncp,
				unsigned long flags)
{
#if BITS_PER_LONG == 32 && defined(CONFIG_SMP)
	write_seqcount_end(&syncp->seq);
	local_irq_restore(flags);
#endif
}
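
/*
 * Sketch: a writer that can also run from hard-irq context on 32-bit hosts
 * should use the irqsave variants, so that an interrupt arriving between
 * update_begin and update_end cannot deadlock on the seqcount (names are
 * illustrative, as in the header comment above):
 *
 *	unsigned long flags;
 *
 *	flags = u64_stats_update_begin_irqsave(&stats->syncp);
 *	u64_stats_inc(&stats->packets64);
 *	u64_stats_update_end_irqrestore(&stats->syncp, flags);
 */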

static inline unsigned int __u64_stats_fetch_begin(const struct u64_stats_sync *syncp)
{
#if BITS_PER_LONG == 32 && defined(CONFIG_SMP)
	return read_seqcount_begin(&syncp->seq);
#else
	return 0;
#endif
}

static inline unsigned int u64_stats_fetch_begin(const struct u64_stats_sync *syncp)
{
#if BITS_PER_LONG == 32 && !defined(CONFIG_SMP)
	preempt_disable();
#endif
	return __u64_stats_fetch_begin(syncp);
}

static inline bool __u64_stats_fetch_retry(const struct u64_stats_sync *syncp,
					   unsigned int start)
{
#if BITS_PER_LONG == 32 && defined(CONFIG_SMP)
	return read_seqcount_retry(&syncp->seq, start);
#else
	return false;
#endif
}

static inline bool u64_stats_fetch_retry(const struct u64_stats_sync *syncp,
					 unsigned int start)
{
#if BITS_PER_LONG == 32 && !defined(CONFIG_SMP)
	preempt_enable();
#endif
	return __u64_stats_fetch_retry(syncp, start);
}
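
/*
 * Sketch: aggregating per-cpu counters with the fetch helpers, reusing the
 * hypothetical pcpu_lstats layout sketched near the top of this file:
 *
 *	u64 packets = 0, bytes = 0;
 *	int cpu;
 *
 *	for_each_possible_cpu(cpu) {
 *		const struct pcpu_lstats *s = per_cpu_ptr(lstats, cpu);
 *		unsigned int start;
 *		u64 tp, tb;
 *
 *		do {
 *			start = u64_stats_fetch_begin(&s->syncp);
 *			tp = u64_stats_read(&s->packets64);
 *			tb = u64_stats_read(&s->bytes64);
 *		} while (u64_stats_fetch_retry(&s->syncp, start));
 *		packets += tp;
 *		bytes += tb;
 *	}
 */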

/*
 * In case irq handlers can update u64 counters, readers can use the following
 * helpers :
 * - SMP 32-bit arches use seqcount protection, irq safe.
 * - UP 32-bit must disable irqs.
 * - 64-bit arches can atomically read u64 values, irq safe.
 */
static inline unsigned int u64_stats_fetch_begin_irq(const struct u64_stats_sync *syncp)
{
#if BITS_PER_LONG == 32 && !defined(CONFIG_SMP)
	local_irq_disable();
#endif
	return __u64_stats_fetch_begin(syncp);
}

static inline bool u64_stats_fetch_retry_irq(const struct u64_stats_sync *syncp,
					     unsigned int start)
{
#if BITS_PER_LONG == 32 && !defined(CONFIG_SMP)
	local_irq_enable();
#endif
	return __u64_stats_fetch_retry(syncp, start);
}
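
/*
 * Sketch: the same reader loop when the writer may run from (soft)irq
 * context, using the _irq flavors (names are illustrative, as above):
 *
 *	unsigned int start;
 *	u64 tp;
 *
 *	do {
 *		start = u64_stats_fetch_begin_irq(&stats->syncp);
 *		tp = u64_stats_read(&stats->packets64);
 *	} while (u64_stats_fetch_retry_irq(&stats->syncp, start));
 */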

#endif /* _LINUX_U64_STATS_SYNC_H */