#ifndef __LINUX_SEQLOCK_H
#define __LINUX_SEQLOCK_H
/*
 * Reader/writer consistent mechanism without starving writers. This type of
 * lock is for data where the reader wants a consistent set of information
 * and is willing to retry if the information changes. Readers never
 * block, but they may have to retry if a writer is in
 * progress. Writers do not wait for readers.
 *
 * This is not as cache friendly as brlock. Also, this will not work
 * for data that contains pointers, because any writer could
 * invalidate a pointer that a reader was following.
 *
 * Expected reader usage:
 *	do {
 *		seq = read_seqbegin(&foo);
 *		...
 *	} while (read_seqretry(&foo, seq));
 *
 *
 * On non-SMP the spin locks disappear, but the writer still needs
 * to increment the sequence variables because an interrupt routine could
 * change the state of the data.
 *
 * Based on x86_64 vsyscall gettimeofday
 * by Keith Owens and Andrea Arcangeli
 */

#include <linux/spinlock.h>
#include <linux/preempt.h>

typedef struct {
	unsigned sequence;
	spinlock_t lock;
} seqlock_t;

/*
 * These macros triggered gcc-3.x compile-time problems. We think these are
 * OK now. Be cautious.
 */
#define SEQLOCK_UNLOCKED { 0, SPIN_LOCK_UNLOCKED }
#define seqlock_init(x)	do { *(x) = (seqlock_t) SEQLOCK_UNLOCKED; } while (0)

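/*
 * Initialization sketch (the names "my_lock" and "struct foo" are
 * hypothetical, not part of this header). A seqlock can be initialized
 * statically with SEQLOCK_UNLOCKED or at run time with seqlock_init():
 *
 *	static seqlock_t my_lock = SEQLOCK_UNLOCKED;
 *
 *	struct foo {
 *		seqlock_t lock;
 *		unsigned long a, b;
 *	};
 *
 *	void foo_init(struct foo *f)
 *	{
 *		seqlock_init(&f->lock);
 *		f->a = f->b = 0;
 *	}
 */
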
/* Lock out other writers and update the count.
 * Acts like a normal spin_lock/unlock.
 * Don't need preempt_disable() because that is in the spin_lock already.
 */
static inline void write_seqlock(seqlock_t *sl)
{
	spin_lock(&sl->lock);
	++sl->sequence;
	smp_wmb();
}

static inline void write_sequnlock(seqlock_t *sl)
{
	smp_wmb();
	sl->sequence++;
	spin_unlock(&sl->lock);
}

static inline int write_tryseqlock(seqlock_t *sl)
{
	int ret = spin_trylock(&sl->lock);

	if (ret) {
		++sl->sequence;
		smp_wmb();
	}
	return ret;
}

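/*
 * Writer-side usage sketch (the names "time_lock" and "shared_time" are
 * hypothetical, not part of this header). Writers serialize against each
 * other through the embedded spinlock, and the sequence bumps tell
 * concurrent readers to retry:
 *
 *	static seqlock_t time_lock = SEQLOCK_UNLOCKED;
 *	static struct { unsigned long sec, nsec; } shared_time;
 *
 *	static void update_time(unsigned long sec, unsigned long nsec)
 *	{
 *		write_seqlock(&time_lock);
 *		shared_time.sec  = sec;
 *		shared_time.nsec = nsec;
 *		write_sequnlock(&time_lock);
 *	}
 */
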
/* Start of read calculation -- fetch last complete writer token */
static __always_inline unsigned read_seqbegin(const seqlock_t *sl)
{
	unsigned ret = sl->sequence;
	smp_rmb();
	return ret;
}

/* Test if the reader processed invalid data.
 * If the initial value is odd,
 * then a writer had already started when the section was entered.
 * If the sequence value changed,
 * then a writer changed the data while in the section.
 *
 * Using xor saves one conditional branch.
 */
static __always_inline int read_seqretry(const seqlock_t *sl, unsigned iv)
{
	smp_rmb();
	return (iv & 1) | (sl->sequence ^ iv);
}
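
/*
 * Reader-side sketch matching the hypothetical writer above. The copy is
 * retried if a writer was already active at read_seqbegin() or completed
 * an update before read_seqretry():
 *
 *	static void get_time(unsigned long *sec, unsigned long *nsec)
 *	{
 *		unsigned seq;
 *
 *		do {
 *			seq   = read_seqbegin(&time_lock);
 *			*sec  = shared_time.sec;
 *			*nsec = shared_time.nsec;
 *		} while (read_seqretry(&time_lock, seq));
 *	}
 */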


/*
 * Version using sequence counter only.
 * This can be used when the code has its own mutex protecting the
 * update, with the mutex held from before write_seqcount_begin()
 * until after write_seqcount_end().
 */

typedef struct seqcount {
	unsigned sequence;
} seqcount_t;

#define SEQCNT_ZERO { 0 }
#define seqcount_init(x)	do { *(x) = (seqcount_t) SEQCNT_ZERO; } while (0)

/* Start of read using pointer to a sequence counter only. */
static inline unsigned read_seqcount_begin(const seqcount_t *s)
{
	unsigned ret = s->sequence;
	smp_rmb();
	return ret;
}

/* Test if the reader processed invalid data.
 * Equivalent to: iv is odd or the sequence number has changed.
 *	(iv & 1) || (*s != iv)
 * Using xor saves one conditional branch.
 */
static inline int read_seqcount_retry(const seqcount_t *s, unsigned iv)
{
	smp_rmb();
	return (iv & 1) | (s->sequence ^ iv);
}
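
/*
 * Lockless reader sketch for the counter-only variant (the names
 * "stats_seq" and "stats_bytes" are hypothetical). The reader takes no
 * lock at all; it only snapshots the counter and retries on change:
 *
 *	static seqcount_t stats_seq = SEQCNT_ZERO;
 *	static unsigned long stats_bytes;
 *
 *	static unsigned long stats_read(void)
 *	{
 *		unsigned seq;
 *		unsigned long ret;
 *
 *		do {
 *			seq = read_seqcount_begin(&stats_seq);
 *			ret = stats_bytes;
 *		} while (read_seqcount_retry(&stats_seq, seq));
 *		return ret;
 *	}
 */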


/*
 * Sequence counter only version assumes that callers are using their
 * own mutexing.
 */
static inline void write_seqcount_begin(seqcount_t *s)
{
	s->sequence++;
	smp_wmb();
}

static inline void write_seqcount_end(seqcount_t *s)
{
	smp_wmb();
	s->sequence++;
}
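
/*
 * Writer-side sketch continuing the hypothetical stats example above.
 * Because seqcount_t provides no writer serialization itself, an
 * external lock (here a spinlock) must keep writers apart:
 *
 *	static spinlock_t stats_lock = SPIN_LOCK_UNLOCKED;
 *
 *	static void stats_add(unsigned long bytes)
 *	{
 *		spin_lock(&stats_lock);
 *		write_seqcount_begin(&stats_seq);
 *		stats_bytes += bytes;
 *		write_seqcount_end(&stats_seq);
 *		spin_unlock(&stats_lock);
 *	}
 */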

/*
 * Possible sw/hw IRQ protected versions of the interfaces.
 */
#define write_seqlock_irqsave(lock, flags) \
	do { local_irq_save(flags); write_seqlock(lock); } while (0)
#define write_seqlock_irq(lock) \
	do { local_irq_disable(); write_seqlock(lock); } while (0)
#define write_seqlock_bh(lock) \
	do { local_bh_disable(); write_seqlock(lock); } while (0)

#define write_sequnlock_irqrestore(lock, flags) \
	do { write_sequnlock(lock); local_irq_restore(flags); } while (0)
#define write_sequnlock_irq(lock) \
	do { write_sequnlock(lock); local_irq_enable(); } while (0)
#define write_sequnlock_bh(lock) \
	do { write_sequnlock(lock); local_bh_enable(); } while (0)

#define read_seqbegin_irqsave(lock, flags) \
	({ local_irq_save(flags); read_seqbegin(lock); })

#define read_seqretry_irqrestore(lock, iv, flags) \
	({ \
		int ret = read_seqretry(lock, iv); \
		local_irq_restore(flags); \
		ret; \
	})
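
/*
 * Sketch of the IRQ-safe read side (same hypothetical time example as
 * above), for when the reader must also keep local interrupts off across
 * the section, e.g. because the writer runs from hard-IRQ context on
 * this CPU:
 *
 *	unsigned long flags, sec;
 *	unsigned seq;
 *
 *	do {
 *		seq = read_seqbegin_irqsave(&time_lock, flags);
 *		sec = shared_time.sec;
 *	} while (read_seqretry_irqrestore(&time_lock, seq, flags));
 */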

#endif /* __LINUX_SEQLOCK_H */