/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_RCULIST_H
#define _LINUX_RCULIST_H

#ifdef __KERNEL__

/*
 * RCU-protected list version
 */
#include <linux/list.h>
#include <linux/rcupdate.h>

/*
 * INIT_LIST_HEAD_RCU - Initialize a list_head visible to RCU readers
 * @list: list to be initialized
 *
 * You should instead use INIT_LIST_HEAD() for normal initialization and
 * cleanup tasks, when readers have no access to the list being initialized.
 * However, if the list being initialized is visible to readers, you
 * need to keep the compiler from being too mischievous.
 */
static inline void INIT_LIST_HEAD_RCU(struct list_head *list)
{
        WRITE_ONCE(list->next, list);
        WRITE_ONCE(list->prev, list);
}

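/*
 * Illustrative sketch, not part of this header: resetting a list head
 * that concurrent RCU readers can still see.  The names foo_head and
 * foo_lock are made up for this example.  Because INIT_LIST_HEAD_RCU()
 * uses WRITE_ONCE(), a concurrent reader running list_empty() sees
 * either the old list or an empty one, never a torn header:
 *
 *      spin_lock(&foo_lock);           // exclude other updaters
 *      INIT_LIST_HEAD_RCU(&foo_head);  // readers now see an empty list
 *      spin_unlock(&foo_lock);
 *
 * Entries that were on the list must not be freed until a grace period
 * has elapsed, as pre-existing readers may still be traversing them.
 */
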
/*
 * Return the ->next pointer of a list_head in an RCU-safe way; don't
 * access it directly.
 */
#define list_next_rcu(list)     (*((struct list_head __rcu **)(&(list)->next)))
/*
 * Return the ->prev pointer of a list_head in an RCU-safe way.  Don't
 * access it directly.
 *
 * Any list traversed with list_bidir_prev_rcu() must never use
 * list_del_rcu().  Doing so will poison the ->prev pointer that
 * list_bidir_prev_rcu() relies on, which will result in segfaults.
 * To prevent these segfaults, use list_bidir_del_rcu() instead
 * of list_del_rcu().
 */
#define list_bidir_prev_rcu(list) (*((struct list_head __rcu **)(&(list)->prev)))

/**
 * list_tail_rcu - returns the prev pointer of the head of the list
 * @head: the head of the list
 *
 * Note: This should only be used with the list header, and even then
 * only if list_del() and similar primitives are not also used on the
 * list header.
 */
#define list_tail_rcu(head)     (*((struct list_head __rcu **)(&(head)->prev)))

/*
 * Check during list traversal that we are within an RCU reader
 */

#define check_arg_count_one(dummy)

#ifdef CONFIG_PROVE_RCU_LIST
#define __list_check_rcu(dummy, cond, extra...)                         \
        ({                                                              \
        check_arg_count_one(extra);                                     \
        RCU_LOCKDEP_WARN(!(cond) && !rcu_read_lock_any_held(),          \
                         "RCU-list traversed in non-reader section!");  \
        })

#define __list_check_srcu(cond)                                         \
        ({                                                              \
        RCU_LOCKDEP_WARN(!(cond),                                       \
                "RCU-list traversed without holding the required lock!");\
        })
#else
#define __list_check_rcu(dummy, cond, extra...)                         \
        ({ check_arg_count_one(extra); })

#define __list_check_srcu(cond) ({ })
#endif

/*
 * Insert a new entry between two known consecutive entries.
 *
 * This is only for internal list manipulation where we know
 * the prev/next entries already!
 */
static inline void __list_add_rcu(struct list_head *new,
                struct list_head *prev, struct list_head *next)
{
        if (!__list_add_valid(new, prev, next))
                return;

        new->next = next;
        new->prev = prev;
        rcu_assign_pointer(list_next_rcu(prev), new);
        next->prev = new;
}

/**
 * list_add_rcu - add a new entry to rcu-protected list
 * @new: new entry to be added
 * @head: list head to add it after
 *
 * Insert a new entry after the specified head.
 * This is good for implementing stacks.
 *
 * The caller must take whatever precautions are necessary
 * (such as holding appropriate locks) to avoid racing
 * with another list-mutation primitive, such as list_add_rcu()
 * or list_del_rcu(), running on this same list.
 * However, it is perfectly legal to run concurrently with
 * the _rcu list-traversal primitives, such as
 * list_for_each_entry_rcu().
 */
static inline void list_add_rcu(struct list_head *new, struct list_head *head)
{
        __list_add_rcu(new, head, head->next);
}

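/*
 * Illustrative sketch, not part of this header: a typical writer using
 * list_add_rcu() under a lock while readers traverse locklessly.  The
 * struct foo, foo_head, and foo_lock names are made up for this example.
 *
 *      struct foo {
 *              int key;
 *              struct list_head list;
 *      };
 *      static LIST_HEAD(foo_head);
 *      static DEFINE_SPINLOCK(foo_lock);
 *
 *      void foo_insert(struct foo *p)
 *      {
 *              spin_lock(&foo_lock);           // serialize updaters
 *              list_add_rcu(&p->list, &foo_head);
 *              spin_unlock(&foo_lock);         // readers need no lock
 *      }
 */
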
/**
 * list_add_tail_rcu - add a new entry to rcu-protected list
 * @new: new entry to be added
 * @head: list head to add it before
 *
 * Insert a new entry before the specified head.
 * This is useful for implementing queues.
 *
 * The caller must take whatever precautions are necessary
 * (such as holding appropriate locks) to avoid racing
 * with another list-mutation primitive, such as list_add_tail_rcu()
 * or list_del_rcu(), running on this same list.
 * However, it is perfectly legal to run concurrently with
 * the _rcu list-traversal primitives, such as
 * list_for_each_entry_rcu().
 */
static inline void list_add_tail_rcu(struct list_head *new,
                                        struct list_head *head)
{
        __list_add_rcu(new, head->prev, head);
}

/**
 * list_del_rcu - deletes entry from list without re-initialization
 * @entry: the element to delete from the list.
 *
 * Note: list_empty() on entry does not return true after this; the
 * entry is in an undefined state.  It is useful for RCU-based
 * lockfree traversal.
 *
 * In particular, it means that we can not poison the forward
 * pointers that may still be used for walking the list.
 *
 * The caller must take whatever precautions are necessary
 * (such as holding appropriate locks) to avoid racing
 * with another list-mutation primitive, such as list_del_rcu()
 * or list_add_rcu(), running on this same list.
 * However, it is perfectly legal to run concurrently with
 * the _rcu list-traversal primitives, such as
 * list_for_each_entry_rcu().
 *
 * Note that the caller is not permitted to immediately free
 * the newly deleted entry.  Instead, either synchronize_rcu()
 * or call_rcu() must be used to defer freeing until an RCU
 * grace period has elapsed.
 */
static inline void list_del_rcu(struct list_head *entry)
{
        __list_del_entry(entry);
        entry->prev = LIST_POISON2;
}

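/*
 * Illustrative sketch, not part of this header: removal followed by a
 * deferred free, continuing the hypothetical struct foo example above.
 * The synchronize_rcu() call ensures that no reader can still hold a
 * reference to *p when it is freed; call_rcu() could be used instead
 * to avoid blocking.
 *
 *      void foo_remove(struct foo *p)
 *      {
 *              spin_lock(&foo_lock);
 *              list_del_rcu(&p->list);         // unlink from the list
 *              spin_unlock(&foo_lock);
 *              synchronize_rcu();              // wait for readers to finish
 *              kfree(p);                       // now safe to free
 *      }
 */
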
/**
 * list_bidir_del_rcu - deletes entry from list without re-initialization
 * @entry: the element to delete from the list.
 *
 * In contrast to list_del_rcu(), this function doesn't poison the ->prev
 * pointer, thus allowing backwards traversal via list_bidir_prev_rcu().
 *
 * Note: list_empty() on entry does not return true after this because
 * the entry is in a special undefined state that permits RCU-based
 * lockfree reverse traversal.  In particular this means that we can not
 * poison the forward and backwards pointers that may still be used for
 * walking the list.
 *
 * The caller must take whatever precautions are necessary (such as
 * holding appropriate locks) to avoid racing with another list-mutation
 * primitive, such as list_bidir_del_rcu() or list_add_rcu(), running on
 * this same list.  However, it is perfectly legal to run concurrently
 * with the _rcu list-traversal primitives, such as
 * list_for_each_entry_rcu().
 *
 * Note that list_del_rcu() and list_bidir_del_rcu() must not be used on
 * the same list.
 *
 * Note that the caller is not permitted to immediately free
 * the newly deleted entry.  Instead, either synchronize_rcu()
 * or call_rcu() must be used to defer freeing until an RCU
 * grace period has elapsed.
 */
static inline void list_bidir_del_rcu(struct list_head *entry)
{
        __list_del_entry(entry);
}

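/*
 * Illustrative sketch, not part of this header: reverse traversal of a
 * list whose updaters use only list_bidir_del_rcu() (never
 * list_del_rcu(), which would poison ->prev).  The foo_head, struct foo,
 * and process() names are made up, carried over from the earlier
 * sketches.
 *
 *      struct list_head *p;
 *
 *      rcu_read_lock();
 *      for (p = rcu_dereference(list_bidir_prev_rcu(&foo_head));
 *           p != &foo_head;
 *           p = rcu_dereference(list_bidir_prev_rcu(p)))
 *              process(list_entry(p, struct foo, list));
 *      rcu_read_unlock();
 */
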
/**
 * hlist_del_init_rcu - deletes entry from hash list with re-initialization
 * @n: the element to delete from the hash list.
 *
 * Note: hlist_unhashed() on the node returns true after this.  It is
 * useful for RCU-based read lockfree traversal if the writer side
 * must know if the list entry is still hashed or already unhashed.
 *
 * In particular, it means that we can not poison the forward pointers
 * that may still be used for walking the hash list and we can only
 * zero the pprev pointer so hlist_unhashed() will return true after
 * this.
 *
 * The caller must take whatever precautions are necessary (such as
 * holding appropriate locks) to avoid racing with another
 * list-mutation primitive, such as hlist_add_head_rcu() or
 * hlist_del_rcu(), running on this same list.  However, it is
 * perfectly legal to run concurrently with the _rcu list-traversal
 * primitives, such as hlist_for_each_entry_rcu().
 */
static inline void hlist_del_init_rcu(struct hlist_node *n)
{
        if (!hlist_unhashed(n)) {
                __hlist_del(n);
                WRITE_ONCE(n->pprev, NULL);
        }
}

/**
 * list_replace_rcu - replace old entry by new one
 * @old : the element to be replaced
 * @new : the new element to insert
 *
 * The @old entry will be replaced with the @new entry atomically from
 * the perspective of concurrent readers.  It is the caller's responsibility
 * to synchronize with concurrent updaters, if any.
 *
 * Note: @old should not be empty.
 */
static inline void list_replace_rcu(struct list_head *old,
                                struct list_head *new)
{
        new->next = old->next;
        new->prev = old->prev;
        rcu_assign_pointer(list_next_rcu(new->prev), new);
        new->next->prev = new;
        old->prev = LIST_POISON2;
}

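/*
 * Illustrative sketch, not part of this header: substituting an updated
 * copy of an element so that readers see either the old or the new
 * version, never a mix.  The names reuse the hypothetical struct foo
 * example from earlier sketches.
 *
 *      spin_lock(&foo_lock);
 *      list_replace_rcu(&old->list, &new->list); // readers see old or new
 *      spin_unlock(&foo_lock);
 *      synchronize_rcu();                        // wait out readers of old
 *      kfree(old);
 */
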
/**
 * __list_splice_init_rcu - join an RCU-protected list into an existing list.
 * @list: the RCU-protected list to splice
 * @prev: points to the last element of the existing list
 * @next: points to the first element of the existing list
 * @sync: synchronize_rcu, synchronize_rcu_expedited, ...
 *
 * The list pointed to by @prev and @next can be RCU-read traversed
 * concurrently with this function.
 *
 * Note that this function blocks.
 *
 * Important note: the caller must take whatever action is necessary to prevent
 * any other updates to the existing list.  In principle, it is possible to
 * modify the list as soon as sync() begins execution.  If this sort of thing
 * becomes necessary, an alternative version based on call_rcu() could be
 * created.  But only if -really- needed -- there is no shortage of RCU API
 * members.
 */
static inline void __list_splice_init_rcu(struct list_head *list,
                                          struct list_head *prev,
                                          struct list_head *next,
                                          void (*sync)(void))
{
        struct list_head *first = list->next;
        struct list_head *last = list->prev;

        /*
         * "first" and "last" now track the contents of the source list,
         * so reinitialize its header.  RCU readers have access to this
         * list, so we must use INIT_LIST_HEAD_RCU() instead of
         * INIT_LIST_HEAD().
         */

        INIT_LIST_HEAD_RCU(list);

        /*
         * At this point, the list body still points to the source list.
         * Wait for any readers to finish using the list before splicing
         * the list body into the new list.  Any new readers will see
         * an empty list.
         */

        sync();
        ASSERT_EXCLUSIVE_ACCESS(*first);
        ASSERT_EXCLUSIVE_ACCESS(*last);

        /*
         * Readers are finished with the source list, so perform splice.
         * The order is important if the new list is global and accessible
         * to concurrent RCU readers.  Note that RCU readers are not
         * permitted to traverse the prev pointers without excluding
         * this function.
         */

        last->next = next;
        rcu_assign_pointer(list_next_rcu(prev), first);
        first->prev = prev;
        next->prev = last;
}

/**
 * list_splice_init_rcu - splice an RCU-protected list into an existing list,
 *                        designed for stacks.
 * @list: the RCU-protected list to splice
 * @head: the place in the existing list to splice the first list into
 * @sync: synchronize_rcu, synchronize_rcu_expedited, ...
 */
static inline void list_splice_init_rcu(struct list_head *list,
                                        struct list_head *head,
                                        void (*sync)(void))
{
        if (!list_empty(list))
                __list_splice_init_rcu(list, head, head->next, sync);
}

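/*
 * Illustrative sketch, not part of this header: draining a shared,
 * RCU-read list into a private one.  A mutex (not a spinlock) must be
 * held here because the passed-in sync function blocks.  The names
 * foo_head, foo_mutex, and drained are made up for this example.
 *
 *      static LIST_HEAD(drained);
 *
 *      mutex_lock(&foo_mutex);
 *      list_splice_init_rcu(&foo_head, &drained, synchronize_rcu);
 *      mutex_unlock(&foo_mutex);
 *      // foo_head is now empty; "drained" holds the old entries and,
 *      // because sync() waited out all pre-existing readers, may be
 *      // walked by this thread without RCU protection.
 */
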
/**
 * list_splice_tail_init_rcu - splice an RCU-protected list into an existing
 *                             list, designed for queues.
 * @list: the RCU-protected list to splice
 * @head: the place in the existing list to splice the first list into
 * @sync: synchronize_rcu, synchronize_rcu_expedited, ...
 */
static inline void list_splice_tail_init_rcu(struct list_head *list,
                                             struct list_head *head,
                                             void (*sync)(void))
{
        if (!list_empty(list))
                __list_splice_init_rcu(list, head->prev, head, sync);
}

/**
 * list_entry_rcu - get the struct for this entry
 * @ptr: the &struct list_head pointer.
 * @type: the type of the struct this is embedded in.
 * @member: the name of the list_head within the struct.
 *
 * This primitive may safely run concurrently with the _rcu list-mutation
 * primitives such as list_add_rcu() as long as it's guarded by rcu_read_lock().
 */
#define list_entry_rcu(ptr, type, member) \
        container_of(READ_ONCE(ptr), type, member)

/*
 * Where are list_empty_rcu() and list_first_entry_rcu()?
 *
 * They do not exist because they would lead to subtle race conditions:
 *
 * if (!list_empty_rcu(mylist)) {
 *      struct foo *bar = list_first_entry_rcu(mylist, struct foo, list_member);
 *      do_something(bar);
 * }
 *
 * The list might be non-empty when list_empty_rcu() checks it, but it
 * might have become empty by the time that list_first_entry_rcu() rereads
 * the ->next pointer, which would result in a SEGV.
 *
 * When not using RCU, it is OK for list_first_entry() to re-read that
 * pointer because both functions should be protected by some lock that
 * blocks writers.
 *
 * When using RCU, list_empty() uses READ_ONCE() to fetch the
 * RCU-protected ->next pointer and then compares it to the address of the
 * list head.  However, it neither dereferences this pointer nor provides
 * this pointer to its caller.  Thus, READ_ONCE() suffices (that is,
 * rcu_dereference() is not needed), which means that list_empty() can be
 * used anywhere you would want to use list_empty_rcu().  Just don't
 * expect anything useful to happen if you do a subsequent lockless
 * call to list_first_entry_rcu()!!!
 *
 * See list_first_or_null_rcu() for an alternative.
 */

/**
 * list_first_or_null_rcu - get the first element from a list
 * @ptr: the list head to take the element from.
 * @type: the type of the struct this is embedded in.
 * @member: the name of the list_head within the struct.
 *
 * Note that if the list is empty, it returns NULL.
 *
 * This primitive may safely run concurrently with the _rcu list-mutation
 * primitives such as list_add_rcu() as long as it's guarded by rcu_read_lock().
 */
#define list_first_or_null_rcu(ptr, type, member) \
({ \
        struct list_head *__ptr = (ptr); \
        struct list_head *__next = READ_ONCE(__ptr->next); \
        likely(__ptr != __next) ? list_entry_rcu(__next, type, member) : NULL; \
})

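/*
 * Illustrative sketch, not part of this header: the race-free
 * replacement for the list_empty()/list_first_entry() pattern discussed
 * above, reusing the hypothetical struct foo names from earlier
 * sketches.
 *
 *      struct foo *bar;
 *
 *      rcu_read_lock();
 *      bar = list_first_or_null_rcu(&foo_head, struct foo, list);
 *      if (bar)
 *              do_something(bar);
 *      rcu_read_unlock();
 */
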
/**
 * list_next_or_null_rcu - get the next element from a list
 * @head: the head for the list.
 * @ptr: the list head to take the next element from.
 * @type: the type of the struct this is embedded in.
 * @member: the name of the list_head within the struct.
 *
 * Note that if the ptr is at the end of the list, NULL is returned.
 *
 * This primitive may safely run concurrently with the _rcu list-mutation
 * primitives such as list_add_rcu() as long as it's guarded by rcu_read_lock().
 */
#define list_next_or_null_rcu(head, ptr, type, member) \
({ \
        struct list_head *__head = (head); \
        struct list_head *__ptr = (ptr); \
        struct list_head *__next = READ_ONCE(__ptr->next); \
        likely(__next != __head) ? list_entry_rcu(__next, type, \
                                                  member) : NULL; \
})

/**
 * list_for_each_entry_rcu - iterate over rcu list of given type
 * @pos: the type * to use as a loop cursor.
 * @head: the head for your list.
 * @member: the name of the list_head within the struct.
 * @cond: optional lockdep expression if called from non-RCU protection.
 *
 * This list-traversal primitive may safely run concurrently with
 * the _rcu list-mutation primitives such as list_add_rcu()
 * as long as the traversal is guarded by rcu_read_lock().
 */
#define list_for_each_entry_rcu(pos, head, member, cond...)             \
        for (__list_check_rcu(dummy, ## cond, 0),                       \
             pos = list_entry_rcu((head)->next, typeof(*pos), member);  \
             &pos->member != (head);                                    \
             pos = list_entry_rcu(pos->member.next, typeof(*pos), member))

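/*
 * Illustrative sketch, not part of this header: a lockless lookup with
 * list_for_each_entry_rcu(), plus the optional lockdep expression for a
 * caller that holds the update-side lock instead.  The names reuse the
 * hypothetical struct foo example from earlier sketches.
 *
 *      struct foo *p;
 *
 *      rcu_read_lock();
 *      list_for_each_entry_rcu(p, &foo_head, list) {
 *              if (p->key == key)
 *                      do_something(p);
 *      }
 *      rcu_read_unlock();
 *
 * If some callers traverse while holding foo_lock rather than
 * rcu_read_lock(), pass a lockdep expression to avoid false positives:
 *
 *      list_for_each_entry_rcu(p, &foo_head, list,
 *                              lockdep_is_held(&foo_lock))
 */
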
/**
 * list_for_each_entry_srcu - iterate over rcu list of given type
 * @pos: the type * to use as a loop cursor.
 * @head: the head for your list.
 * @member: the name of the list_head within the struct.
 * @cond: lockdep expression for the lock required to traverse the list.
 *
 * This list-traversal primitive may safely run concurrently with
 * the _rcu list-mutation primitives such as list_add_rcu()
 * as long as the traversal is guarded by srcu_read_lock().
 * The lockdep expression srcu_read_lock_held() can be passed as the
 * cond argument from read side.
 */
#define list_for_each_entry_srcu(pos, head, member, cond)               \
        for (__list_check_srcu(cond),                                   \
             pos = list_entry_rcu((head)->next, typeof(*pos), member);  \
             &pos->member != (head);                                    \
             pos = list_entry_rcu(pos->member.next, typeof(*pos), member))

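/*
 * Illustrative sketch, not part of this header: an SRCU-protected
 * traversal.  The srcu_struct name (foo_srcu) is made up; note that the
 * cond argument is mandatory for this iterator.
 *
 *      DEFINE_SRCU(foo_srcu);
 *
 *      struct foo *p;
 *      int idx;
 *
 *      idx = srcu_read_lock(&foo_srcu);
 *      list_for_each_entry_srcu(p, &foo_head, list,
 *                               srcu_read_lock_held(&foo_srcu))
 *              process(p);
 *      srcu_read_unlock(&foo_srcu, idx);
 */
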
/**
 * list_entry_lockless - get the struct for this entry
 * @ptr: the &struct list_head pointer.
 * @type: the type of the struct this is embedded in.
 * @member: the name of the list_head within the struct.
 *
 * This primitive may safely run concurrently with the _rcu
 * list-mutation primitives such as list_add_rcu(), but requires some
 * implicit RCU read-side guarding.  One example is running within a special
 * exception-time environment where preemption is disabled and where lockdep
 * cannot be invoked.  Another example is when items are added to the list,
 * but never deleted.
 */
#define list_entry_lockless(ptr, type, member) \
        container_of((typeof(ptr))READ_ONCE(ptr), type, member)

/**
 * list_for_each_entry_lockless - iterate over rcu list of given type
 * @pos: the type * to use as a loop cursor.
 * @head: the head for your list.
 * @member: the name of the list_head within the struct.
 *
 * This primitive may safely run concurrently with the _rcu
 * list-mutation primitives such as list_add_rcu(), but requires some
 * implicit RCU read-side guarding.  One example is running within a special
 * exception-time environment where preemption is disabled and where lockdep
 * cannot be invoked.  Another example is when items are added to the list,
 * but never deleted.
 */
#define list_for_each_entry_lockless(pos, head, member) \
        for (pos = list_entry_lockless((head)->next, typeof(*pos), member); \
             &pos->member != (head); \
             pos = list_entry_lockless(pos->member.next, typeof(*pos), member))

/**
 * list_for_each_entry_continue_rcu - continue iteration over list of given type
 * @pos: the type * to use as a loop cursor.
 * @head: the head for your list.
 * @member: the name of the list_head within the struct.
 *
 * Continue to iterate over list of given type, continuing after
 * the current position which must have been in the list when the RCU read
 * lock was taken.
 * This would typically require either that you obtained the node from a
 * previous walk of the list in the same RCU read-side critical section, or
 * that you held some sort of non-RCU reference (such as a reference count)
 * to keep the node alive *and* in the list.
 *
 * This iterator is similar to list_for_each_entry_from_rcu() except
 * this starts after the given position and that one starts at the given
 * position.
 */
#define list_for_each_entry_continue_rcu(pos, head, member)             \
        for (pos = list_entry_rcu(pos->member.next, typeof(*pos), member); \
             &pos->member != (head);                                    \
             pos = list_entry_rcu(pos->member.next, typeof(*pos), member))

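/*
 * Illustrative sketch, not part of this header: resuming a traversal
 * after an element found earlier in the same RCU read-side critical
 * section.  The find_anchor_entry() helper and struct foo names are
 * made up for this example.
 *
 *      rcu_read_lock();
 *      p = find_anchor_entry();        // returns an entry still on foo_head
 *      list_for_each_entry_continue_rcu(p, &foo_head, list)
 *              process(p);             // visits entries after the anchor
 *      rcu_read_unlock();
 */
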
/**
 * list_for_each_entry_from_rcu - iterate over a list from current point
 * @pos: the type * to use as a loop cursor.
 * @head: the head for your list.
 * @member: the name of the list_node within the struct.
 *
 * Iterate over the tail of a list starting from a given position,
 * which must have been in the list when the RCU read lock was taken.
 * This would typically require either that you obtained the node from a
 * previous walk of the list in the same RCU read-side critical section, or
 * that you held some sort of non-RCU reference (such as a reference count)
 * to keep the node alive *and* in the list.
 *
 * This iterator is similar to list_for_each_entry_continue_rcu() except
 * this starts from the given position and that one starts from the position
 * after the given position.
 */
#define list_for_each_entry_from_rcu(pos, head, member)                 \
        for (; &(pos)->member != (head);                                \
               pos = list_entry_rcu(pos->member.next, typeof(*(pos)), member))

/**
 * hlist_del_rcu - deletes entry from hash list without re-initialization
 * @n: the element to delete from the hash list.
 *
 * Note: hlist_unhashed() on entry does not return true after this; the
 * entry is in an undefined state.  It is useful for RCU-based
 * lockfree traversal.
 *
 * In particular, it means that we can not poison the forward
 * pointers that may still be used for walking the hash list.
 *
 * The caller must take whatever precautions are necessary
 * (such as holding appropriate locks) to avoid racing
 * with another list-mutation primitive, such as hlist_add_head_rcu()
 * or hlist_del_rcu(), running on this same list.
 * However, it is perfectly legal to run concurrently with
 * the _rcu list-traversal primitives, such as
 * hlist_for_each_entry().
 */
static inline void hlist_del_rcu(struct hlist_node *n)
{
        __hlist_del(n);
        WRITE_ONCE(n->pprev, LIST_POISON2);
}

/**
 * hlist_replace_rcu - replace old entry by new one
 * @old : the element to be replaced
 * @new : the new element to insert
 *
 * The @old entry will be replaced with the @new entry atomically from
 * the perspective of concurrent readers.  It is the caller's responsibility
 * to synchronize with concurrent updaters, if any.
 */
static inline void hlist_replace_rcu(struct hlist_node *old,
                                     struct hlist_node *new)
{
        struct hlist_node *next = old->next;

        new->next = next;
        WRITE_ONCE(new->pprev, old->pprev);
        rcu_assign_pointer(*(struct hlist_node __rcu **)new->pprev, new);
        if (next)
                WRITE_ONCE(new->next->pprev, &new->next);
        WRITE_ONCE(old->pprev, LIST_POISON2);
}

/**
 * hlists_swap_heads_rcu - swap the lists the hlist heads point to
 * @left:  The hlist head on the left
 * @right: The hlist head on the right
 *
 * The lists start out as [@left  ][node1 ... ] and
 *                        [@right ][node2 ... ]
 * The lists end up as    [@left  ][node2 ... ]
 *                        [@right ][node1 ... ]
 */
static inline void hlists_swap_heads_rcu(struct hlist_head *left, struct hlist_head *right)
{
        struct hlist_node *node1 = left->first;
        struct hlist_node *node2 = right->first;

        rcu_assign_pointer(left->first, node2);
        rcu_assign_pointer(right->first, node1);
        WRITE_ONCE(node2->pprev, &left->first);
        WRITE_ONCE(node1->pprev, &right->first);
}

/*
 * return the first or the next element in an RCU protected hlist
 */
#define hlist_first_rcu(head)   (*((struct hlist_node __rcu **)(&(head)->first)))
#define hlist_next_rcu(node)    (*((struct hlist_node __rcu **)(&(node)->next)))
#define hlist_pprev_rcu(node)   (*((struct hlist_node __rcu **)((node)->pprev)))

/**
 * hlist_add_head_rcu
 * @n: the element to add to the hash list.
 * @h: the list to add to.
 *
 * Description:
 * Adds the specified element to the specified hlist,
 * while permitting racing traversals.
 *
 * The caller must take whatever precautions are necessary
 * (such as holding appropriate locks) to avoid racing
 * with another list-mutation primitive, such as hlist_add_head_rcu()
 * or hlist_del_rcu(), running on this same list.
 * However, it is perfectly legal to run concurrently with
 * the _rcu list-traversal primitives, such as
 * hlist_for_each_entry_rcu(), used to prevent memory-consistency
 * problems on Alpha CPUs.  Regardless of the type of CPU, the
 * list-traversal primitive must be guarded by rcu_read_lock().
 */
static inline void hlist_add_head_rcu(struct hlist_node *n,
                                        struct hlist_head *h)
{
        struct hlist_node *first = h->first;

        n->next = first;
        WRITE_ONCE(n->pprev, &h->first);
        rcu_assign_pointer(hlist_first_rcu(h), n);
        if (first)
                WRITE_ONCE(first->pprev, &n->next);
}

/**
 * hlist_add_tail_rcu
 * @n: the element to add to the hash list.
 * @h: the list to add to.
 *
 * Description:
 * Adds the specified element to the specified hlist,
 * while permitting racing traversals.
 *
 * The caller must take whatever precautions are necessary
 * (such as holding appropriate locks) to avoid racing
 * with another list-mutation primitive, such as hlist_add_head_rcu()
 * or hlist_del_rcu(), running on this same list.
 * However, it is perfectly legal to run concurrently with
 * the _rcu list-traversal primitives, such as
 * hlist_for_each_entry_rcu(), used to prevent memory-consistency
 * problems on Alpha CPUs.  Regardless of the type of CPU, the
 * list-traversal primitive must be guarded by rcu_read_lock().
 */
static inline void hlist_add_tail_rcu(struct hlist_node *n,
                                        struct hlist_head *h)
{
        struct hlist_node *i, *last = NULL;

        /* Note: write side code, so rcu accessors are not needed. */
        for (i = h->first; i; i = i->next)
                last = i;

        if (last) {
                n->next = last->next;
                WRITE_ONCE(n->pprev, &last->next);
                rcu_assign_pointer(hlist_next_rcu(last), n);
        } else {
                hlist_add_head_rcu(n, h);
        }
}

/**
 * hlist_add_before_rcu
 * @n: the new element to add to the hash list.
 * @next: the existing element to add the new element before.
 *
 * Description:
 * Adds the specified element to the specified hlist
 * before the specified node while permitting racing traversals.
 *
 * The caller must take whatever precautions are necessary
 * (such as holding appropriate locks) to avoid racing
 * with another list-mutation primitive, such as hlist_add_head_rcu()
 * or hlist_del_rcu(), running on this same list.
 * However, it is perfectly legal to run concurrently with
 * the _rcu list-traversal primitives, such as
 * hlist_for_each_entry_rcu(), used to prevent memory-consistency
 * problems on Alpha CPUs.
 */
static inline void hlist_add_before_rcu(struct hlist_node *n,
                                        struct hlist_node *next)
{
        WRITE_ONCE(n->pprev, next->pprev);
        n->next = next;
        rcu_assign_pointer(hlist_pprev_rcu(n), n);
        WRITE_ONCE(next->pprev, &n->next);
}

/**
 * hlist_add_behind_rcu
 * @n: the new element to add to the hash list.
 * @prev: the existing element to add the new element after.
 *
 * Description:
 * Adds the specified element to the specified hlist
 * after the specified node while permitting racing traversals.
 *
 * The caller must take whatever precautions are necessary
 * (such as holding appropriate locks) to avoid racing
 * with another list-mutation primitive, such as hlist_add_head_rcu()
 * or hlist_del_rcu(), running on this same list.
 * However, it is perfectly legal to run concurrently with
 * the _rcu list-traversal primitives, such as
 * hlist_for_each_entry_rcu(), used to prevent memory-consistency
 * problems on Alpha CPUs.
 */
static inline void hlist_add_behind_rcu(struct hlist_node *n,
                                        struct hlist_node *prev)
{
        n->next = prev->next;
        WRITE_ONCE(n->pprev, &prev->next);
        rcu_assign_pointer(hlist_next_rcu(prev), n);
        if (n->next)
                WRITE_ONCE(n->next->pprev, &n->next);
}

#define __hlist_for_each_rcu(pos, head)                         \
        for (pos = rcu_dereference(hlist_first_rcu(head));      \
             pos;                                               \
             pos = rcu_dereference(hlist_next_rcu(pos)))

/**
 * hlist_for_each_entry_rcu - iterate over rcu list of given type
 * @pos: the type * to use as a loop cursor.
 * @head: the head for your list.
 * @member: the name of the hlist_node within the struct.
 * @cond: optional lockdep expression if called from non-RCU protection.
 *
 * This list-traversal primitive may safely run concurrently with
 * the _rcu list-mutation primitives such as hlist_add_head_rcu()
 * as long as the traversal is guarded by rcu_read_lock().
 */
#define hlist_for_each_entry_rcu(pos, head, member, cond...)            \
        for (__list_check_rcu(dummy, ## cond, 0),                       \
             pos = hlist_entry_safe(rcu_dereference_raw(hlist_first_rcu(head)),\
                        typeof(*(pos)), member);                        \
             pos;                                                       \
             pos = hlist_entry_safe(rcu_dereference_raw(hlist_next_rcu( \
                        &(pos)->member)), typeof(*(pos)), member))

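/*
 * Illustrative sketch, not part of this header: a small RCU-protected
 * hash table combining hlist_add_head_rcu() on the update side with
 * hlist_for_each_entry_rcu() on the read side.  All names here (struct
 * bar, bar_table, bar_lock, BAR_BITS) are made up for this example;
 * hash_32() is from <linux/hash.h>.
 *
 *      #define BAR_BITS 6
 *      static struct hlist_head bar_table[1 << BAR_BITS];
 *      static DEFINE_SPINLOCK(bar_lock);
 *
 *      struct bar {
 *              u32 key;
 *              struct hlist_node node;
 *      };
 *
 *      void bar_insert(struct bar *b)
 *      {
 *              spin_lock(&bar_lock);
 *              hlist_add_head_rcu(&b->node,
 *                                 &bar_table[hash_32(b->key, BAR_BITS)]);
 *              spin_unlock(&bar_lock);
 *      }
 *
 *      // Caller must hold rcu_read_lock(); the result may only be used
 *      // within that same read-side critical section.
 *      struct bar *bar_lookup(u32 key)
 *      {
 *              struct bar *b;
 *
 *              hlist_for_each_entry_rcu(b, &bar_table[hash_32(key, BAR_BITS)],
 *                                       node) {
 *                      if (b->key == key)
 *                              return b;
 *              }
 *              return NULL;
 *      }
 */
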
/**
 * hlist_for_each_entry_srcu - iterate over rcu list of given type
 * @pos: the type * to use as a loop cursor.
 * @head: the head for your list.
 * @member: the name of the hlist_node within the struct.
 * @cond: lockdep expression for the lock required to traverse the list.
 *
 * This list-traversal primitive may safely run concurrently with
 * the _rcu list-mutation primitives such as hlist_add_head_rcu()
 * as long as the traversal is guarded by srcu_read_lock().
 * The lockdep expression srcu_read_lock_held() can be passed as the
 * cond argument from read side.
 */
#define hlist_for_each_entry_srcu(pos, head, member, cond)              \
        for (__list_check_srcu(cond),                                   \
             pos = hlist_entry_safe(rcu_dereference_raw(hlist_first_rcu(head)),\
                        typeof(*(pos)), member);                        \
             pos;                                                       \
             pos = hlist_entry_safe(rcu_dereference_raw(hlist_next_rcu( \
                        &(pos)->member)), typeof(*(pos)), member))

/**
 * hlist_for_each_entry_rcu_notrace - iterate over rcu list of given type (for tracing)
 * @pos: the type * to use as a loop cursor.
 * @head: the head for your list.
 * @member: the name of the hlist_node within the struct.
 *
 * This list-traversal primitive may safely run concurrently with
 * the _rcu list-mutation primitives such as hlist_add_head_rcu()
 * as long as the traversal is guarded by rcu_read_lock().
 *
 * This is the same as hlist_for_each_entry_rcu() except that it does
 * not do any RCU debugging or tracing.
 */
#define hlist_for_each_entry_rcu_notrace(pos, head, member)             \
        for (pos = hlist_entry_safe(rcu_dereference_raw_check(hlist_first_rcu(head)),\
                        typeof(*(pos)), member);                        \
             pos;                                                       \
             pos = hlist_entry_safe(rcu_dereference_raw_check(hlist_next_rcu(\
                        &(pos)->member)), typeof(*(pos)), member))

/**
 * hlist_for_each_entry_rcu_bh - iterate over rcu list of given type
 * @pos: the type * to use as a loop cursor.
 * @head: the head for your list.
 * @member: the name of the hlist_node within the struct.
 *
 * This list-traversal primitive may safely run concurrently with
 * the _rcu list-mutation primitives such as hlist_add_head_rcu()
 * as long as the traversal is guarded by rcu_read_lock().
 */
#define hlist_for_each_entry_rcu_bh(pos, head, member)                  \
        for (pos = hlist_entry_safe(rcu_dereference_bh(hlist_first_rcu(head)),\
                        typeof(*(pos)), member);                        \
             pos;                                                       \
             pos = hlist_entry_safe(rcu_dereference_bh(hlist_next_rcu(  \
                        &(pos)->member)), typeof(*(pos)), member))

/**
 * hlist_for_each_entry_continue_rcu - iterate over a hlist continuing after current point
 * @pos: the type * to use as a loop cursor.
 * @member: the name of the hlist_node within the struct.
 */
#define hlist_for_each_entry_continue_rcu(pos, member)                  \
        for (pos = hlist_entry_safe(rcu_dereference_raw(hlist_next_rcu( \
                        &(pos)->member)), typeof(*(pos)), member);      \
             pos;                                                       \
             pos = hlist_entry_safe(rcu_dereference_raw(hlist_next_rcu( \
                        &(pos)->member)), typeof(*(pos)), member))

/**
 * hlist_for_each_entry_continue_rcu_bh - iterate over a hlist continuing after current point
 * @pos: the type * to use as a loop cursor.
 * @member: the name of the hlist_node within the struct.
 */
#define hlist_for_each_entry_continue_rcu_bh(pos, member)               \
        for (pos = hlist_entry_safe(rcu_dereference_bh(hlist_next_rcu(  \
                        &(pos)->member)), typeof(*(pos)), member);      \
             pos;                                                       \
             pos = hlist_entry_safe(rcu_dereference_bh(hlist_next_rcu(  \
                        &(pos)->member)), typeof(*(pos)), member))

/**
 * hlist_for_each_entry_from_rcu - iterate over a hlist continuing from current point
 * @pos: the type * to use as a loop cursor.
 * @member: the name of the hlist_node within the struct.
 */
#define hlist_for_each_entry_from_rcu(pos, member)                      \
        for (; pos;                                                     \
             pos = hlist_entry_safe(rcu_dereference_raw(hlist_next_rcu( \
                        &(pos)->member)), typeof(*(pos)), member))

#endif  /* __KERNEL__ */
#endif