| 1 | /* SPDX-License-Identifier: GPL-2.0 */ |
| 2 | #ifndef _LINUX_RCULIST_H |
| 3 | #define _LINUX_RCULIST_H |
| 4 | |
| 5 | #ifdef __KERNEL__ |
| 6 | |
| 7 | /* |
 * RCU-protected versions of the list primitives
| 9 | */ |
| 10 | #include <linux/list.h> |
| 11 | #include <linux/rcupdate.h> |
| 12 | |
| 13 | /* |
| 14 | * INIT_LIST_HEAD_RCU - Initialize a list_head visible to RCU readers |
| 15 | * @list: list to be initialized |
| 16 | * |
 * Use INIT_LIST_HEAD() instead for normal initialization and cleanup
 * tasks, when readers have no access to the list being initialized.
| 19 | * However, if the list being initialized is visible to readers, you |
| 20 | * need to keep the compiler from being too mischievous. |
| 21 | */ |
| 22 | static inline void INIT_LIST_HEAD_RCU(struct list_head *list) |
| 23 | { |
| 24 | WRITE_ONCE(list->next, list); |
| 25 | WRITE_ONCE(list->prev, list); |
| 26 | } |
| 27 | |
/*
 * Return the ->next pointer of a list_head in an RCU-safe way. Don't
 * access it directly.
 */
| 32 | #define list_next_rcu(list) (*((struct list_head __rcu **)(&(list)->next))) |
| 33 | /* |
| 34 | * Return the ->prev pointer of a list_head in an rcu safe way. Don't |
| 35 | * access it directly. |
| 36 | * |
| 37 | * Any list traversed with list_bidir_prev_rcu() must never use |
| 38 | * list_del_rcu(). Doing so will poison the ->prev pointer that |
| 39 | * list_bidir_prev_rcu() relies on, which will result in segfaults. |
| 40 | * To prevent these segfaults, use list_bidir_del_rcu() instead |
| 41 | * of list_del_rcu(). |
| 42 | */ |
| 43 | #define list_bidir_prev_rcu(list) (*((struct list_head __rcu **)(&(list)->prev))) |
| 44 | |
| 45 | /** |
| 46 | * list_tail_rcu - returns the prev pointer of the head of the list |
| 47 | * @head: the head of the list |
| 48 | * |
| 49 | * Note: This should only be used with the list header, and even then |
| 50 | * only if list_del() and similar primitives are not also used on the |
| 51 | * list header. |
| 52 | */ |
| 53 | #define list_tail_rcu(head) (*((struct list_head __rcu **)(&(head)->prev))) |
| 54 | |
| 55 | /* |
| 56 | * Check during list traversal that we are within an RCU reader |
| 57 | */ |
| 58 | |
| 59 | #define check_arg_count_one(dummy) |
| 60 | |
| 61 | #ifdef CONFIG_PROVE_RCU_LIST |
| 62 | #define __list_check_rcu(dummy, cond, extra...) \ |
| 63 | ({ \ |
| 64 | check_arg_count_one(extra); \ |
| 65 | RCU_LOCKDEP_WARN(!(cond) && !rcu_read_lock_any_held(), \ |
| 66 | "RCU-list traversed in non-reader section!"); \ |
| 67 | }) |
| 68 | |
| 69 | #define __list_check_srcu(cond) \ |
| 70 | ({ \ |
| 71 | RCU_LOCKDEP_WARN(!(cond), \ |
| 72 | "RCU-list traversed without holding the required lock!");\ |
| 73 | }) |
| 74 | #else |
| 75 | #define __list_check_rcu(dummy, cond, extra...) \ |
| 76 | ({ check_arg_count_one(extra); }) |
| 77 | |
| 78 | #define __list_check_srcu(cond) ({ }) |
| 79 | #endif |
| 80 | |
| 81 | /* |
| 82 | * Insert a new entry between two known consecutive entries. |
| 83 | * |
| 84 | * This is only for internal list manipulation where we know |
| 85 | * the prev/next entries already! |
| 86 | */ |
| 87 | static inline void __list_add_rcu(struct list_head *new, |
| 88 | struct list_head *prev, struct list_head *next) |
| 89 | { |
| 90 | if (!__list_add_valid(new, prev, next)) |
| 91 | return; |
| 92 | |
| 93 | new->next = next; |
| 94 | new->prev = prev; |
| 95 | rcu_assign_pointer(list_next_rcu(prev), new); |
| 96 | next->prev = new; |
| 97 | } |
| 98 | |
| 99 | /** |
| 100 | * list_add_rcu - add a new entry to rcu-protected list |
| 101 | * @new: new entry to be added |
| 102 | * @head: list head to add it after |
| 103 | * |
| 104 | * Insert a new entry after the specified head. |
| 105 | * This is good for implementing stacks. |
| 106 | * |
| 107 | * The caller must take whatever precautions are necessary |
| 108 | * (such as holding appropriate locks) to avoid racing |
| 109 | * with another list-mutation primitive, such as list_add_rcu() |
| 110 | * or list_del_rcu(), running on this same list. |
| 111 | * However, it is perfectly legal to run concurrently with |
| 112 | * the _rcu list-traversal primitives, such as |
| 113 | * list_for_each_entry_rcu(). |
| 114 | */ |
| 115 | static inline void list_add_rcu(struct list_head *new, struct list_head *head) |
| 116 | { |
| 117 | __list_add_rcu(new, head, head->next); |
| 118 | } |
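
/*
 * Example (illustrative sketch, not part of this API): a writer adding
 * an element under a lock while readers traverse concurrently.  The
 * names "struct foo", "foo_list", "foo_lock", and "add_foo" are
 * hypothetical:
 *
 *	struct foo {
 *		int key;
 *		struct list_head list;
 *		struct rcu_head rcu;
 *	};
 *	static LIST_HEAD(foo_list);
 *	static DEFINE_SPINLOCK(foo_lock);
 *
 *	static void add_foo(struct foo *p)
 *	{
 *		spin_lock(&foo_lock);
 *		list_add_rcu(&p->list, &foo_list);
 *		spin_unlock(&foo_lock);
 *	}
 */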
| 119 | |
| 120 | /** |
| 121 | * list_add_tail_rcu - add a new entry to rcu-protected list |
| 122 | * @new: new entry to be added |
| 123 | * @head: list head to add it before |
| 124 | * |
| 125 | * Insert a new entry before the specified head. |
| 126 | * This is useful for implementing queues. |
| 127 | * |
| 128 | * The caller must take whatever precautions are necessary |
| 129 | * (such as holding appropriate locks) to avoid racing |
| 130 | * with another list-mutation primitive, such as list_add_tail_rcu() |
| 131 | * or list_del_rcu(), running on this same list. |
| 132 | * However, it is perfectly legal to run concurrently with |
| 133 | * the _rcu list-traversal primitives, such as |
| 134 | * list_for_each_entry_rcu(). |
| 135 | */ |
| 136 | static inline void list_add_tail_rcu(struct list_head *new, |
| 137 | struct list_head *head) |
| 138 | { |
| 139 | __list_add_rcu(new, head->prev, head); |
| 140 | } |
| 141 | |
| 142 | /** |
| 143 | * list_del_rcu - deletes entry from list without re-initialization |
| 144 | * @entry: the element to delete from the list. |
| 145 | * |
 * Note: list_empty() on entry does not return true after this;
 * the entry is in an undefined state. This is useful for RCU-based
 * lockfree traversal.
 *
 * In particular, it means that we cannot poison the forward
 * pointers that may still be used for walking the list.
| 152 | * |
| 153 | * The caller must take whatever precautions are necessary |
| 154 | * (such as holding appropriate locks) to avoid racing |
| 155 | * with another list-mutation primitive, such as list_del_rcu() |
| 156 | * or list_add_rcu(), running on this same list. |
| 157 | * However, it is perfectly legal to run concurrently with |
| 158 | * the _rcu list-traversal primitives, such as |
| 159 | * list_for_each_entry_rcu(). |
| 160 | * |
| 161 | * Note that the caller is not permitted to immediately free |
| 162 | * the newly deleted entry. Instead, either synchronize_rcu() |
| 163 | * or call_rcu() must be used to defer freeing until an RCU |
| 164 | * grace period has elapsed. |
| 165 | */ |
| 166 | static inline void list_del_rcu(struct list_head *entry) |
| 167 | { |
| 168 | __list_del_entry(entry); |
| 169 | entry->prev = LIST_POISON2; |
| 170 | } |
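
/*
 * Example (illustrative sketch): removal paired with deferred freeing,
 * reusing the hypothetical "struct foo" from the sketch above.  Its
 * embedded rcu_head named "rcu" lets kfree_rcu() wait for a grace
 * period before the memory is reused:
 *
 *	static void del_foo(struct foo *p)
 *	{
 *		spin_lock(&foo_lock);
 *		list_del_rcu(&p->list);
 *		spin_unlock(&foo_lock);
 *		kfree_rcu(p, rcu);
 *	}
 */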
| 171 | |
| 172 | /** |
| 173 | * list_bidir_del_rcu - deletes entry from list without re-initialization |
| 174 | * @entry: the element to delete from the list. |
| 175 | * |
 * In contrast to list_del_rcu(), this function does not poison the prev
 * pointer, thus allowing backwards traversal via list_bidir_prev_rcu().
| 178 | * |
 * Note: list_empty() on entry does not return true after this because
 * the entry is in a special undefined state that permits RCU-based
 * lockfree reverse traversal. In particular, this means that we cannot
 * poison the forward and backward pointers that may still be used for
 * walking the list.
| 184 | * |
| 185 | * The caller must take whatever precautions are necessary (such as |
| 186 | * holding appropriate locks) to avoid racing with another list-mutation |
| 187 | * primitive, such as list_bidir_del_rcu() or list_add_rcu(), running on |
| 188 | * this same list. However, it is perfectly legal to run concurrently |
| 189 | * with the _rcu list-traversal primitives, such as |
| 190 | * list_for_each_entry_rcu(). |
| 191 | * |
| 192 | * Note that list_del_rcu() and list_bidir_del_rcu() must not be used on |
| 193 | * the same list. |
| 194 | * |
| 195 | * Note that the caller is not permitted to immediately free |
| 196 | * the newly deleted entry. Instead, either synchronize_rcu() |
| 197 | * or call_rcu() must be used to defer freeing until an RCU |
| 198 | * grace period has elapsed. |
| 199 | */ |
| 200 | static inline void list_bidir_del_rcu(struct list_head *entry) |
| 201 | { |
| 202 | __list_del_entry(entry); |
| 203 | } |
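
/*
 * Example (illustrative sketch): reverse traversal of a list whose
 * deletions all use list_bidir_del_rcu().  "foo_list" and "struct foo"
 * are the hypothetical names from the sketches above, and process() is
 * a placeholder for the reader's work:
 *
 *	struct list_head *p;
 *
 *	rcu_read_lock();
 *	for (p = rcu_dereference(list_bidir_prev_rcu(&foo_list));
 *	     p != &foo_list;
 *	     p = rcu_dereference(list_bidir_prev_rcu(p)))
 *		process(list_entry(p, struct foo, list));
 *	rcu_read_unlock();
 */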
| 204 | |
| 205 | /** |
| 206 | * hlist_del_init_rcu - deletes entry from hash list with re-initialization |
| 207 | * @n: the element to delete from the hash list. |
| 208 | * |
 * Note: hlist_unhashed() on the node returns true after this. This is
 * useful for RCU-based read-side lockfree traversal if the writer side
 * must know whether the list entry is still hashed or already unhashed.
 *
 * In particular, it means that we cannot poison the forward pointers
 * that may still be used for walking the hash list and that we can only
 * zero the pprev pointer so hlist_unhashed() will return true after
 * this.
| 217 | * |
| 218 | * The caller must take whatever precautions are necessary (such as |
| 219 | * holding appropriate locks) to avoid racing with another |
| 220 | * list-mutation primitive, such as hlist_add_head_rcu() or |
| 221 | * hlist_del_rcu(), running on this same list. However, it is |
| 222 | * perfectly legal to run concurrently with the _rcu list-traversal |
| 223 | * primitives, such as hlist_for_each_entry_rcu(). |
| 224 | */ |
| 225 | static inline void hlist_del_init_rcu(struct hlist_node *n) |
| 226 | { |
| 227 | if (!hlist_unhashed(n)) { |
| 228 | __hlist_del(n); |
| 229 | WRITE_ONCE(n->pprev, NULL); |
| 230 | } |
| 231 | } |
| 232 | |
| 233 | /** |
| 234 | * list_replace_rcu - replace old entry by new one |
| 235 | * @old : the element to be replaced |
| 236 | * @new : the new element to insert |
| 237 | * |
| 238 | * The @old entry will be replaced with the @new entry atomically from |
| 239 | * the perspective of concurrent readers. It is the caller's responsibility |
| 240 | * to synchronize with concurrent updaters, if any. |
| 241 | * |
| 242 | * Note: @old should not be empty. |
| 243 | */ |
| 244 | static inline void list_replace_rcu(struct list_head *old, |
| 245 | struct list_head *new) |
| 246 | { |
| 247 | new->next = old->next; |
| 248 | new->prev = old->prev; |
| 249 | rcu_assign_pointer(list_next_rcu(new->prev), new); |
| 250 | new->next->prev = new; |
| 251 | old->prev = LIST_POISON2; |
| 252 | } |
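
/*
 * Example (illustrative sketch): substituting one hypothetical
 * "struct foo" for another atomically from the readers' perspective,
 * then freeing the old one after a grace period:
 *
 *	static void replace_foo(struct foo *old_p, struct foo *new_p)
 *	{
 *		spin_lock(&foo_lock);
 *		list_replace_rcu(&old_p->list, &new_p->list);
 *		spin_unlock(&foo_lock);
 *		kfree_rcu(old_p, rcu);
 *	}
 */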
| 253 | |
| 254 | /** |
| 255 | * __list_splice_init_rcu - join an RCU-protected list into an existing list. |
| 256 | * @list: the RCU-protected list to splice |
| 257 | * @prev: points to the last element of the existing list |
| 258 | * @next: points to the first element of the existing list |
| 259 | * @sync: synchronize_rcu, synchronize_rcu_expedited, ... |
| 260 | * |
| 261 | * The list pointed to by @prev and @next can be RCU-read traversed |
| 262 | * concurrently with this function. |
| 263 | * |
| 264 | * Note that this function blocks. |
| 265 | * |
| 266 | * Important note: the caller must take whatever action is necessary to prevent |
| 267 | * any other updates to the existing list. In principle, it is possible to |
| 268 | * modify the list as soon as sync() begins execution. If this sort of thing |
| 269 | * becomes necessary, an alternative version based on call_rcu() could be |
| 270 | * created. But only if -really- needed -- there is no shortage of RCU API |
| 271 | * members. |
| 272 | */ |
| 273 | static inline void __list_splice_init_rcu(struct list_head *list, |
| 274 | struct list_head *prev, |
| 275 | struct list_head *next, |
| 276 | void (*sync)(void)) |
| 277 | { |
| 278 | struct list_head *first = list->next; |
| 279 | struct list_head *last = list->prev; |
| 280 | |
	/*
	 * "first" and "last" now track the spliced-out entries, so the
	 * source list can be reinitialized. RCU readers still have
	 * access to this list, so we must use INIT_LIST_HEAD_RCU()
	 * instead of INIT_LIST_HEAD().
	 */
| 286 | |
| 287 | INIT_LIST_HEAD_RCU(list); |
| 288 | |
| 289 | /* |
| 290 | * At this point, the list body still points to the source list. |
| 291 | * Wait for any readers to finish using the list before splicing |
| 292 | * the list body into the new list. Any new readers will see |
| 293 | * an empty list. |
| 294 | */ |
| 295 | |
| 296 | sync(); |
| 297 | ASSERT_EXCLUSIVE_ACCESS(*first); |
| 298 | ASSERT_EXCLUSIVE_ACCESS(*last); |
| 299 | |
| 300 | /* |
| 301 | * Readers are finished with the source list, so perform splice. |
| 302 | * The order is important if the new list is global and accessible |
| 303 | * to concurrent RCU readers. Note that RCU readers are not |
| 304 | * permitted to traverse the prev pointers without excluding |
| 305 | * this function. |
| 306 | */ |
| 307 | |
| 308 | last->next = next; |
| 309 | rcu_assign_pointer(list_next_rcu(prev), first); |
| 310 | first->prev = prev; |
| 311 | next->prev = last; |
| 312 | } |
| 313 | |
| 314 | /** |
| 315 | * list_splice_init_rcu - splice an RCU-protected list into an existing list, |
| 316 | * designed for stacks. |
| 317 | * @list: the RCU-protected list to splice |
| 318 | * @head: the place in the existing list to splice the first list into |
| 319 | * @sync: synchronize_rcu, synchronize_rcu_expedited, ... |
| 320 | */ |
| 321 | static inline void list_splice_init_rcu(struct list_head *list, |
| 322 | struct list_head *head, |
| 323 | void (*sync)(void)) |
| 324 | { |
| 325 | if (!list_empty(list)) |
| 326 | __list_splice_init_rcu(list, head, head->next, sync); |
| 327 | } |
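
/*
 * Example (illustrative sketch): because the sync() callback blocks,
 * the splice must run in sleepable context, so a mutex rather than a
 * spinlock excludes other updaters here.  "src_list", "dst_list", and
 * "foo_mutex" are hypothetical:
 *
 *	static DEFINE_MUTEX(foo_mutex);
 *
 *	mutex_lock(&foo_mutex);
 *	list_splice_init_rcu(&src_list, &dst_list, synchronize_rcu);
 *	mutex_unlock(&foo_mutex);
 */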
| 328 | |
| 329 | /** |
| 330 | * list_splice_tail_init_rcu - splice an RCU-protected list into an existing |
| 331 | * list, designed for queues. |
| 332 | * @list: the RCU-protected list to splice |
| 333 | * @head: the place in the existing list to splice the first list into |
| 334 | * @sync: synchronize_rcu, synchronize_rcu_expedited, ... |
| 335 | */ |
| 336 | static inline void list_splice_tail_init_rcu(struct list_head *list, |
| 337 | struct list_head *head, |
| 338 | void (*sync)(void)) |
| 339 | { |
| 340 | if (!list_empty(list)) |
| 341 | __list_splice_init_rcu(list, head->prev, head, sync); |
| 342 | } |
| 343 | |
| 344 | /** |
| 345 | * list_entry_rcu - get the struct for this entry |
| 346 | * @ptr: the &struct list_head pointer. |
| 347 | * @type: the type of the struct this is embedded in. |
| 348 | * @member: the name of the list_head within the struct. |
| 349 | * |
| 350 | * This primitive may safely run concurrently with the _rcu list-mutation |
| 351 | * primitives such as list_add_rcu() as long as it's guarded by rcu_read_lock(). |
| 352 | */ |
| 353 | #define list_entry_rcu(ptr, type, member) \ |
| 354 | container_of(READ_ONCE(ptr), type, member) |
| 355 | |
| 356 | /* |
| 357 | * Where are list_empty_rcu() and list_first_entry_rcu()? |
| 358 | * |
| 359 | * They do not exist because they would lead to subtle race conditions: |
| 360 | * |
| 361 | * if (!list_empty_rcu(mylist)) { |
| 362 | * struct foo *bar = list_first_entry_rcu(mylist, struct foo, list_member); |
| 363 | * do_something(bar); |
| 364 | * } |
| 365 | * |
| 366 | * The list might be non-empty when list_empty_rcu() checks it, but it |
| 367 | * might have become empty by the time that list_first_entry_rcu() rereads |
| 368 | * the ->next pointer, which would result in a SEGV. |
| 369 | * |
| 370 | * When not using RCU, it is OK for list_first_entry() to re-read that |
| 371 | * pointer because both functions should be protected by some lock that |
| 372 | * blocks writers. |
| 373 | * |
| 374 | * When using RCU, list_empty() uses READ_ONCE() to fetch the |
| 375 | * RCU-protected ->next pointer and then compares it to the address of the |
| 376 | * list head. However, it neither dereferences this pointer nor provides |
| 377 | * this pointer to its caller. Thus, READ_ONCE() suffices (that is, |
| 378 | * rcu_dereference() is not needed), which means that list_empty() can be |
| 379 | * used anywhere you would want to use list_empty_rcu(). Just don't |
| 380 | * expect anything useful to happen if you do a subsequent lockless |
| 381 | * call to list_first_entry_rcu()!!! |
| 382 | * |
 * See list_first_or_null_rcu() for an alternative.
| 384 | */ |
| 385 | |
| 386 | /** |
| 387 | * list_first_or_null_rcu - get the first element from a list |
| 388 | * @ptr: the list head to take the element from. |
| 389 | * @type: the type of the struct this is embedded in. |
| 390 | * @member: the name of the list_head within the struct. |
| 391 | * |
| 392 | * Note that if the list is empty, it returns NULL. |
| 393 | * |
| 394 | * This primitive may safely run concurrently with the _rcu list-mutation |
| 395 | * primitives such as list_add_rcu() as long as it's guarded by rcu_read_lock(). |
| 396 | */ |
| 397 | #define list_first_or_null_rcu(ptr, type, member) \ |
| 398 | ({ \ |
| 399 | struct list_head *__ptr = (ptr); \ |
| 400 | struct list_head *__next = READ_ONCE(__ptr->next); \ |
| 401 | likely(__ptr != __next) ? list_entry_rcu(__next, type, member) : NULL; \ |
| 402 | }) |
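
/*
 * Example (illustrative sketch): the race-free way to handle a
 * possibly-empty list from the read side, using the hypothetical
 * "foo_list" from the sketches above.  process() is a placeholder for
 * work done while still inside the RCU read-side critical section:
 *
 *	struct foo *p;
 *
 *	rcu_read_lock();
 *	p = list_first_or_null_rcu(&foo_list, struct foo, list);
 *	if (p)
 *		process(p);
 *	rcu_read_unlock();
 */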
| 403 | |
| 404 | /** |
| 405 | * list_next_or_null_rcu - get the next element from a list |
| 406 | * @head: the head for the list. |
| 407 | * @ptr: the list head to take the next element from. |
| 408 | * @type: the type of the struct this is embedded in. |
| 409 | * @member: the name of the list_head within the struct. |
| 410 | * |
 * Note that if @ptr is at the end of the list, NULL is returned.
| 412 | * |
| 413 | * This primitive may safely run concurrently with the _rcu list-mutation |
| 414 | * primitives such as list_add_rcu() as long as it's guarded by rcu_read_lock(). |
| 415 | */ |
| 416 | #define list_next_or_null_rcu(head, ptr, type, member) \ |
| 417 | ({ \ |
| 418 | struct list_head *__head = (head); \ |
| 419 | struct list_head *__ptr = (ptr); \ |
| 420 | struct list_head *__next = READ_ONCE(__ptr->next); \ |
| 421 | likely(__next != __head) ? list_entry_rcu(__next, type, \ |
| 422 | member) : NULL; \ |
| 423 | }) |
| 424 | |
| 425 | /** |
| 426 | * list_for_each_entry_rcu - iterate over rcu list of given type |
| 427 | * @pos: the type * to use as a loop cursor. |
| 428 | * @head: the head for your list. |
| 429 | * @member: the name of the list_head within the struct. |
| 430 | * @cond: optional lockdep expression if called from non-RCU protection. |
| 431 | * |
| 432 | * This list-traversal primitive may safely run concurrently with |
| 433 | * the _rcu list-mutation primitives such as list_add_rcu() |
| 434 | * as long as the traversal is guarded by rcu_read_lock(). |
| 435 | */ |
| 436 | #define list_for_each_entry_rcu(pos, head, member, cond...) \ |
| 437 | for (__list_check_rcu(dummy, ## cond, 0), \ |
| 438 | pos = list_entry_rcu((head)->next, typeof(*pos), member); \ |
| 439 | &pos->member != (head); \ |
| 440 | pos = list_entry_rcu(pos->member.next, typeof(*pos), member)) |
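
/*
 * Example (illustrative sketch): a reader counting the hypothetical
 * "struct foo" entries, followed by an updater traversing under
 * "foo_lock" with the optional cond argument, which keeps
 * CONFIG_PROVE_RCU_LIST quiet outside an RCU read-side section:
 *
 *	struct foo *p;
 *	int n = 0;
 *
 *	rcu_read_lock();
 *	list_for_each_entry_rcu(p, &foo_list, list)
 *		n++;
 *	rcu_read_unlock();
 *
 *	list_for_each_entry_rcu(p, &foo_list, list,
 *				lockdep_is_held(&foo_lock))
 *		p->key = 0;
 */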
| 441 | |
| 442 | /** |
| 443 | * list_for_each_entry_srcu - iterate over rcu list of given type |
| 444 | * @pos: the type * to use as a loop cursor. |
| 445 | * @head: the head for your list. |
| 446 | * @member: the name of the list_head within the struct. |
| 447 | * @cond: lockdep expression for the lock required to traverse the list. |
| 448 | * |
| 449 | * This list-traversal primitive may safely run concurrently with |
| 450 | * the _rcu list-mutation primitives such as list_add_rcu() |
| 451 | * as long as the traversal is guarded by srcu_read_lock(). |
 * The lockdep expression srcu_read_lock_held() can be passed as the
 * cond argument from the read side.
| 454 | */ |
| 455 | #define list_for_each_entry_srcu(pos, head, member, cond) \ |
| 456 | for (__list_check_srcu(cond), \ |
| 457 | pos = list_entry_rcu((head)->next, typeof(*pos), member); \ |
| 458 | &pos->member != (head); \ |
| 459 | pos = list_entry_rcu(pos->member.next, typeof(*pos), member)) |
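
/*
 * Example (illustrative sketch): SRCU-protected traversal of the
 * hypothetical "foo_list", assuming a srcu_struct defined with
 * DEFINE_SRCU() in a file that includes <linux/srcu.h>:
 *
 *	DEFINE_SRCU(foo_srcu);
 *
 *	struct foo *p;
 *	int idx;
 *
 *	idx = srcu_read_lock(&foo_srcu);
 *	list_for_each_entry_srcu(p, &foo_list, list,
 *				 srcu_read_lock_held(&foo_srcu))
 *		process(p);
 *	srcu_read_unlock(&foo_srcu, idx);
 */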
| 460 | |
| 461 | /** |
| 462 | * list_entry_lockless - get the struct for this entry |
| 463 | * @ptr: the &struct list_head pointer. |
| 464 | * @type: the type of the struct this is embedded in. |
| 465 | * @member: the name of the list_head within the struct. |
| 466 | * |
| 467 | * This primitive may safely run concurrently with the _rcu |
| 468 | * list-mutation primitives such as list_add_rcu(), but requires some |
| 469 | * implicit RCU read-side guarding. One example is running within a special |
| 470 | * exception-time environment where preemption is disabled and where lockdep |
| 471 | * cannot be invoked. Another example is when items are added to the list, |
| 472 | * but never deleted. |
| 473 | */ |
| 474 | #define list_entry_lockless(ptr, type, member) \ |
| 475 | container_of((typeof(ptr))READ_ONCE(ptr), type, member) |
| 476 | |
| 477 | /** |
| 478 | * list_for_each_entry_lockless - iterate over rcu list of given type |
| 479 | * @pos: the type * to use as a loop cursor. |
| 480 | * @head: the head for your list. |
 * @member: the name of the list_head within the struct.
| 482 | * |
| 483 | * This primitive may safely run concurrently with the _rcu |
| 484 | * list-mutation primitives such as list_add_rcu(), but requires some |
| 485 | * implicit RCU read-side guarding. One example is running within a special |
| 486 | * exception-time environment where preemption is disabled and where lockdep |
| 487 | * cannot be invoked. Another example is when items are added to the list, |
| 488 | * but never deleted. |
| 489 | */ |
| 490 | #define list_for_each_entry_lockless(pos, head, member) \ |
| 491 | for (pos = list_entry_lockless((head)->next, typeof(*pos), member); \ |
| 492 | &pos->member != (head); \ |
| 493 | pos = list_entry_lockless(pos->member.next, typeof(*pos), member)) |
| 494 | |
| 495 | /** |
| 496 | * list_for_each_entry_continue_rcu - continue iteration over list of given type |
| 497 | * @pos: the type * to use as a loop cursor. |
| 498 | * @head: the head for your list. |
| 499 | * @member: the name of the list_head within the struct. |
| 500 | * |
| 501 | * Continue to iterate over list of given type, continuing after |
| 502 | * the current position which must have been in the list when the RCU read |
| 503 | * lock was taken. |
| 504 | * This would typically require either that you obtained the node from a |
| 505 | * previous walk of the list in the same RCU read-side critical section, or |
| 506 | * that you held some sort of non-RCU reference (such as a reference count) |
| 507 | * to keep the node alive *and* in the list. |
| 508 | * |
| 509 | * This iterator is similar to list_for_each_entry_from_rcu() except |
| 510 | * this starts after the given position and that one starts at the given |
| 511 | * position. |
| 512 | */ |
| 513 | #define list_for_each_entry_continue_rcu(pos, head, member) \ |
| 514 | for (pos = list_entry_rcu(pos->member.next, typeof(*pos), member); \ |
| 515 | &pos->member != (head); \ |
| 516 | pos = list_entry_rcu(pos->member.next, typeof(*pos), member)) |
| 517 | |
| 518 | /** |
| 519 | * list_for_each_entry_from_rcu - iterate over a list from current point |
| 520 | * @pos: the type * to use as a loop cursor. |
| 521 | * @head: the head for your list. |
 * @member: the name of the list_head within the struct.
| 523 | * |
| 524 | * Iterate over the tail of a list starting from a given position, |
| 525 | * which must have been in the list when the RCU read lock was taken. |
| 526 | * This would typically require either that you obtained the node from a |
| 527 | * previous walk of the list in the same RCU read-side critical section, or |
| 528 | * that you held some sort of non-RCU reference (such as a reference count) |
| 529 | * to keep the node alive *and* in the list. |
| 530 | * |
| 531 | * This iterator is similar to list_for_each_entry_continue_rcu() except |
| 532 | * this starts from the given position and that one starts from the position |
| 533 | * after the given position. |
| 534 | */ |
| 535 | #define list_for_each_entry_from_rcu(pos, head, member) \ |
| 536 | for (; &(pos)->member != (head); \ |
| 537 | pos = list_entry_rcu(pos->member.next, typeof(*(pos)), member)) |
| 538 | |
| 539 | /** |
| 540 | * hlist_del_rcu - deletes entry from hash list without re-initialization |
| 541 | * @n: the element to delete from the hash list. |
| 542 | * |
 * Note: hlist_unhashed() on entry does not return true after this;
 * the entry is in an undefined state. This is useful for RCU-based
 * lockfree traversal.
 *
 * In particular, it means that we cannot poison the forward
 * pointers that may still be used for walking the hash list.
| 549 | * |
| 550 | * The caller must take whatever precautions are necessary |
| 551 | * (such as holding appropriate locks) to avoid racing |
| 552 | * with another list-mutation primitive, such as hlist_add_head_rcu() |
| 553 | * or hlist_del_rcu(), running on this same list. |
 * However, it is perfectly legal to run concurrently with
 * the _rcu list-traversal primitives, such as
 * hlist_for_each_entry_rcu().
| 557 | */ |
| 558 | static inline void hlist_del_rcu(struct hlist_node *n) |
| 559 | { |
| 560 | __hlist_del(n); |
| 561 | WRITE_ONCE(n->pprev, LIST_POISON2); |
| 562 | } |
| 563 | |
| 564 | /** |
| 565 | * hlist_replace_rcu - replace old entry by new one |
| 566 | * @old : the element to be replaced |
| 567 | * @new : the new element to insert |
| 568 | * |
| 569 | * The @old entry will be replaced with the @new entry atomically from |
| 570 | * the perspective of concurrent readers. It is the caller's responsibility |
| 571 | * to synchronize with concurrent updaters, if any. |
| 572 | */ |
| 573 | static inline void hlist_replace_rcu(struct hlist_node *old, |
| 574 | struct hlist_node *new) |
| 575 | { |
| 576 | struct hlist_node *next = old->next; |
| 577 | |
| 578 | new->next = next; |
| 579 | WRITE_ONCE(new->pprev, old->pprev); |
| 580 | rcu_assign_pointer(*(struct hlist_node __rcu **)new->pprev, new); |
| 581 | if (next) |
| 582 | WRITE_ONCE(new->next->pprev, &new->next); |
| 583 | WRITE_ONCE(old->pprev, LIST_POISON2); |
| 584 | } |
| 585 | |
| 586 | /** |
| 587 | * hlists_swap_heads_rcu - swap the lists the hlist heads point to |
| 588 | * @left: The hlist head on the left |
| 589 | * @right: The hlist head on the right |
| 590 | * |
| 591 | * The lists start out as [@left ][node1 ... ] and |
| 592 | * [@right ][node2 ... ] |
| 593 | * The lists end up as [@left ][node2 ... ] |
| 594 | * [@right ][node1 ... ] |
| 595 | */ |
| 596 | static inline void hlists_swap_heads_rcu(struct hlist_head *left, struct hlist_head *right) |
| 597 | { |
| 598 | struct hlist_node *node1 = left->first; |
| 599 | struct hlist_node *node2 = right->first; |
| 600 | |
| 601 | rcu_assign_pointer(left->first, node2); |
| 602 | rcu_assign_pointer(right->first, node1); |
| 603 | WRITE_ONCE(node2->pprev, &left->first); |
| 604 | WRITE_ONCE(node1->pprev, &right->first); |
| 605 | } |
| 606 | |
| 607 | /* |
| 608 | * return the first or the next element in an RCU protected hlist |
| 609 | */ |
| 610 | #define hlist_first_rcu(head) (*((struct hlist_node __rcu **)(&(head)->first))) |
| 611 | #define hlist_next_rcu(node) (*((struct hlist_node __rcu **)(&(node)->next))) |
| 612 | #define hlist_pprev_rcu(node) (*((struct hlist_node __rcu **)((node)->pprev))) |
| 613 | |
| 614 | /** |
 * hlist_add_head_rcu - add an element to the beginning of a hash list
| 616 | * @n: the element to add to the hash list. |
| 617 | * @h: the list to add to. |
| 618 | * |
| 619 | * Description: |
| 620 | * Adds the specified element to the specified hlist, |
| 621 | * while permitting racing traversals. |
| 622 | * |
| 623 | * The caller must take whatever precautions are necessary |
| 624 | * (such as holding appropriate locks) to avoid racing |
| 625 | * with another list-mutation primitive, such as hlist_add_head_rcu() |
| 626 | * or hlist_del_rcu(), running on this same list. |
 * However, it is perfectly legal to run concurrently with the _rcu
 * list-traversal primitives, such as hlist_for_each_entry_rcu(),
 * whose use prevents memory-consistency problems on Alpha CPUs.
 * Regardless of the type of CPU, the list-traversal primitive must
 * be guarded by rcu_read_lock().
| 632 | */ |
| 633 | static inline void hlist_add_head_rcu(struct hlist_node *n, |
| 634 | struct hlist_head *h) |
| 635 | { |
| 636 | struct hlist_node *first = h->first; |
| 637 | |
| 638 | n->next = first; |
| 639 | WRITE_ONCE(n->pprev, &h->first); |
| 640 | rcu_assign_pointer(hlist_first_rcu(h), n); |
| 641 | if (first) |
| 642 | WRITE_ONCE(first->pprev, &n->next); |
| 643 | } |
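
/*
 * Example (illustrative sketch): insertion into a small RCU-protected
 * hash table.  "struct bar", "bar_hash", "bar_lock", and BAR_HASH_BITS
 * are hypothetical; hash_min() is from <linux/hash.h>:
 *
 *	#define BAR_HASH_BITS	4
 *
 *	struct bar {
 *		int key;
 *		struct hlist_node node;
 *		struct rcu_head rcu;
 *	};
 *	static struct hlist_head bar_hash[1 << BAR_HASH_BITS];
 *	static DEFINE_SPINLOCK(bar_lock);
 *
 *	static void add_bar(struct bar *b)
 *	{
 *		spin_lock(&bar_lock);
 *		hlist_add_head_rcu(&b->node,
 *				&bar_hash[hash_min(b->key, BAR_HASH_BITS)]);
 *		spin_unlock(&bar_lock);
 *	}
 */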
| 644 | |
| 645 | /** |
 * hlist_add_tail_rcu - add an element to the end of a hash list
| 647 | * @n: the element to add to the hash list. |
| 648 | * @h: the list to add to. |
| 649 | * |
| 650 | * Description: |
| 651 | * Adds the specified element to the specified hlist, |
| 652 | * while permitting racing traversals. |
| 653 | * |
| 654 | * The caller must take whatever precautions are necessary |
| 655 | * (such as holding appropriate locks) to avoid racing |
| 656 | * with another list-mutation primitive, such as hlist_add_head_rcu() |
| 657 | * or hlist_del_rcu(), running on this same list. |
 * However, it is perfectly legal to run concurrently with the _rcu
 * list-traversal primitives, such as hlist_for_each_entry_rcu(),
 * whose use prevents memory-consistency problems on Alpha CPUs.
 * Regardless of the type of CPU, the list-traversal primitive must
 * be guarded by rcu_read_lock().
| 663 | */ |
| 664 | static inline void hlist_add_tail_rcu(struct hlist_node *n, |
| 665 | struct hlist_head *h) |
| 666 | { |
| 667 | struct hlist_node *i, *last = NULL; |
| 668 | |
| 669 | /* Note: write side code, so rcu accessors are not needed. */ |
| 670 | for (i = h->first; i; i = i->next) |
| 671 | last = i; |
| 672 | |
| 673 | if (last) { |
| 674 | n->next = last->next; |
| 675 | WRITE_ONCE(n->pprev, &last->next); |
| 676 | rcu_assign_pointer(hlist_next_rcu(last), n); |
| 677 | } else { |
| 678 | hlist_add_head_rcu(n, h); |
| 679 | } |
| 680 | } |
| 681 | |
| 682 | /** |
 * hlist_add_before_rcu - add an element before an existing hash-list element
| 684 | * @n: the new element to add to the hash list. |
| 685 | * @next: the existing element to add the new element before. |
| 686 | * |
| 687 | * Description: |
| 688 | * Adds the specified element to the specified hlist |
| 689 | * before the specified node while permitting racing traversals. |
| 690 | * |
| 691 | * The caller must take whatever precautions are necessary |
| 692 | * (such as holding appropriate locks) to avoid racing |
| 693 | * with another list-mutation primitive, such as hlist_add_head_rcu() |
| 694 | * or hlist_del_rcu(), running on this same list. |
 * However, it is perfectly legal to run concurrently with the _rcu
 * list-traversal primitives, such as hlist_for_each_entry_rcu(),
 * whose use prevents memory-consistency problems on Alpha CPUs.
| 699 | */ |
| 700 | static inline void hlist_add_before_rcu(struct hlist_node *n, |
| 701 | struct hlist_node *next) |
| 702 | { |
| 703 | WRITE_ONCE(n->pprev, next->pprev); |
| 704 | n->next = next; |
| 705 | rcu_assign_pointer(hlist_pprev_rcu(n), n); |
| 706 | WRITE_ONCE(next->pprev, &n->next); |
| 707 | } |
| 708 | |
| 709 | /** |
 * hlist_add_behind_rcu - add an element after an existing hash-list element
| 711 | * @n: the new element to add to the hash list. |
| 712 | * @prev: the existing element to add the new element after. |
| 713 | * |
| 714 | * Description: |
| 715 | * Adds the specified element to the specified hlist |
| 716 | * after the specified node while permitting racing traversals. |
| 717 | * |
| 718 | * The caller must take whatever precautions are necessary |
| 719 | * (such as holding appropriate locks) to avoid racing |
| 720 | * with another list-mutation primitive, such as hlist_add_head_rcu() |
| 721 | * or hlist_del_rcu(), running on this same list. |
 * However, it is perfectly legal to run concurrently with the _rcu
 * list-traversal primitives, such as hlist_for_each_entry_rcu(),
 * whose use prevents memory-consistency problems on Alpha CPUs.
| 726 | */ |
| 727 | static inline void hlist_add_behind_rcu(struct hlist_node *n, |
| 728 | struct hlist_node *prev) |
| 729 | { |
| 730 | n->next = prev->next; |
| 731 | WRITE_ONCE(n->pprev, &prev->next); |
| 732 | rcu_assign_pointer(hlist_next_rcu(prev), n); |
| 733 | if (n->next) |
| 734 | WRITE_ONCE(n->next->pprev, &n->next); |
| 735 | } |
| 736 | |
| 737 | #define __hlist_for_each_rcu(pos, head) \ |
| 738 | for (pos = rcu_dereference(hlist_first_rcu(head)); \ |
| 739 | pos; \ |
| 740 | pos = rcu_dereference(hlist_next_rcu(pos))) |
| 741 | |
| 742 | /** |
| 743 | * hlist_for_each_entry_rcu - iterate over rcu list of given type |
| 744 | * @pos: the type * to use as a loop cursor. |
| 745 | * @head: the head for your list. |
| 746 | * @member: the name of the hlist_node within the struct. |
| 747 | * @cond: optional lockdep expression if called from non-RCU protection. |
| 748 | * |
| 749 | * This list-traversal primitive may safely run concurrently with |
| 750 | * the _rcu list-mutation primitives such as hlist_add_head_rcu() |
| 751 | * as long as the traversal is guarded by rcu_read_lock(). |
| 752 | */ |
| 753 | #define hlist_for_each_entry_rcu(pos, head, member, cond...) \ |
| 754 | for (__list_check_rcu(dummy, ## cond, 0), \ |
| 755 | pos = hlist_entry_safe(rcu_dereference_raw(hlist_first_rcu(head)),\ |
| 756 | typeof(*(pos)), member); \ |
| 757 | pos; \ |
| 758 | pos = hlist_entry_safe(rcu_dereference_raw(hlist_next_rcu(\ |
| 759 | &(pos)->member)), typeof(*(pos)), member)) |
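
/*
 * Example (illustrative sketch): lookup in the hypothetical "bar_hash"
 * table from the sketch above.  The caller must hold rcu_read_lock()
 * for the returned pointer to remain valid:
 *
 *	static struct bar *lookup_bar(int key)
 *	{
 *		struct bar *b;
 *
 *		hlist_for_each_entry_rcu(b,
 *				&bar_hash[hash_min(key, BAR_HASH_BITS)],
 *				node)
 *			if (b->key == key)
 *				return b;
 *		return NULL;
 *	}
 */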
| 760 | |
| 761 | /** |
| 762 | * hlist_for_each_entry_srcu - iterate over rcu list of given type |
| 763 | * @pos: the type * to use as a loop cursor. |
| 764 | * @head: the head for your list. |
| 765 | * @member: the name of the hlist_node within the struct. |
| 766 | * @cond: lockdep expression for the lock required to traverse the list. |
| 767 | * |
| 768 | * This list-traversal primitive may safely run concurrently with |
| 769 | * the _rcu list-mutation primitives such as hlist_add_head_rcu() |
| 770 | * as long as the traversal is guarded by srcu_read_lock(). |
| 771 | * The lockdep expression srcu_read_lock_held() can be passed as the |
 * cond argument from the read side.
| 773 | */ |
| 774 | #define hlist_for_each_entry_srcu(pos, head, member, cond) \ |
| 775 | for (__list_check_srcu(cond), \ |
| 776 | pos = hlist_entry_safe(rcu_dereference_raw(hlist_first_rcu(head)),\ |
| 777 | typeof(*(pos)), member); \ |
| 778 | pos; \ |
| 779 | pos = hlist_entry_safe(rcu_dereference_raw(hlist_next_rcu(\ |
| 780 | &(pos)->member)), typeof(*(pos)), member)) |
| 781 | |
| 782 | /** |
| 783 | * hlist_for_each_entry_rcu_notrace - iterate over rcu list of given type (for tracing) |
| 784 | * @pos: the type * to use as a loop cursor. |
| 785 | * @head: the head for your list. |
| 786 | * @member: the name of the hlist_node within the struct. |
| 787 | * |
| 788 | * This list-traversal primitive may safely run concurrently with |
| 789 | * the _rcu list-mutation primitives such as hlist_add_head_rcu() |
| 790 | * as long as the traversal is guarded by rcu_read_lock(). |
| 791 | * |
| 792 | * This is the same as hlist_for_each_entry_rcu() except that it does |
| 793 | * not do any RCU debugging or tracing. |
| 794 | */ |
| 795 | #define hlist_for_each_entry_rcu_notrace(pos, head, member) \ |
| 796 | for (pos = hlist_entry_safe(rcu_dereference_raw_check(hlist_first_rcu(head)),\ |
| 797 | typeof(*(pos)), member); \ |
| 798 | pos; \ |
| 799 | pos = hlist_entry_safe(rcu_dereference_raw_check(hlist_next_rcu(\ |
| 800 | &(pos)->member)), typeof(*(pos)), member)) |
| 801 | |
| 802 | /** |
| 803 | * hlist_for_each_entry_rcu_bh - iterate over rcu list of given type |
| 804 | * @pos: the type * to use as a loop cursor. |
| 805 | * @head: the head for your list. |
| 806 | * @member: the name of the hlist_node within the struct. |
| 807 | * |
| 808 | * This list-traversal primitive may safely run concurrently with |
| 809 | * the _rcu list-mutation primitives such as hlist_add_head_rcu() |
 * as long as the traversal is guarded by rcu_read_lock_bh().
| 811 | */ |
| 812 | #define hlist_for_each_entry_rcu_bh(pos, head, member) \ |
| 813 | for (pos = hlist_entry_safe(rcu_dereference_bh(hlist_first_rcu(head)),\ |
| 814 | typeof(*(pos)), member); \ |
| 815 | pos; \ |
| 816 | pos = hlist_entry_safe(rcu_dereference_bh(hlist_next_rcu(\ |
| 817 | &(pos)->member)), typeof(*(pos)), member)) |
| 818 | |
| 819 | /** |
| 820 | * hlist_for_each_entry_continue_rcu - iterate over a hlist continuing after current point |
| 821 | * @pos: the type * to use as a loop cursor. |
| 822 | * @member: the name of the hlist_node within the struct. |
| 823 | */ |
| 824 | #define hlist_for_each_entry_continue_rcu(pos, member) \ |
| 825 | for (pos = hlist_entry_safe(rcu_dereference_raw(hlist_next_rcu( \ |
| 826 | &(pos)->member)), typeof(*(pos)), member); \ |
| 827 | pos; \ |
| 828 | pos = hlist_entry_safe(rcu_dereference_raw(hlist_next_rcu( \ |
| 829 | &(pos)->member)), typeof(*(pos)), member)) |
| 830 | |
| 831 | /** |
| 832 | * hlist_for_each_entry_continue_rcu_bh - iterate over a hlist continuing after current point |
| 833 | * @pos: the type * to use as a loop cursor. |
| 834 | * @member: the name of the hlist_node within the struct. |
| 835 | */ |
| 836 | #define hlist_for_each_entry_continue_rcu_bh(pos, member) \ |
| 837 | for (pos = hlist_entry_safe(rcu_dereference_bh(hlist_next_rcu( \ |
| 838 | &(pos)->member)), typeof(*(pos)), member); \ |
| 839 | pos; \ |
| 840 | pos = hlist_entry_safe(rcu_dereference_bh(hlist_next_rcu( \ |
| 841 | &(pos)->member)), typeof(*(pos)), member)) |
| 842 | |
| 843 | /** |
| 844 | * hlist_for_each_entry_from_rcu - iterate over a hlist continuing from current point |
| 845 | * @pos: the type * to use as a loop cursor. |
| 846 | * @member: the name of the hlist_node within the struct. |
| 847 | */ |
| 848 | #define hlist_for_each_entry_from_rcu(pos, member) \ |
| 849 | for (; pos; \ |
| 850 | pos = hlist_entry_safe(rcu_dereference_raw(hlist_next_rcu( \ |
| 851 | &(pos)->member)), typeof(*(pos)), member)) |
| 852 | |
| 853 | #endif /* __KERNEL__ */ |
| 854 | #endif |