Commit | Line | Data |
---|---|---|
1da177e4 LT |
1 | #ifndef _LINUX_LIST_H |
2 | #define _LINUX_LIST_H | |
3 | ||
4 | #ifdef __KERNEL__ | |
5 | ||
6 | #include <linux/stddef.h> | |
7 | #include <linux/prefetch.h> | |
8 | #include <asm/system.h> | |
9 | ||
/*
 * These are non-NULL pointers that will result in page faults
 * under normal circumstances, used to verify that nobody uses
 * non-initialized list entries.
 *
 * list_del()/hlist_del() write these values into the unlinked
 * node, so a stale use of the entry traps instead of silently
 * walking freed memory.
 */
#define LIST_POISON1  ((void *) 0x00100100)
#define LIST_POISON2  ((void *) 0x00200200)
17 | ||
/*
 * Simple doubly linked list implementation.
 *
 * Some of the internal functions ("__xxx") are useful when
 * manipulating whole lists rather than single entries, as
 * sometimes we already know the next/prev entries and we can
 * generate better code by using them directly rather than
 * using the generic single-entry routines.
 */

/*
 * Circular doubly linked list node. The same structure serves both
 * as the list head and as the link embedded inside each element;
 * an empty list is a head whose next/prev point back at itself.
 */
struct list_head {
	struct list_head *next, *prev;
};

/* Static initializer: the named head points at itself (empty list). */
#define LIST_HEAD_INIT(name) { &(name), &(name) }

/* Define and initialize an empty list head in one statement. */
#define LIST_HEAD(name) \
	struct list_head name = LIST_HEAD_INIT(name)
36 | ||
490d6ab1 ZB |
37 | static inline void INIT_LIST_HEAD(struct list_head *list) |
38 | { | |
39 | list->next = list; | |
40 | list->prev = list; | |
41 | } | |
1da177e4 LT |
42 | |
43 | /* | |
44 | * Insert a new entry between two known consecutive entries. | |
45 | * | |
46 | * This is only for internal list manipulation where we know | |
47 | * the prev/next entries already! | |
48 | */ | |
49 | static inline void __list_add(struct list_head *new, | |
50 | struct list_head *prev, | |
51 | struct list_head *next) | |
52 | { | |
53 | next->prev = new; | |
54 | new->next = next; | |
55 | new->prev = prev; | |
56 | prev->next = new; | |
57 | } | |
58 | ||
/**
 * list_add - add a new entry
 * @new: new entry to be added
 * @head: list head to add it after
 *
 * Insert a new entry after the specified head.
 * This is good for implementing stacks.
 */
static inline void list_add(struct list_head *new, struct list_head *head)
{
	__list_add(new, head, head->next);
}

/**
 * list_add_tail - add a new entry
 * @new: new entry to be added
 * @head: list head to add it before
 *
 * Insert a new entry before the specified head.
 * This is useful for implementing queues.
 */
static inline void list_add_tail(struct list_head *new, struct list_head *head)
{
	__list_add(new, head->prev, head);
}
84 | ||
/*
 * Insert a new entry between two known consecutive entries.
 *
 * This is only for internal list manipulation where we know
 * the prev/next entries already!
 */
static inline void __list_add_rcu(struct list_head * new,
	struct list_head * prev, struct list_head * next)
{
	new->next = next;
	new->prev = prev;
	/*
	 * Publish barrier: @new must be fully initialized before it
	 * becomes reachable, so a concurrent reader never observes
	 * uninitialized next/prev pointers.
	 */
	smp_wmb();
	next->prev = new;
	prev->next = new;
}

/**
 * list_add_rcu - add a new entry to rcu-protected list
 * @new: new entry to be added
 * @head: list head to add it after
 *
 * Insert a new entry after the specified head.
 * This is good for implementing stacks.
 *
 * The caller must take whatever precautions are necessary
 * (such as holding appropriate locks) to avoid racing
 * with another list-mutation primitive, such as list_add_rcu()
 * or list_del_rcu(), running on this same list.
 * However, it is perfectly legal to run concurrently with
 * the _rcu list-traversal primitives, such as
 * list_for_each_entry_rcu().
 */
static inline void list_add_rcu(struct list_head *new, struct list_head *head)
{
	__list_add_rcu(new, head, head->next);
}

/**
 * list_add_tail_rcu - add a new entry to rcu-protected list
 * @new: new entry to be added
 * @head: list head to add it before
 *
 * Insert a new entry before the specified head.
 * This is useful for implementing queues.
 *
 * The caller must take whatever precautions are necessary
 * (such as holding appropriate locks) to avoid racing
 * with another list-mutation primitive, such as list_add_tail_rcu()
 * or list_del_rcu(), running on this same list.
 * However, it is perfectly legal to run concurrently with
 * the _rcu list-traversal primitives, such as
 * list_for_each_entry_rcu().
 */
static inline void list_add_tail_rcu(struct list_head *new,
					struct list_head *head)
{
	__list_add_rcu(new, head->prev, head);
}
143 | ||
144 | /* | |
145 | * Delete a list entry by making the prev/next entries | |
146 | * point to each other. | |
147 | * | |
148 | * This is only for internal list manipulation where we know | |
149 | * the prev/next entries already! | |
150 | */ | |
151 | static inline void __list_del(struct list_head * prev, struct list_head * next) | |
152 | { | |
153 | next->prev = prev; | |
154 | prev->next = next; | |
155 | } | |
156 | ||
/**
 * list_del - deletes entry from list.
 * @entry: the element to delete from the list.
 * Note: list_empty on entry does not return true after this, the entry is
 * in an undefined state.
 */
static inline void list_del(struct list_head *entry)
{
	__list_del(entry->prev, entry->next);
	/* Poison both links so stale use of @entry faults immediately. */
	entry->next = LIST_POISON1;
	entry->prev = LIST_POISON2;
}
169 | ||
/**
 * list_del_rcu - deletes entry from list without re-initialization
 * @entry: the element to delete from the list.
 *
 * Note: list_empty on entry does not return true after this,
 * the entry is in an undefined state. It is useful for RCU based
 * lockfree traversal.
 *
 * In particular, it means that we can not poison the forward
 * pointers that may still be used for walking the list.
 *
 * The caller must take whatever precautions are necessary
 * (such as holding appropriate locks) to avoid racing
 * with another list-mutation primitive, such as list_del_rcu()
 * or list_add_rcu(), running on this same list.
 * However, it is perfectly legal to run concurrently with
 * the _rcu list-traversal primitives, such as
 * list_for_each_entry_rcu().
 *
 * Note that the caller is not permitted to immediately free
 * the newly deleted entry. Instead, either synchronize_rcu()
 * or call_rcu() must be used to defer freeing until an RCU
 * grace period has elapsed.
 */
static inline void list_del_rcu(struct list_head *entry)
{
	__list_del(entry->prev, entry->next);
	/* ->next is deliberately left intact for in-flight readers. */
	entry->prev = LIST_POISON2;
}
199 | ||
/*
 * list_replace_rcu - replace old entry by new one
 * @old : the element to be replaced
 * @new : the new element to insert
 *
 * The old entry will be replaced with the new entry atomically.
 */
static inline void list_replace_rcu(struct list_head *old,
				struct list_head *new)
{
	new->next = old->next;
	new->prev = old->prev;
	/* Fully initialize @new before making it reachable in place of @old. */
	smp_wmb();
	new->next->prev = new;
	new->prev->next = new;
	/* @old->next is left intact so concurrent readers can keep walking. */
	old->prev = LIST_POISON2;
}
217 | ||
/**
 * list_del_init - deletes entry from list and reinitialize it.
 * @entry: the element to delete from the list.
 *
 * Unlike list_del(), @entry is left as a valid empty list, so
 * list_empty(@entry) is true afterwards and it may be re-added.
 */
static inline void list_del_init(struct list_head *entry)
{
	__list_del(entry->prev, entry->next);
	INIT_LIST_HEAD(entry);
}
227 | ||
/**
 * list_move - delete from one list and add as another's head
 * @list: the entry to move
 * @head: the head that will precede our entry
 */
static inline void list_move(struct list_head *list, struct list_head *head)
{
	__list_del(list->prev, list->next);
	list_add(list, head);
}

/**
 * list_move_tail - delete from one list and add as another's tail
 * @list: the entry to move
 * @head: the head that will follow our entry
 */
static inline void list_move_tail(struct list_head *list,
				  struct list_head *head)
{
	__list_del(list->prev, list->next);
	list_add_tail(list, head);
}
250 | ||
251 | /** | |
252 | * list_empty - tests whether a list is empty | |
253 | * @head: the list to test. | |
254 | */ | |
255 | static inline int list_empty(const struct list_head *head) | |
256 | { | |
257 | return head->next == head; | |
258 | } | |
259 | ||
260 | /** | |
261 | * list_empty_careful - tests whether a list is | |
262 | * empty _and_ checks that no other CPU might be | |
263 | * in the process of still modifying either member | |
264 | * | |
265 | * NOTE: using list_empty_careful() without synchronization | |
266 | * can only be safe if the only activity that can happen | |
267 | * to the list entry is list_del_init(). Eg. it cannot be used | |
268 | * if another CPU could re-list_add() it. | |
269 | * | |
270 | * @head: the list to test. | |
271 | */ | |
272 | static inline int list_empty_careful(const struct list_head *head) | |
273 | { | |
274 | struct list_head *next = head->next; | |
275 | return (next == head) && (next == head->prev); | |
276 | } | |
277 | ||
278 | static inline void __list_splice(struct list_head *list, | |
279 | struct list_head *head) | |
280 | { | |
281 | struct list_head *first = list->next; | |
282 | struct list_head *last = list->prev; | |
283 | struct list_head *at = head->next; | |
284 | ||
285 | first->prev = head; | |
286 | head->next = first; | |
287 | ||
288 | last->next = at; | |
289 | at->prev = last; | |
290 | } | |
291 | ||
/**
 * list_splice - join two lists
 * @list: the new list to add.
 * @head: the place to add it in the first list.
 *
 * @list's head is left in an undefined state; use list_splice_init()
 * if it will be reused.
 */
static inline void list_splice(struct list_head *list, struct list_head *head)
{
	if (!list_empty(list))
		__list_splice(list, head);
}

/**
 * list_splice_init - join two lists and reinitialise the emptied list.
 * @list: the new list to add.
 * @head: the place to add it in the first list.
 *
 * The list at @list is reinitialised
 */
static inline void list_splice_init(struct list_head *list,
				    struct list_head *head)
{
	if (!list_empty(list)) {
		__list_splice(list, head);
		INIT_LIST_HEAD(list);
	}
}
318 | ||
/**
 * list_entry - get the struct for this entry
 * @ptr: the &struct list_head pointer.
 * @type: the type of the struct this is embedded in.
 * @member: the name of the list_struct within the struct.
 *
 * Maps an embedded link back to its containing structure.
 */
#define list_entry(ptr, type, member) \
	container_of(ptr, type, member)

/**
 * list_for_each - iterate over a list
 * @pos: the &struct list_head to use as a loop counter.
 * @head: the head for your list.
 *
 * The prefetch() of pos->next hides memory latency while the body
 * runs; the comma expression still terminates on pos == head.
 */
#define list_for_each(pos, head) \
	for (pos = (head)->next; prefetch(pos->next), pos != (head); \
		pos = pos->next)
336 | ||
/**
 * __list_for_each - iterate over a list
 * @pos: the &struct list_head to use as a loop counter.
 * @head: the head for your list.
 *
 * This variant differs from list_for_each() in that it's the
 * simplest possible list iteration code, no prefetching is done.
 * Use this for code that knows the list to be very short (empty
 * or 1 entry) most of the time.
 */
/* Fix: parenthesize @pos in the expansion (macro-argument hygiene). */
#define __list_for_each(pos, head) \
	for ((pos) = (head)->next; (pos) != (head); (pos) = (pos)->next)
349 | ||
/**
 * list_for_each_prev - iterate over a list backwards
 * @pos: the &struct list_head to use as a loop counter.
 * @head: the head for your list.
 *
 * Mirror image of list_for_each(): walks ->prev links from the tail.
 */
#define list_for_each_prev(pos, head) \
	for (pos = (head)->prev; prefetch(pos->prev), pos != (head); \
		pos = pos->prev)
358 | ||
/**
 * list_for_each_safe - iterate over a list safe against removal of list entry
 * @pos: the &struct list_head to use as a loop counter.
 * @n: another &struct list_head to use as temporary storage
 * @head: the head for your list.
 *
 * @n caches the successor before the body runs, so the body may
 * unlink (even free) @pos without breaking the iteration.
 */
/* Fix: parenthesize @pos and @n in the expansion (macro-argument hygiene). */
#define list_for_each_safe(pos, n, head) \
	for ((pos) = (head)->next, (n) = (pos)->next; (pos) != (head); \
		(pos) = (n), (n) = (pos)->next)
368 | ||
/**
 * list_for_each_entry - iterate over list of given type
 * @pos: the type * to use as a loop counter.
 * @head: the head for your list.
 * @member: the name of the list_struct within the struct.
 *
 * @pos is the containing structure, not the raw link; list_entry()
 * performs the conversion on every step.
 */
#define list_for_each_entry(pos, head, member)				\
	for (pos = list_entry((head)->next, typeof(*pos), member);	\
	     prefetch(pos->member.next), &pos->member != (head); 	\
	     pos = list_entry(pos->member.next, typeof(*pos), member))

/**
 * list_for_each_entry_reverse - iterate backwards over list of given type.
 * @pos: the type * to use as a loop counter.
 * @head: the head for your list.
 * @member: the name of the list_struct within the struct.
 */
#define list_for_each_entry_reverse(pos, head, member)			\
	for (pos = list_entry((head)->prev, typeof(*pos), member);	\
	     prefetch(pos->member.prev), &pos->member != (head); 	\
	     pos = list_entry(pos->member.prev, typeof(*pos), member))

/**
 * list_prepare_entry - prepare a pos entry for use as a start point in
 *			list_for_each_entry_continue
 * @pos: the type * to use as a start point
 * @head: the head of the list
 * @member: the name of the list_struct within the struct.
 *
 * Yields @pos itself when non-NULL, otherwise the head disguised as
 * an entry (GNU "?:" extension), so a continue-style loop starts
 * from the beginning.
 */
#define list_prepare_entry(pos, head, member) \
	((pos) ? : list_entry(head, typeof(*pos), member))

/**
 * list_for_each_entry_continue - iterate over list of given type
 *			continuing after existing point
 * @pos: the type * to use as a loop counter.
 * @head: the head for your list.
 * @member: the name of the list_struct within the struct.
 *
 * Note: starts at the entry *after* the current @pos.
 */
#define list_for_each_entry_continue(pos, head, member) 		\
	for (pos = list_entry(pos->member.next, typeof(*pos), member);	\
	     prefetch(pos->member.next), &pos->member != (head);	\
	     pos = list_entry(pos->member.next, typeof(*pos), member))
412 | ||
e229c2fb ACM |
413 | /** |
414 | * list_for_each_entry_from - iterate over list of given type | |
415 | * continuing from existing point | |
416 | * @pos: the type * to use as a loop counter. | |
417 | * @head: the head for your list. | |
418 | * @member: the name of the list_struct within the struct. | |
419 | */ | |
420 | #define list_for_each_entry_from(pos, head, member) \ | |
421 | for (; prefetch(pos->member.next), &pos->member != (head); \ | |
422 | pos = list_entry(pos->member.next, typeof(*pos), member)) | |
423 | ||
1da177e4 LT |
424 | /** |
425 | * list_for_each_entry_safe - iterate over list of given type safe against removal of list entry | |
426 | * @pos: the type * to use as a loop counter. | |
427 | * @n: another type * to use as temporary storage | |
428 | * @head: the head for your list. | |
429 | * @member: the name of the list_struct within the struct. | |
430 | */ | |
431 | #define list_for_each_entry_safe(pos, n, head, member) \ | |
432 | for (pos = list_entry((head)->next, typeof(*pos), member), \ | |
433 | n = list_entry(pos->member.next, typeof(*pos), member); \ | |
434 | &pos->member != (head); \ | |
435 | pos = n, n = list_entry(n->member.next, typeof(*n), member)) | |
436 | ||
74459dc7 ACM |
437 | /** |
438 | * list_for_each_entry_safe_continue - iterate over list of given type | |
439 | * continuing after existing point safe against removal of list entry | |
440 | * @pos: the type * to use as a loop counter. | |
441 | * @n: another type * to use as temporary storage | |
442 | * @head: the head for your list. | |
443 | * @member: the name of the list_struct within the struct. | |
444 | */ | |
445 | #define list_for_each_entry_safe_continue(pos, n, head, member) \ | |
8c60f3fa ACM |
446 | for (pos = list_entry(pos->member.next, typeof(*pos), member), \ |
447 | n = list_entry(pos->member.next, typeof(*pos), member); \ | |
d8dcffee ACM |
448 | &pos->member != (head); \ |
449 | pos = n, n = list_entry(n->member.next, typeof(*n), member)) | |
450 | ||
451 | /** | |
452 | * list_for_each_entry_safe_from - iterate over list of given type | |
453 | * from existing point safe against removal of list entry | |
454 | * @pos: the type * to use as a loop counter. | |
455 | * @n: another type * to use as temporary storage | |
456 | * @head: the head for your list. | |
457 | * @member: the name of the list_struct within the struct. | |
458 | */ | |
459 | #define list_for_each_entry_safe_from(pos, n, head, member) \ | |
460 | for (n = list_entry(pos->member.next, typeof(*pos), member); \ | |
74459dc7 ACM |
461 | &pos->member != (head); \ |
462 | pos = n, n = list_entry(n->member.next, typeof(*n), member)) | |
463 | ||
0ad42352 DH |
464 | /** |
465 | * list_for_each_entry_safe_reverse - iterate backwards over list of given type safe against | |
466 | * removal of list entry | |
467 | * @pos: the type * to use as a loop counter. | |
468 | * @n: another type * to use as temporary storage | |
469 | * @head: the head for your list. | |
470 | * @member: the name of the list_struct within the struct. | |
471 | */ | |
472 | #define list_for_each_entry_safe_reverse(pos, n, head, member) \ | |
473 | for (pos = list_entry((head)->prev, typeof(*pos), member), \ | |
474 | n = list_entry(pos->member.prev, typeof(*pos), member); \ | |
475 | &pos->member != (head); \ | |
476 | pos = n, n = list_entry(n->member.prev, typeof(*n), member)) | |
477 | ||
1da177e4 LT |
/**
 * list_for_each_rcu - iterate over an rcu-protected list
 * @pos: the &struct list_head to use as a loop counter.
 * @head: the head for your list.
 *
 * This list-traversal primitive may safely run concurrently with
 * the _rcu list-mutation primitives such as list_add_rcu()
 * as long as the traversal is guarded by rcu_read_lock().
 */
#define list_for_each_rcu(pos, head) \
	for (pos = (head)->next; \
		prefetch(rcu_dereference(pos)->next), pos != (head); \
		pos = pos->next)

/* As list_for_each_rcu() but without the prefetch. */
#define __list_for_each_rcu(pos, head) \
	for (pos = (head)->next; \
		rcu_dereference(pos) != (head); \
		pos = pos->next)

/**
 * list_for_each_safe_rcu - iterate over an rcu-protected list safe
 *					against removal of list entry
 * @pos: the &struct list_head to use as a loop counter.
 * @n: another &struct list_head to use as temporary storage
 * @head: the head for your list.
 *
 * This list-traversal primitive may safely run concurrently with
 * the _rcu list-mutation primitives such as list_add_rcu()
 * as long as the traversal is guarded by rcu_read_lock().
 * @n caches the successor so the body may remove @pos.
 */
#define list_for_each_safe_rcu(pos, n, head) \
	for (pos = (head)->next; \
		n = rcu_dereference(pos)->next, pos != (head); \
		pos = n)
1da177e4 LT |
512 | |
/**
 * list_for_each_entry_rcu - iterate over rcu list of given type
 * @pos: the type * to use as a loop counter.
 * @head: the head for your list.
 * @member: the name of the list_struct within the struct.
 *
 * This list-traversal primitive may safely run concurrently with
 * the _rcu list-mutation primitives such as list_add_rcu()
 * as long as the traversal is guarded by rcu_read_lock().
 */
#define list_for_each_entry_rcu(pos, head, member) \
	for (pos = list_entry((head)->next, typeof(*pos), member); \
		prefetch(rcu_dereference(pos)->member.next), \
			&pos->member != (head); \
		pos = list_entry(pos->member.next, typeof(*pos), member))


/**
 * list_for_each_continue_rcu - iterate over an rcu-protected list
 *			continuing after existing point.
 * @pos: the &struct list_head to use as a loop counter.
 * @head: the head for your list.
 *
 * This list-traversal primitive may safely run concurrently with
 * the _rcu list-mutation primitives such as list_add_rcu()
 * as long as the traversal is guarded by rcu_read_lock().
 * Starts at the entry *after* the current @pos.
 */
#define list_for_each_continue_rcu(pos, head) \
	for ((pos) = (pos)->next; \
		prefetch(rcu_dereference((pos))->next), (pos) != (head); \
		(pos) = (pos)->next)
1da177e4 LT |
544 | |
/*
 * Double linked lists with a single pointer list head.
 * Mostly useful for hash tables where the two pointer list head is
 * too wasteful.
 * You lose the ability to access the tail in O(1).
 */

struct hlist_head {
	struct hlist_node *first;
};

struct hlist_node {
	/*
	 * @next: next node, NULL at the end of the chain.
	 * @pprev: address of the pointer that points at this node
	 * (either the head's ->first or the previous node's ->next);
	 * lets __hlist_del() unlink without knowing whether the node
	 * is first in the chain.
	 */
	struct hlist_node *next, **pprev;
};

#define HLIST_HEAD_INIT { .first = NULL }
#define HLIST_HEAD(name) struct hlist_head name = { .first = NULL }
#define INIT_HLIST_HEAD(ptr) ((ptr)->first = NULL)
490d6ab1 ZB |
563 | static inline void INIT_HLIST_NODE(struct hlist_node *h) |
564 | { | |
565 | h->next = NULL; | |
566 | h->pprev = NULL; | |
567 | } | |
1da177e4 LT |
568 | |
569 | static inline int hlist_unhashed(const struct hlist_node *h) | |
570 | { | |
571 | return !h->pprev; | |
572 | } | |
573 | ||
574 | static inline int hlist_empty(const struct hlist_head *h) | |
575 | { | |
576 | return !h->first; | |
577 | } | |
578 | ||
579 | static inline void __hlist_del(struct hlist_node *n) | |
580 | { | |
581 | struct hlist_node *next = n->next; | |
582 | struct hlist_node **pprev = n->pprev; | |
583 | *pprev = next; | |
584 | if (next) | |
585 | next->pprev = pprev; | |
586 | } | |
587 | ||
/*
 * hlist_del - remove @n from its chain and poison its links so any
 * stale use faults (see LIST_POISON1/2).
 */
static inline void hlist_del(struct hlist_node *n)
{
	__hlist_del(n);
	n->next = LIST_POISON1;
	n->pprev = LIST_POISON2;
}
594 | ||
/**
 * hlist_del_rcu - deletes entry from hash list without re-initialization
 * @n: the element to delete from the hash list.
 *
 * Note: list_unhashed() on entry does not return true after this,
 * the entry is in an undefined state. It is useful for RCU based
 * lockfree traversal.
 *
 * In particular, it means that we can not poison the forward
 * pointers that may still be used for walking the hash list.
 *
 * The caller must take whatever precautions are necessary
 * (such as holding appropriate locks) to avoid racing
 * with another list-mutation primitive, such as hlist_add_head_rcu()
 * or hlist_del_rcu(), running on this same list.
 * However, it is perfectly legal to run concurrently with
 * the _rcu list-traversal primitives, such as
 * hlist_for_each_entry().
 */
static inline void hlist_del_rcu(struct hlist_node *n)
{
	__hlist_del(n);
	/* ->next is left intact for in-flight readers. */
	n->pprev = LIST_POISON2;
}
619 | ||
/*
 * hlist_del_init - unlink @n (if it is hashed at all) and reset it
 * to the unhashed state so it can be re-added later.
 */
static inline void hlist_del_init(struct hlist_node *n)
{
	if (hlist_unhashed(n))
		return;
	__hlist_del(n);
	INIT_HLIST_NODE(n);
}
627 | ||
b88cb424 IM |
/*
 * hlist_replace_rcu - replace old entry by new one
 * @old : the element to be replaced
 * @new : the new element to insert
 *
 * The old entry will be replaced with the new entry atomically.
 */
static inline void hlist_replace_rcu(struct hlist_node *old,
					struct hlist_node *new)
{
	struct hlist_node *next = old->next;

	new->next = next;
	new->pprev = old->pprev;
	/* Fully initialize @new before publishing it in place of @old. */
	smp_wmb();
	if (next)
		new->next->pprev = &new->next;
	*new->pprev = new;
	/* @old->next stays intact so concurrent readers can keep walking. */
	old->pprev = LIST_POISON2;
}
648 | ||
1da177e4 LT |
649 | static inline void hlist_add_head(struct hlist_node *n, struct hlist_head *h) |
650 | { | |
651 | struct hlist_node *first = h->first; | |
652 | n->next = first; | |
653 | if (first) | |
654 | first->pprev = &n->next; | |
655 | h->first = n; | |
656 | n->pprev = &h->first; | |
657 | } | |
658 | ||
659 | ||
/**
 * hlist_add_head_rcu - adds the specified element to the specified hlist,
 * while permitting racing traversals.
 * @n: the element to add to the hash list.
 * @h: the list to add to.
 *
 * The caller must take whatever precautions are necessary
 * (such as holding appropriate locks) to avoid racing
 * with another list-mutation primitive, such as hlist_add_head_rcu()
 * or hlist_del_rcu(), running on this same list.
 * However, it is perfectly legal to run concurrently with
 * the _rcu list-traversal primitives, such as
 * hlist_for_each_entry_rcu(), used to prevent memory-consistency
 * problems on Alpha CPUs.  Regardless of the type of CPU, the
 * list-traversal primitive must be guarded by rcu_read_lock().
 */
static inline void hlist_add_head_rcu(struct hlist_node *n,
					struct hlist_head *h)
{
	struct hlist_node *first = h->first;
	n->next = first;
	n->pprev = &h->first;
	/* Fully initialize @n before making it reachable via h->first. */
	smp_wmb();
	if (first)
		first->pprev = &n->next;
	h->first = n;
}
687 | ||
/* next must be != NULL */
static inline void hlist_add_before(struct hlist_node *n,
					struct hlist_node *next)
{
	/* @n inherits @next's back-link, then becomes @next's predecessor. */
	n->pprev = next->pprev;
	n->next = next;
	next->pprev = &n->next;
	*(n->pprev) = n;
}

/*
 * hlist_add_after - insert @next immediately after @n.
 * Note the argument order: @n is the existing node, @next the new one.
 */
static inline void hlist_add_after(struct hlist_node *n,
					struct hlist_node *next)
{
	next->next = n->next;
	n->next = next;
	next->pprev = &n->next;

	if(next->next)
		next->next->pprev  = &next->next;
}
708 | ||
cf4ef014 PM |
/**
 * hlist_add_before_rcu - adds the specified element to the specified hlist
 * before the specified node while permitting racing traversals.
 * @n: the new element to add to the hash list.
 * @next: the existing element to add the new element before.
 *
 * The caller must take whatever precautions are necessary
 * (such as holding appropriate locks) to avoid racing
 * with another list-mutation primitive, such as hlist_add_head_rcu()
 * or hlist_del_rcu(), running on this same list.
 * However, it is perfectly legal to run concurrently with
 * the _rcu list-traversal primitives, such as
 * hlist_for_each_entry_rcu(), used to prevent memory-consistency
 * problems on Alpha CPUs.
 */
static inline void hlist_add_before_rcu(struct hlist_node *n,
					struct hlist_node *next)
{
	n->pprev = next->pprev;
	n->next = next;
	/* Fully initialize @n before linking it into the chain. */
	smp_wmb();
	next->pprev = &n->next;
	*(n->pprev) = n;
}
733 | ||
cf4ef014 PM |
/**
 * hlist_add_after_rcu - adds the specified element to the specified hlist
 * after the specified node while permitting racing traversals.
 * @prev: the existing element to add the new element after.
 * @n: the new element to add to the hash list.
 *
 * The caller must take whatever precautions are necessary
 * (such as holding appropriate locks) to avoid racing
 * with another list-mutation primitive, such as hlist_add_head_rcu()
 * or hlist_del_rcu(), running on this same list.
 * However, it is perfectly legal to run concurrently with
 * the _rcu list-traversal primitives, such as
 * hlist_for_each_entry_rcu(), used to prevent memory-consistency
 * problems on Alpha CPUs.
 */
static inline void hlist_add_after_rcu(struct hlist_node *prev,
				       struct hlist_node *n)
{
	n->next = prev->next;
	n->pprev = &prev->next;
	/* Fully initialize @n before making it reachable via prev->next. */
	smp_wmb();
	prev->next = n;
	if (n->next)
		n->next->pprev = &n->next;
}
759 | ||
1da177e4 LT |
/* Map an embedded hlist_node back to its containing structure. */
#define hlist_entry(ptr, type, member) container_of(ptr,type,member)

/* Walk a chain; terminates on the NULL ->next at the end. */
#define hlist_for_each(pos, head) \
	for (pos = (head)->first; pos && ({ prefetch(pos->next); 1; }); \
	     pos = pos->next)

/* As hlist_for_each(), but @n caches ->next so the body may delete @pos. */
#define hlist_for_each_safe(pos, n, head) \
	for (pos = (head)->first; pos && ({ n = pos->next; 1; }); \
	     pos = n)
769 | ||
1da177e4 LT |
/**
 * hlist_for_each_entry	- iterate over list of given type
 * @tpos: the type * to use as a loop counter.
 * @pos: the &struct hlist_node to use as a loop counter.
 * @head: the head for your list.
 * @member: the name of the hlist_node within the struct.
 *
 * Needs both cursors because the chain terminates on NULL rather
 * than by returning to the head: @pos drives the walk, @tpos is the
 * converted containing structure.
 */
#define hlist_for_each_entry(tpos, pos, head, member)			 \
	for (pos = (head)->first;					 \
	     pos && ({ prefetch(pos->next); 1;}) &&			 \
		({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \
	     pos = pos->next)

/**
 * hlist_for_each_entry_continue - iterate over a hlist continuing after existing point
 * @tpos: the type * to use as a loop counter.
 * @pos: the &struct hlist_node to use as a loop counter.
 * @member: the name of the hlist_node within the struct.
 */
#define hlist_for_each_entry_continue(tpos, pos, member)		 \
	for (pos = (pos)->next;						 \
	     pos && ({ prefetch(pos->next); 1;}) &&			 \
		({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \
	     pos = pos->next)

/**
 * hlist_for_each_entry_from - iterate over a hlist continuing from existing point
 * @tpos: the type * to use as a loop counter.
 * @pos: the &struct hlist_node to use as a loop counter.
 * @member: the name of the hlist_node within the struct.
 */
#define hlist_for_each_entry_from(tpos, pos, member)			 \
	for (; pos && ({ prefetch(pos->next); 1;}) &&			 \
		({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \
	     pos = pos->next)

/**
 * hlist_for_each_entry_safe - iterate over list of given type safe against removal of list entry
 * @tpos: the type * to use as a loop counter.
 * @pos: the &struct hlist_node to use as a loop counter.
 * @n: another &struct hlist_node to use as temporary storage
 * @head: the head for your list.
 * @member: the name of the hlist_node within the struct.
 */
#define hlist_for_each_entry_safe(tpos, pos, n, head, member) 		 \
	for (pos = (head)->first;					 \
	     pos && ({ n = pos->next; 1; }) && 				 \
		({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \
	     pos = n)
819 | ||
/**
 * hlist_for_each_entry_rcu - iterate over rcu list of given type
 * @tpos: the type * to use as a loop counter.
 * @pos: the &struct hlist_node to use as a loop counter.
 * @head: the head for your list.
 * @member: the name of the hlist_node within the struct.
 *
 * This list-traversal primitive may safely run concurrently with
 * the _rcu list-mutation primitives such as hlist_add_head_rcu()
 * as long as the traversal is guarded by rcu_read_lock().
 */
#define hlist_for_each_entry_rcu(tpos, pos, head, member)		 \
	for (pos = (head)->first;					 \
	     rcu_dereference(pos) && ({ prefetch(pos->next); 1;}) &&	 \
		({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \
	     pos = pos->next)
1da177e4 LT |
836 | |
837 | #else | |
838 | #warning "don't include kernel headers in userspace" | |
839 | #endif /* __KERNEL__ */ | |
840 | #endif |