Commit | Line | Data |
---|---|---|
a38e4082 DC |
1 | /* |
2 | * Copyright (c) 2013 Red Hat, Inc. and Parallels Inc. All rights reserved. | |
3 | * Authors: David Chinner and Glauber Costa | |
4 | * | |
5 | * Generic LRU infrastructure | |
6 | */ | |
7 | #ifndef _LRU_LIST_H | |
8 | #define _LRU_LIST_H | |
9 | ||
10 | #include <linux/list.h> | |
3b1d58a4 | 11 | #include <linux/nodemask.h> |
503c358c | 12 | #include <linux/shrinker.h> |
a38e4082 DC |
13 | |
/*
 * Return values for a list_lru_walk_cb callback: it must always return
 * exactly one of these to tell the walker what happened to the item.
 */
enum lru_status {
	LRU_REMOVED,		/* item removed from list */
	LRU_REMOVED_RETRY,	/* item removed, but lock has been
				   dropped and reacquired */
	LRU_ROTATE,		/* item referenced, give another pass */
	LRU_SKIP,		/* item cannot be locked, skip */
	LRU_RETRY,		/* item not freeable. May drop the lock
				   internally, but has to return locked. */
};
24 | ||
/*
 * Per-NUMA-node LRU state. Cacheline-aligned so that concurrent updates
 * on different nodes do not false-share.
 */
struct list_lru_node {
	/* protects ->list and ->nr_items */
	spinlock_t lock;
	/* the LRU list itself */
	struct list_head list;
	/* kept as signed so we can catch imbalance bugs */
	long nr_items;
} ____cacheline_aligned_in_smp;
31 | ||
struct list_lru {
	/* array of per-node state; allocated/freed by init/destroy */
	struct list_lru_node *node;
#ifdef CONFIG_MEMCG_KMEM
	/* NOTE(review): presumably links this lru into a global list used
	 * for memcg bookkeeping — confirm against mm/list_lru.c */
	struct list_head list;
#endif
};
38 | ||
/* Tear down @lru, releasing its per-node state. */
void list_lru_destroy(struct list_lru *lru);

/*
 * Initialize @lru. @key, when non-NULL, is used as the lockdep class key
 * for the per-node spinlocks. Returns 0 on success, negative errno
 * otherwise (presumably -ENOMEM on allocation failure — verify in
 * mm/list_lru.c).
 */
int list_lru_init_key(struct list_lru *lru, struct lock_class_key *key);

/* Initialize @lru with the default lock class. */
static inline int list_lru_init(struct list_lru *lru)
{
	return list_lru_init_key(lru, NULL);
}
a38e4082 DC |
45 | |
/**
 * list_lru_add: add an element to the lru list's tail
 * @list_lru: the lru pointer
 * @item: the item to be added.
 *
 * If the element is already part of a list, this function returns without
 * doing anything. Therefore the caller does not need to keep state about
 * whether or not the element already belongs in the list and is allowed to
 * lazy update it. Note however that this is valid for *a* list, not *this*
 * list. If the caller organizes itself in a way that elements can be in more
 * than one type of list, it is up to the caller to fully remove the item
 * from the previous list (with list_lru_del() for instance) before moving it
 * to @list_lru
 *
 * Return value: true if the list was updated, false otherwise
 */
bool list_lru_add(struct list_lru *lru, struct list_head *item);
63 | ||
/**
 * list_lru_del: delete an element from the lru list
 * @list_lru: the lru pointer
 * @item: the item to be deleted.
 *
 * This function works analogously to list_lru_add in terms of list
 * manipulation. The comments about an element already pertaining to
 * a list are also valid for list_lru_del.
 *
 * Return value: true if the list was updated, false otherwise
 */
bool list_lru_del(struct list_lru *lru, struct list_head *item);
76 | ||
/**
 * list_lru_count_node: return the number of objects currently held by @lru
 *                      on node @nid
 * @lru: the lru pointer.
 * @nid: the node id to count from.
 *
 * Always return a non-negative number, 0 for empty lists. There is no
 * guarantee that the list is not updated while the count is being computed.
 * Callers that want such a guarantee need to provide an outer lock.
 */
unsigned long list_lru_count_node(struct list_lru *lru, int nid);
503c358c VD |
87 | |
88 | static inline unsigned long list_lru_shrink_count(struct list_lru *lru, | |
89 | struct shrink_control *sc) | |
90 | { | |
91 | return list_lru_count_node(lru, sc->nid); | |
92 | } | |
93 | ||
6a4f496f GC |
94 | static inline unsigned long list_lru_count(struct list_lru *lru) |
95 | { | |
96 | long count = 0; | |
97 | int nid; | |
98 | ||
ff0b67ef | 99 | for_each_node_state(nid, N_NORMAL_MEMORY) |
6a4f496f GC |
100 | count += list_lru_count_node(lru, nid); |
101 | ||
102 | return count; | |
103 | } | |
a38e4082 DC |
104 | |
/* Callback invoked for each item during a walk; see enum lru_status. */
typedef enum lru_status
(*list_lru_walk_cb)(struct list_head *item, spinlock_t *lock, void *cb_arg);
/**
 * list_lru_walk_node: walk a list_lru, isolating and disposing freeable items.
 * @lru: the lru pointer.
 * @nid: the node id to scan from.
 * @isolate: callback function that is responsible for deciding what to do with
 *  the item currently being scanned
 * @cb_arg: opaque type that will be passed to @isolate
 * @nr_to_walk: how many items to scan.
 *
 * This function will scan all elements in a particular list_lru, calling the
 * @isolate callback for each of those items, along with the current list
 * spinlock and a caller-provided opaque. The @isolate callback can choose to
 * drop the lock internally, but *must* return with the lock held. The callback
 * will return an enum lru_status telling the list_lru infrastructure what to
 * do with the object being scanned.
 *
 * Please note that nr_to_walk does not mean how many objects will be freed,
 * just how many objects will be scanned.
 *
 * Return value: the number of objects effectively removed from the LRU.
 */
unsigned long list_lru_walk_node(struct list_lru *lru, int nid,
				 list_lru_walk_cb isolate, void *cb_arg,
				 unsigned long *nr_to_walk);
131 | ||
503c358c VD |
132 | static inline unsigned long |
133 | list_lru_shrink_walk(struct list_lru *lru, struct shrink_control *sc, | |
134 | list_lru_walk_cb isolate, void *cb_arg) | |
135 | { | |
136 | return list_lru_walk_node(lru, sc->nid, isolate, cb_arg, | |
137 | &sc->nr_to_scan); | |
138 | } | |
139 | ||
6a4f496f GC |
140 | static inline unsigned long |
141 | list_lru_walk(struct list_lru *lru, list_lru_walk_cb isolate, | |
142 | void *cb_arg, unsigned long nr_to_walk) | |
143 | { | |
144 | long isolated = 0; | |
145 | int nid; | |
146 | ||
ff0b67ef | 147 | for_each_node_state(nid, N_NORMAL_MEMORY) { |
6a4f496f GC |
148 | isolated += list_lru_walk_node(lru, nid, isolate, |
149 | cb_arg, &nr_to_walk); | |
150 | if (nr_to_walk <= 0) | |
151 | break; | |
152 | } | |
153 | return isolated; | |
154 | } | |
a38e4082 | 155 | #endif /* _LRU_LIST_H */ |