/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (c) 2013 Red Hat, Inc. and Parallels Inc. All rights reserved.
 * Authors: David Chinner and Glauber Costa
 *
 * Generic LRU infrastructure
 */
#ifndef _LRU_LIST_H
#define _LRU_LIST_H

#include <linux/list.h>
#include <linux/nodemask.h>
#include <linux/shrinker.h>

struct mem_cgroup;

/* list_lru_walk_cb must always return one of these values */
enum lru_status {
	LRU_REMOVED,		/* item removed from list */
	LRU_REMOVED_RETRY,	/* item removed, but lock has been
				   dropped and reacquired */
	LRU_ROTATE,		/* item referenced, give another pass */
	LRU_SKIP,		/* item cannot be locked, skip */
	LRU_RETRY,		/* item not freeable. May drop the lock
				   internally, but has to return locked. */
};

struct list_lru_one {
	struct list_head	list;
	/* may become negative during memcg reparenting */
	long			nr_items;
};

struct list_lru_memcg {
	struct rcu_head		rcu;
	/* array of per cgroup lists, indexed by memcg_cache_id */
	struct list_lru_one	*lru[0];
};

struct list_lru_node {
	/* protects all lists on the node, including per cgroup */
	spinlock_t		lock;
	/* global list, used for the root cgroup in cgroup aware lrus */
	struct list_lru_one	lru;
#ifdef CONFIG_MEMCG_KMEM
	/* for cgroup-aware lrus, points to the per-cgroup lists; otherwise NULL */
	struct list_lru_memcg	__rcu *memcg_lrus;
#endif
	long			nr_items;
} ____cacheline_aligned_in_smp;

struct list_lru {
	struct list_lru_node	*node;
#ifdef CONFIG_MEMCG_KMEM
	struct list_head	list;
	int			shrinker_id;
#endif
};

void list_lru_destroy(struct list_lru *lru);
int __list_lru_init(struct list_lru *lru, bool memcg_aware,
		    struct lock_class_key *key, struct shrinker *shrinker);

#define list_lru_init(lru)				\
	__list_lru_init((lru), false, NULL, NULL)
#define list_lru_init_key(lru, key)			\
	__list_lru_init((lru), false, (key), NULL)
#define list_lru_init_memcg(lru, shrinker)		\
	__list_lru_init((lru), true, NULL, shrinker)

int memcg_update_all_list_lrus(int num_memcgs);
void memcg_drain_all_list_lrus(int src_idx, struct mem_cgroup *dst_memcg);
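
/*
 * Illustrative sketch (not part of this API; "my_lru" and "my_shrinker"
 * are hypothetical names): a memcg-aware user typically pairs the lru
 * with its shrinker at init time, checks the return value, and tears the
 * lru down again with list_lru_destroy():
 *
 *	static struct list_lru my_lru;
 *	static struct shrinker my_shrinker;
 *
 *	int err = list_lru_init_memcg(&my_lru, &my_shrinker);
 *	if (err)
 *		return err;
 *	...
 *	list_lru_destroy(&my_lru);
 */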

/**
 * list_lru_add: add an element to the lru list's tail
 * @list_lru: the lru pointer
 * @item: the item to be added.
 *
 * If the element is already part of a list, this function returns without
 * doing anything. Therefore the caller does not need to keep track of
 * whether or not the element already belongs to a list, and may lazily
 * update it. Note however that this holds for *a* list, not *this* list:
 * if the caller organizes its objects so that they can be on more than
 * one type of list, it is up to the caller to fully remove the item from
 * the previous list (with list_lru_del() for instance) before moving it
 * to @list_lru.
 *
 * Return value: true if the list was updated, false otherwise
 */
bool list_lru_add(struct list_lru *lru, struct list_head *item);
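
/*
 * Illustrative sketch ("struct my_object" and the bookkeeping helper are
 * hypothetical): callers embed a struct list_head in the object they want
 * to track and hand that member to list_lru_add(); the return value tells
 * whether the item was actually inserted:
 *
 *	struct my_object {
 *		struct list_head lru;
 *	};
 *
 *	if (list_lru_add(&my_lru, &obj->lru))
 *		count_cached_object();
 */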

/**
 * list_lru_del: delete an element from the lru list
 * @list_lru: the lru pointer
 * @item: the item to be deleted.
 *
 * This function works analogously to list_lru_add() in terms of list
 * manipulation. The comments about an element already belonging to
 * a list are also valid for list_lru_del().
 *
 * Return value: true if the list was updated, false otherwise
 */
bool list_lru_del(struct list_lru *lru, struct list_head *item);
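
/*
 * Illustrative sketch (same hypothetical object as above): the removal
 * path mirrors the insertion path:
 *
 *	if (list_lru_del(&my_lru, &obj->lru))
 *		uncount_cached_object();
 */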

/**
 * list_lru_count_one: return the number of objects currently held by @lru
 * @lru: the lru pointer.
 * @nid: the node id to count from.
 * @memcg: the cgroup to count from.
 *
 * Always returns a non-negative number, 0 for empty lists. There is no
 * guarantee that the list is not updated while the count is being computed.
 * Callers that want such a guarantee need to provide an outer lock.
 */
unsigned long list_lru_count_one(struct list_lru *lru,
				 int nid, struct mem_cgroup *memcg);
unsigned long list_lru_count_node(struct list_lru *lru, int nid);

static inline unsigned long list_lru_shrink_count(struct list_lru *lru,
						  struct shrink_control *sc)
{
	return list_lru_count_one(lru, sc->nid, sc->memcg);
}

static inline unsigned long list_lru_count(struct list_lru *lru)
{
	long count = 0;
	int nid;

	for_each_node_state(nid, N_NORMAL_MEMORY)
		count += list_lru_count_node(lru, nid);

	return count;
}

void list_lru_isolate(struct list_lru_one *list, struct list_head *item);
void list_lru_isolate_move(struct list_lru_one *list, struct list_head *item,
			   struct list_head *head);

typedef enum lru_status (*list_lru_walk_cb)(struct list_head *item,
		struct list_lru_one *list, spinlock_t *lock, void *cb_arg);

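/*
 * Illustrative callback sketch ("my_object" and its locking helpers are
 * hypothetical): the callback runs with the list spinlock held, moves the
 * item onto a private dispose list via list_lru_isolate_move(), and
 * reports what it did through the enum lru_status return value:
 *
 *	static enum lru_status my_isolate(struct list_head *item,
 *			struct list_lru_one *list, spinlock_t *lock,
 *			void *cb_arg)
 *	{
 *		struct list_head *freeable = cb_arg;
 *		struct my_object *obj = container_of(item, struct my_object, lru);
 *
 *		if (!my_object_trylock(obj))
 *			return LRU_SKIP;
 *
 *		list_lru_isolate_move(list, item, freeable);
 *		my_object_unlock(obj);
 *		return LRU_REMOVED;
 *	}
 */
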
/**
 * list_lru_walk_one: walk a list_lru, isolating and disposing freeable items.
 * @lru: the lru pointer.
 * @nid: the node id to scan from.
 * @memcg: the cgroup to scan from.
 * @isolate: callback function that is responsible for deciding what to do
 *	with the item currently being scanned
 * @cb_arg: opaque type that will be passed to @isolate
 * @nr_to_walk: how many items to scan.
 *
 * This function will scan all elements in a particular list_lru, calling the
 * @isolate callback for each of those items, along with the current list
 * spinlock and the caller-provided opaque argument. The @isolate callback
 * can choose to drop the lock internally, but *must* return with the lock
 * held. The callback will return an enum lru_status telling the list_lru
 * infrastructure what to do with the object being scanned.
 *
 * Please note that @nr_to_walk does not mean how many objects will be freed,
 * just how many objects will be scanned.
 *
 * Return value: the number of objects effectively removed from the LRU.
 */
unsigned long list_lru_walk_one(struct list_lru *lru,
				int nid, struct mem_cgroup *memcg,
				list_lru_walk_cb isolate, void *cb_arg,
				unsigned long *nr_to_walk);
/**
 * list_lru_walk_one_irq: walk a list_lru, isolating and disposing freeable items.
 * @lru: the lru pointer.
 * @nid: the node id to scan from.
 * @memcg: the cgroup to scan from.
 * @isolate: callback function that is responsible for deciding what to do
 *	with the item currently being scanned
 * @cb_arg: opaque type that will be passed to @isolate
 * @nr_to_walk: how many items to scan.
 *
 * Same as list_lru_walk_one() except that the spinlock is acquired with
 * spin_lock_irq().
 */
unsigned long list_lru_walk_one_irq(struct list_lru *lru,
				    int nid, struct mem_cgroup *memcg,
				    list_lru_walk_cb isolate, void *cb_arg,
				    unsigned long *nr_to_walk);
unsigned long list_lru_walk_node(struct list_lru *lru, int nid,
				 list_lru_walk_cb isolate, void *cb_arg,
				 unsigned long *nr_to_walk);

static inline unsigned long
list_lru_shrink_walk(struct list_lru *lru, struct shrink_control *sc,
		     list_lru_walk_cb isolate, void *cb_arg)
{
	return list_lru_walk_one(lru, sc->nid, sc->memcg, isolate, cb_arg,
				 &sc->nr_to_scan);
}

static inline unsigned long
list_lru_shrink_walk_irq(struct list_lru *lru, struct shrink_control *sc,
			 list_lru_walk_cb isolate, void *cb_arg)
{
	return list_lru_walk_one_irq(lru, sc->nid, sc->memcg, isolate, cb_arg,
				     &sc->nr_to_scan);
}

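/*
 * Illustrative shrinker glue (sketch; "my_lru", "my_isolate" and
 * "my_dispose_list" are hypothetical): caches typically implement their
 * shrinker's count_objects/scan_objects callbacks with the two helpers
 * above, disposing of the isolated items outside the lru lock:
 *
 *	static unsigned long my_count(struct shrinker *s,
 *				      struct shrink_control *sc)
 *	{
 *		return list_lru_shrink_count(&my_lru, sc);
 *	}
 *
 *	static unsigned long my_scan(struct shrinker *s,
 *				     struct shrink_control *sc)
 *	{
 *		LIST_HEAD(freeable);
 *		unsigned long freed;
 *
 *		freed = list_lru_shrink_walk(&my_lru, sc, my_isolate, &freeable);
 *		my_dispose_list(&freeable);
 *		return freed;
 *	}
 */
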
static inline unsigned long
list_lru_walk(struct list_lru *lru, list_lru_walk_cb isolate,
	      void *cb_arg, unsigned long nr_to_walk)
{
	long isolated = 0;
	int nid;

	for_each_node_state(nid, N_NORMAL_MEMORY) {
		isolated += list_lru_walk_node(lru, nid, isolate,
					       cb_arg, &nr_to_walk);
		if (nr_to_walk <= 0)
			break;
	}
	return isolated;
}
#endif /* _LRU_LIST_H */