/*
 * Copyright (c) 2013 Red Hat, Inc. and Parallels Inc. All rights reserved.
 * Authors: David Chinner and Glauber Costa
 *
 * Generic LRU infrastructure
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mm.h>	/* for virt_to_page()/page_to_nid() */
#include <linux/list_lru.h>
#include <linux/slab.h>
#include <linux/mutex.h>
#include <linux/memcontrol.h>
#ifdef CONFIG_MEMCG_KMEM
static LIST_HEAD(list_lrus);
static DEFINE_MUTEX(list_lrus_mutex);

static void list_lru_register(struct list_lru *lru)
{
	mutex_lock(&list_lrus_mutex);
	list_add(&lru->list, &list_lrus);
	mutex_unlock(&list_lrus_mutex);
}

static void list_lru_unregister(struct list_lru *lru)
{
	mutex_lock(&list_lrus_mutex);
	list_del(&lru->list);
	mutex_unlock(&list_lrus_mutex);
}
#else
static void list_lru_register(struct list_lru *lru)
{
}

static void list_lru_unregister(struct list_lru *lru)
{
}
#endif /* CONFIG_MEMCG_KMEM */
#ifdef CONFIG_MEMCG_KMEM
static inline bool list_lru_memcg_aware(struct list_lru *lru)
{
	return !!lru->node[0].memcg_lrus;
}

static inline struct list_lru_one *
list_lru_from_memcg_idx(struct list_lru_node *nlru, int idx)
{
	/*
	 * The lock protects the array of per cgroup lists from relocation
	 * (see memcg_update_list_lru_node).
	 */
	lockdep_assert_held(&nlru->lock);
	if (nlru->memcg_lrus && idx >= 0)
		return nlru->memcg_lrus->lru[idx];

	return &nlru->lru;
}

static inline struct list_lru_one *
list_lru_from_kmem(struct list_lru_node *nlru, void *ptr)
{
	struct mem_cgroup *memcg;

	if (!nlru->memcg_lrus)
		return &nlru->lru;

	memcg = mem_cgroup_from_kmem(ptr);
	if (!memcg)
		return &nlru->lru;

	return list_lru_from_memcg_idx(nlru, memcg_cache_id(memcg));
}
#else
static inline bool list_lru_memcg_aware(struct list_lru *lru)
{
	return false;
}

static inline struct list_lru_one *
list_lru_from_memcg_idx(struct list_lru_node *nlru, int idx)
{
	return &nlru->lru;
}

static inline struct list_lru_one *
list_lru_from_kmem(struct list_lru_node *nlru, void *ptr)
{
	return &nlru->lru;
}
#endif /* CONFIG_MEMCG_KMEM */
bool list_lru_add(struct list_lru *lru, struct list_head *item)
{
	int nid = page_to_nid(virt_to_page(item));
	struct list_lru_node *nlru = &lru->node[nid];
	struct list_lru_one *l;

	spin_lock(&nlru->lock);
	l = list_lru_from_kmem(nlru, item);
	WARN_ON_ONCE(l->nr_items < 0);
	if (list_empty(item)) {
		list_add_tail(item, &l->list);
		l->nr_items++;
		spin_unlock(&nlru->lock);
		return true;
	}
	spin_unlock(&nlru->lock);
	return false;
}
EXPORT_SYMBOL_GPL(list_lru_add);
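
/*
 * Example usage (illustrative sketch, not part of this file): a cache
 * object embeds the list_head that is handed to list_lru_add() once the
 * object becomes unreferenced. "struct my_object", "obj_lru" and
 * my_object_put() are hypothetical names.
 *
 *	struct my_object {
 *		struct list_head lru;
 *		...
 *	};
 *
 *	static struct list_lru obj_lru;
 *
 *	static void my_object_put(struct my_object *obj)
 *	{
 *		// the object must come from the slab/page allocator so
 *		// that virt_to_page() can resolve its NUMA node
 *		list_lru_add(&obj_lru, &obj->lru);
 *	}
 */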

bool list_lru_del(struct list_lru *lru, struct list_head *item)
{
	int nid = page_to_nid(virt_to_page(item));
	struct list_lru_node *nlru = &lru->node[nid];
	struct list_lru_one *l;

	spin_lock(&nlru->lock);
	l = list_lru_from_kmem(nlru, item);
	if (!list_empty(item)) {
		list_del_init(item);
		l->nr_items--;
		WARN_ON_ONCE(l->nr_items < 0);
		spin_unlock(&nlru->lock);
		return true;
	}
	spin_unlock(&nlru->lock);
	return false;
}
EXPORT_SYMBOL_GPL(list_lru_del);

void list_lru_isolate(struct list_lru_one *list, struct list_head *item)
{
	list_del_init(item);
	list->nr_items--;
}
EXPORT_SYMBOL_GPL(list_lru_isolate);

void list_lru_isolate_move(struct list_lru_one *list, struct list_head *item,
			   struct list_head *head)
{
	list_move(item, head);
	list->nr_items--;
}
EXPORT_SYMBOL_GPL(list_lru_isolate_move);
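
/*
 * Example isolate callback (illustrative sketch): a shrinker-style
 * callback that moves every item it is handed onto a private dispose
 * list via list_lru_isolate_move(). The signature is list_lru_walk_cb
 * from <linux/list_lru.h>; my_isolate() is a hypothetical name.
 *
 *	static enum lru_status my_isolate(struct list_head *item,
 *					  struct list_lru_one *list,
 *					  spinlock_t *lock, void *cb_arg)
 *	{
 *		struct list_head *dispose = cb_arg;
 *
 *		list_lru_isolate_move(list, item, dispose);
 *		return LRU_REMOVED;
 *	}
 */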

static unsigned long __list_lru_count_one(struct list_lru *lru,
					  int nid, int memcg_idx)
{
	struct list_lru_node *nlru = &lru->node[nid];
	struct list_lru_one *l;
	unsigned long count;

	spin_lock(&nlru->lock);
	l = list_lru_from_memcg_idx(nlru, memcg_idx);
	WARN_ON_ONCE(l->nr_items < 0);
	count = l->nr_items;
	spin_unlock(&nlru->lock);

	return count;
}

unsigned long list_lru_count_one(struct list_lru *lru,
				 int nid, struct mem_cgroup *memcg)
{
	return __list_lru_count_one(lru, nid, memcg_cache_id(memcg));
}
EXPORT_SYMBOL_GPL(list_lru_count_one);

unsigned long list_lru_count_node(struct list_lru *lru, int nid)
{
	long count = 0;
	int memcg_idx;

	count += __list_lru_count_one(lru, nid, -1);
	if (list_lru_memcg_aware(lru)) {
		for_each_memcg_cache_index(memcg_idx)
			count += __list_lru_count_one(lru, nid, memcg_idx);
	}
	return count;
}
EXPORT_SYMBOL_GPL(list_lru_count_node);

static unsigned long
__list_lru_walk_one(struct list_lru *lru, int nid, int memcg_idx,
		    list_lru_walk_cb isolate, void *cb_arg,
		    unsigned long *nr_to_walk)
{
	struct list_lru_node *nlru = &lru->node[nid];
	struct list_lru_one *l;
	struct list_head *item, *n;
	unsigned long isolated = 0;

	spin_lock(&nlru->lock);
	l = list_lru_from_memcg_idx(nlru, memcg_idx);
restart:
	list_for_each_safe(item, n, &l->list) {
		enum lru_status ret;

		/*
		 * decrement nr_to_walk first so that we don't livelock if we
		 * get stuck on large numbers of LRU_RETRY items
		 */
		if (!*nr_to_walk)
			break;
		--*nr_to_walk;

		ret = isolate(item, l, &nlru->lock, cb_arg);
		switch (ret) {
		case LRU_REMOVED_RETRY:
			assert_spin_locked(&nlru->lock);
			/* fall through */
		case LRU_REMOVED:
			isolated++;
			/*
			 * If the lru lock has been dropped, our list
			 * traversal is now invalid and so we have to
			 * restart from scratch.
			 */
			if (ret == LRU_REMOVED_RETRY)
				goto restart;
			break;
		case LRU_ROTATE:
			list_move_tail(item, &l->list);
			break;
		case LRU_SKIP:
			break;
		case LRU_RETRY:
			/*
			 * The lru lock has been dropped, our list traversal is
			 * now invalid and so we have to restart from scratch.
			 */
			assert_spin_locked(&nlru->lock);
			goto restart;
		default:
			BUG();
		}
	}

	spin_unlock(&nlru->lock);
	return isolated;
}

unsigned long
list_lru_walk_one(struct list_lru *lru, int nid, struct mem_cgroup *memcg,
		  list_lru_walk_cb isolate, void *cb_arg,
		  unsigned long *nr_to_walk)
{
	return __list_lru_walk_one(lru, nid, memcg_cache_id(memcg),
				   isolate, cb_arg, nr_to_walk);
}
EXPORT_SYMBOL_GPL(list_lru_walk_one);

unsigned long list_lru_walk_node(struct list_lru *lru, int nid,
				 list_lru_walk_cb isolate, void *cb_arg,
				 unsigned long *nr_to_walk)
{
	long isolated = 0;
	int memcg_idx;

	isolated += __list_lru_walk_one(lru, nid, -1, isolate, cb_arg,
					nr_to_walk);
	if (*nr_to_walk > 0 && list_lru_memcg_aware(lru)) {
		for_each_memcg_cache_index(memcg_idx) {
			isolated += __list_lru_walk_one(lru, nid, memcg_idx,
							isolate, cb_arg,
							nr_to_walk);
			if (*nr_to_walk <= 0)
				break;
		}
	}
	return isolated;
}
EXPORT_SYMBOL_GPL(list_lru_walk_node);
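
/*
 * Example walk (illustrative sketch): scan at most 32 items on node 0,
 * reusing the hypothetical my_isolate()/obj_lru names from the sketches
 * above, then dispose of whatever was isolated.
 *
 *	LIST_HEAD(dispose);
 *	unsigned long nr_to_walk = 32;
 *
 *	list_lru_walk_node(&obj_lru, 0, my_isolate, &dispose, &nr_to_walk);
 *	// free the objects now sitting on "dispose"
 */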

static void init_one_lru(struct list_lru_one *l)
{
	INIT_LIST_HEAD(&l->list);
	l->nr_items = 0;
}

#ifdef CONFIG_MEMCG_KMEM
static void __memcg_destroy_list_lru_node(struct list_lru_memcg *memcg_lrus,
					  int begin, int end)
{
	int i;

	for (i = begin; i < end; i++)
		kfree(memcg_lrus->lru[i]);
}

static int __memcg_init_list_lru_node(struct list_lru_memcg *memcg_lrus,
				      int begin, int end)
{
	int i;

	for (i = begin; i < end; i++) {
		struct list_lru_one *l;

		l = kmalloc(sizeof(struct list_lru_one), GFP_KERNEL);
		if (!l)
			goto fail;

		init_one_lru(l);
		memcg_lrus->lru[i] = l;
	}
	return 0;
fail:
	/*
	 * Indices begin..i-1 were successfully allocated, so free exactly
	 * those (passing "i - 1" here would leak the last allocated entry).
	 */
	__memcg_destroy_list_lru_node(memcg_lrus, begin, i);
	return -ENOMEM;
}

static int memcg_init_list_lru_node(struct list_lru_node *nlru)
{
	int size = memcg_nr_cache_ids;

	nlru->memcg_lrus = kmalloc(size * sizeof(void *), GFP_KERNEL);
	if (!nlru->memcg_lrus)
		return -ENOMEM;

	if (__memcg_init_list_lru_node(nlru->memcg_lrus, 0, size)) {
		kfree(nlru->memcg_lrus);
		return -ENOMEM;
	}

	return 0;
}

static void memcg_destroy_list_lru_node(struct list_lru_node *nlru)
{
	__memcg_destroy_list_lru_node(nlru->memcg_lrus, 0, memcg_nr_cache_ids);
	kfree(nlru->memcg_lrus);
}

static int memcg_update_list_lru_node(struct list_lru_node *nlru,
				      int old_size, int new_size)
{
	struct list_lru_memcg *old, *new;

	BUG_ON(old_size > new_size);

	old = nlru->memcg_lrus;
	new = kmalloc(new_size * sizeof(void *), GFP_KERNEL);
	if (!new)
		return -ENOMEM;

	if (__memcg_init_list_lru_node(new, old_size, new_size)) {
		kfree(new);
		return -ENOMEM;
	}

	memcpy(new, old, old_size * sizeof(void *));

	/*
	 * The lock guarantees that we won't race with a reader
	 * (see list_lru_from_memcg_idx).
	 *
	 * Since list_lru_{add,del} may be called under an IRQ-safe lock,
	 * we have to use IRQ-safe primitives here to avoid deadlock.
	 */
	spin_lock_irq(&nlru->lock);
	nlru->memcg_lrus = new;
	spin_unlock_irq(&nlru->lock);

	kfree(old);
	return 0;
}

static void memcg_cancel_update_list_lru_node(struct list_lru_node *nlru,
					      int old_size, int new_size)
{
	/* do not bother shrinking the array back to the old size, because we
	 * cannot handle allocation failures here */
	__memcg_destroy_list_lru_node(nlru->memcg_lrus, old_size, new_size);
}

static int memcg_init_list_lru(struct list_lru *lru, bool memcg_aware)
{
	int i;

	for (i = 0; i < nr_node_ids; i++) {
		if (!memcg_aware)
			lru->node[i].memcg_lrus = NULL;
		else if (memcg_init_list_lru_node(&lru->node[i]))
			goto fail;
	}
	return 0;
fail:
	for (i = i - 1; i >= 0; i--)
		memcg_destroy_list_lru_node(&lru->node[i]);
	return -ENOMEM;
}

static void memcg_destroy_list_lru(struct list_lru *lru)
{
	int i;

	if (!list_lru_memcg_aware(lru))
		return;

	for (i = 0; i < nr_node_ids; i++)
		memcg_destroy_list_lru_node(&lru->node[i]);
}

static int memcg_update_list_lru(struct list_lru *lru,
				 int old_size, int new_size)
{
	int i;

	if (!list_lru_memcg_aware(lru))
		return 0;

	for (i = 0; i < nr_node_ids; i++) {
		if (memcg_update_list_lru_node(&lru->node[i],
					       old_size, new_size))
			goto fail;
	}
	return 0;
fail:
	for (i = i - 1; i >= 0; i--)
		memcg_cancel_update_list_lru_node(&lru->node[i],
						  old_size, new_size);
	return -ENOMEM;
}

static void memcg_cancel_update_list_lru(struct list_lru *lru,
					 int old_size, int new_size)
{
	int i;

	if (!list_lru_memcg_aware(lru))
		return;

	for (i = 0; i < nr_node_ids; i++)
		memcg_cancel_update_list_lru_node(&lru->node[i],
						  old_size, new_size);
}

int memcg_update_all_list_lrus(int new_size)
{
	int ret = 0;
	struct list_lru *lru;
	int old_size = memcg_nr_cache_ids;

	mutex_lock(&list_lrus_mutex);
	list_for_each_entry(lru, &list_lrus, list) {
		ret = memcg_update_list_lru(lru, old_size, new_size);
		if (ret)
			goto fail;
	}
out:
	mutex_unlock(&list_lrus_mutex);
	return ret;
fail:
	list_for_each_entry_continue_reverse(lru, &list_lrus, list)
		memcg_cancel_update_list_lru(lru, old_size, new_size);
	goto out;
}
#else
static int memcg_init_list_lru(struct list_lru *lru, bool memcg_aware)
{
	return 0;
}

static void memcg_destroy_list_lru(struct list_lru *lru)
{
}
#endif /* CONFIG_MEMCG_KMEM */

int __list_lru_init(struct list_lru *lru, bool memcg_aware,
		    struct lock_class_key *key)
{
	int i;
	size_t size = sizeof(*lru->node) * nr_node_ids;
	int err = -ENOMEM;

	memcg_get_cache_ids();

	lru->node = kzalloc(size, GFP_KERNEL);
	if (!lru->node)
		goto out;

	for (i = 0; i < nr_node_ids; i++) {
		spin_lock_init(&lru->node[i].lock);
		if (key)
			lockdep_set_class(&lru->node[i].lock, key);
		init_one_lru(&lru->node[i].lru);
	}

	err = memcg_init_list_lru(lru, memcg_aware);
	if (err) {
		kfree(lru->node);
		/* clear so a later list_lru_destroy() sees an unused lru */
		lru->node = NULL;
		goto out;
	}

	list_lru_register(lru);
out:
	memcg_put_cache_ids();
	return err;
}
EXPORT_SYMBOL_GPL(__list_lru_init);
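
/*
 * Example initialization (illustrative sketch): callers normally use the
 * list_lru_init*() wrappers from <linux/list_lru.h>, which pass the
 * appropriate memcg_aware/key arguments down to __list_lru_init().
 * my_cache_init() and obj_lru are hypothetical names.
 *
 *	static struct list_lru obj_lru;
 *
 *	static int __init my_cache_init(void)
 *	{
 *		// memcg-aware: items get per-cgroup lists
 *		return list_lru_init_memcg(&obj_lru);
 *	}
 */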

void list_lru_destroy(struct list_lru *lru)
{
	/* Already destroyed or not yet initialized? */
	if (!lru->node)
		return;

	memcg_get_cache_ids();

	list_lru_unregister(lru);

	memcg_destroy_list_lru(lru);
	kfree(lru->node);
	lru->node = NULL;

	memcg_put_cache_ids();
}
EXPORT_SYMBOL_GPL(list_lru_destroy);