// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2013 Red Hat, Inc. and Parallels Inc. All rights reserved.
 * Authors: David Chinner and Glauber Costa
 *
 * Generic LRU infrastructure
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/list_lru.h>
#include <linux/slab.h>
#include <linux/mutex.h>
#include <linux/memcontrol.h>
#include "slab.h"

#ifdef CONFIG_MEMCG_KMEM
static LIST_HEAD(memcg_list_lrus);
static DEFINE_MUTEX(list_lrus_mutex);

static inline bool list_lru_memcg_aware(struct list_lru *lru)
{
	return lru->memcg_aware;
}

static void list_lru_register(struct list_lru *lru)
{
	if (!list_lru_memcg_aware(lru))
		return;

	mutex_lock(&list_lrus_mutex);
	list_add(&lru->list, &memcg_list_lrus);
	mutex_unlock(&list_lrus_mutex);
}

static void list_lru_unregister(struct list_lru *lru)
{
	if (!list_lru_memcg_aware(lru))
		return;

	mutex_lock(&list_lrus_mutex);
	list_del(&lru->list);
	mutex_unlock(&list_lrus_mutex);
}

static int lru_shrinker_id(struct list_lru *lru)
{
	return lru->shrinker_id;
}

static inline struct list_lru_one *
list_lru_from_memcg_idx(struct list_lru_node *nlru, int idx)
{
	struct list_lru_memcg *memcg_lrus;
	/*
	 * Either lock or RCU protects the array of per cgroup lists
	 * from relocation (see memcg_update_list_lru_node).
	 */
	memcg_lrus = rcu_dereference_check(nlru->memcg_lrus,
					   lockdep_is_held(&nlru->lock));
	if (memcg_lrus && idx >= 0)
		return memcg_lrus->lru[idx];
	return &nlru->lru;
}

static inline struct list_lru_one *
list_lru_from_kmem(struct list_lru_node *nlru, void *ptr,
		   struct mem_cgroup **memcg_ptr)
{
	struct list_lru_one *l = &nlru->lru;
	struct mem_cgroup *memcg = NULL;

	if (!nlru->memcg_lrus)
		goto out;

	memcg = mem_cgroup_from_obj(ptr);
	if (!memcg)
		goto out;

	l = list_lru_from_memcg_idx(nlru, memcg_cache_id(memcg));
out:
	if (memcg_ptr)
		*memcg_ptr = memcg;
	return l;
}
#else
static void list_lru_register(struct list_lru *lru)
{
}

static void list_lru_unregister(struct list_lru *lru)
{
}

static int lru_shrinker_id(struct list_lru *lru)
{
	return -1;
}

static inline bool list_lru_memcg_aware(struct list_lru *lru)
{
	return false;
}

static inline struct list_lru_one *
list_lru_from_memcg_idx(struct list_lru_node *nlru, int idx)
{
	return &nlru->lru;
}

static inline struct list_lru_one *
list_lru_from_kmem(struct list_lru_node *nlru, void *ptr,
		   struct mem_cgroup **memcg_ptr)
{
	if (memcg_ptr)
		*memcg_ptr = NULL;
	return &nlru->lru;
}
#endif /* CONFIG_MEMCG_KMEM */

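/*
 * list_lru_add() picks the per-node list from the page backing @item and,
 * when the lru is memcg aware, the per-memcg sublist for the object's
 * cgroup, so callers only pass the list_head embedded in their object.
 * Returns true if the item was added, i.e. it was not already on a list.
 */
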
bool list_lru_add(struct list_lru *lru, struct list_head *item)
{
	int nid = page_to_nid(virt_to_page(item));
	struct list_lru_node *nlru = &lru->node[nid];
	struct mem_cgroup *memcg;
	struct list_lru_one *l;

	spin_lock(&nlru->lock);
	if (list_empty(item)) {
		l = list_lru_from_kmem(nlru, item, &memcg);
		list_add_tail(item, &l->list);
		/* Set shrinker bit if the first element was added */
		if (!l->nr_items++)
			set_shrinker_bit(memcg, nid,
					 lru_shrinker_id(lru));
		nlru->nr_items++;
		spin_unlock(&nlru->lock);
		return true;
	}
	spin_unlock(&nlru->lock);
	return false;
}
EXPORT_SYMBOL_GPL(list_lru_add);

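/*
 * list_lru_del() is the inverse of list_lru_add(): the node is again derived
 * from the page backing @item, and the item is unhooked under the same
 * per-node lock. Returns true if the item was actually on a list.
 */
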
bool list_lru_del(struct list_lru *lru, struct list_head *item)
{
	int nid = page_to_nid(virt_to_page(item));
	struct list_lru_node *nlru = &lru->node[nid];
	struct list_lru_one *l;

	spin_lock(&nlru->lock);
	if (!list_empty(item)) {
		l = list_lru_from_kmem(nlru, item, NULL);
		list_del_init(item);
		l->nr_items--;
		nlru->nr_items--;
		spin_unlock(&nlru->lock);
		return true;
	}
	spin_unlock(&nlru->lock);
	return false;
}
EXPORT_SYMBOL_GPL(list_lru_del);

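/*
 * The two isolate helpers below are meant to be called from a walk callback
 * while the per-node lru lock is held: they unhook (or move) the item and
 * adjust only the sublist's nr_items; the walk loop itself accounts the
 * node-level counter when the callback reports LRU_REMOVED.
 */
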
void list_lru_isolate(struct list_lru_one *list, struct list_head *item)
{
	list_del_init(item);
	list->nr_items--;
}
EXPORT_SYMBOL_GPL(list_lru_isolate);

void list_lru_isolate_move(struct list_lru_one *list, struct list_head *item,
			   struct list_head *head)
{
	list_move(item, head);
	list->nr_items--;
}
EXPORT_SYMBOL_GPL(list_lru_isolate_move);

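/*
 * The per-memcg count below is read locklessly (only RCU protects the
 * per-memcg array), so a racing walker can make the snapshot go transiently
 * negative; such values are clamped to zero before returning.
 */
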
unsigned long list_lru_count_one(struct list_lru *lru,
				 int nid, struct mem_cgroup *memcg)
{
	struct list_lru_node *nlru = &lru->node[nid];
	struct list_lru_one *l;
	long count;

	rcu_read_lock();
	l = list_lru_from_memcg_idx(nlru, memcg_cache_id(memcg));
	count = READ_ONCE(l->nr_items);
	rcu_read_unlock();

	if (unlikely(count < 0))
		count = 0;
	return count;
}
EXPORT_SYMBOL_GPL(list_lru_count_one);

unsigned long list_lru_count_node(struct list_lru *lru, int nid)
{
	struct list_lru_node *nlru;

	nlru = &lru->node[nid];
	return nlru->nr_items;
}
EXPORT_SYMBOL_GPL(list_lru_count_node);

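/*
 * __list_lru_walk_one() is the core of the walk API. The caller must hold
 * nlru->lock; the isolate callback is handed that lock and may drop it, but
 * must then report LRU_RETRY or LRU_REMOVED_RETRY so the traversal is
 * restarted, since the list may have changed underneath us.
 */
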
static unsigned long
__list_lru_walk_one(struct list_lru_node *nlru, int memcg_idx,
		    list_lru_walk_cb isolate, void *cb_arg,
		    unsigned long *nr_to_walk)
{
	struct list_lru_one *l;
	struct list_head *item, *n;
	unsigned long isolated = 0;

	l = list_lru_from_memcg_idx(nlru, memcg_idx);
restart:
	list_for_each_safe(item, n, &l->list) {
		enum lru_status ret;

		/*
		 * decrement nr_to_walk first so that we don't livelock if we
		 * get stuck on large numbers of LRU_RETRY items
		 */
		if (!*nr_to_walk)
			break;
		--*nr_to_walk;

		ret = isolate(item, l, &nlru->lock, cb_arg);
		switch (ret) {
		case LRU_REMOVED_RETRY:
			assert_spin_locked(&nlru->lock);
			fallthrough;
		case LRU_REMOVED:
			isolated++;
			nlru->nr_items--;
			/*
			 * If the lru lock has been dropped, our list
			 * traversal is now invalid and so we have to
			 * restart from scratch.
			 */
			if (ret == LRU_REMOVED_RETRY)
				goto restart;
			break;
		case LRU_ROTATE:
			list_move_tail(item, &l->list);
			break;
		case LRU_SKIP:
			break;
		case LRU_RETRY:
			/*
			 * The lru lock has been dropped, our list traversal is
			 * now invalid and so we have to restart from scratch.
			 */
			assert_spin_locked(&nlru->lock);
			goto restart;
		default:
			BUG();
		}
	}
	return isolated;
}

unsigned long
list_lru_walk_one(struct list_lru *lru, int nid, struct mem_cgroup *memcg,
		  list_lru_walk_cb isolate, void *cb_arg,
		  unsigned long *nr_to_walk)
{
	struct list_lru_node *nlru = &lru->node[nid];
	unsigned long ret;

	spin_lock(&nlru->lock);
	ret = __list_lru_walk_one(nlru, memcg_cache_id(memcg), isolate, cb_arg,
				  nr_to_walk);
	spin_unlock(&nlru->lock);
	return ret;
}
EXPORT_SYMBOL_GPL(list_lru_walk_one);

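/*
 * A typical consumer pairs a list_lru with a shrinker. As a rough sketch
 * (the callback names below are made up for illustration; real users such
 * as the dentry and inode caches add per-object dispose logic):
 *
 *	static unsigned long demo_count(struct shrinker *s,
 *					struct shrink_control *sc)
 *	{
 *		return list_lru_shrink_count(&demo_lru, sc);
 *	}
 *
 *	static unsigned long demo_scan(struct shrinker *s,
 *				       struct shrink_control *sc)
 *	{
 *		return list_lru_shrink_walk(&demo_lru, sc, demo_isolate, NULL);
 *	}
 *
 * where demo_isolate() is a list_lru_walk_cb that calls list_lru_isolate()
 * and returns an enum lru_status.
 */
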
unsigned long
list_lru_walk_one_irq(struct list_lru *lru, int nid, struct mem_cgroup *memcg,
		      list_lru_walk_cb isolate, void *cb_arg,
		      unsigned long *nr_to_walk)
{
	struct list_lru_node *nlru = &lru->node[nid];
	unsigned long ret;

	spin_lock_irq(&nlru->lock);
	ret = __list_lru_walk_one(nlru, memcg_cache_id(memcg), isolate, cb_arg,
				  nr_to_walk);
	spin_unlock_irq(&nlru->lock);
	return ret;
}

unsigned long list_lru_walk_node(struct list_lru *lru, int nid,
				 list_lru_walk_cb isolate, void *cb_arg,
				 unsigned long *nr_to_walk)
{
	long isolated = 0;
	int memcg_idx;

	isolated += list_lru_walk_one(lru, nid, NULL, isolate, cb_arg,
				      nr_to_walk);
	if (*nr_to_walk > 0 && list_lru_memcg_aware(lru)) {
		for_each_memcg_cache_index(memcg_idx) {
			struct list_lru_node *nlru = &lru->node[nid];

			spin_lock(&nlru->lock);
			isolated += __list_lru_walk_one(nlru, memcg_idx,
							isolate, cb_arg,
							nr_to_walk);
			spin_unlock(&nlru->lock);

			if (*nr_to_walk <= 0)
				break;
		}
	}
	return isolated;
}
EXPORT_SYMBOL_GPL(list_lru_walk_node);

static void init_one_lru(struct list_lru_one *l)
{
	INIT_LIST_HEAD(&l->list);
	l->nr_items = 0;
}

#ifdef CONFIG_MEMCG_KMEM

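/*
 * When the lru is memcg aware, each list_lru_node additionally carries an
 * RCU-protected array of list_lru_one pointers, indexed by kmemcg id. The
 * array is sized by memcg_nr_cache_ids and reallocated (never shrunk) when
 * that id space grows; see memcg_update_list_lru_node() below.
 */
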
static void __memcg_destroy_list_lru_node(struct list_lru_memcg *memcg_lrus,
					  int begin, int end)
{
	int i;

	for (i = begin; i < end; i++)
		kfree(memcg_lrus->lru[i]);
}

static int __memcg_init_list_lru_node(struct list_lru_memcg *memcg_lrus,
				      int begin, int end)
{
	int i;

	for (i = begin; i < end; i++) {
		struct list_lru_one *l;

		l = kmalloc(sizeof(struct list_lru_one), GFP_KERNEL);
		if (!l)
			goto fail;
		init_one_lru(l);
		memcg_lrus->lru[i] = l;
	}
	return 0;
fail:
	__memcg_destroy_list_lru_node(memcg_lrus, begin, i);
	return -ENOMEM;
}

static int memcg_init_list_lru_node(struct list_lru_node *nlru)
{
	struct list_lru_memcg *memcg_lrus;
	int size = memcg_nr_cache_ids;

	memcg_lrus = kvmalloc(struct_size(memcg_lrus, lru, size), GFP_KERNEL);
	if (!memcg_lrus)
		return -ENOMEM;

	if (__memcg_init_list_lru_node(memcg_lrus, 0, size)) {
		kvfree(memcg_lrus);
		return -ENOMEM;
	}
	RCU_INIT_POINTER(nlru->memcg_lrus, memcg_lrus);
	return 0;
}

static void memcg_destroy_list_lru_node(struct list_lru_node *nlru)
{
	struct list_lru_memcg *memcg_lrus;
	/*
	 * This is called when shrinker has already been unregistered,
	 * and nobody can use it. So, there is no need to use kvfree_rcu().
	 */
	memcg_lrus = rcu_dereference_protected(nlru->memcg_lrus, true);
	__memcg_destroy_list_lru_node(memcg_lrus, 0, memcg_nr_cache_ids);
	kvfree(memcg_lrus);
}

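/*
 * Growing the per-memcg array is done under list_lrus_mutex: a larger copy
 * is populated for the new ids, published with rcu_assign_pointer(), and the
 * old array is freed after a grace period so lockless readers stay safe.
 */
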
static int memcg_update_list_lru_node(struct list_lru_node *nlru,
				      int old_size, int new_size)
{
	struct list_lru_memcg *old, *new;

	BUG_ON(old_size > new_size);

	old = rcu_dereference_protected(nlru->memcg_lrus,
					lockdep_is_held(&list_lrus_mutex));
	new = kvmalloc(struct_size(new, lru, new_size), GFP_KERNEL);
	if (!new)
		return -ENOMEM;

	if (__memcg_init_list_lru_node(new, old_size, new_size)) {
		kvfree(new);
		return -ENOMEM;
	}

	memcpy(&new->lru, &old->lru, flex_array_size(new, lru, old_size));
	rcu_assign_pointer(nlru->memcg_lrus, new);
	kvfree_rcu(old, rcu);
	return 0;
}

static void memcg_cancel_update_list_lru_node(struct list_lru_node *nlru,
					      int old_size, int new_size)
{
	struct list_lru_memcg *memcg_lrus;

	memcg_lrus = rcu_dereference_protected(nlru->memcg_lrus,
					       lockdep_is_held(&list_lrus_mutex));
	/* do not bother shrinking the array back to the old size, because we
	 * cannot handle allocation failures here */
	__memcg_destroy_list_lru_node(memcg_lrus, old_size, new_size);
}

static int memcg_init_list_lru(struct list_lru *lru, bool memcg_aware)
{
	int i;

	lru->memcg_aware = memcg_aware;
	if (!memcg_aware)
		return 0;

	for_each_node(i) {
		if (memcg_init_list_lru_node(&lru->node[i]))
			goto fail;
	}
	return 0;
fail:
	for (i = i - 1; i >= 0; i--) {
		if (!lru->node[i].memcg_lrus)
			continue;
		memcg_destroy_list_lru_node(&lru->node[i]);
	}
	return -ENOMEM;
}

static void memcg_destroy_list_lru(struct list_lru *lru)
{
	int i;

	if (!list_lru_memcg_aware(lru))
		return;

	for_each_node(i)
		memcg_destroy_list_lru_node(&lru->node[i]);
}

static int memcg_update_list_lru(struct list_lru *lru,
				 int old_size, int new_size)
{
	int i;

	for_each_node(i) {
		if (memcg_update_list_lru_node(&lru->node[i],
					       old_size, new_size))
			goto fail;
	}
	return 0;
fail:
	for (i = i - 1; i >= 0; i--) {
		if (!lru->node[i].memcg_lrus)
			continue;
		memcg_cancel_update_list_lru_node(&lru->node[i],
						  old_size, new_size);
	}
	return -ENOMEM;
}

static void memcg_cancel_update_list_lru(struct list_lru *lru,
					 int old_size, int new_size)
{
	int i;

	for_each_node(i)
		memcg_cancel_update_list_lru_node(&lru->node[i],
						  old_size, new_size);
}

int memcg_update_all_list_lrus(int new_size)
{
	int ret = 0;
	struct list_lru *lru;
	int old_size = memcg_nr_cache_ids;

	mutex_lock(&list_lrus_mutex);
	list_for_each_entry(lru, &memcg_list_lrus, list) {
		ret = memcg_update_list_lru(lru, old_size, new_size);
		if (ret)
			goto fail;
	}
out:
	mutex_unlock(&list_lrus_mutex);
	return ret;
fail:
	list_for_each_entry_continue_reverse(lru, &memcg_list_lrus, list)
		memcg_cancel_update_list_lru(lru, old_size, new_size);
	goto out;
}

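/*
 * When a memcg is offlined, its objects are reparented: every sublist for
 * the dying kmemcg id is spliced onto the destination memcg's sublist on
 * each node, and the destination's shrinker bit is set so the moved objects
 * remain visible to reclaim.
 */
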
static void memcg_drain_list_lru_node(struct list_lru *lru, int nid,
				      int src_idx, struct mem_cgroup *dst_memcg)
{
	struct list_lru_node *nlru = &lru->node[nid];
	int dst_idx = dst_memcg->kmemcg_id;
	struct list_lru_one *src, *dst;

	/*
	 * Since list_lru_{add,del} may be called under an IRQ-safe lock,
	 * we have to use IRQ-safe primitives here to avoid deadlock.
	 */
	spin_lock_irq(&nlru->lock);

	src = list_lru_from_memcg_idx(nlru, src_idx);
	dst = list_lru_from_memcg_idx(nlru, dst_idx);

	list_splice_init(&src->list, &dst->list);

	if (src->nr_items) {
		dst->nr_items += src->nr_items;
		set_shrinker_bit(dst_memcg, nid, lru_shrinker_id(lru));
		src->nr_items = 0;
	}

	spin_unlock_irq(&nlru->lock);
}

static void memcg_drain_list_lru(struct list_lru *lru,
				 int src_idx, struct mem_cgroup *dst_memcg)
{
	int i;

	for_each_node(i)
		memcg_drain_list_lru_node(lru, i, src_idx, dst_memcg);
}

void memcg_drain_all_list_lrus(int src_idx, struct mem_cgroup *dst_memcg)
{
	struct list_lru *lru;

	mutex_lock(&list_lrus_mutex);
	list_for_each_entry(lru, &memcg_list_lrus, list)
		memcg_drain_list_lru(lru, src_idx, dst_memcg);
	mutex_unlock(&list_lrus_mutex);
}
#else
static int memcg_init_list_lru(struct list_lru *lru, bool memcg_aware)
{
	return 0;
}

static void memcg_destroy_list_lru(struct list_lru *lru)
{
}
#endif /* CONFIG_MEMCG_KMEM */

int __list_lru_init(struct list_lru *lru, bool memcg_aware,
		    struct lock_class_key *key, struct shrinker *shrinker)
{
	int i;
	int err = -ENOMEM;

#ifdef CONFIG_MEMCG_KMEM
	if (shrinker)
		lru->shrinker_id = shrinker->id;
	else
		lru->shrinker_id = -1;
#endif
	memcg_get_cache_ids();

	lru->node = kcalloc(nr_node_ids, sizeof(*lru->node), GFP_KERNEL);
	if (!lru->node)
		goto out;

	for_each_node(i) {
		spin_lock_init(&lru->node[i].lock);
		if (key)
			lockdep_set_class(&lru->node[i].lock, key);
		init_one_lru(&lru->node[i].lru);
	}

	err = memcg_init_list_lru(lru, memcg_aware);
	if (err) {
		kfree(lru->node);
		/* Do this so a list_lru_destroy() doesn't crash: */
		lru->node = NULL;
		goto out;
	}

	list_lru_register(lru);
out:
	memcg_put_cache_ids();
	return err;
}
EXPORT_SYMBOL_GPL(__list_lru_init);

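/*
 * Callers normally go through the wrappers in <linux/list_lru.h>. For a
 * memcg-aware lru tied to a shrinker, a plausible sequence (illustrative
 * only; the names are made up and the shrinker API details vary by kernel
 * version) is:
 *
 *	static struct list_lru demo_lru;
 *	static struct shrinker demo_shrinker = {
 *		.count_objects	= demo_count,
 *		.scan_objects	= demo_scan,
 *		.seeks		= DEFAULT_SEEKS,
 *		.flags		= SHRINKER_MEMCG_AWARE,
 *	};
 *
 *	err = prealloc_shrinker(&demo_shrinker);
 *	if (!err)
 *		err = list_lru_init_memcg(&demo_lru, &demo_shrinker);
 *	if (!err)
 *		register_shrinker_prepared(&demo_shrinker);
 *
 * with list_lru_destroy() and unregister_shrinker() on the teardown path.
 */
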
void list_lru_destroy(struct list_lru *lru)
{
	/* Already destroyed or not yet initialized? */
	if (!lru->node)
		return;

	memcg_get_cache_ids();

	list_lru_unregister(lru);

	memcg_destroy_list_lru(lru);
	kfree(lru->node);
	lru->node = NULL;

#ifdef CONFIG_MEMCG_KMEM
	lru->shrinker_id = -1;
#endif
	memcg_put_cache_ids();
}
EXPORT_SYMBOL_GPL(list_lru_destroy);