// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2013 Red Hat, Inc. and Parallels Inc. All rights reserved.
 * Authors: David Chinner and Glauber Costa
 *
 * Generic LRU infrastructure
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/list_lru.h>
#include <linux/slab.h>
#include <linux/mutex.h>
#include <linux/memcontrol.h>
#include "slab.h"
#include "internal.h"
#ifdef CONFIG_MEMCG
static LIST_HEAD(memcg_list_lrus);
static DEFINE_MUTEX(list_lrus_mutex);
static inline bool list_lru_memcg_aware(struct list_lru *lru)
{
	return lru->memcg_aware;
}
static void list_lru_register(struct list_lru *lru)
{
	if (!list_lru_memcg_aware(lru))
		return;

	mutex_lock(&list_lrus_mutex);
	list_add(&lru->list, &memcg_list_lrus);
	mutex_unlock(&list_lrus_mutex);
}
static void list_lru_unregister(struct list_lru *lru)
{
	if (!list_lru_memcg_aware(lru))
		return;

	mutex_lock(&list_lrus_mutex);
	list_del(&lru->list);
	mutex_unlock(&list_lrus_mutex);
}
static int lru_shrinker_id(struct list_lru *lru)
{
	return lru->shrinker_id;
}
static inline struct list_lru_one *
list_lru_from_memcg_idx(struct list_lru *lru, int nid, int idx)
{
	if (list_lru_memcg_aware(lru) && idx >= 0) {
		struct list_lru_memcg *mlru = xa_load(&lru->xa, idx);

		return mlru ? &mlru->node[nid] : NULL;
	}
	return &lru->node[nid].lru;
}
static inline bool lock_list_lru(struct list_lru_one *l, bool irq)
{
	if (irq)
		spin_lock_irq(&l->lock);
	else
		spin_lock(&l->lock);
	/* nr_items == LONG_MIN means the list was marked dead by reparenting. */
	if (unlikely(READ_ONCE(l->nr_items) == LONG_MIN)) {
		if (irq)
			spin_unlock_irq(&l->lock);
		else
			spin_unlock(&l->lock);
		return false;
	}
	return true;
}
static inline struct list_lru_one *
lock_list_lru_of_memcg(struct list_lru *lru, int nid, struct mem_cgroup *memcg,
		       bool irq, bool skip_empty)
{
	struct list_lru_one *l;

	rcu_read_lock();
again:
	l = list_lru_from_memcg_idx(lru, nid, memcg_kmem_id(memcg));
	if (likely(l) && lock_list_lru(l, irq)) {
		rcu_read_unlock();
		return l;
	}
	/*
	 * Caller may simply bail out if raced with reparenting or
	 * may iterate through the list_lru and expect empty slots.
	 */
	if (skip_empty) {
		rcu_read_unlock();
		return NULL;
	}
	VM_WARN_ON(!css_is_dying(&memcg->css));
	memcg = parent_mem_cgroup(memcg);
	goto again;
}
static inline void unlock_list_lru(struct list_lru_one *l, bool irq_off)
{
	if (irq_off)
		spin_unlock_irq(&l->lock);
	else
		spin_unlock(&l->lock);
}
#else
static void list_lru_register(struct list_lru *lru)
{
}

static void list_lru_unregister(struct list_lru *lru)
{
}

static int lru_shrinker_id(struct list_lru *lru)
{
	return -1;
}

static inline bool list_lru_memcg_aware(struct list_lru *lru)
{
	return false;
}

static inline struct list_lru_one *
list_lru_from_memcg_idx(struct list_lru *lru, int nid, int idx)
{
	return &lru->node[nid].lru;
}

static inline struct list_lru_one *
lock_list_lru_of_memcg(struct list_lru *lru, int nid, struct mem_cgroup *memcg,
		       bool irq, bool skip_empty)
{
	struct list_lru_one *l = &lru->node[nid].lru;

	if (irq)
		spin_lock_irq(&l->lock);
	else
		spin_lock(&l->lock);

	return l;
}

static inline void unlock_list_lru(struct list_lru_one *l, bool irq_off)
{
	if (irq_off)
		spin_unlock_irq(&l->lock);
	else
		spin_unlock(&l->lock);
}
#endif /* CONFIG_MEMCG */
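
/*
 * Locking overview: each list_lru_one has its own spinlock, and for
 * memcg-aware lrus the list_lru_one is looked up under RCU via the
 * per-lru xarray, indexed by kmemcg_id. A list_lru_one whose nr_items
 * is LONG_MIN has been marked dead by reparenting; lock_list_lru()
 * detects this, and lock_list_lru_of_memcg() then either bails out
 * (skip_empty) or retries against the parent cgroup's list.
 */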
/* The caller must ensure the memcg lifetime. */
bool list_lru_add(struct list_lru *lru, struct list_head *item, int nid,
		  struct mem_cgroup *memcg)
{
	struct list_lru_node *nlru = &lru->node[nid];
	struct list_lru_one *l;

	l = lock_list_lru_of_memcg(lru, nid, memcg, false, false);
	if (!l)
		return false;
	if (list_empty(item)) {
		list_add_tail(item, &l->list);
		/* Set shrinker bit if the first element was added */
		if (!l->nr_items++)
			set_shrinker_bit(memcg, nid, lru_shrinker_id(lru));
		unlock_list_lru(l, false);
		atomic_long_inc(&nlru->nr_items);
		return true;
	}
	unlock_list_lru(l, false);
	return false;
}
EXPORT_SYMBOL_GPL(list_lru_add);
bool list_lru_add_obj(struct list_lru *lru, struct list_head *item)
{
	bool ret;
	int nid = page_to_nid(virt_to_page(item));

	if (list_lru_memcg_aware(lru)) {
		rcu_read_lock();
		ret = list_lru_add(lru, item, nid, mem_cgroup_from_slab_obj(item));
		rcu_read_unlock();
	} else {
		ret = list_lru_add(lru, item, nid, NULL);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(list_lru_add_obj);
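
/*
 * Example usage (an illustrative sketch, not part of this file):
 * a cache tracking reclaimable objects on a list_lru. "struct
 * my_object" and "my_lru" are hypothetical names.
 *
 *	struct my_object {
 *		struct list_head lru;
 *	};
 *
 *	static struct list_lru my_lru;
 *
 *	static void my_object_unused(struct my_object *obj)
 *	{
 *		list_lru_add_obj(&my_lru, &obj->lru);
 *	}
 *
 * list_lru_add_obj() derives the node id and the memcg from the
 * object's backing page, so such callers need no NUMA or memcg
 * awareness; list_lru_del_obj() below is the symmetric removal path.
 */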
/* The caller must ensure the memcg lifetime. */
bool list_lru_del(struct list_lru *lru, struct list_head *item, int nid,
		  struct mem_cgroup *memcg)
{
	struct list_lru_node *nlru = &lru->node[nid];
	struct list_lru_one *l;

	l = lock_list_lru_of_memcg(lru, nid, memcg, false, false);
	if (!l)
		return false;
	if (!list_empty(item)) {
		list_del_init(item);
		l->nr_items--;
		unlock_list_lru(l, false);
		atomic_long_dec(&nlru->nr_items);
		return true;
	}
	unlock_list_lru(l, false);
	return false;
}
EXPORT_SYMBOL_GPL(list_lru_del);
bool list_lru_del_obj(struct list_lru *lru, struct list_head *item)
{
	bool ret;
	int nid = page_to_nid(virt_to_page(item));

	if (list_lru_memcg_aware(lru)) {
		rcu_read_lock();
		ret = list_lru_del(lru, item, nid, mem_cgroup_from_slab_obj(item));
		rcu_read_unlock();
	} else {
		ret = list_lru_del(lru, item, nid, NULL);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(list_lru_del_obj);
void list_lru_isolate(struct list_lru_one *list, struct list_head *item)
{
	list_del_init(item);
	list->nr_items--;
}
EXPORT_SYMBOL_GPL(list_lru_isolate);
void list_lru_isolate_move(struct list_lru_one *list, struct list_head *item,
			   struct list_head *head)
{
	list_move(item, head);
	list->nr_items--;
}
EXPORT_SYMBOL_GPL(list_lru_isolate_move);
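
/*
 * Example walk callback (an illustrative sketch; "my_isolate" is a
 * hypothetical name): move each item onto a caller-private dispose
 * list. The callback runs with the list_lru_one lock held, which is
 * why it must use list_lru_isolate() or list_lru_isolate_move()
 * instead of plain list_del(), keeping nr_items consistent.
 *
 *	static enum lru_status my_isolate(struct list_head *item,
 *					  struct list_lru_one *list,
 *					  void *cb_arg)
 *	{
 *		struct list_head *dispose = cb_arg;
 *
 *		list_lru_isolate_move(list, item, dispose);
 *		return LRU_REMOVED;
 *	}
 */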
unsigned long list_lru_count_one(struct list_lru *lru,
				 int nid, struct mem_cgroup *memcg)
{
	struct list_lru_one *l;
	long count;

	rcu_read_lock();
	l = list_lru_from_memcg_idx(lru, nid, memcg_kmem_id(memcg));
	count = l ? READ_ONCE(l->nr_items) : 0;
	rcu_read_unlock();

	if (unlikely(count < 0))
		count = 0;

	return count;
}
EXPORT_SYMBOL_GPL(list_lru_count_one);
unsigned long list_lru_count_node(struct list_lru *lru, int nid)
{
	struct list_lru_node *nlru;

	nlru = &lru->node[nid];
	return atomic_long_read(&nlru->nr_items);
}
EXPORT_SYMBOL_GPL(list_lru_count_node);
static unsigned long
__list_lru_walk_one(struct list_lru *lru, int nid, struct mem_cgroup *memcg,
		    list_lru_walk_cb isolate, void *cb_arg,
		    unsigned long *nr_to_walk, bool irq_off)
{
	struct list_lru_node *nlru = &lru->node[nid];
	struct list_lru_one *l = NULL;
	struct list_head *item, *n;
	unsigned long isolated = 0;

restart:
	l = lock_list_lru_of_memcg(lru, nid, memcg, irq_off, true);
	if (!l)
		return isolated;
	list_for_each_safe(item, n, &l->list) {
		enum lru_status ret;

		/*
		 * decrement nr_to_walk first so that we don't livelock if we
		 * get stuck on large numbers of LRU_RETRY items
		 */
		if (!*nr_to_walk)
			break;
		--*nr_to_walk;

		ret = isolate(item, l, cb_arg);
		switch (ret) {
		/*
		 * LRU_RETRY, LRU_REMOVED_RETRY and LRU_STOP will drop the lru
		 * lock. List traversal will have to restart from scratch.
		 */
		case LRU_RETRY:
			goto restart;
		case LRU_REMOVED_RETRY:
			fallthrough;
		case LRU_REMOVED:
			isolated++;
			atomic_long_dec(&nlru->nr_items);
			if (ret == LRU_REMOVED_RETRY)
				goto restart;
			break;
		case LRU_ROTATE:
			list_move_tail(item, &l->list);
			break;
		case LRU_SKIP:
			break;
		case LRU_STOP:
			goto out;
		default:
			BUG();
		}
	}
	unlock_list_lru(l, irq_off);
out:
	return isolated;
}
unsigned long
list_lru_walk_one(struct list_lru *lru, int nid, struct mem_cgroup *memcg,
		  list_lru_walk_cb isolate, void *cb_arg,
		  unsigned long *nr_to_walk)
{
	return __list_lru_walk_one(lru, nid, memcg, isolate,
				   cb_arg, nr_to_walk, false);
}
EXPORT_SYMBOL_GPL(list_lru_walk_one);
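
/*
 * Example (an illustrative sketch using the hypothetical "my_lru" and
 * "my_isolate" above; "nid" and "memcg" are caller-provided): walk one
 * node/memcg pair, then free the isolated objects with no lru lock held.
 *
 *	LIST_HEAD(dispose);
 *	unsigned long nr_to_walk = 32;
 *
 *	list_lru_walk_one(&my_lru, nid, memcg, my_isolate,
 *			  &dispose, &nr_to_walk);
 *	// free everything on "dispose" outside the lru lock
 */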
unsigned long
list_lru_walk_one_irq(struct list_lru *lru, int nid, struct mem_cgroup *memcg,
		      list_lru_walk_cb isolate, void *cb_arg,
		      unsigned long *nr_to_walk)
{
	return __list_lru_walk_one(lru, nid, memcg, isolate,
				   cb_arg, nr_to_walk, true);
}
unsigned long list_lru_walk_node(struct list_lru *lru, int nid,
				 list_lru_walk_cb isolate, void *cb_arg,
				 unsigned long *nr_to_walk)
{
	long isolated = 0;

	isolated += list_lru_walk_one(lru, nid, NULL, isolate, cb_arg,
				      nr_to_walk);

#ifdef CONFIG_MEMCG
	if (*nr_to_walk > 0 && list_lru_memcg_aware(lru)) {
		struct list_lru_memcg *mlru;
		struct mem_cgroup *memcg;
		unsigned long index;

		xa_for_each(&lru->xa, index, mlru) {
			rcu_read_lock();
			memcg = mem_cgroup_from_id(index);
			if (!mem_cgroup_tryget(memcg)) {
				rcu_read_unlock();
				continue;
			}
			rcu_read_unlock();
			isolated += __list_lru_walk_one(lru, nid, memcg,
							isolate, cb_arg,
							nr_to_walk, false);
			mem_cgroup_put(memcg);

			if (*nr_to_walk <= 0)
				break;
		}
	}
#endif

	return isolated;
}
EXPORT_SYMBOL_GPL(list_lru_walk_node);
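
/*
 * Example (an illustrative sketch, hypothetical names as above):
 * draining a whole node, including every per-memcg list, e.g. on
 * unmount or cache teardown.
 *
 *	static void my_drain_node(int nid)
 *	{
 *		LIST_HEAD(dispose);
 *		unsigned long nr_to_walk = ULONG_MAX;
 *
 *		list_lru_walk_node(&my_lru, nid, my_isolate,
 *				   &dispose, &nr_to_walk);
 *		// free everything on "dispose"
 *	}
 */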
static void init_one_lru(struct list_lru *lru, struct list_lru_one *l)
{
	INIT_LIST_HEAD(&l->list);
	spin_lock_init(&l->lock);
	l->nr_items = 0;
#ifdef CONFIG_LOCKDEP
	if (lru->key)
		lockdep_set_class(&l->lock, lru->key);
#endif
}
#ifdef CONFIG_MEMCG
static struct list_lru_memcg *memcg_init_list_lru_one(struct list_lru *lru, gfp_t gfp)
{
	int nid;
	struct list_lru_memcg *mlru;

	mlru = kmalloc(struct_size(mlru, node, nr_node_ids), gfp);
	if (!mlru)
		return NULL;

	for_each_node(nid)
		init_one_lru(lru, &mlru->node[nid]);

	return mlru;
}
static inline void memcg_init_list_lru(struct list_lru *lru, bool memcg_aware)
{
	if (memcg_aware)
		xa_init_flags(&lru->xa, XA_FLAGS_LOCK_IRQ);
	lru->memcg_aware = memcg_aware;
}
static void memcg_destroy_list_lru(struct list_lru *lru)
{
	XA_STATE(xas, &lru->xa, 0);
	struct list_lru_memcg *mlru;

	if (!list_lru_memcg_aware(lru))
		return;

	xas_lock_irq(&xas);
	xas_for_each(&xas, mlru, ULONG_MAX) {
		kfree(mlru);
		xas_store(&xas, NULL);
	}
	xas_unlock_irq(&xas);
}
static void memcg_reparent_list_lru_one(struct list_lru *lru, int nid,
					struct list_lru_one *src,
					struct mem_cgroup *dst_memcg)
{
	int dst_idx = dst_memcg->kmemcg_id;
	struct list_lru_one *dst;

	spin_lock_irq(&src->lock);
	dst = list_lru_from_memcg_idx(lru, nid, dst_idx);
	spin_lock_nested(&dst->lock, SINGLE_DEPTH_NESTING);

	list_splice_init(&src->list, &dst->list);
	if (src->nr_items) {
		WARN_ON(src->nr_items < 0);
		dst->nr_items += src->nr_items;
		set_shrinker_bit(dst_memcg, nid, lru_shrinker_id(lru));
	}
	/* Mark the list_lru_one dead */
	src->nr_items = LONG_MIN;

	spin_unlock(&dst->lock);
	spin_unlock_irq(&src->lock);
}
void memcg_reparent_list_lrus(struct mem_cgroup *memcg, struct mem_cgroup *parent)
{
	struct list_lru *lru;
	int i;

	mutex_lock(&list_lrus_mutex);
	list_for_each_entry(lru, &memcg_list_lrus, list) {
		struct list_lru_memcg *mlru;
		XA_STATE(xas, &lru->xa, memcg->kmemcg_id);

		/*
		 * Lock the Xarray to ensure no ongoing list_lru_memcg
		 * allocation and further allocation will see css_is_dying().
		 */
		xas_lock_irq(&xas);
		mlru = xas_store(&xas, NULL);
		xas_unlock_irq(&xas);
		if (!mlru)
			continue;

		/*
		 * With Xarray value set to NULL, holding the lru lock below
		 * prevents list_lru_{add,del,isolate} from touching the lru,
		 * safe to reparent.
		 */
		for_each_node(i)
			memcg_reparent_list_lru_one(lru, i, &mlru->node[i], parent);

		/*
		 * Here all list_lrus corresponding to the cgroup are guaranteed
		 * to remain empty, we can safely free this lru, any further
		 * memcg_list_lru_alloc() call will simply bail out.
		 */
		kvfree_rcu(mlru, rcu);
	}
	mutex_unlock(&list_lrus_mutex);
}
static inline bool memcg_list_lru_allocated(struct mem_cgroup *memcg,
					    struct list_lru *lru)
{
	int idx = memcg->kmemcg_id;

	return idx < 0 || xa_load(&lru->xa, idx);
}
int memcg_list_lru_alloc(struct mem_cgroup *memcg, struct list_lru *lru,
			 gfp_t gfp)
{
	unsigned long flags;
	struct list_lru_memcg *mlru = NULL;
	struct mem_cgroup *pos, *parent;
	XA_STATE(xas, &lru->xa, 0);

	if (!list_lru_memcg_aware(lru) || memcg_list_lru_allocated(memcg, lru))
		return 0;

	gfp &= GFP_RECLAIM_MASK;
	/*
	 * Because the list_lru can be reparented to the parent cgroup's
	 * list_lru, we should make sure that this cgroup and all its
	 * ancestors have allocated list_lru_memcg.
	 */
	do {
		/*
		 * Keep finding the farthest parent that wasn't populated
		 * until we find memcg itself.
		 */
		pos = memcg;
		parent = parent_mem_cgroup(pos);
		while (!memcg_list_lru_allocated(parent, lru)) {
			pos = parent;
			parent = parent_mem_cgroup(pos);
		}

		if (!mlru) {
			mlru = memcg_init_list_lru_one(lru, gfp);
			if (!mlru)
				return -ENOMEM;
		}
		xas_set(&xas, pos->kmemcg_id);
		do {
			xas_lock_irqsave(&xas, flags);
			if (!xas_load(&xas) && !css_is_dying(&pos->css)) {
				xas_store(&xas, mlru);
				if (!xas_error(&xas))
					mlru = NULL;
			}
			xas_unlock_irqrestore(&xas, flags);
		} while (xas_nomem(&xas, gfp));
	} while (pos != memcg && !css_is_dying(&pos->css));

	if (unlikely(mlru))
		kfree(mlru);

	return xas_error(&xas);
}
#else
static inline void memcg_init_list_lru(struct list_lru *lru, bool memcg_aware)
{
}

static void memcg_destroy_list_lru(struct list_lru *lru)
{
}
#endif /* CONFIG_MEMCG */
int __list_lru_init(struct list_lru *lru, bool memcg_aware, struct shrinker *shrinker)
{
	int i;

#ifdef CONFIG_MEMCG
	if (shrinker)
		lru->shrinker_id = shrinker->id;
	else
		lru->shrinker_id = -1;

	if (mem_cgroup_kmem_disabled())
		memcg_aware = false;
#endif

	lru->node = kcalloc(nr_node_ids, sizeof(*lru->node), GFP_KERNEL);
	if (!lru->node)
		return -ENOMEM;

	for_each_node(i)
		init_one_lru(lru, &lru->node[i].lru);

	memcg_init_list_lru(lru, memcg_aware);
	list_lru_register(lru);

	return 0;
}
EXPORT_SYMBOL_GPL(__list_lru_init);
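
/*
 * Callers normally initialise through the wrappers in
 * <linux/list_lru.h> rather than calling __list_lru_init() directly.
 * A minimal sketch ("my_lru" and "my_shrinker" are hypothetical):
 *
 *	// memcg-unaware lru:
 *	err = list_lru_init(&my_lru);
 *
 *	// memcg-aware, tied to a shrinker so set_shrinker_bit() has
 *	// an id to set:
 *	err = list_lru_init_memcg(&my_lru, my_shrinker);
 *
 * Either form must be paired with list_lru_destroy() on teardown.
 */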
void list_lru_destroy(struct list_lru *lru)
{
	/* Already destroyed or not yet initialized? */
	if (!lru->node)
		return;

	list_lru_unregister(lru);

	memcg_destroy_list_lru(lru);
	kfree(lru->node);
	lru->node = NULL;

#ifdef CONFIG_MEMCG
	lru->shrinker_id = -1;
#endif
}
EXPORT_SYMBOL_GPL(list_lru_destroy);