/*
 * Copyright (c) 2013 Red Hat, Inc. and Parallels Inc. All rights reserved.
 * Authors: David Chinner and Glauber Costa
 *
 * Generic LRU infrastructure
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/list_lru.h>
#include <linux/slab.h>
#include <linux/mutex.h>

#ifdef CONFIG_MEMCG_KMEM
static LIST_HEAD(list_lrus);
static DEFINE_MUTEX(list_lrus_mutex);
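
/*
 * With CONFIG_MEMCG_KMEM every list_lru in the system is kept on a
 * global list protected by list_lrus_mutex, presumably so the
 * memcg/kmem code can walk all LRUs when per-memcg state changes.
 */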
static void list_lru_register(struct list_lru *lru)
{
        mutex_lock(&list_lrus_mutex);
        list_add(&lru->list, &list_lrus);
        mutex_unlock(&list_lrus_mutex);
}

static void list_lru_unregister(struct list_lru *lru)
{
        mutex_lock(&list_lrus_mutex);
        list_del(&lru->list);
        mutex_unlock(&list_lrus_mutex);
}
#else
static void list_lru_register(struct list_lru *lru)
{
}

static void list_lru_unregister(struct list_lru *lru)
{
}
#endif /* CONFIG_MEMCG_KMEM */
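
/*
 * Add @item to the LRU list of the NUMA node its memory sits on. The
 * item must live in directly-mapped (e.g. slab-allocated) memory, since
 * virt_to_page() is used to find its node. An item is considered
 * already present when its list_head is non-empty, so callers are
 * expected to hand in an item initialised with INIT_LIST_HEAD().
 * Returns true if the item was added, false if it was already on a list.
 */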
bool list_lru_add(struct list_lru *lru, struct list_head *item)
{
        int nid = page_to_nid(virt_to_page(item));
        struct list_lru_node *nlru = &lru->node[nid];

        spin_lock(&nlru->lock);
        WARN_ON_ONCE(nlru->nr_items < 0);
        if (list_empty(item)) {
                list_add_tail(item, &nlru->list);
                nlru->nr_items++;
                spin_unlock(&nlru->lock);
                return true;
        }
        spin_unlock(&nlru->lock);
        return false;
}
EXPORT_SYMBOL_GPL(list_lru_add);
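
/*
 * Remove @item from its node's LRU list. Returns true if the item was
 * on a list and has been removed (its list_head is reinitialised, so a
 * later list_lru_add() sees it as empty), false if it was not on a list.
 */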
bool list_lru_del(struct list_lru *lru, struct list_head *item)
{
        int nid = page_to_nid(virt_to_page(item));
        struct list_lru_node *nlru = &lru->node[nid];

        spin_lock(&nlru->lock);
        if (!list_empty(item)) {
                list_del_init(item);
                nlru->nr_items--;
                WARN_ON_ONCE(nlru->nr_items < 0);
                spin_unlock(&nlru->lock);
                return true;
        }
        spin_unlock(&nlru->lock);
        return false;
}
EXPORT_SYMBOL_GPL(list_lru_del);
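
/*
 * Return the number of items on the LRU list of node @nid. The per-node
 * lock is taken so the count is a consistent snapshot, though it may be
 * stale by the time the caller acts on it.
 */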
unsigned long
list_lru_count_node(struct list_lru *lru, int nid)
{
        unsigned long count = 0;
        struct list_lru_node *nlru = &lru->node[nid];

        spin_lock(&nlru->lock);
        WARN_ON_ONCE(nlru->nr_items < 0);
        count += nlru->nr_items;
        spin_unlock(&nlru->lock);

        return count;
}
EXPORT_SYMBOL_GPL(list_lru_count_node);
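
/*
 * Walk the LRU list of node @nid, calling @isolate on each item with the
 * node lock held, until either the list is exhausted or *nr_to_walk
 * items have been visited. The callback's enum lru_status return value
 * tells the walker what was done with the item; the *_RETRY values
 * signal that the callback may have dropped and retaken the lock, which
 * invalidates the traversal and forces a restart. Returns the number of
 * items isolated (removed).
 *
 * A minimal callback might look like this (demo_obj, its refcnt field
 * and its lru list_head are hypothetical, shown only for illustration):
 *
 *      static enum lru_status demo_isolate(struct list_head *item,
 *                                          spinlock_t *lock, void *cb_arg)
 *      {
 *              struct demo_obj *obj = container_of(item, struct demo_obj, lru);
 *
 *              if (atomic_read(&obj->refcnt))
 *                      return LRU_ROTATE;      (still in use, keep it)
 *              list_del_init(item);            (callback takes item off the list)
 *              return LRU_REMOVED;             (walker adjusts nr_items)
 *      }
 */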
unsigned long
list_lru_walk_node(struct list_lru *lru, int nid, list_lru_walk_cb isolate,
                   void *cb_arg, unsigned long *nr_to_walk)
{
        struct list_lru_node *nlru = &lru->node[nid];
        struct list_head *item, *n;
        unsigned long isolated = 0;

        spin_lock(&nlru->lock);
restart:
        list_for_each_safe(item, n, &nlru->list) {
                enum lru_status ret;

                /*
                 * decrement nr_to_walk first so that we don't livelock if we
                 * get stuck on large numbers of LRU_RETRY items
                 */
                if (!*nr_to_walk)
                        break;
                --*nr_to_walk;

                ret = isolate(item, &nlru->lock, cb_arg);
                switch (ret) {
                case LRU_REMOVED_RETRY:
                        assert_spin_locked(&nlru->lock);
                case LRU_REMOVED:
                        nlru->nr_items--;
                        WARN_ON_ONCE(nlru->nr_items < 0);
                        isolated++;
                        /*
                         * If the lru lock has been dropped, our list
                         * traversal is now invalid and so we have to
                         * restart from scratch.
                         */
                        if (ret == LRU_REMOVED_RETRY)
                                goto restart;
                        break;
                case LRU_ROTATE:
                        list_move_tail(item, &nlru->list);
                        break;
                case LRU_SKIP:
                        break;
                case LRU_RETRY:
                        /*
                         * The lru lock has been dropped, our list traversal is
                         * now invalid and so we have to restart from scratch.
                         */
                        assert_spin_locked(&nlru->lock);
                        goto restart;
                default:
                        BUG();
                }
        }

        spin_unlock(&nlru->lock);
        return isolated;
}
EXPORT_SYMBOL_GPL(list_lru_walk_node);
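
/*
 * Initialise @lru with one list_lru_node per possible NUMA node, each
 * with its own lock and an empty list. A non-NULL @key gives the
 * per-node locks their own lockdep class; passing NULL keeps the
 * default class. Returns 0 on success or -ENOMEM if the node array
 * cannot be allocated.
 */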
int list_lru_init_key(struct list_lru *lru, struct lock_class_key *key)
{
        int i;
        size_t size = sizeof(*lru->node) * nr_node_ids;

        lru->node = kzalloc(size, GFP_KERNEL);
        if (!lru->node)
                return -ENOMEM;

        for (i = 0; i < nr_node_ids; i++) {
                spin_lock_init(&lru->node[i].lock);
                if (key)
                        lockdep_set_class(&lru->node[i].lock, key);
                INIT_LIST_HEAD(&lru->node[i].list);
                lru->node[i].nr_items = 0;
        }
        list_lru_register(lru);
        return 0;
}
EXPORT_SYMBOL_GPL(list_lru_init_key);
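
/*
 * Tear down @lru. The NULL check makes this safe to call on an lru that
 * was never initialised or has already been destroyed, and resetting
 * lru->node afterwards keeps repeated calls harmless.
 */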
void list_lru_destroy(struct list_lru *lru)
{
        /* Already destroyed or not yet initialized? */
        if (!lru->node)
                return;
        list_lru_unregister(lru);
        kfree(lru->node);
        lru->node = NULL;
}
EXPORT_SYMBOL_GPL(list_lru_destroy);
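
/*
 * Typical lifecycle, sketched with a hypothetical demo_cache/demo_obj
 * pair (none of these identifiers exist in the kernel):
 *
 *      static struct lock_class_key demo_lru_key;
 *      struct demo_cache { struct list_lru lru; };
 *
 *      err = list_lru_init_key(&cache->lru, &demo_lru_key);
 *      ...
 *      list_lru_add(&cache->lru, &obj->lru);   (object becomes reclaimable)
 *      list_lru_del(&cache->lru, &obj->lru);   (object back in active use)
 *      ...
 *      list_lru_destroy(&cache->lru);
 */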