list_lru: organize all list_lrus to list
mm/list_lru.c
/*
 * Copyright (c) 2013 Red Hat, Inc. and Parallels Inc. All rights reserved.
 * Authors: David Chinner and Glauber Costa
 *
 * Generic LRU infrastructure
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/list_lru.h>
#include <linux/slab.h>
#include <linux/mutex.h>

#ifdef CONFIG_MEMCG_KMEM
static LIST_HEAD(list_lrus);
static DEFINE_MUTEX(list_lrus_mutex);

static void list_lru_register(struct list_lru *lru)
{
        mutex_lock(&list_lrus_mutex);
        list_add(&lru->list, &list_lrus);
        mutex_unlock(&list_lrus_mutex);
}

static void list_lru_unregister(struct list_lru *lru)
{
        mutex_lock(&list_lrus_mutex);
        list_del(&lru->list);
        mutex_unlock(&list_lrus_mutex);
}
#else
static void list_lru_register(struct list_lru *lru)
{
}

static void list_lru_unregister(struct list_lru *lru)
{
}
#endif /* CONFIG_MEMCG_KMEM */

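/*
 * Add @item to the lru.  The per-node list is chosen by the NUMA node
 * backing @item's memory (page_to_nid(virt_to_page(item))), so the item
 * must be embedded in an object that came from the page or slab allocator.
 * Returns true if the item was added, false if it was already on a list.
 */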
bool list_lru_add(struct list_lru *lru, struct list_head *item)
{
        int nid = page_to_nid(virt_to_page(item));
        struct list_lru_node *nlru = &lru->node[nid];

        spin_lock(&nlru->lock);
        WARN_ON_ONCE(nlru->nr_items < 0);
        if (list_empty(item)) {
                list_add_tail(item, &nlru->list);
                nlru->nr_items++;
                spin_unlock(&nlru->lock);
                return true;
        }
        spin_unlock(&nlru->lock);
        return false;
}
EXPORT_SYMBOL_GPL(list_lru_add);

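/*
 * Remove @item from the lru.  The node is looked up the same way as in
 * list_lru_add(), so the item is deleted from the list it was added to.
 * Returns true if the item was removed, false if it was not on a list.
 */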
bool list_lru_del(struct list_lru *lru, struct list_head *item)
{
        int nid = page_to_nid(virt_to_page(item));
        struct list_lru_node *nlru = &lru->node[nid];

        spin_lock(&nlru->lock);
        if (!list_empty(item)) {
                list_del_init(item);
                nlru->nr_items--;
                WARN_ON_ONCE(nlru->nr_items < 0);
                spin_unlock(&nlru->lock);
                return true;
        }
        spin_unlock(&nlru->lock);
        return false;
}
EXPORT_SYMBOL_GPL(list_lru_del);

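/*
 * Return the number of items on the list of NUMA node @nid.  The count is
 * read under the node's lock and may already be stale when it is used.
 */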
unsigned long
list_lru_count_node(struct list_lru *lru, int nid)
{
        unsigned long count = 0;
        struct list_lru_node *nlru = &lru->node[nid];

        spin_lock(&nlru->lock);
        WARN_ON_ONCE(nlru->nr_items < 0);
        count += nlru->nr_items;
        spin_unlock(&nlru->lock);

        return count;
}
EXPORT_SYMBOL_GPL(list_lru_count_node);

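/*
 * Walk the list of NUMA node @nid, calling @isolate on each item with the
 * node's lock held.  The callback's lru_status return value drives the
 * walk: LRU_REMOVED and LRU_REMOVED_RETRY account one isolated item,
 * LRU_ROTATE moves the item to the tail, LRU_SKIP leaves it in place, and
 * LRU_RETRY / LRU_REMOVED_RETRY restart the traversal because the callback
 * dropped and reacquired the lock.  At most *nr_to_walk items are visited;
 * the number of items isolated is returned.
 */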
unsigned long
list_lru_walk_node(struct list_lru *lru, int nid, list_lru_walk_cb isolate,
                   void *cb_arg, unsigned long *nr_to_walk)
{
        struct list_lru_node *nlru = &lru->node[nid];
        struct list_head *item, *n;
        unsigned long isolated = 0;

        spin_lock(&nlru->lock);
restart:
        list_for_each_safe(item, n, &nlru->list) {
                enum lru_status ret;

                /*
                 * decrement nr_to_walk first so that we don't livelock if we
                 * get stuck on large numbers of LRU_RETRY items
                 */
                if (!*nr_to_walk)
                        break;
                --*nr_to_walk;

                ret = isolate(item, &nlru->lock, cb_arg);
                switch (ret) {
                case LRU_REMOVED_RETRY:
                        assert_spin_locked(&nlru->lock);
                        /* fall through */
                case LRU_REMOVED:
                        nlru->nr_items--;
                        WARN_ON_ONCE(nlru->nr_items < 0);
                        isolated++;
                        /*
                         * If the lru lock has been dropped, our list
                         * traversal is now invalid and so we have to
                         * restart from scratch.
                         */
                        if (ret == LRU_REMOVED_RETRY)
                                goto restart;
                        break;
                case LRU_ROTATE:
                        list_move_tail(item, &nlru->list);
                        break;
                case LRU_SKIP:
                        break;
                case LRU_RETRY:
                        /*
                         * The lru lock has been dropped, our list traversal is
                         * now invalid and so we have to restart from scratch.
                         */
                        assert_spin_locked(&nlru->lock);
                        goto restart;
                default:
                        BUG();
                }
        }

        spin_unlock(&nlru->lock);
        return isolated;
}
EXPORT_SYMBOL_GPL(list_lru_walk_node);

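/*
 * Allocate and initialize one list_lru_node per possible NUMA node.  A
 * non-NULL @key gives the per-node spinlocks their own lockdep class.
 * The lru is added to the global list_lrus list when CONFIG_MEMCG_KMEM
 * is enabled.
 */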
int list_lru_init_key(struct list_lru *lru, struct lock_class_key *key)
{
        int i;
        size_t size = sizeof(*lru->node) * nr_node_ids;

        lru->node = kzalloc(size, GFP_KERNEL);
        if (!lru->node)
                return -ENOMEM;

        for (i = 0; i < nr_node_ids; i++) {
                spin_lock_init(&lru->node[i].lock);
                if (key)
                        lockdep_set_class(&lru->node[i].lock, key);
                INIT_LIST_HEAD(&lru->node[i].list);
                lru->node[i].nr_items = 0;
        }
        list_lru_register(lru);
        return 0;
}
EXPORT_SYMBOL_GPL(list_lru_init_key);

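/*
 * Unregister the lru and free its per-node array.  Calling this on an lru
 * that was never initialized, or twice on the same lru, is a no-op.
 */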
void list_lru_destroy(struct list_lru *lru)
{
        /* Already destroyed or not yet initialized? */
        if (!lru->node)
                return;
        list_lru_unregister(lru);
        kfree(lru->node);
        lru->node = NULL;
}
EXPORT_SYMBOL_GPL(list_lru_destroy);
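
/*
 * A minimal usage sketch (not part of this file): how a caller might feed
 * objects into a list_lru and later reclaim them with list_lru_walk_node().
 * The names my_obj, my_lru, my_isolate and my_shrink_node are hypothetical.
 */
#if 0
struct my_obj {
        struct list_head lru;           /* linked into my_lru's per-node list */
        /* ... payload ... */
};

static struct list_lru my_lru;          /* set up with list_lru_init_key(&my_lru, NULL) */

/* Called with the node's lru lock held; move the item to a dispose list. */
static enum lru_status my_isolate(struct list_head *item, spinlock_t *lock,
                                  void *cb_arg)
{
        struct list_head *dispose = cb_arg;

        list_move(item, dispose);
        return LRU_REMOVED;             /* the walker decrements nr_items for us */
}

static unsigned long my_shrink_node(int nid, unsigned long nr_to_scan)
{
        LIST_HEAD(dispose);
        unsigned long freed;

        freed = list_lru_walk_node(&my_lru, nid, my_isolate, &dispose,
                                   &nr_to_scan);
        /* free everything on @dispose here, outside the lru lock */
        return freed;
}

/* Teardown: list_lru_destroy(&my_lru) once no items remain. */
#endif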