mm: list_lru: rename memcg_drain_all_list_lrus to memcg_reparent_list_lrus
mm/list_lru.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2013 Red Hat, Inc. and Parallels Inc. All rights reserved.
 * Authors: David Chinner and Glauber Costa
 *
 * Generic LRU infrastructure
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/list_lru.h>
#include <linux/slab.h>
#include <linux/mutex.h>
#include <linux/memcontrol.h>
#include "slab.h"
#include "internal.h"

#ifdef CONFIG_MEMCG_KMEM
static LIST_HEAD(memcg_list_lrus);
static DEFINE_MUTEX(list_lrus_mutex);

static inline bool list_lru_memcg_aware(struct list_lru *lru)
{
	return lru->memcg_aware;
}

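/*
 * Every memcg-aware list_lru is linked on memcg_list_lrus (under
 * list_lrus_mutex) so that memcg_update_all_list_lrus() and
 * memcg_reparent_list_lrus() can walk all of them.
 */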
static void list_lru_register(struct list_lru *lru)
{
	if (!list_lru_memcg_aware(lru))
		return;

	mutex_lock(&list_lrus_mutex);
	list_add(&lru->list, &memcg_list_lrus);
	mutex_unlock(&list_lrus_mutex);
}

static void list_lru_unregister(struct list_lru *lru)
{
	if (!list_lru_memcg_aware(lru))
		return;

	mutex_lock(&list_lrus_mutex);
	list_del(&lru->list);
	mutex_unlock(&list_lrus_mutex);
}

static int lru_shrinker_id(struct list_lru *lru)
{
	return lru->shrinker_id;
}

static inline struct list_lru_one *
list_lru_from_memcg_idx(struct list_lru *lru, int nid, int idx)
{
	struct list_lru_memcg *mlrus;
	struct list_lru_node *nlru = &lru->node[nid];

	/*
	 * Either lock or RCU protects the array of per cgroup lists
	 * from relocation (see memcg_update_list_lru).
	 */
	mlrus = rcu_dereference_check(lru->mlrus, lockdep_is_held(&nlru->lock));
	if (mlrus && idx >= 0) {
		struct list_lru_per_memcg *mlru;

		mlru = rcu_dereference_check(mlrus->mlru[idx], true);
		return mlru ? &mlru->node[nid] : NULL;
	}
	return &nlru->lru;
}

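/*
 * Map an object to the list_lru_one it should live on: look up the memcg
 * that owns @ptr and pick the matching per-memcg list on @nid, falling back
 * to the node-level list when the lru has no per-memcg array or the object
 * is not charged to a memcg.
 */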
static inline struct list_lru_one *
list_lru_from_kmem(struct list_lru *lru, int nid, void *ptr,
		   struct mem_cgroup **memcg_ptr)
{
	struct list_lru_node *nlru = &lru->node[nid];
	struct list_lru_one *l = &nlru->lru;
	struct mem_cgroup *memcg = NULL;

	if (!lru->mlrus)
		goto out;

	memcg = mem_cgroup_from_obj(ptr);
	if (!memcg)
		goto out;

	l = list_lru_from_memcg_idx(lru, nid, memcg_cache_id(memcg));
out:
	if (memcg_ptr)
		*memcg_ptr = memcg;
	return l;
}
#else
static void list_lru_register(struct list_lru *lru)
{
}

static void list_lru_unregister(struct list_lru *lru)
{
}

static int lru_shrinker_id(struct list_lru *lru)
{
	return -1;
}

static inline bool list_lru_memcg_aware(struct list_lru *lru)
{
	return false;
}

static inline struct list_lru_one *
list_lru_from_memcg_idx(struct list_lru *lru, int nid, int idx)
{
	return &lru->node[nid].lru;
}

static inline struct list_lru_one *
list_lru_from_kmem(struct list_lru *lru, int nid, void *ptr,
		   struct mem_cgroup **memcg_ptr)
{
	if (memcg_ptr)
		*memcg_ptr = NULL;
	return &lru->node[nid].lru;
}
#endif /* CONFIG_MEMCG_KMEM */

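/*
 * list_lru_add() and list_lru_del() return true only when the item was
 * actually added to or removed from a list. The node is derived from the
 * item's page, and the owning memcg (if any) is resolved through
 * list_lru_from_kmem() under the per-node lock.
 */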
bool list_lru_add(struct list_lru *lru, struct list_head *item)
{
	int nid = page_to_nid(virt_to_page(item));
	struct list_lru_node *nlru = &lru->node[nid];
	struct mem_cgroup *memcg;
	struct list_lru_one *l;

	spin_lock(&nlru->lock);
	if (list_empty(item)) {
		l = list_lru_from_kmem(lru, nid, item, &memcg);
		list_add_tail(item, &l->list);
		/* Set shrinker bit if the first element was added */
		if (!l->nr_items++)
			set_shrinker_bit(memcg, nid,
					 lru_shrinker_id(lru));
		nlru->nr_items++;
		spin_unlock(&nlru->lock);
		return true;
	}
	spin_unlock(&nlru->lock);
	return false;
}
EXPORT_SYMBOL_GPL(list_lru_add);

bool list_lru_del(struct list_lru *lru, struct list_head *item)
{
	int nid = page_to_nid(virt_to_page(item));
	struct list_lru_node *nlru = &lru->node[nid];
	struct list_lru_one *l;

	spin_lock(&nlru->lock);
	if (!list_empty(item)) {
		l = list_lru_from_kmem(lru, nid, item, NULL);
		list_del_init(item);
		l->nr_items--;
		nlru->nr_items--;
		spin_unlock(&nlru->lock);
		return true;
	}
	spin_unlock(&nlru->lock);
	return false;
}
EXPORT_SYMBOL_GPL(list_lru_del);

void list_lru_isolate(struct list_lru_one *list, struct list_head *item)
{
	list_del_init(item);
	list->nr_items--;
}
EXPORT_SYMBOL_GPL(list_lru_isolate);

void list_lru_isolate_move(struct list_lru_one *list, struct list_head *item,
			   struct list_head *head)
{
	list_move(item, head);
	list->nr_items--;
}
EXPORT_SYMBOL_GPL(list_lru_isolate_move);

unsigned long list_lru_count_one(struct list_lru *lru,
				 int nid, struct mem_cgroup *memcg)
{
	struct list_lru_one *l;
	long count;

	rcu_read_lock();
	l = list_lru_from_memcg_idx(lru, nid, memcg_cache_id(memcg));
	count = l ? READ_ONCE(l->nr_items) : 0;
	rcu_read_unlock();

	if (unlikely(count < 0))
		count = 0;

	return count;
}
EXPORT_SYMBOL_GPL(list_lru_count_one);

unsigned long list_lru_count_node(struct list_lru *lru, int nid)
{
	struct list_lru_node *nlru;

	nlru = &lru->node[nid];
	return nlru->nr_items;
}
EXPORT_SYMBOL_GPL(list_lru_count_node);

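/*
 * Walk one per-memcg (or per-node) list and apply the caller's isolate
 * callback to each item. The callback is invoked with nlru->lock held and
 * answers with an lru_status: LRU_REMOVED / LRU_REMOVED_RETRY when it took
 * the item off the list, LRU_ROTATE to move it to the tail, LRU_SKIP to
 * leave it alone, or LRU_RETRY when it dropped the lock and the walk must
 * restart.
 *
 * A minimal isolate callback might look like this (illustrative sketch;
 * my_isolate and the freeable-list argument are made-up names, not part of
 * this file):
 *
 *	static enum lru_status my_isolate(struct list_head *item,
 *					  struct list_lru_one *lru,
 *					  spinlock_t *lru_lock, void *arg)
 *	{
 *		struct list_head *freeable = arg;
 *
 *		// move the object to a private list for later disposal
 *		list_lru_isolate_move(lru, item, freeable);
 *		return LRU_REMOVED;
 *	}
 */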
static unsigned long
__list_lru_walk_one(struct list_lru *lru, int nid, int memcg_idx,
		    list_lru_walk_cb isolate, void *cb_arg,
		    unsigned long *nr_to_walk)
{
	struct list_lru_node *nlru = &lru->node[nid];
	struct list_lru_one *l;
	struct list_head *item, *n;
	unsigned long isolated = 0;

restart:
	l = list_lru_from_memcg_idx(lru, nid, memcg_idx);
	if (!l)
		goto out;

	list_for_each_safe(item, n, &l->list) {
		enum lru_status ret;

		/*
		 * decrement nr_to_walk first so that we don't livelock if we
		 * get stuck on large numbers of LRU_RETRY items
		 */
		if (!*nr_to_walk)
			break;
		--*nr_to_walk;

		ret = isolate(item, l, &nlru->lock, cb_arg);
		switch (ret) {
		case LRU_REMOVED_RETRY:
			assert_spin_locked(&nlru->lock);
			fallthrough;
		case LRU_REMOVED:
			isolated++;
			nlru->nr_items--;
			/*
			 * If the lru lock has been dropped, our list
			 * traversal is now invalid and so we have to
			 * restart from scratch.
			 */
			if (ret == LRU_REMOVED_RETRY)
				goto restart;
			break;
		case LRU_ROTATE:
			list_move_tail(item, &l->list);
			break;
		case LRU_SKIP:
			break;
		case LRU_RETRY:
			/*
			 * The lru lock has been dropped, our list traversal is
			 * now invalid and so we have to restart from scratch.
			 */
			assert_spin_locked(&nlru->lock);
			goto restart;
		default:
			BUG();
		}
	}
out:
	return isolated;
}

unsigned long
list_lru_walk_one(struct list_lru *lru, int nid, struct mem_cgroup *memcg,
		  list_lru_walk_cb isolate, void *cb_arg,
		  unsigned long *nr_to_walk)
{
	struct list_lru_node *nlru = &lru->node[nid];
	unsigned long ret;

	spin_lock(&nlru->lock);
	ret = __list_lru_walk_one(lru, nid, memcg_cache_id(memcg), isolate,
				  cb_arg, nr_to_walk);
	spin_unlock(&nlru->lock);
	return ret;
}
EXPORT_SYMBOL_GPL(list_lru_walk_one);

unsigned long
list_lru_walk_one_irq(struct list_lru *lru, int nid, struct mem_cgroup *memcg,
		      list_lru_walk_cb isolate, void *cb_arg,
		      unsigned long *nr_to_walk)
{
	struct list_lru_node *nlru = &lru->node[nid];
	unsigned long ret;

	spin_lock_irq(&nlru->lock);
	ret = __list_lru_walk_one(lru, nid, memcg_cache_id(memcg), isolate,
				  cb_arg, nr_to_walk);
	spin_unlock_irq(&nlru->lock);
	return ret;
}

unsigned long list_lru_walk_node(struct list_lru *lru, int nid,
				 list_lru_walk_cb isolate, void *cb_arg,
				 unsigned long *nr_to_walk)
{
	long isolated = 0;
	int memcg_idx;

	isolated += list_lru_walk_one(lru, nid, NULL, isolate, cb_arg,
				      nr_to_walk);
	if (*nr_to_walk > 0 && list_lru_memcg_aware(lru)) {
		for_each_memcg_cache_index(memcg_idx) {
			struct list_lru_node *nlru = &lru->node[nid];

			spin_lock(&nlru->lock);
			isolated += __list_lru_walk_one(lru, nid, memcg_idx,
							isolate, cb_arg,
							nr_to_walk);
			spin_unlock(&nlru->lock);

			if (*nr_to_walk <= 0)
				break;
		}
	}
	return isolated;
}
EXPORT_SYMBOL_GPL(list_lru_walk_node);

static void init_one_lru(struct list_lru_one *l)
{
	INIT_LIST_HEAD(&l->list);
	l->nr_items = 0;
}

#ifdef CONFIG_MEMCG_KMEM
static void memcg_destroy_list_lru_range(struct list_lru_memcg *mlrus,
					 int begin, int end)
{
	int i;

	for (i = begin; i < end; i++)
		kfree(mlrus->mlru[i]);
}

static struct list_lru_per_memcg *memcg_init_list_lru_one(gfp_t gfp)
{
	int nid;
	struct list_lru_per_memcg *mlru;

	mlru = kmalloc(struct_size(mlru, node, nr_node_ids), gfp);
	if (!mlru)
		return NULL;

	for_each_node(nid)
		init_one_lru(&mlru->node[nid]);

	return mlru;
}

static void memcg_list_lru_free(struct list_lru *lru, int src_idx)
{
	struct list_lru_memcg *mlrus;
	struct list_lru_per_memcg *mlru;

	spin_lock_irq(&lru->lock);
	mlrus = rcu_dereference_protected(lru->mlrus, true);
	mlru = rcu_dereference_protected(mlrus->mlru[src_idx], true);
	rcu_assign_pointer(mlrus->mlru[src_idx], NULL);
	spin_unlock_irq(&lru->lock);

	/*
	 * The __list_lru_walk_one() can walk the list of this node.
	 * We need kvfree_rcu() here. And the walking of the list
	 * is under lru->node[nid]->lock, which can serve as a RCU
	 * read-side critical section.
	 */
	if (mlru)
		kvfree_rcu(mlru, rcu);
}

static int memcg_init_list_lru(struct list_lru *lru, bool memcg_aware)
{
	struct list_lru_memcg *mlrus;
	int size = memcg_nr_cache_ids;

	lru->memcg_aware = memcg_aware;
	if (!memcg_aware)
		return 0;

	spin_lock_init(&lru->lock);

	mlrus = kvzalloc(struct_size(mlrus, mlru, size), GFP_KERNEL);
	if (!mlrus)
		return -ENOMEM;

	RCU_INIT_POINTER(lru->mlrus, mlrus);

	return 0;
}

static void memcg_destroy_list_lru(struct list_lru *lru)
{
	struct list_lru_memcg *mlrus;

	if (!list_lru_memcg_aware(lru))
		return;

	/*
	 * This is called when shrinker has already been unregistered,
	 * and nobody can use it. So, there is no need to use kvfree_rcu().
	 */
	mlrus = rcu_dereference_protected(lru->mlrus, true);
	memcg_destroy_list_lru_range(mlrus, 0, memcg_nr_cache_ids);
	kvfree(mlrus);
}

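/*
 * Grow the per-memcg array of an lru from @old_size to @new_size slots when
 * memcg_nr_cache_ids increases. The new array is published with
 * rcu_assign_pointer() and the old one freed via kvfree_rcu(), so lockless
 * readers in list_lru_from_memcg_idx() remain safe.
 */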
static int memcg_update_list_lru(struct list_lru *lru, int old_size, int new_size)
{
	struct list_lru_memcg *old, *new;

	BUG_ON(old_size > new_size);

	old = rcu_dereference_protected(lru->mlrus,
					lockdep_is_held(&list_lrus_mutex));
	new = kvmalloc(struct_size(new, mlru, new_size), GFP_KERNEL);
	if (!new)
		return -ENOMEM;

	spin_lock_irq(&lru->lock);
	memcpy(&new->mlru, &old->mlru, flex_array_size(new, mlru, old_size));
	memset(&new->mlru[old_size], 0, flex_array_size(new, mlru, new_size - old_size));
	rcu_assign_pointer(lru->mlrus, new);
	spin_unlock_irq(&lru->lock);

	kvfree_rcu(old, rcu);
	return 0;
}

int memcg_update_all_list_lrus(int new_size)
{
	int ret = 0;
	struct list_lru *lru;
	int old_size = memcg_nr_cache_ids;

	mutex_lock(&list_lrus_mutex);
	list_for_each_entry(lru, &memcg_list_lrus, list) {
		ret = memcg_update_list_lru(lru, old_size, new_size);
		if (ret)
			break;
	}
	mutex_unlock(&list_lrus_mutex);
	return ret;
}

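/*
 * Splice all entries of the @src_idx per-memcg list on @nid onto the list
 * of @dst_memcg, transferring the item count and setting the shrinker bit
 * on the destination so the moved objects stay visible to reclaim.
 */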
static void memcg_reparent_list_lru_node(struct list_lru *lru, int nid,
					 int src_idx, struct mem_cgroup *dst_memcg)
{
	struct list_lru_node *nlru = &lru->node[nid];
	int dst_idx = dst_memcg->kmemcg_id;
	struct list_lru_one *src, *dst;

	/*
	 * Since list_lru_{add,del} may be called under an IRQ-safe lock,
	 * we have to use IRQ-safe primitives here to avoid deadlock.
	 */
	spin_lock_irq(&nlru->lock);

	src = list_lru_from_memcg_idx(lru, nid, src_idx);
	if (!src)
		goto out;
	dst = list_lru_from_memcg_idx(lru, nid, dst_idx);

	list_splice_init(&src->list, &dst->list);

	if (src->nr_items) {
		dst->nr_items += src->nr_items;
		set_shrinker_bit(dst_memcg, nid, lru_shrinker_id(lru));
		src->nr_items = 0;
	}
out:
	spin_unlock_irq(&nlru->lock);
}

static void memcg_reparent_list_lru(struct list_lru *lru,
				    int src_idx, struct mem_cgroup *dst_memcg)
{
	int i;

	for_each_node(i)
		memcg_reparent_list_lru_node(lru, i, src_idx, dst_memcg);

	memcg_list_lru_free(lru, src_idx);
}

void memcg_reparent_list_lrus(struct mem_cgroup *memcg, struct mem_cgroup *parent)
{
	struct cgroup_subsys_state *css;
	struct list_lru *lru;
	int src_idx = memcg->kmemcg_id;

	/*
	 * Change kmemcg_id of this cgroup and all its descendants to the
	 * parent's id, and then move all entries from this cgroup's list_lrus
	 * to ones of the parent.
	 *
	 * After we have finished, all list_lrus corresponding to this cgroup
	 * are guaranteed to remain empty. So we can safely free this cgroup's
	 * list lrus in memcg_list_lru_free().
	 *
	 * Changing ->kmemcg_id to the parent can prevent memcg_list_lru_alloc()
	 * from allocating list lrus for this cgroup after memcg_list_lru_free()
	 * call.
	 */
	rcu_read_lock();
	css_for_each_descendant_pre(css, &memcg->css) {
		struct mem_cgroup *child;

		child = mem_cgroup_from_css(css);
		child->kmemcg_id = parent->kmemcg_id;
	}
	rcu_read_unlock();

	mutex_lock(&list_lrus_mutex);
	list_for_each_entry(lru, &memcg_list_lrus, list)
		memcg_reparent_list_lru(lru, src_idx, parent);
	mutex_unlock(&list_lrus_mutex);
}

static bool memcg_list_lru_allocated(struct mem_cgroup *memcg,
				     struct list_lru *lru)
{
	bool allocated;
	int idx;

	idx = memcg->kmemcg_id;
	if (unlikely(idx < 0))
		return true;

	rcu_read_lock();
	allocated = !!rcu_access_pointer(rcu_dereference(lru->mlrus)->mlru[idx]);
	rcu_read_unlock();

	return allocated;
}

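/*
 * Make sure @memcg and all of its ancestors have a list_lru_per_memcg
 * allocated for @lru before an object is added. The allocations are done
 * outside lru->lock and only published under it; any entry whose slot is
 * already populated (or no longer valid) by then is simply freed.
 */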
int memcg_list_lru_alloc(struct mem_cgroup *memcg, struct list_lru *lru,
			 gfp_t gfp)
{
	int i;
	unsigned long flags;
	struct list_lru_memcg *mlrus;
	struct list_lru_memcg_table {
		struct list_lru_per_memcg *mlru;
		struct mem_cgroup *memcg;
	} *table;

	if (!list_lru_memcg_aware(lru) || memcg_list_lru_allocated(memcg, lru))
		return 0;

	gfp &= GFP_RECLAIM_MASK;
	table = kmalloc_array(memcg->css.cgroup->level, sizeof(*table), gfp);
	if (!table)
		return -ENOMEM;

	/*
	 * Because the list_lru can be reparented to the parent cgroup's
	 * list_lru, we should make sure that this cgroup and all its
	 * ancestors have allocated list_lru_per_memcg.
	 */
	for (i = 0; memcg; memcg = parent_mem_cgroup(memcg), i++) {
		if (memcg_list_lru_allocated(memcg, lru))
			break;

		table[i].memcg = memcg;
		table[i].mlru = memcg_init_list_lru_one(gfp);
		if (!table[i].mlru) {
			while (i--)
				kfree(table[i].mlru);
			kfree(table);
			return -ENOMEM;
		}
	}

	spin_lock_irqsave(&lru->lock, flags);
	mlrus = rcu_dereference_protected(lru->mlrus, true);
	while (i--) {
		int index = table[i].memcg->kmemcg_id;
		struct list_lru_per_memcg *mlru = table[i].mlru;

		if (index < 0 || rcu_dereference_protected(mlrus->mlru[index], true))
			kfree(mlru);
		else
			rcu_assign_pointer(mlrus->mlru[index], mlru);
	}
	spin_unlock_irqrestore(&lru->lock, flags);

	kfree(table);

	return 0;
}
#else
static int memcg_init_list_lru(struct list_lru *lru, bool memcg_aware)
{
	return 0;
}

static void memcg_destroy_list_lru(struct list_lru *lru)
{
}
#endif /* CONFIG_MEMCG_KMEM */

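/*
 * Common initialisation path for both memcg-aware and plain lrus: allocate
 * the per-node array, set up each node's lock and list, and register the
 * lru on memcg_list_lrus when it is memcg aware.
 */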
int __list_lru_init(struct list_lru *lru, bool memcg_aware,
		    struct lock_class_key *key, struct shrinker *shrinker)
{
	int i;
	int err = -ENOMEM;

#ifdef CONFIG_MEMCG_KMEM
	if (shrinker)
		lru->shrinker_id = shrinker->id;
	else
		lru->shrinker_id = -1;
#endif
	memcg_get_cache_ids();

	lru->node = kcalloc(nr_node_ids, sizeof(*lru->node), GFP_KERNEL);
	if (!lru->node)
		goto out;

	for_each_node(i) {
		spin_lock_init(&lru->node[i].lock);
		if (key)
			lockdep_set_class(&lru->node[i].lock, key);
		init_one_lru(&lru->node[i].lru);
	}

	err = memcg_init_list_lru(lru, memcg_aware);
	if (err) {
		kfree(lru->node);
		/* Do this so a list_lru_destroy() doesn't crash: */
		lru->node = NULL;
		goto out;
	}

	list_lru_register(lru);
out:
	memcg_put_cache_ids();
	return err;
}
EXPORT_SYMBOL_GPL(__list_lru_init);

void list_lru_destroy(struct list_lru *lru)
{
	/* Already destroyed or not yet initialized? */
	if (!lru->node)
		return;

	memcg_get_cache_ids();

	list_lru_unregister(lru);

	memcg_destroy_list_lru(lru);
	kfree(lru->node);
	lru->node = NULL;

#ifdef CONFIG_MEMCG_KMEM
	lru->shrinker_id = -1;
#endif
	memcg_put_cache_ids();
}
EXPORT_SYMBOL_GPL(list_lru_destroy);