mm: memcontrol: clarify migration where old page is uncharged
[linux-2.6-block.git] / mm / memcontrol.c
8cdea7c0
BS
1/* memcontrol.c - Memory Controller
2 *
3 * Copyright IBM Corporation, 2007
4 * Author Balbir Singh <balbir@linux.vnet.ibm.com>
5 *
78fb7466
PE
6 * Copyright 2007 OpenVZ SWsoft Inc
7 * Author: Pavel Emelianov <xemul@openvz.org>
8 *
2e72b634
KS
9 * Memory thresholds
10 * Copyright (C) 2009 Nokia Corporation
11 * Author: Kirill A. Shutemov
12 *
7ae1e1d0
GC
13 * Kernel Memory Controller
14 * Copyright (C) 2012 Parallels Inc. and Google Inc.
15 * Authors: Glauber Costa and Suleiman Souhlal
16 *
8cdea7c0
BS
17 * This program is free software; you can redistribute it and/or modify
18 * it under the terms of the GNU General Public License as published by
19 * the Free Software Foundation; either version 2 of the License, or
20 * (at your option) any later version.
21 *
22 * This program is distributed in the hope that it will be useful,
23 * but WITHOUT ANY WARRANTY; without even the implied warranty of
24 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
25 * GNU General Public License for more details.
26 */
27
3e32cb2e 28#include <linux/page_counter.h>
8cdea7c0
BS
29#include <linux/memcontrol.h>
30#include <linux/cgroup.h>
78fb7466 31#include <linux/mm.h>
4ffef5fe 32#include <linux/hugetlb.h>
d13d1443 33#include <linux/pagemap.h>
d52aa412 34#include <linux/smp.h>
8a9f3ccd 35#include <linux/page-flags.h>
66e1707b 36#include <linux/backing-dev.h>
8a9f3ccd
BS
37#include <linux/bit_spinlock.h>
38#include <linux/rcupdate.h>
e222432b 39#include <linux/limits.h>
b9e15baf 40#include <linux/export.h>
8c7c6e34 41#include <linux/mutex.h>
bb4cc1a8 42#include <linux/rbtree.h>
b6ac57d5 43#include <linux/slab.h>
66e1707b 44#include <linux/swap.h>
02491447 45#include <linux/swapops.h>
66e1707b 46#include <linux/spinlock.h>
2e72b634 47#include <linux/eventfd.h>
79bd9814 48#include <linux/poll.h>
2e72b634 49#include <linux/sort.h>
66e1707b 50#include <linux/fs.h>
d2ceb9b7 51#include <linux/seq_file.h>
70ddf637 52#include <linux/vmpressure.h>
b69408e8 53#include <linux/mm_inline.h>
52d4b9ac 54#include <linux/page_cgroup.h>
cdec2e42 55#include <linux/cpu.h>
158e0a2d 56#include <linux/oom.h>
0056f4e6 57#include <linux/lockdep.h>
79bd9814 58#include <linux/file.h>
08e552c6 59#include "internal.h"
d1a4c0b3 60#include <net/sock.h>
4bd2c1ee 61#include <net/ip.h>
d1a4c0b3 62#include <net/tcp_memcontrol.h>
f35c3a8e 63#include "slab.h"
8cdea7c0 64
8697d331
BS
65#include <asm/uaccess.h>
66
cc8e970c
KM
67#include <trace/events/vmscan.h>
68
073219e9
TH
69struct cgroup_subsys memory_cgrp_subsys __read_mostly;
70EXPORT_SYMBOL(memory_cgrp_subsys);
68ae564b 71
a181b0e8 72#define MEM_CGROUP_RECLAIM_RETRIES 5
6bbda35c 73static struct mem_cgroup *root_mem_cgroup __read_mostly;
8cdea7c0 74
c255a458 75#ifdef CONFIG_MEMCG_SWAP
338c8431 76/* Turned on only when memory cgroup is enabled && really_do_swap_account = 1 */
c077719b 77int do_swap_account __read_mostly;
a42c390c
MH
78
79/* for remembering the boot option */
c255a458 80#ifdef CONFIG_MEMCG_SWAP_ENABLED
a42c390c
MH
81static int really_do_swap_account __initdata = 1;
82#else
ada4ba59 83static int really_do_swap_account __initdata;
a42c390c
MH
84#endif
85
c077719b 86#else
a0db00fc 87#define do_swap_account 0
c077719b
KH
88#endif
89
90
af7c4b0e
JW
91static const char * const mem_cgroup_stat_names[] = {
92 "cache",
93 "rss",
b070e65c 94 "rss_huge",
af7c4b0e 95 "mapped_file",
3ea67d06 96 "writeback",
af7c4b0e
JW
97 "swap",
98};
99
e9f8974f
JW
100enum mem_cgroup_events_index {
101 MEM_CGROUP_EVENTS_PGPGIN, /* # of pages paged in */
102 MEM_CGROUP_EVENTS_PGPGOUT, /* # of pages paged out */
456f998e
YH
103 MEM_CGROUP_EVENTS_PGFAULT, /* # of page-faults */
104 MEM_CGROUP_EVENTS_PGMAJFAULT, /* # of major page-faults */
e9f8974f
JW
105 MEM_CGROUP_EVENTS_NSTATS,
106};
af7c4b0e
JW
107
108static const char * const mem_cgroup_events_names[] = {
109 "pgpgin",
110 "pgpgout",
111 "pgfault",
112 "pgmajfault",
113};
114
58cf188e
SZ
115static const char * const mem_cgroup_lru_names[] = {
116 "inactive_anon",
117 "active_anon",
118 "inactive_file",
119 "active_file",
120 "unevictable",
121};
122
7a159cc9
JW
123/*
124 * Per memcg event counter is incremented at every pagein/pageout. With THP,
125 * it will be incremented by the number of pages. This counter is used
126 * to trigger some periodic events. This is straightforward and better
127 * than using jiffies etc. to handle periodic memcg events.
128 */
129enum mem_cgroup_events_target {
130 MEM_CGROUP_TARGET_THRESH,
bb4cc1a8 131 MEM_CGROUP_TARGET_SOFTLIMIT,
453a9bf3 132 MEM_CGROUP_TARGET_NUMAINFO,
7a159cc9
JW
133 MEM_CGROUP_NTARGETS,
134};
a0db00fc
KS
135#define THRESHOLDS_EVENTS_TARGET 128
136#define SOFTLIMIT_EVENTS_TARGET 1024
137#define NUMAINFO_EVENTS_TARGET 1024
e9f8974f 138
d52aa412 139struct mem_cgroup_stat_cpu {
7a159cc9 140 long count[MEM_CGROUP_STAT_NSTATS];
e9f8974f 141 unsigned long events[MEM_CGROUP_EVENTS_NSTATS];
13114716 142 unsigned long nr_page_events;
7a159cc9 143 unsigned long targets[MEM_CGROUP_NTARGETS];
d52aa412
KH
144};
145
5ac8fb31
JW
146struct reclaim_iter {
147 struct mem_cgroup *position;
527a5ec9
JW
148 /* scan generation, increased every round-trip */
149 unsigned int generation;
150};
151
6d12e2d8
KH
152/*
153 * per-zone information in memory controller.
154 */
6d12e2d8 155struct mem_cgroup_per_zone {
6290df54 156 struct lruvec lruvec;
1eb49272 157 unsigned long lru_size[NR_LRU_LISTS];
3e2f41f1 158
5ac8fb31 159 struct reclaim_iter iter[DEF_PRIORITY + 1];
527a5ec9 160
bb4cc1a8 161 struct rb_node tree_node; /* RB tree node */
3e32cb2e 162 unsigned long usage_in_excess;/* Set to the value by which */
bb4cc1a8
AM
163 /* the soft limit is exceeded*/
164 bool on_tree;
d79154bb 165 struct mem_cgroup *memcg; /* Back pointer, we cannot */
4e416953 166 /* use container_of */
6d12e2d8 167};
6d12e2d8
KH
168
169struct mem_cgroup_per_node {
170 struct mem_cgroup_per_zone zoneinfo[MAX_NR_ZONES];
171};
172
bb4cc1a8
AM
173/*
174 * Cgroups above their limits are maintained in an RB-tree, independent of
175 * their hierarchy representation
176 */
177
178struct mem_cgroup_tree_per_zone {
179 struct rb_root rb_root;
180 spinlock_t lock;
181};
182
183struct mem_cgroup_tree_per_node {
184 struct mem_cgroup_tree_per_zone rb_tree_per_zone[MAX_NR_ZONES];
185};
186
187struct mem_cgroup_tree {
188 struct mem_cgroup_tree_per_node *rb_tree_per_node[MAX_NUMNODES];
189};
190
191static struct mem_cgroup_tree soft_limit_tree __read_mostly;
192
2e72b634
KS
193struct mem_cgroup_threshold {
194 struct eventfd_ctx *eventfd;
3e32cb2e 195 unsigned long threshold;
2e72b634
KS
196};
197
9490ff27 198/* For threshold */
2e72b634 199struct mem_cgroup_threshold_ary {
748dad36 200 /* An array index points to threshold just below or equal to usage. */
5407a562 201 int current_threshold;
2e72b634
KS
202 /* Size of entries[] */
203 unsigned int size;
204 /* Array of thresholds */
205 struct mem_cgroup_threshold entries[0];
206};
2c488db2
KS
207
208struct mem_cgroup_thresholds {
209 /* Primary thresholds array */
210 struct mem_cgroup_threshold_ary *primary;
211 /*
212 * Spare threshold array.
213 * This is needed to make mem_cgroup_unregister_event() "never fail".
214 * It must be able to store at least primary->size - 1 entries.
215 */
216 struct mem_cgroup_threshold_ary *spare;
217};
218
9490ff27
KH
219/* for OOM */
220struct mem_cgroup_eventfd_list {
221 struct list_head list;
222 struct eventfd_ctx *eventfd;
223};
2e72b634 224
79bd9814
TH
225/*
226 * cgroup_event represents events which userspace wants to receive.
227 */
3bc942f3 228struct mem_cgroup_event {
79bd9814 229 /*
59b6f873 230 * memcg which the event belongs to.
79bd9814 231 */
59b6f873 232 struct mem_cgroup *memcg;
79bd9814
TH
233 /*
234 * eventfd to signal userspace about the event.
235 */
236 struct eventfd_ctx *eventfd;
237 /*
238 * Each of these is stored in a list by the cgroup.
239 */
240 struct list_head list;
fba94807
TH
241 /*
242 * register_event() callback will be used to add a new userspace
243 * waiter for changes related to this event. Use eventfd_signal()
244 * on the eventfd to send a notification to userspace.
245 */
59b6f873 246 int (*register_event)(struct mem_cgroup *memcg,
347c4a87 247 struct eventfd_ctx *eventfd, const char *args);
fba94807
TH
248 /*
249 * unregister_event() callback will be called when userspace closes
250 * the eventfd or when the cgroup is removed. This callback must be set
251 * if you want to provide notification functionality.
252 */
59b6f873 253 void (*unregister_event)(struct mem_cgroup *memcg,
fba94807 254 struct eventfd_ctx *eventfd);
79bd9814
TH
255 /*
256 * All fields below are needed to unregister the event when
257 * userspace closes eventfd.
258 */
259 poll_table pt;
260 wait_queue_head_t *wqh;
261 wait_queue_t wait;
262 struct work_struct remove;
263};
264
c0ff4b85
R
265static void mem_cgroup_threshold(struct mem_cgroup *memcg);
266static void mem_cgroup_oom_notify(struct mem_cgroup *memcg);
2e72b634 267
8cdea7c0
BS
268/*
269 * The memory controller data structure. The memory controller controls both
270 * page cache and RSS per cgroup. We would eventually like to provide
271 * statistics based on the statistics developed by Rik Van Riel for clock-pro,
272 * to help the administrator determine what knobs to tune.
273 *
274 * TODO: Add a water mark for the memory controller. Reclaim will begin when
8a9f3ccd
BS
275 * we hit the water mark. Maybe even add a low water mark, such that
276 * no reclaim occurs from a cgroup at its low water mark; this is
277 * a feature that will be implemented much later in the future.
8cdea7c0
BS
278 */
279struct mem_cgroup {
280 struct cgroup_subsys_state css;
3e32cb2e
JW
281
282 /* Accounted resources */
283 struct page_counter memory;
284 struct page_counter memsw;
285 struct page_counter kmem;
286
287 unsigned long soft_limit;
59927fb9 288
70ddf637
AV
289 /* vmpressure notifications */
290 struct vmpressure vmpressure;
291
2f7dd7a4
JW
292 /* css_online() has been completed */
293 int initialized;
294
18f59ea7
BS
295 /*
296 * Should the accounting and control be hierarchical, per subtree?
297 */
298 bool use_hierarchy;
510fc4e1 299 unsigned long kmem_account_flags; /* See KMEM_ACCOUNTED_*, below */
79dfdacc
MH
300
301 bool oom_lock;
302 atomic_t under_oom;
3812c8c8 303 atomic_t oom_wakeups;
79dfdacc 304
1f4c025b 305 int swappiness;
3c11ecf4
KH
306 /* OOM-Killer disable */
307 int oom_kill_disable;
a7885eb8 308
2e72b634
KS
309 /* protect arrays of thresholds */
310 struct mutex thresholds_lock;
311
312 /* thresholds for memory usage. RCU-protected */
2c488db2 313 struct mem_cgroup_thresholds thresholds;
907860ed 314
2e72b634 315 /* thresholds for mem+swap usage. RCU-protected */
2c488db2 316 struct mem_cgroup_thresholds memsw_thresholds;
907860ed 317
9490ff27
KH
318 /* For oom notifier event fd */
319 struct list_head oom_notify;
185efc0f 320
7dc74be0
DN
321 /*
322 * Should we move charges of a task when a task is moved into this
323 * mem_cgroup ? And what type of charges should we move ?
324 */
f894ffa8 325 unsigned long move_charge_at_immigrate;
619d094b
KH
326 /*
327 * set > 0 if pages under this cgroup are moving to another cgroup.
328 */
329 atomic_t moving_account;
312734c0
KH
330 /* taken only while moving_account > 0 */
331 spinlock_t move_lock;
d52aa412 332 /*
c62b1a3b 333 * percpu counter.
d52aa412 334 */
3a7951b4 335 struct mem_cgroup_stat_cpu __percpu *stat;
711d3d2c
KH
336 /*
337 * used when a cpu is offlined or for other synchronizations;
338 * See mem_cgroup_read_stat().
339 */
340 struct mem_cgroup_stat_cpu nocpu_base;
341 spinlock_t pcp_counter_lock;
d1a4c0b3 342
4bd2c1ee 343#if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_INET)
2e685cad 344 struct cg_proto tcp_mem;
d1a4c0b3 345#endif
2633d7a0 346#if defined(CONFIG_MEMCG_KMEM)
bd673145
VD
347 /* analogous to slab_common's slab_caches list, but per-memcg;
348 * protected by memcg_slab_mutex */
2633d7a0 349 struct list_head memcg_slab_caches;
2633d7a0
GC
350 /* Index in the kmem_cache->memcg_params->memcg_caches array */
351 int kmemcg_id;
352#endif
45cf7ebd
GC
353
354 int last_scanned_node;
355#if MAX_NUMNODES > 1
356 nodemask_t scan_nodes;
357 atomic_t numainfo_events;
358 atomic_t numainfo_updating;
359#endif
70ddf637 360
fba94807
TH
361 /* List of events which userspace wants to receive */
362 struct list_head event_list;
363 spinlock_t event_list_lock;
364
54f72fe0
JW
365 struct mem_cgroup_per_node *nodeinfo[0];
366 /* WARNING: nodeinfo must be the last member here */
8cdea7c0
BS
367};
368
510fc4e1
GC
369/* internal-only representation of the status of kmem accounting. */
370enum {
6de64beb 371 KMEM_ACCOUNTED_ACTIVE, /* accounted by this cgroup itself */
510fc4e1
GC
372};
373
510fc4e1
GC
374#ifdef CONFIG_MEMCG_KMEM
375static inline void memcg_kmem_set_active(struct mem_cgroup *memcg)
376{
377 set_bit(KMEM_ACCOUNTED_ACTIVE, &memcg->kmem_account_flags);
378}
7de37682
GC
379
380static bool memcg_kmem_is_active(struct mem_cgroup *memcg)
381{
382 return test_bit(KMEM_ACCOUNTED_ACTIVE, &memcg->kmem_account_flags);
383}
384
510fc4e1
GC
385#endif
386
7dc74be0
DN
387/* Stuff for moving charges at task migration. */
388/*
ee5e8472
GC
389 * Types of charges to be moved. "move_charge_at_immigrate" and
390 * "immigrate_flags" are treated as a left-shifted bitmap of these types.
7dc74be0
DN
391 */
392enum move_type {
4ffef5fe 393 MOVE_CHARGE_TYPE_ANON, /* private anonymous page and swap of it */
87946a72 394 MOVE_CHARGE_TYPE_FILE, /* file page(including tmpfs) and swap of it */
7dc74be0
DN
395 NR_MOVE_TYPE,
396};
397
4ffef5fe
DN
398/* "mc" and its members are protected by cgroup_mutex */
399static struct move_charge_struct {
b1dd693e 400 spinlock_t lock; /* for from, to */
4ffef5fe
DN
401 struct mem_cgroup *from;
402 struct mem_cgroup *to;
ee5e8472 403 unsigned long immigrate_flags;
4ffef5fe 404 unsigned long precharge;
854ffa8d 405 unsigned long moved_charge;
483c30b5 406 unsigned long moved_swap;
8033b97c
DN
407 struct task_struct *moving_task; /* a task moving charges */
408 wait_queue_head_t waitq; /* a waitq for other context */
409} mc = {
2bd9bb20 410 .lock = __SPIN_LOCK_UNLOCKED(mc.lock),
8033b97c
DN
411 .waitq = __WAIT_QUEUE_HEAD_INITIALIZER(mc.waitq),
412};
4ffef5fe 413
90254a65
DN
414static bool move_anon(void)
415{
ee5e8472 416 return test_bit(MOVE_CHARGE_TYPE_ANON, &mc.immigrate_flags);
90254a65
DN
417}
418
87946a72
DN
419static bool move_file(void)
420{
ee5e8472 421 return test_bit(MOVE_CHARGE_TYPE_FILE, &mc.immigrate_flags);
87946a72
DN
422}
423
4e416953
BS
424/*
425 * Maximum loops in mem_cgroup_hierarchical_reclaim(), used for soft
426 * limit reclaim to prevent infinite loops, if they ever occur.
427 */
a0db00fc 428#define MEM_CGROUP_MAX_RECLAIM_LOOPS 100
bb4cc1a8 429#define MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS 2
4e416953 430
217bc319
KH
431enum charge_type {
432 MEM_CGROUP_CHARGE_TYPE_CACHE = 0,
41326c17 433 MEM_CGROUP_CHARGE_TYPE_ANON,
d13d1443 434 MEM_CGROUP_CHARGE_TYPE_SWAPOUT, /* for accounting swapcache */
8a9478ca 435 MEM_CGROUP_CHARGE_TYPE_DROP, /* a page was unused swap cache */
c05555b5
KH
436 NR_CHARGE_TYPE,
437};
438
8c7c6e34 439/* for encoding cft->private value on file */
86ae53e1
GC
440enum res_type {
441 _MEM,
442 _MEMSWAP,
443 _OOM_TYPE,
510fc4e1 444 _KMEM,
86ae53e1
GC
445};
446
a0db00fc
KS
447#define MEMFILE_PRIVATE(x, val) ((x) << 16 | (val))
448#define MEMFILE_TYPE(val) ((val) >> 16 & 0xffff)
8c7c6e34 449#define MEMFILE_ATTR(val) ((val) & 0xffff)
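/*
 * Example: MEMFILE_PRIVATE(_MEMSWAP, 3) packs the res_type into bits 16..31
 * and the attribute (3 here is just an illustrative value) into bits 0..15 of
 * cft->private; MEMFILE_TYPE() and MEMFILE_ATTR() recover the two halves.
 */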
9490ff27
KH
450/* Used for OOM nofiier */
451#define OOM_CONTROL (0)
8c7c6e34 452
0999821b
GC
453/*
454 * The memcg_create_mutex will be held whenever a new cgroup is created.
455 * As a consequence, any change that needs to protect against new child cgroups
456 * appearing has to hold it as well.
457 */
458static DEFINE_MUTEX(memcg_create_mutex);
459
b2145145
WL
460struct mem_cgroup *mem_cgroup_from_css(struct cgroup_subsys_state *s)
461{
a7c6d554 462 return s ? container_of(s, struct mem_cgroup, css) : NULL;
b2145145
WL
463}
464
70ddf637
AV
465/* Some nice accessors for the vmpressure. */
466struct vmpressure *memcg_to_vmpressure(struct mem_cgroup *memcg)
467{
468 if (!memcg)
469 memcg = root_mem_cgroup;
470 return &memcg->vmpressure;
471}
472
473struct cgroup_subsys_state *vmpressure_to_css(struct vmpressure *vmpr)
474{
475 return &container_of(vmpr, struct mem_cgroup, vmpressure)->css;
476}
477
7ffc0edc
MH
478static inline bool mem_cgroup_is_root(struct mem_cgroup *memcg)
479{
480 return (memcg == root_mem_cgroup);
481}
482
4219b2da
LZ
483/*
484 * We restrict the id in the range of [1, 65535], so it can fit into
485 * an unsigned short.
486 */
487#define MEM_CGROUP_ID_MAX USHRT_MAX
488
34c00c31
LZ
489static inline unsigned short mem_cgroup_id(struct mem_cgroup *memcg)
490{
15a4c835 491 return memcg->css.id;
34c00c31
LZ
492}
493
494static inline struct mem_cgroup *mem_cgroup_from_id(unsigned short id)
495{
496 struct cgroup_subsys_state *css;
497
7d699ddb 498 css = css_from_id(id, &memory_cgrp_subsys);
34c00c31
LZ
499 return mem_cgroup_from_css(css);
500}
501
e1aab161 502/* Writing them here to avoid exposing memcg's inner layout */
4bd2c1ee 503#if defined(CONFIG_INET) && defined(CONFIG_MEMCG_KMEM)
e1aab161 504
e1aab161
GC
505void sock_update_memcg(struct sock *sk)
506{
376be5ff 507 if (mem_cgroup_sockets_enabled) {
e1aab161 508 struct mem_cgroup *memcg;
3f134619 509 struct cg_proto *cg_proto;
e1aab161
GC
510
511 BUG_ON(!sk->sk_prot->proto_cgroup);
512
f3f511e1
GC
513 /* Socket cloning can throw us here with sk_cgrp already
514 * filled. It won't, however, necessarily happen from
515 * process context. So the test for root memcg given
516 * the current task's memcg won't help us in this case.
517 *
518 * Respecting the original socket's memcg is a better
519 * decision in this case.
520 */
521 if (sk->sk_cgrp) {
522 BUG_ON(mem_cgroup_is_root(sk->sk_cgrp->memcg));
5347e5ae 523 css_get(&sk->sk_cgrp->memcg->css);
f3f511e1
GC
524 return;
525 }
526
e1aab161
GC
527 rcu_read_lock();
528 memcg = mem_cgroup_from_task(current);
3f134619 529 cg_proto = sk->sk_prot->proto_cgroup(memcg);
5347e5ae 530 if (!mem_cgroup_is_root(memcg) &&
ec903c0c
TH
531 memcg_proto_active(cg_proto) &&
532 css_tryget_online(&memcg->css)) {
3f134619 533 sk->sk_cgrp = cg_proto;
e1aab161
GC
534 }
535 rcu_read_unlock();
536 }
537}
538EXPORT_SYMBOL(sock_update_memcg);
539
540void sock_release_memcg(struct sock *sk)
541{
376be5ff 542 if (mem_cgroup_sockets_enabled && sk->sk_cgrp) {
e1aab161
GC
543 struct mem_cgroup *memcg;
544 WARN_ON(!sk->sk_cgrp->memcg);
545 memcg = sk->sk_cgrp->memcg;
5347e5ae 546 css_put(&sk->sk_cgrp->memcg->css);
e1aab161
GC
547 }
548}
d1a4c0b3
GC
549
550struct cg_proto *tcp_proto_cgroup(struct mem_cgroup *memcg)
551{
552 if (!memcg || mem_cgroup_is_root(memcg))
553 return NULL;
554
2e685cad 555 return &memcg->tcp_mem;
d1a4c0b3
GC
556}
557EXPORT_SYMBOL(tcp_proto_cgroup);
e1aab161 558
3f134619
GC
559static void disarm_sock_keys(struct mem_cgroup *memcg)
560{
2e685cad 561 if (!memcg_proto_activated(&memcg->tcp_mem))
3f134619
GC
562 return;
563 static_key_slow_dec(&memcg_socket_limit_enabled);
564}
565#else
566static void disarm_sock_keys(struct mem_cgroup *memcg)
567{
568}
569#endif
570
a8964b9b 571#ifdef CONFIG_MEMCG_KMEM
55007d84
GC
572/*
573 * This will be the memcg's index in each cache's ->memcg_params->memcg_caches.
b8627835
LZ
574 * The main reason for not using cgroup id for this:
575 * this works better in sparse environments, where we have a lot of memcgs,
576 * but only a few kmem-limited. For instance, if we had 200
577 * memcgs and none but the 200th were kmem-limited, we'd need a
578 * 200-entry array for that.
55007d84
GC
579 *
580 * The current size of the caches array is stored in
581 * memcg_limited_groups_array_size. It will double each time we have to
582 * increase it.
583 */
584static DEFINE_IDA(kmem_limited_groups);
749c5415
GC
585int memcg_limited_groups_array_size;
586
55007d84
GC
587/*
588 * MIN_SIZE is different from 1, because we would like to avoid going through
589 * the alloc/free process all the time. In a small machine, 4 kmem-limited
590 * cgroups is a reasonable guess. In the future, it could be a parameter or
591 * tunable, but that is not strictly necessary.
592 *
b8627835 593 * MAX_SIZE should be as large as the number of cgrp_ids. Ideally, we could get
55007d84
GC
594 * this constant directly from cgroup, but it is understandable that this is
595 * better kept as an internal representation in cgroup.c. In any case, the
b8627835 596 * cgrp_id space is not getting any smaller, and we don't have to necessarily
55007d84
GC
597 * increase ours as well if it increases.
598 */
599#define MEMCG_CACHES_MIN_SIZE 4
b8627835 600#define MEMCG_CACHES_MAX_SIZE MEM_CGROUP_ID_MAX
55007d84 601
d7f25f8a
GC
602/*
603 * A lot of the calls to the cache allocation functions are expected to be
604 * inlined by the compiler. Since the calls to memcg_kmem_get_cache are
605 * conditional to this static branch, we'll have to allow modules that do
606 * kmem_cache_alloc and the like to see this symbol as well
607 */
a8964b9b 608struct static_key memcg_kmem_enabled_key;
d7f25f8a 609EXPORT_SYMBOL(memcg_kmem_enabled_key);
a8964b9b 610
f3bb3043
VD
611static void memcg_free_cache_id(int id);
612
a8964b9b
GC
613static void disarm_kmem_keys(struct mem_cgroup *memcg)
614{
55007d84 615 if (memcg_kmem_is_active(memcg)) {
a8964b9b 616 static_key_slow_dec(&memcg_kmem_enabled_key);
f3bb3043 617 memcg_free_cache_id(memcg->kmemcg_id);
55007d84 618 }
bea207c8
GC
619 /*
620 * This check can't live in kmem destruction function,
621 * since the charges will outlive the cgroup
622 */
3e32cb2e 623 WARN_ON(page_counter_read(&memcg->kmem));
a8964b9b
GC
624}
625#else
626static void disarm_kmem_keys(struct mem_cgroup *memcg)
627{
628}
629#endif /* CONFIG_MEMCG_KMEM */
630
631static void disarm_static_keys(struct mem_cgroup *memcg)
632{
633 disarm_sock_keys(memcg);
634 disarm_kmem_keys(memcg);
635}
636
f64c3f54 637static struct mem_cgroup_per_zone *
e231875b 638mem_cgroup_zone_zoneinfo(struct mem_cgroup *memcg, struct zone *zone)
f64c3f54 639{
e231875b
JZ
640 int nid = zone_to_nid(zone);
641 int zid = zone_idx(zone);
642
54f72fe0 643 return &memcg->nodeinfo[nid]->zoneinfo[zid];
f64c3f54
BS
644}
645
c0ff4b85 646struct cgroup_subsys_state *mem_cgroup_css(struct mem_cgroup *memcg)
d324236b 647{
c0ff4b85 648 return &memcg->css;
d324236b
WF
649}
650
f64c3f54 651static struct mem_cgroup_per_zone *
e231875b 652mem_cgroup_page_zoneinfo(struct mem_cgroup *memcg, struct page *page)
f64c3f54 653{
97a6c37b
JW
654 int nid = page_to_nid(page);
655 int zid = page_zonenum(page);
f64c3f54 656
e231875b 657 return &memcg->nodeinfo[nid]->zoneinfo[zid];
f64c3f54
BS
658}
659
bb4cc1a8
AM
660static struct mem_cgroup_tree_per_zone *
661soft_limit_tree_node_zone(int nid, int zid)
662{
663 return &soft_limit_tree.rb_tree_per_node[nid]->rb_tree_per_zone[zid];
664}
665
666static struct mem_cgroup_tree_per_zone *
667soft_limit_tree_from_page(struct page *page)
668{
669 int nid = page_to_nid(page);
670 int zid = page_zonenum(page);
671
672 return &soft_limit_tree.rb_tree_per_node[nid]->rb_tree_per_zone[zid];
673}
674
cf2c8127
JW
675static void __mem_cgroup_insert_exceeded(struct mem_cgroup_per_zone *mz,
676 struct mem_cgroup_tree_per_zone *mctz,
3e32cb2e 677 unsigned long new_usage_in_excess)
bb4cc1a8
AM
678{
679 struct rb_node **p = &mctz->rb_root.rb_node;
680 struct rb_node *parent = NULL;
681 struct mem_cgroup_per_zone *mz_node;
682
683 if (mz->on_tree)
684 return;
685
686 mz->usage_in_excess = new_usage_in_excess;
687 if (!mz->usage_in_excess)
688 return;
689 while (*p) {
690 parent = *p;
691 mz_node = rb_entry(parent, struct mem_cgroup_per_zone,
692 tree_node);
693 if (mz->usage_in_excess < mz_node->usage_in_excess)
694 p = &(*p)->rb_left;
695 /*
696 * We can't avoid mem cgroups that are over their soft
697 * limit by the same amount
698 */
699 else if (mz->usage_in_excess >= mz_node->usage_in_excess)
700 p = &(*p)->rb_right;
701 }
702 rb_link_node(&mz->tree_node, parent, p);
703 rb_insert_color(&mz->tree_node, &mctz->rb_root);
704 mz->on_tree = true;
705}
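/*
 * The tree is keyed by usage_in_excess, so the rightmost node is always the
 * memcg furthest over its soft limit; __mem_cgroup_largest_soft_limit_node()
 * below relies on this when it picks rb_last().
 */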
706
cf2c8127
JW
707static void __mem_cgroup_remove_exceeded(struct mem_cgroup_per_zone *mz,
708 struct mem_cgroup_tree_per_zone *mctz)
bb4cc1a8
AM
709{
710 if (!mz->on_tree)
711 return;
712 rb_erase(&mz->tree_node, &mctz->rb_root);
713 mz->on_tree = false;
714}
715
cf2c8127
JW
716static void mem_cgroup_remove_exceeded(struct mem_cgroup_per_zone *mz,
717 struct mem_cgroup_tree_per_zone *mctz)
bb4cc1a8 718{
0a31bc97
JW
719 unsigned long flags;
720
721 spin_lock_irqsave(&mctz->lock, flags);
cf2c8127 722 __mem_cgroup_remove_exceeded(mz, mctz);
0a31bc97 723 spin_unlock_irqrestore(&mctz->lock, flags);
bb4cc1a8
AM
724}
725
3e32cb2e
JW
726static unsigned long soft_limit_excess(struct mem_cgroup *memcg)
727{
728 unsigned long nr_pages = page_counter_read(&memcg->memory);
729 unsigned long soft_limit = ACCESS_ONCE(memcg->soft_limit);
730 unsigned long excess = 0;
731
732 if (nr_pages > soft_limit)
733 excess = nr_pages - soft_limit;
734
735 return excess;
736}
bb4cc1a8
AM
737
738static void mem_cgroup_update_tree(struct mem_cgroup *memcg, struct page *page)
739{
3e32cb2e 740 unsigned long excess;
bb4cc1a8
AM
741 struct mem_cgroup_per_zone *mz;
742 struct mem_cgroup_tree_per_zone *mctz;
bb4cc1a8 743
e231875b 744 mctz = soft_limit_tree_from_page(page);
bb4cc1a8
AM
745 /*
746 * Necessary to update all ancestors when hierarchy is used,
747 * because their event counter is not touched.
748 */
749 for (; memcg; memcg = parent_mem_cgroup(memcg)) {
e231875b 750 mz = mem_cgroup_page_zoneinfo(memcg, page);
3e32cb2e 751 excess = soft_limit_excess(memcg);
bb4cc1a8
AM
752 /*
753 * We have to update the tree if mz is on RB-tree or
754 * mem is over its softlimit.
755 */
756 if (excess || mz->on_tree) {
0a31bc97
JW
757 unsigned long flags;
758
759 spin_lock_irqsave(&mctz->lock, flags);
bb4cc1a8
AM
760 /* if on-tree, remove it */
761 if (mz->on_tree)
cf2c8127 762 __mem_cgroup_remove_exceeded(mz, mctz);
bb4cc1a8
AM
763 /*
764 * Insert again. mz->usage_in_excess will be updated.
765 * If excess is 0, no tree ops.
766 */
cf2c8127 767 __mem_cgroup_insert_exceeded(mz, mctz, excess);
0a31bc97 768 spin_unlock_irqrestore(&mctz->lock, flags);
bb4cc1a8
AM
769 }
770 }
771}
772
773static void mem_cgroup_remove_from_trees(struct mem_cgroup *memcg)
774{
bb4cc1a8 775 struct mem_cgroup_tree_per_zone *mctz;
e231875b
JZ
776 struct mem_cgroup_per_zone *mz;
777 int nid, zid;
bb4cc1a8 778
e231875b
JZ
779 for_each_node(nid) {
780 for (zid = 0; zid < MAX_NR_ZONES; zid++) {
781 mz = &memcg->nodeinfo[nid]->zoneinfo[zid];
782 mctz = soft_limit_tree_node_zone(nid, zid);
cf2c8127 783 mem_cgroup_remove_exceeded(mz, mctz);
bb4cc1a8
AM
784 }
785 }
786}
787
788static struct mem_cgroup_per_zone *
789__mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_zone *mctz)
790{
791 struct rb_node *rightmost = NULL;
792 struct mem_cgroup_per_zone *mz;
793
794retry:
795 mz = NULL;
796 rightmost = rb_last(&mctz->rb_root);
797 if (!rightmost)
798 goto done; /* Nothing to reclaim from */
799
800 mz = rb_entry(rightmost, struct mem_cgroup_per_zone, tree_node);
801 /*
802 * Remove the node now but someone else can add it back,
803 * we will add it back at the end of reclaim to its correct
804 * position in the tree.
805 */
cf2c8127 806 __mem_cgroup_remove_exceeded(mz, mctz);
3e32cb2e 807 if (!soft_limit_excess(mz->memcg) ||
ec903c0c 808 !css_tryget_online(&mz->memcg->css))
bb4cc1a8
AM
809 goto retry;
810done:
811 return mz;
812}
813
814static struct mem_cgroup_per_zone *
815mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_zone *mctz)
816{
817 struct mem_cgroup_per_zone *mz;
818
0a31bc97 819 spin_lock_irq(&mctz->lock);
bb4cc1a8 820 mz = __mem_cgroup_largest_soft_limit_node(mctz);
0a31bc97 821 spin_unlock_irq(&mctz->lock);
bb4cc1a8
AM
822 return mz;
823}
824
711d3d2c
KH
825/*
826 * Implementation Note: reading percpu statistics for memcg.
827 *
828 * Both vmstat[] and percpu_counter have thresholds and do periodic
829 * synchronization to implement a "quick" read. There is a trade-off between
830 * reading cost and precision of the value, so we may eventually implement
831 * periodic synchronization of memcg's counters as well.
832 *
833 * But this _read() function is used for the user interface now. The user
834 * accounts memory usage by memory cgroup and _always_ requires an exact value
835 * because of that accounting. Even if we provided a quick-and-fuzzy read, we
836 * would still have to visit all online cpus and compute the sum. So, for now,
837 * unnecessary synchronization is not implemented (only done for cpu hotplug).
838 *
839 * If there are kernel-internal users which could make use of a not-exact
840 * value, and reading all cpu values becomes a performance bottleneck in some
841 * common workload, thresholds and synchronization like vmstat[] should be
842 * implemented.
843 */
c0ff4b85 844static long mem_cgroup_read_stat(struct mem_cgroup *memcg,
7a159cc9 845 enum mem_cgroup_stat_index idx)
c62b1a3b 846{
7a159cc9 847 long val = 0;
c62b1a3b 848 int cpu;
c62b1a3b 849
711d3d2c
KH
850 get_online_cpus();
851 for_each_online_cpu(cpu)
c0ff4b85 852 val += per_cpu(memcg->stat->count[idx], cpu);
711d3d2c 853#ifdef CONFIG_HOTPLUG_CPU
c0ff4b85
R
854 spin_lock(&memcg->pcp_counter_lock);
855 val += memcg->nocpu_base.count[idx];
856 spin_unlock(&memcg->pcp_counter_lock);
711d3d2c
KH
857#endif
858 put_online_cpus();
c62b1a3b
KH
859 return val;
860}
861
c0ff4b85 862static unsigned long mem_cgroup_read_events(struct mem_cgroup *memcg,
e9f8974f
JW
863 enum mem_cgroup_events_index idx)
864{
865 unsigned long val = 0;
866 int cpu;
867
9c567512 868 get_online_cpus();
e9f8974f 869 for_each_online_cpu(cpu)
c0ff4b85 870 val += per_cpu(memcg->stat->events[idx], cpu);
e9f8974f 871#ifdef CONFIG_HOTPLUG_CPU
c0ff4b85
R
872 spin_lock(&memcg->pcp_counter_lock);
873 val += memcg->nocpu_base.events[idx];
874 spin_unlock(&memcg->pcp_counter_lock);
e9f8974f 875#endif
9c567512 876 put_online_cpus();
e9f8974f
JW
877 return val;
878}
879
c0ff4b85 880static void mem_cgroup_charge_statistics(struct mem_cgroup *memcg,
b070e65c 881 struct page *page,
0a31bc97 882 int nr_pages)
d52aa412 883{
b2402857
KH
884 /*
885 * Here, RSS means 'mapped anon' and anon's SwapCache. Shmem/tmpfs is
886 * counted as CACHE even if it's on ANON LRU.
887 */
0a31bc97 888 if (PageAnon(page))
b2402857 889 __this_cpu_add(memcg->stat->count[MEM_CGROUP_STAT_RSS],
c0ff4b85 890 nr_pages);
d52aa412 891 else
b2402857 892 __this_cpu_add(memcg->stat->count[MEM_CGROUP_STAT_CACHE],
c0ff4b85 893 nr_pages);
55e462b0 894
b070e65c
DR
895 if (PageTransHuge(page))
896 __this_cpu_add(memcg->stat->count[MEM_CGROUP_STAT_RSS_HUGE],
897 nr_pages);
898
e401f176
KH
899 /* pagein of a big page is an event. So, ignore page size */
900 if (nr_pages > 0)
c0ff4b85 901 __this_cpu_inc(memcg->stat->events[MEM_CGROUP_EVENTS_PGPGIN]);
3751d604 902 else {
c0ff4b85 903 __this_cpu_inc(memcg->stat->events[MEM_CGROUP_EVENTS_PGPGOUT]);
3751d604
KH
904 nr_pages = -nr_pages; /* for event */
905 }
e401f176 906
13114716 907 __this_cpu_add(memcg->stat->nr_page_events, nr_pages);
6d12e2d8
KH
908}
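/*
 * Example: charging a THP as anon adds its nr_pages to both RSS and RSS_HUGE;
 * an uncharge passes a negative nr_pages, so PGPGOUT is counted and nr_pages
 * is negated back before updating nr_page_events, which therefore only grows.
 */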
909
e231875b 910unsigned long mem_cgroup_get_lru_size(struct lruvec *lruvec, enum lru_list lru)
074291fe
KK
911{
912 struct mem_cgroup_per_zone *mz;
913
914 mz = container_of(lruvec, struct mem_cgroup_per_zone, lruvec);
915 return mz->lru_size[lru];
916}
917
e231875b
JZ
918static unsigned long mem_cgroup_node_nr_lru_pages(struct mem_cgroup *memcg,
919 int nid,
920 unsigned int lru_mask)
bb2a0de9 921{
e231875b 922 unsigned long nr = 0;
889976db
YH
923 int zid;
924
e231875b 925 VM_BUG_ON((unsigned)nid >= nr_node_ids);
bb2a0de9 926
e231875b
JZ
927 for (zid = 0; zid < MAX_NR_ZONES; zid++) {
928 struct mem_cgroup_per_zone *mz;
929 enum lru_list lru;
930
931 for_each_lru(lru) {
932 if (!(BIT(lru) & lru_mask))
933 continue;
934 mz = &memcg->nodeinfo[nid]->zoneinfo[zid];
935 nr += mz->lru_size[lru];
936 }
937 }
938 return nr;
889976db 939}
bb2a0de9 940
c0ff4b85 941static unsigned long mem_cgroup_nr_lru_pages(struct mem_cgroup *memcg,
bb2a0de9 942 unsigned int lru_mask)
6d12e2d8 943{
e231875b 944 unsigned long nr = 0;
889976db 945 int nid;
6d12e2d8 946
31aaea4a 947 for_each_node_state(nid, N_MEMORY)
e231875b
JZ
948 nr += mem_cgroup_node_nr_lru_pages(memcg, nid, lru_mask);
949 return nr;
d52aa412
KH
950}
951
f53d7ce3
JW
952static bool mem_cgroup_event_ratelimit(struct mem_cgroup *memcg,
953 enum mem_cgroup_events_target target)
7a159cc9
JW
954{
955 unsigned long val, next;
956
13114716 957 val = __this_cpu_read(memcg->stat->nr_page_events);
4799401f 958 next = __this_cpu_read(memcg->stat->targets[target]);
7a159cc9 959 /* from time_after() in jiffies.h */
f53d7ce3
JW
960 if ((long)next - (long)val < 0) {
961 switch (target) {
962 case MEM_CGROUP_TARGET_THRESH:
963 next = val + THRESHOLDS_EVENTS_TARGET;
964 break;
bb4cc1a8
AM
965 case MEM_CGROUP_TARGET_SOFTLIMIT:
966 next = val + SOFTLIMIT_EVENTS_TARGET;
967 break;
f53d7ce3
JW
968 case MEM_CGROUP_TARGET_NUMAINFO:
969 next = val + NUMAINFO_EVENTS_TARGET;
970 break;
971 default:
972 break;
973 }
974 __this_cpu_write(memcg->stat->targets[target], next);
975 return true;
7a159cc9 976 }
f53d7ce3 977 return false;
d2265e6f
KH
978}
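/*
 * Note: targets[] holds the nr_page_events value at which the next event of
 * each kind should fire; the signed subtraction above is the same wrap-safe
 * comparison as time_after().
 */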
979
980/*
981 * Check events in order.
982 *
983 */
c0ff4b85 984static void memcg_check_events(struct mem_cgroup *memcg, struct page *page)
d2265e6f
KH
985{
986 /* threshold event is triggered in finer grain than soft limit */
f53d7ce3
JW
987 if (unlikely(mem_cgroup_event_ratelimit(memcg,
988 MEM_CGROUP_TARGET_THRESH))) {
bb4cc1a8 989 bool do_softlimit;
82b3f2a7 990 bool do_numainfo __maybe_unused;
f53d7ce3 991
bb4cc1a8
AM
992 do_softlimit = mem_cgroup_event_ratelimit(memcg,
993 MEM_CGROUP_TARGET_SOFTLIMIT);
f53d7ce3
JW
994#if MAX_NUMNODES > 1
995 do_numainfo = mem_cgroup_event_ratelimit(memcg,
996 MEM_CGROUP_TARGET_NUMAINFO);
997#endif
c0ff4b85 998 mem_cgroup_threshold(memcg);
bb4cc1a8
AM
999 if (unlikely(do_softlimit))
1000 mem_cgroup_update_tree(memcg, page);
453a9bf3 1001#if MAX_NUMNODES > 1
f53d7ce3 1002 if (unlikely(do_numainfo))
c0ff4b85 1003 atomic_inc(&memcg->numainfo_events);
453a9bf3 1004#endif
0a31bc97 1005 }
d2265e6f
KH
1006}
1007
cf475ad2 1008struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p)
78fb7466 1009{
31a78f23
BS
1010 /*
1011 * mm_update_next_owner() may clear mm->owner to NULL
1012 * if it races with swapoff, page migration, etc.
1013 * So this can be called with p == NULL.
1014 */
1015 if (unlikely(!p))
1016 return NULL;
1017
073219e9 1018 return mem_cgroup_from_css(task_css(p, memory_cgrp_id));
78fb7466
PE
1019}
1020
df381975 1021static struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm)
54595fe2 1022{
c0ff4b85 1023 struct mem_cgroup *memcg = NULL;
0b7f569e 1024
54595fe2
KH
1025 rcu_read_lock();
1026 do {
6f6acb00
MH
1027 /*
1028 * Page cache insertions can happen without an
1029 * actual mm context, e.g. during disk probing
1030 * on boot, loopback IO, acct() writes etc.
1031 */
1032 if (unlikely(!mm))
df381975 1033 memcg = root_mem_cgroup;
6f6acb00
MH
1034 else {
1035 memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
1036 if (unlikely(!memcg))
1037 memcg = root_mem_cgroup;
1038 }
ec903c0c 1039 } while (!css_tryget_online(&memcg->css));
54595fe2 1040 rcu_read_unlock();
c0ff4b85 1041 return memcg;
54595fe2
KH
1042}
1043
5660048c
JW
1044/**
1045 * mem_cgroup_iter - iterate over memory cgroup hierarchy
1046 * @root: hierarchy root
1047 * @prev: previously returned memcg, NULL on first invocation
1048 * @reclaim: cookie for shared reclaim walks, NULL for full walks
1049 *
1050 * Returns references to children of the hierarchy below @root, or
1051 * @root itself, or %NULL after a full round-trip.
1052 *
1053 * Caller must pass the return value in @prev on subsequent
1054 * invocations for reference counting, or use mem_cgroup_iter_break()
1055 * to cancel a hierarchy walk before the round-trip is complete.
1056 *
1057 * Reclaimers can specify a zone and a priority level in @reclaim to
1058 * divide up the memcgs in the hierarchy among all concurrent
1059 * reclaimers operating on the same zone and priority.
1060 */
694fbc0f 1061struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *root,
5660048c 1062 struct mem_cgroup *prev,
694fbc0f 1063 struct mem_cgroup_reclaim_cookie *reclaim)
14067bb3 1064{
5ac8fb31
JW
1065 struct reclaim_iter *uninitialized_var(iter);
1066 struct cgroup_subsys_state *css = NULL;
9f3a0d09 1067 struct mem_cgroup *memcg = NULL;
5ac8fb31 1068 struct mem_cgroup *pos = NULL;
711d3d2c 1069
694fbc0f
AM
1070 if (mem_cgroup_disabled())
1071 return NULL;
5660048c 1072
9f3a0d09
JW
1073 if (!root)
1074 root = root_mem_cgroup;
7d74b06f 1075
9f3a0d09 1076 if (prev && !reclaim)
5ac8fb31 1077 pos = prev;
14067bb3 1078
9f3a0d09
JW
1079 if (!root->use_hierarchy && root != root_mem_cgroup) {
1080 if (prev)
5ac8fb31 1081 goto out;
694fbc0f 1082 return root;
9f3a0d09 1083 }
14067bb3 1084
542f85f9 1085 rcu_read_lock();
5f578161 1086
5ac8fb31
JW
1087 if (reclaim) {
1088 struct mem_cgroup_per_zone *mz;
1089
1090 mz = mem_cgroup_zone_zoneinfo(root, reclaim->zone);
1091 iter = &mz->iter[reclaim->priority];
1092
1093 if (prev && reclaim->generation != iter->generation)
1094 goto out_unlock;
1095
1096 do {
1097 pos = ACCESS_ONCE(iter->position);
1098 /*
1099 * A racing update may change the position and
1100 * put the last reference, hence css_tryget(),
1101 * or retry to see the updated position.
1102 */
1103 } while (pos && !css_tryget(&pos->css));
1104 }
1105
1106 if (pos)
1107 css = &pos->css;
1108
1109 for (;;) {
1110 css = css_next_descendant_pre(css, &root->css);
1111 if (!css) {
1112 /*
1113 * Reclaimers share the hierarchy walk, and a
1114 * new one might jump in right at the end of
1115 * the hierarchy - make sure they see at least
1116 * one group and restart from the beginning.
1117 */
1118 if (!prev)
1119 continue;
1120 break;
527a5ec9 1121 }
7d74b06f 1122
5ac8fb31
JW
1123 /*
1124 * Verify the css and acquire a reference. The root
1125 * is provided by the caller, so we know it's alive
1126 * and kicking, and don't take an extra reference.
1127 */
1128 memcg = mem_cgroup_from_css(css);
14067bb3 1129
5ac8fb31
JW
1130 if (css == &root->css)
1131 break;
542f85f9 1132
b2052564 1133 if (css_tryget(css)) {
5ac8fb31
JW
1134 /*
1135 * Make sure the memcg is initialized:
1136 * mem_cgroup_css_online() orders the
1137 * initialization against setting the flag.
1138 */
1139 if (smp_load_acquire(&memcg->initialized))
1140 break;
1141
1142 css_put(css);
527a5ec9 1143 }
9f3a0d09 1144
5ac8fb31
JW
1145 memcg = NULL;
1146 }
1147
1148 if (reclaim) {
1149 if (cmpxchg(&iter->position, pos, memcg) == pos) {
1150 if (memcg)
1151 css_get(&memcg->css);
1152 if (pos)
1153 css_put(&pos->css);
1154 }
1155
1156 /*
1157 * pairs with css_tryget when dereferencing iter->position
1158 * above.
1159 */
1160 if (pos)
1161 css_put(&pos->css);
1162
1163 if (!memcg)
1164 iter->generation++;
1165 else if (!prev)
1166 reclaim->generation = iter->generation;
9f3a0d09 1167 }
5ac8fb31 1168
542f85f9
MH
1169out_unlock:
1170 rcu_read_unlock();
5ac8fb31 1171out:
c40046f3
MH
1172 if (prev && prev != root)
1173 css_put(&prev->css);
1174
9f3a0d09 1175 return memcg;
14067bb3 1176}
7d74b06f 1177
5660048c
JW
1178/**
1179 * mem_cgroup_iter_break - abort a hierarchy walk prematurely
1180 * @root: hierarchy root
1181 * @prev: last visited hierarchy member as returned by mem_cgroup_iter()
1182 */
1183void mem_cgroup_iter_break(struct mem_cgroup *root,
1184 struct mem_cgroup *prev)
9f3a0d09
JW
1185{
1186 if (!root)
1187 root = root_mem_cgroup;
1188 if (prev && prev != root)
1189 css_put(&prev->css);
1190}
7d74b06f 1191
9f3a0d09
JW
1192/*
1193 * Iteration constructs for visiting all cgroups (under a tree). If
1194 * loops are exited prematurely (break), mem_cgroup_iter_break() must
1195 * be used for reference counting.
1196 */
1197#define for_each_mem_cgroup_tree(iter, root) \
527a5ec9 1198 for (iter = mem_cgroup_iter(root, NULL, NULL); \
9f3a0d09 1199 iter != NULL; \
527a5ec9 1200 iter = mem_cgroup_iter(root, iter, NULL))
711d3d2c 1201
9f3a0d09 1202#define for_each_mem_cgroup(iter) \
527a5ec9 1203 for (iter = mem_cgroup_iter(NULL, NULL, NULL); \
9f3a0d09 1204 iter != NULL; \
527a5ec9 1205 iter = mem_cgroup_iter(NULL, iter, NULL))
14067bb3 1206
68ae564b 1207void __mem_cgroup_count_vm_event(struct mm_struct *mm, enum vm_event_item idx)
456f998e 1208{
c0ff4b85 1209 struct mem_cgroup *memcg;
456f998e 1210
456f998e 1211 rcu_read_lock();
c0ff4b85
R
1212 memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
1213 if (unlikely(!memcg))
456f998e
YH
1214 goto out;
1215
1216 switch (idx) {
456f998e 1217 case PGFAULT:
0e574a93
JW
1218 this_cpu_inc(memcg->stat->events[MEM_CGROUP_EVENTS_PGFAULT]);
1219 break;
1220 case PGMAJFAULT:
1221 this_cpu_inc(memcg->stat->events[MEM_CGROUP_EVENTS_PGMAJFAULT]);
456f998e
YH
1222 break;
1223 default:
1224 BUG();
1225 }
1226out:
1227 rcu_read_unlock();
1228}
68ae564b 1229EXPORT_SYMBOL(__mem_cgroup_count_vm_event);
456f998e 1230
925b7673
JW
1231/**
1232 * mem_cgroup_zone_lruvec - get the lru list vector for a zone and memcg
1233 * @zone: zone of the wanted lruvec
fa9add64 1234 * @memcg: memcg of the wanted lruvec
925b7673
JW
1235 *
1236 * Returns the lru list vector holding pages for the given @zone and
1237 * @memcg. This can be the global zone lruvec, if the memory controller
1238 * is disabled.
1239 */
1240struct lruvec *mem_cgroup_zone_lruvec(struct zone *zone,
1241 struct mem_cgroup *memcg)
1242{
1243 struct mem_cgroup_per_zone *mz;
bea8c150 1244 struct lruvec *lruvec;
925b7673 1245
bea8c150
HD
1246 if (mem_cgroup_disabled()) {
1247 lruvec = &zone->lruvec;
1248 goto out;
1249 }
925b7673 1250
e231875b 1251 mz = mem_cgroup_zone_zoneinfo(memcg, zone);
bea8c150
HD
1252 lruvec = &mz->lruvec;
1253out:
1254 /*
1255 * Since a node can be onlined after the mem_cgroup was created,
1256 * we have to be prepared to initialize lruvec->zone here;
1257 * and if offlined then reonlined, we need to reinitialize it.
1258 */
1259 if (unlikely(lruvec->zone != zone))
1260 lruvec->zone = zone;
1261 return lruvec;
925b7673
JW
1262}
1263
925b7673 1264/**
dfe0e773 1265 * mem_cgroup_page_lruvec - return lruvec for isolating/putting an LRU page
925b7673 1266 * @page: the page
fa9add64 1267 * @zone: zone of the page
dfe0e773
JW
1268 *
1269 * This function is only safe when following the LRU page isolation
1270 * and putback protocol: the LRU lock must be held, and the page must
1271 * either be PageLRU() or the caller must have isolated/allocated it.
925b7673 1272 */
fa9add64 1273struct lruvec *mem_cgroup_page_lruvec(struct page *page, struct zone *zone)
08e552c6 1274{
08e552c6 1275 struct mem_cgroup_per_zone *mz;
925b7673
JW
1276 struct mem_cgroup *memcg;
1277 struct page_cgroup *pc;
bea8c150 1278 struct lruvec *lruvec;
6d12e2d8 1279
bea8c150
HD
1280 if (mem_cgroup_disabled()) {
1281 lruvec = &zone->lruvec;
1282 goto out;
1283 }
925b7673 1284
08e552c6 1285 pc = lookup_page_cgroup(page);
38c5d72f 1286 memcg = pc->mem_cgroup;
7512102c
HD
1287
1288 /*
dfe0e773
JW
1289 * Swapcache readahead pages are added to the LRU - and
1290 * possibly migrated - before they are charged. Ensure
1291 * pc->mem_cgroup is sane.
7512102c 1292 */
fa9add64 1293 if (!PageLRU(page) && !PageCgroupUsed(pc) && memcg != root_mem_cgroup)
7512102c
HD
1294 pc->mem_cgroup = memcg = root_mem_cgroup;
1295
e231875b 1296 mz = mem_cgroup_page_zoneinfo(memcg, page);
bea8c150
HD
1297 lruvec = &mz->lruvec;
1298out:
1299 /*
1300 * Since a node can be onlined after the mem_cgroup was created,
1301 * we have to be prepared to initialize lruvec->zone here;
1302 * and if offlined then reonlined, we need to reinitialize it.
1303 */
1304 if (unlikely(lruvec->zone != zone))
1305 lruvec->zone = zone;
1306 return lruvec;
08e552c6 1307}
b69408e8 1308
925b7673 1309/**
fa9add64
HD
1310 * mem_cgroup_update_lru_size - account for adding or removing an lru page
1311 * @lruvec: mem_cgroup per zone lru vector
1312 * @lru: index of lru list the page is sitting on
1313 * @nr_pages: positive when adding or negative when removing
925b7673 1314 *
fa9add64
HD
1315 * This function must be called when a page is added to or removed from an
1316 * lru list.
3f58a829 1317 */
fa9add64
HD
1318void mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru,
1319 int nr_pages)
3f58a829
MK
1320{
1321 struct mem_cgroup_per_zone *mz;
fa9add64 1322 unsigned long *lru_size;
3f58a829
MK
1323
1324 if (mem_cgroup_disabled())
1325 return;
1326
fa9add64
HD
1327 mz = container_of(lruvec, struct mem_cgroup_per_zone, lruvec);
1328 lru_size = mz->lru_size + lru;
1329 *lru_size += nr_pages;
1330 VM_BUG_ON((long)(*lru_size) < 0);
08e552c6 1331}
544122e5 1332
3e92041d 1333/*
c0ff4b85 1334 * Checks whether the given memcg is the same as, or within, root_memcg's
3e92041d
MH
1335 * hierarchy subtree
1336 */
c3ac9a8a
JW
1337bool __mem_cgroup_same_or_subtree(const struct mem_cgroup *root_memcg,
1338 struct mem_cgroup *memcg)
3e92041d 1339{
91c63734
JW
1340 if (root_memcg == memcg)
1341 return true;
3a981f48 1342 if (!root_memcg->use_hierarchy || !memcg)
91c63734 1343 return false;
b47f77b5 1344 return cgroup_is_descendant(memcg->css.cgroup, root_memcg->css.cgroup);
c3ac9a8a
JW
1345}
1346
1347static bool mem_cgroup_same_or_subtree(const struct mem_cgroup *root_memcg,
1348 struct mem_cgroup *memcg)
1349{
1350 bool ret;
1351
91c63734 1352 rcu_read_lock();
c3ac9a8a 1353 ret = __mem_cgroup_same_or_subtree(root_memcg, memcg);
91c63734
JW
1354 rcu_read_unlock();
1355 return ret;
3e92041d
MH
1356}
1357
ffbdccf5
DR
1358bool task_in_mem_cgroup(struct task_struct *task,
1359 const struct mem_cgroup *memcg)
4c4a2214 1360{
0b7f569e 1361 struct mem_cgroup *curr = NULL;
158e0a2d 1362 struct task_struct *p;
ffbdccf5 1363 bool ret;
4c4a2214 1364
158e0a2d 1365 p = find_lock_task_mm(task);
de077d22 1366 if (p) {
df381975 1367 curr = get_mem_cgroup_from_mm(p->mm);
de077d22
DR
1368 task_unlock(p);
1369 } else {
1370 /*
1371 * All threads may have already detached their mm's, but the oom
1372 * killer still needs to detect if they have already been oom
1373 * killed to prevent needlessly killing additional tasks.
1374 */
ffbdccf5 1375 rcu_read_lock();
de077d22
DR
1376 curr = mem_cgroup_from_task(task);
1377 if (curr)
1378 css_get(&curr->css);
ffbdccf5 1379 rcu_read_unlock();
de077d22 1380 }
d31f56db 1381 /*
c0ff4b85 1382 * We should check use_hierarchy of "memcg" not "curr". Because checking
d31f56db 1383 * use_hierarchy of "curr" here makes this function return true if hierarchy is
c0ff4b85
R
1384 * enabled in "curr" and "curr" is a child of "memcg" in *cgroup*
1385 * hierarchy (even if use_hierarchy is disabled in "memcg").
d31f56db 1386 */
c0ff4b85 1387 ret = mem_cgroup_same_or_subtree(memcg, curr);
0b7f569e 1388 css_put(&curr->css);
4c4a2214
DR
1389 return ret;
1390}
1391
c56d5c7d 1392int mem_cgroup_inactive_anon_is_low(struct lruvec *lruvec)
14797e23 1393{
9b272977 1394 unsigned long inactive_ratio;
14797e23 1395 unsigned long inactive;
9b272977 1396 unsigned long active;
c772be93 1397 unsigned long gb;
14797e23 1398
4d7dcca2
HD
1399 inactive = mem_cgroup_get_lru_size(lruvec, LRU_INACTIVE_ANON);
1400 active = mem_cgroup_get_lru_size(lruvec, LRU_ACTIVE_ANON);
14797e23 1401
c772be93
KM
1402 gb = (inactive + active) >> (30 - PAGE_SHIFT);
1403 if (gb)
1404 inactive_ratio = int_sqrt(10 * gb);
1405 else
1406 inactive_ratio = 1;
1407
9b272977 1408 return inactive * inactive_ratio < active;
14797e23
KM
1409}
1410
3e32cb2e 1411#define mem_cgroup_from_counter(counter, member) \
6d61ef40
BS
1412 container_of(counter, struct mem_cgroup, member)
1413
19942822 1414/**
9d11ea9f 1415 * mem_cgroup_margin - calculate chargeable space of a memory cgroup
dad7557e 1416 * @memcg: the memory cgroup
19942822 1417 *
9d11ea9f 1418 * Returns the maximum amount of memory @mem can be charged with, in
7ec99d62 1419 * pages.
19942822 1420 */
c0ff4b85 1421static unsigned long mem_cgroup_margin(struct mem_cgroup *memcg)
19942822 1422{
3e32cb2e
JW
1423 unsigned long margin = 0;
1424 unsigned long count;
1425 unsigned long limit;
9d11ea9f 1426
3e32cb2e
JW
1427 count = page_counter_read(&memcg->memory);
1428 limit = ACCESS_ONCE(memcg->memory.limit);
1429 if (count < limit)
1430 margin = limit - count;
1431
1432 if (do_swap_account) {
1433 count = page_counter_read(&memcg->memsw);
1434 limit = ACCESS_ONCE(memcg->memsw.limit);
1435 if (count <= limit)
1436 margin = min(margin, limit - count);
1437 }
1438
1439 return margin;
19942822
JW
1440}
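/*
 * Worked example (illustrative numbers): with memory.limit = 1000 pages and
 * usage = 800 the margin is 200; if do_swap_account is on, memsw.limit = 900
 * and memsw usage = 850, the margin is clamped to 50.
 */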
1441
1f4c025b 1442int mem_cgroup_swappiness(struct mem_cgroup *memcg)
a7885eb8 1443{
a7885eb8 1444 /* root ? */
14208b0e 1445 if (mem_cgroup_disabled() || !memcg->css.parent)
a7885eb8
KM
1446 return vm_swappiness;
1447
bf1ff263 1448 return memcg->swappiness;
a7885eb8
KM
1449}
1450
619d094b
KH
1451/*
1452 * memcg->moving_account is used to check the possibility that some thread is
1453 * calling move_account(). When a thread on CPU-A starts moving pages under
1454 * a memcg, other threads should check memcg->moving_account under
1455 * rcu_read_lock(), like this:
1456 *
1457 * CPU-A CPU-B
1458 * rcu_read_lock()
1459 * memcg->moving_account+1 if (memcg->moving_account)
1460 * take heavy locks.
1461 * synchronize_rcu() update something.
1462 * rcu_read_unlock()
1463 * start move here.
1464 */
4331f7d3 1465
c0ff4b85 1466static void mem_cgroup_start_move(struct mem_cgroup *memcg)
32047e2a 1467{
619d094b 1468 atomic_inc(&memcg->moving_account);
32047e2a
KH
1469 synchronize_rcu();
1470}
1471
c0ff4b85 1472static void mem_cgroup_end_move(struct mem_cgroup *memcg)
32047e2a 1473{
619d094b
KH
1474 /*
1475 * Now, mem_cgroup_clear_mc() may call this function with NULL.
1476 * We check NULL in callee rather than caller.
1477 */
d7365e78 1478 if (memcg)
619d094b 1479 atomic_dec(&memcg->moving_account);
32047e2a 1480}
619d094b 1481
32047e2a 1482/*
bdcbb659 1483 * A routine for checking whether "mem" is under move_account() or not.
32047e2a 1484 *
bdcbb659
QH
1485 * Checks whether a cgroup is mc.from or mc.to or under the hierarchy of
1486 * moving cgroups. This is used for waiting at high memory pressure
1487 * caused by "move".
32047e2a 1488 */
c0ff4b85 1489static bool mem_cgroup_under_move(struct mem_cgroup *memcg)
4b534334 1490{
2bd9bb20
KH
1491 struct mem_cgroup *from;
1492 struct mem_cgroup *to;
4b534334 1493 bool ret = false;
2bd9bb20
KH
1494 /*
1495 * Unlike task_move routines, we access mc.to, mc.from not under
1496 * mutual exclusion by cgroup_mutex. Here, we take spinlock instead.
1497 */
1498 spin_lock(&mc.lock);
1499 from = mc.from;
1500 to = mc.to;
1501 if (!from)
1502 goto unlock;
3e92041d 1503
c0ff4b85
R
1504 ret = mem_cgroup_same_or_subtree(memcg, from)
1505 || mem_cgroup_same_or_subtree(memcg, to);
2bd9bb20
KH
1506unlock:
1507 spin_unlock(&mc.lock);
4b534334
KH
1508 return ret;
1509}
1510
c0ff4b85 1511static bool mem_cgroup_wait_acct_move(struct mem_cgroup *memcg)
4b534334
KH
1512{
1513 if (mc.moving_task && current != mc.moving_task) {
c0ff4b85 1514 if (mem_cgroup_under_move(memcg)) {
4b534334
KH
1515 DEFINE_WAIT(wait);
1516 prepare_to_wait(&mc.waitq, &wait, TASK_INTERRUPTIBLE);
1517 /* moving charge context might have finished. */
1518 if (mc.moving_task)
1519 schedule();
1520 finish_wait(&mc.waitq, &wait);
1521 return true;
1522 }
1523 }
1524 return false;
1525}
1526
312734c0
KH
1527/*
1528 * Take this lock when
1529 * - code tries to modify a page's memcg while it's USED.
1530 * - code tries to modify page state accounting in a memcg.
312734c0
KH
1531 */
1532static void move_lock_mem_cgroup(struct mem_cgroup *memcg,
1533 unsigned long *flags)
1534{
1535 spin_lock_irqsave(&memcg->move_lock, *flags);
1536}
1537
1538static void move_unlock_mem_cgroup(struct mem_cgroup *memcg,
1539 unsigned long *flags)
1540{
1541 spin_unlock_irqrestore(&memcg->move_lock, *flags);
1542}
1543
58cf188e 1544#define K(x) ((x) << (PAGE_SHIFT-10))
e222432b 1545/**
58cf188e 1546 * mem_cgroup_print_oom_info: Print OOM information relevant to memory controller.
e222432b
BS
1547 * @memcg: The memory cgroup that went over limit
1548 * @p: Task that is going to be killed
1549 *
1550 * NOTE: @memcg and @p's mem_cgroup can be different when hierarchy is
1551 * enabled
1552 */
1553void mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p)
1554{
e61734c5 1555 /* oom_info_lock ensures that parallel ooms do not interleave */
08088cb9 1556 static DEFINE_MUTEX(oom_info_lock);
58cf188e
SZ
1557 struct mem_cgroup *iter;
1558 unsigned int i;
e222432b 1559
58cf188e 1560 if (!p)
e222432b
BS
1561 return;
1562
08088cb9 1563 mutex_lock(&oom_info_lock);
e222432b
BS
1564 rcu_read_lock();
1565
e61734c5
TH
1566 pr_info("Task in ");
1567 pr_cont_cgroup_path(task_cgroup(p, memory_cgrp_id));
1568 pr_info(" killed as a result of limit of ");
1569 pr_cont_cgroup_path(memcg->css.cgroup);
1570 pr_info("\n");
e222432b 1571
e222432b
BS
1572 rcu_read_unlock();
1573
3e32cb2e
JW
1574 pr_info("memory: usage %llukB, limit %llukB, failcnt %lu\n",
1575 K((u64)page_counter_read(&memcg->memory)),
1576 K((u64)memcg->memory.limit), memcg->memory.failcnt);
1577 pr_info("memory+swap: usage %llukB, limit %llukB, failcnt %lu\n",
1578 K((u64)page_counter_read(&memcg->memsw)),
1579 K((u64)memcg->memsw.limit), memcg->memsw.failcnt);
1580 pr_info("kmem: usage %llukB, limit %llukB, failcnt %lu\n",
1581 K((u64)page_counter_read(&memcg->kmem)),
1582 K((u64)memcg->kmem.limit), memcg->kmem.failcnt);
58cf188e
SZ
1583
1584 for_each_mem_cgroup_tree(iter, memcg) {
e61734c5
TH
1585 pr_info("Memory cgroup stats for ");
1586 pr_cont_cgroup_path(iter->css.cgroup);
58cf188e
SZ
1587 pr_cont(":");
1588
1589 for (i = 0; i < MEM_CGROUP_STAT_NSTATS; i++) {
1590 if (i == MEM_CGROUP_STAT_SWAP && !do_swap_account)
1591 continue;
1592 pr_cont(" %s:%ldKB", mem_cgroup_stat_names[i],
1593 K(mem_cgroup_read_stat(iter, i)));
1594 }
1595
1596 for (i = 0; i < NR_LRU_LISTS; i++)
1597 pr_cont(" %s:%luKB", mem_cgroup_lru_names[i],
1598 K(mem_cgroup_nr_lru_pages(iter, BIT(i))));
1599
1600 pr_cont("\n");
1601 }
08088cb9 1602 mutex_unlock(&oom_info_lock);
e222432b
BS
1603}
1604
81d39c20
KH
1605/*
1606 * This function returns the number of memcgs under the hierarchy tree. Returns
1607 * 1 (self count) if there are no children.
1608 */
c0ff4b85 1609static int mem_cgroup_count_children(struct mem_cgroup *memcg)
81d39c20
KH
1610{
1611 int num = 0;
7d74b06f
KH
1612 struct mem_cgroup *iter;
1613
c0ff4b85 1614 for_each_mem_cgroup_tree(iter, memcg)
7d74b06f 1615 num++;
81d39c20
KH
1616 return num;
1617}
1618
a63d83f4
DR
1619/*
1620 * Return the memory (and swap, if configured) limit for a memcg.
1621 */
3e32cb2e 1622static unsigned long mem_cgroup_get_limit(struct mem_cgroup *memcg)
a63d83f4 1623{
3e32cb2e 1624 unsigned long limit;
a63d83f4 1625
3e32cb2e 1626 limit = memcg->memory.limit;
9a5a8f19 1627 if (mem_cgroup_swappiness(memcg)) {
3e32cb2e 1628 unsigned long memsw_limit;
9a5a8f19 1629
3e32cb2e
JW
1630 memsw_limit = memcg->memsw.limit;
1631 limit = min(limit + total_swap_pages, memsw_limit);
9a5a8f19 1632 }
9a5a8f19 1633 return limit;
a63d83f4
DR
1634}
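/*
 * Illustration: with non-zero swappiness this evaluates to
 * min(memory.limit + total_swap_pages, memsw.limit) in pages; it is used as
 * the totalpages baseline for OOM badness scoring below.
 */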
1635
19965460
DR
1636static void mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask,
1637 int order)
9cbb78bb
DR
1638{
1639 struct mem_cgroup *iter;
1640 unsigned long chosen_points = 0;
1641 unsigned long totalpages;
1642 unsigned int points = 0;
1643 struct task_struct *chosen = NULL;
1644
876aafbf 1645 /*
465adcf1
DR
1646 * If current has a pending SIGKILL or is exiting, then automatically
1647 * select it. The goal is to allow it to allocate so that it may
1648 * quickly exit and free its memory.
876aafbf 1649 */
465adcf1 1650 if (fatal_signal_pending(current) || current->flags & PF_EXITING) {
876aafbf
DR
1651 set_thread_flag(TIF_MEMDIE);
1652 return;
1653 }
1654
1655 check_panic_on_oom(CONSTRAINT_MEMCG, gfp_mask, order, NULL);
3e32cb2e 1656 totalpages = mem_cgroup_get_limit(memcg) ? : 1;
9cbb78bb 1657 for_each_mem_cgroup_tree(iter, memcg) {
72ec7029 1658 struct css_task_iter it;
9cbb78bb
DR
1659 struct task_struct *task;
1660
72ec7029
TH
1661 css_task_iter_start(&iter->css, &it);
1662 while ((task = css_task_iter_next(&it))) {
9cbb78bb
DR
1663 switch (oom_scan_process_thread(task, totalpages, NULL,
1664 false)) {
1665 case OOM_SCAN_SELECT:
1666 if (chosen)
1667 put_task_struct(chosen);
1668 chosen = task;
1669 chosen_points = ULONG_MAX;
1670 get_task_struct(chosen);
1671 /* fall through */
1672 case OOM_SCAN_CONTINUE:
1673 continue;
1674 case OOM_SCAN_ABORT:
72ec7029 1675 css_task_iter_end(&it);
9cbb78bb
DR
1676 mem_cgroup_iter_break(memcg, iter);
1677 if (chosen)
1678 put_task_struct(chosen);
1679 return;
1680 case OOM_SCAN_OK:
1681 break;
1682 };
1683 points = oom_badness(task, memcg, NULL, totalpages);
d49ad935
DR
1684 if (!points || points < chosen_points)
1685 continue;
1686 /* Prefer thread group leaders for display purposes */
1687 if (points == chosen_points &&
1688 thread_group_leader(chosen))
1689 continue;
1690
1691 if (chosen)
1692 put_task_struct(chosen);
1693 chosen = task;
1694 chosen_points = points;
1695 get_task_struct(chosen);
9cbb78bb 1696 }
72ec7029 1697 css_task_iter_end(&it);
9cbb78bb
DR
1698 }
1699
1700 if (!chosen)
1701 return;
1702 points = chosen_points * 1000 / totalpages;
9cbb78bb
DR
1703 oom_kill_process(chosen, gfp_mask, order, points, totalpages, memcg,
1704 NULL, "Memory cgroup out of memory");
9cbb78bb
DR
1705}
1706
4d0c066d
KH
1707/**
1708 * test_mem_cgroup_node_reclaimable
dad7557e 1709 * @memcg: the target memcg
4d0c066d
KH
1710 * @nid: the node ID to be checked.
1711 * @noswap: specify true here if the user wants file-only information.
1712 *
1713 * This function returns whether the specified memcg contains any
1714 * reclaimable pages on a node. Returns true if there are any reclaimable
1715 * pages in the node.
1716 */
c0ff4b85 1717static bool test_mem_cgroup_node_reclaimable(struct mem_cgroup *memcg,
4d0c066d
KH
1718 int nid, bool noswap)
1719{
c0ff4b85 1720 if (mem_cgroup_node_nr_lru_pages(memcg, nid, LRU_ALL_FILE))
4d0c066d
KH
1721 return true;
1722 if (noswap || !total_swap_pages)
1723 return false;
c0ff4b85 1724 if (mem_cgroup_node_nr_lru_pages(memcg, nid, LRU_ALL_ANON))
4d0c066d
KH
1725 return true;
1726 return false;
1727
1728}
bb4cc1a8 1729#if MAX_NUMNODES > 1
889976db
YH
1730
1731/*
1732 * Always updating the nodemask is not very good - even if we have an empty
1733 * list or the wrong list here, we can start from some node and traverse all
1734 * nodes based on the zonelist. So update the list loosely once per 10 secs.
1735 *
1736 */
c0ff4b85 1737static void mem_cgroup_may_update_nodemask(struct mem_cgroup *memcg)
889976db
YH
1738{
1739 int nid;
453a9bf3
KH
1740 /*
1741 * numainfo_events > 0 means there were at least NUMAINFO_EVENTS_TARGET
1742 * pagein/pageout changes since the last update.
1743 */
c0ff4b85 1744 if (!atomic_read(&memcg->numainfo_events))
453a9bf3 1745 return;
c0ff4b85 1746 if (atomic_inc_return(&memcg->numainfo_updating) > 1)
889976db
YH
1747 return;
1748
889976db 1749 /* make a nodemask where this memcg uses memory from */
31aaea4a 1750 memcg->scan_nodes = node_states[N_MEMORY];
889976db 1751
31aaea4a 1752 for_each_node_mask(nid, node_states[N_MEMORY]) {
889976db 1753
c0ff4b85
R
1754 if (!test_mem_cgroup_node_reclaimable(memcg, nid, false))
1755 node_clear(nid, memcg->scan_nodes);
889976db 1756 }
453a9bf3 1757
c0ff4b85
R
1758 atomic_set(&memcg->numainfo_events, 0);
1759 atomic_set(&memcg->numainfo_updating, 0);
889976db
YH
1760}
1761
1762/*
1763 * Select a node to start reclaim from. Because we only need to reduce the
1764 * usage counter, starting from anywhere is OK. Reclaiming from the
1765 * current node has both pros and cons:
1766 *
1767 * Freeing memory from the current node means freeing memory from a node which
1768 * we'll use or have used, so it may hurt that node's LRU. And if several
1769 * threads hit their limits, they will all contend on one node. But freeing
1770 * from a remote node costs more for memory reclaim because of memory latency.
1771 *
1772 * For now, we use round-robin. A better algorithm is welcome.
1773 */
c0ff4b85 1774int mem_cgroup_select_victim_node(struct mem_cgroup *memcg)
889976db
YH
1775{
1776 int node;
1777
c0ff4b85
R
1778 mem_cgroup_may_update_nodemask(memcg);
1779 node = memcg->last_scanned_node;
889976db 1780
c0ff4b85 1781 node = next_node(node, memcg->scan_nodes);
889976db 1782 if (node == MAX_NUMNODES)
c0ff4b85 1783 node = first_node(memcg->scan_nodes);
889976db
YH
1784 /*
1785 * We call this when we hit the limit, not when pages are added to the LRU.
1786 * No LRU may hold pages, because all pages are UNEVICTABLE or the
1787 * memcg is too small and no pages are on the LRU. In that case,
1788 * we use the current node.
1789 */
1790 if (unlikely(node == MAX_NUMNODES))
1791 node = numa_node_id();
1792
c0ff4b85 1793 memcg->last_scanned_node = node;
889976db
YH
1794 return node;
1795}
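/*
 * Illustrative sketch (assumption: mirrors the targeted-reclaim caller in
 * mm/vmscan.c): the memcg reclaim entry point picks its starting node with
 * the round-robin helper above and reclaims against that node's zonelist:
 *
 *	int nid = mem_cgroup_select_victim_node(memcg);
 *
 *	reclaim then walks NODE_DATA(nid)'s zonelist, so successive limit hits
 *	spread the reclaim work across the nodes in memcg->scan_nodes.
 */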
1796
bb4cc1a8
AM
1797/*
1798 * Check all nodes for whether they contain reclaimable pages or not.
1799 * For a quick scan, we make use of scan_nodes. This allows us to skip
1800 * unused nodes. But scan_nodes is lazily updated and may not contain
1801 * enough new information, so we need to double-check.
1802 */
1803static bool mem_cgroup_reclaimable(struct mem_cgroup *memcg, bool noswap)
1804{
1805 int nid;
1806
1807 /*
1808 * Quick check, making use of scan_nodes:
1809 * we can skip unused nodes.
1810 */
1811 if (!nodes_empty(memcg->scan_nodes)) {
1812 for (nid = first_node(memcg->scan_nodes);
1813 nid < MAX_NUMNODES;
1814 nid = next_node(nid, memcg->scan_nodes)) {
1815
1816 if (test_mem_cgroup_node_reclaimable(memcg, nid, noswap))
1817 return true;
1818 }
1819 }
1820 /*
1821 * Check rest of nodes.
1822 */
1823 for_each_node_state(nid, N_MEMORY) {
1824 if (node_isset(nid, memcg->scan_nodes))
1825 continue;
1826 if (test_mem_cgroup_node_reclaimable(memcg, nid, noswap))
1827 return true;
1828 }
1829 return false;
1830}
1831
889976db 1832#else
c0ff4b85 1833int mem_cgroup_select_victim_node(struct mem_cgroup *memcg)
889976db
YH
1834{
1835 return 0;
1836}
4d0c066d 1837
bb4cc1a8
AM
1838static bool mem_cgroup_reclaimable(struct mem_cgroup *memcg, bool noswap)
1839{
1840 return test_mem_cgroup_node_reclaimable(memcg, 0, noswap);
1841}
889976db
YH
1842#endif
1843
0608f43d
AM
1844static int mem_cgroup_soft_reclaim(struct mem_cgroup *root_memcg,
1845 struct zone *zone,
1846 gfp_t gfp_mask,
1847 unsigned long *total_scanned)
1848{
1849 struct mem_cgroup *victim = NULL;
1850 int total = 0;
1851 int loop = 0;
1852 unsigned long excess;
1853 unsigned long nr_scanned;
1854 struct mem_cgroup_reclaim_cookie reclaim = {
1855 .zone = zone,
1856 .priority = 0,
1857 };
1858
3e32cb2e 1859 excess = soft_limit_excess(root_memcg);
0608f43d
AM
1860
1861 while (1) {
1862 victim = mem_cgroup_iter(root_memcg, victim, &reclaim);
1863 if (!victim) {
1864 loop++;
1865 if (loop >= 2) {
1866 /*
1867 * If we have not been able to reclaim
1868 * anything, it might be because there are
1869 * no reclaimable pages under this hierarchy.
1870 */
1871 if (!total)
1872 break;
1873 /*
1874 * We want to do more targeted reclaim.
1875 * excess >> 2 is not too excessive, so that we
1876 * don't reclaim too much, nor too little, so that
1877 * we don't keep coming back to reclaim from this cgroup.
1878 */
1879 if (total >= (excess >> 2) ||
1880 (loop > MEM_CGROUP_MAX_RECLAIM_LOOPS))
1881 break;
1882 }
1883 continue;
1884 }
1885 if (!mem_cgroup_reclaimable(victim, false))
1886 continue;
1887 total += mem_cgroup_shrink_node_zone(victim, gfp_mask, false,
1888 zone, &nr_scanned);
1889 *total_scanned += nr_scanned;
3e32cb2e 1890 if (!soft_limit_excess(root_memcg))
0608f43d 1891 break;
6d61ef40 1892 }
0608f43d
AM
1893 mem_cgroup_iter_break(root_memcg, victim);
1894 return total;
6d61ef40
BS
1895}
1896
0056f4e6
JW
1897#ifdef CONFIG_LOCKDEP
1898static struct lockdep_map memcg_oom_lock_dep_map = {
1899 .name = "memcg_oom_lock",
1900};
1901#endif
1902
fb2a6fc5
JW
1903static DEFINE_SPINLOCK(memcg_oom_lock);
1904
867578cb
KH
1905/*
1906 * Check OOM-Killer is already running under our hierarchy.
1907 * If someone is running, return false.
1908 */
fb2a6fc5 1909static bool mem_cgroup_oom_trylock(struct mem_cgroup *memcg)
867578cb 1910{
79dfdacc 1911 struct mem_cgroup *iter, *failed = NULL;
a636b327 1912
fb2a6fc5
JW
1913 spin_lock(&memcg_oom_lock);
1914
9f3a0d09 1915 for_each_mem_cgroup_tree(iter, memcg) {
23751be0 1916 if (iter->oom_lock) {
79dfdacc
MH
1917 /*
1918 * This subtree of our hierarchy is already locked,
1919 * so we cannot take the lock.
1920 */
79dfdacc 1921 failed = iter;
9f3a0d09
JW
1922 mem_cgroup_iter_break(memcg, iter);
1923 break;
23751be0
JW
1924 } else
1925 iter->oom_lock = true;
7d74b06f 1926 }
867578cb 1927
fb2a6fc5
JW
1928 if (failed) {
1929 /*
1930 * OK, we failed to lock the whole subtree so we have
1931 * to clean up what we set up to the failing subtree
1932 */
1933 for_each_mem_cgroup_tree(iter, memcg) {
1934 if (iter == failed) {
1935 mem_cgroup_iter_break(memcg, iter);
1936 break;
1937 }
1938 iter->oom_lock = false;
79dfdacc 1939 }
0056f4e6
JW
1940 } else
1941 mutex_acquire(&memcg_oom_lock_dep_map, 0, 1, _RET_IP_);
fb2a6fc5
JW
1942
1943 spin_unlock(&memcg_oom_lock);
1944
1945 return !failed;
a636b327 1946}
0b7f569e 1947
fb2a6fc5 1948static void mem_cgroup_oom_unlock(struct mem_cgroup *memcg)
0b7f569e 1949{
7d74b06f
KH
1950 struct mem_cgroup *iter;
1951
fb2a6fc5 1952 spin_lock(&memcg_oom_lock);
0056f4e6 1953 mutex_release(&memcg_oom_lock_dep_map, 1, _RET_IP_);
c0ff4b85 1954 for_each_mem_cgroup_tree(iter, memcg)
79dfdacc 1955 iter->oom_lock = false;
fb2a6fc5 1956 spin_unlock(&memcg_oom_lock);
79dfdacc
MH
1957}
1958
c0ff4b85 1959static void mem_cgroup_mark_under_oom(struct mem_cgroup *memcg)
79dfdacc
MH
1960{
1961 struct mem_cgroup *iter;
1962
c0ff4b85 1963 for_each_mem_cgroup_tree(iter, memcg)
79dfdacc
MH
1964 atomic_inc(&iter->under_oom);
1965}
1966
c0ff4b85 1967static void mem_cgroup_unmark_under_oom(struct mem_cgroup *memcg)
79dfdacc
MH
1968{
1969 struct mem_cgroup *iter;
1970
867578cb
KH
1971 /*
1972 * When a new child is created while the hierarchy is under oom,
1973 * mem_cgroup_oom_lock() may not be called. We have to use
1974 * atomic_add_unless() here.
1975 */
c0ff4b85 1976 for_each_mem_cgroup_tree(iter, memcg)
79dfdacc 1977 atomic_add_unless(&iter->under_oom, -1, 0);
0b7f569e
KH
1978}
1979
867578cb
KH
1980static DECLARE_WAIT_QUEUE_HEAD(memcg_oom_waitq);
1981
dc98df5a 1982struct oom_wait_info {
d79154bb 1983 struct mem_cgroup *memcg;
dc98df5a
KH
1984 wait_queue_t wait;
1985};
1986
1987static int memcg_oom_wake_function(wait_queue_t *wait,
1988 unsigned mode, int sync, void *arg)
1989{
d79154bb
HD
1990 struct mem_cgroup *wake_memcg = (struct mem_cgroup *)arg;
1991 struct mem_cgroup *oom_wait_memcg;
dc98df5a
KH
1992 struct oom_wait_info *oom_wait_info;
1993
1994 oom_wait_info = container_of(wait, struct oom_wait_info, wait);
d79154bb 1995 oom_wait_memcg = oom_wait_info->memcg;
dc98df5a 1996
dc98df5a 1997 /*
d79154bb 1998 * Both of oom_wait_info->memcg and wake_memcg are stable under us.
dc98df5a
KH
1999 * Then we can use css_is_ancestor without taking care of RCU.
2000 */
c0ff4b85
R
2001 if (!mem_cgroup_same_or_subtree(oom_wait_memcg, wake_memcg)
2002 && !mem_cgroup_same_or_subtree(wake_memcg, oom_wait_memcg))
dc98df5a 2003 return 0;
dc98df5a
KH
2004 return autoremove_wake_function(wait, mode, sync, arg);
2005}
2006
c0ff4b85 2007static void memcg_wakeup_oom(struct mem_cgroup *memcg)
dc98df5a 2008{
3812c8c8 2009 atomic_inc(&memcg->oom_wakeups);
c0ff4b85
R
2010 /* for filtering, pass "memcg" as argument. */
2011 __wake_up(&memcg_oom_waitq, TASK_NORMAL, 0, memcg);
dc98df5a
KH
2012}
2013
c0ff4b85 2014static void memcg_oom_recover(struct mem_cgroup *memcg)
3c11ecf4 2015{
c0ff4b85
R
2016 if (memcg && atomic_read(&memcg->under_oom))
2017 memcg_wakeup_oom(memcg);
3c11ecf4
KH
2018}
2019
3812c8c8 2020static void mem_cgroup_oom(struct mem_cgroup *memcg, gfp_t mask, int order)
0b7f569e 2021{
3812c8c8
JW
2022 if (!current->memcg_oom.may_oom)
2023 return;
867578cb 2024 /*
49426420
JW
2025 * We are in the middle of the charge context here, so we
2026 * don't want to block when potentially sitting on a callstack
2027 * that holds all kinds of filesystem and mm locks.
2028 *
2029 * Also, the caller may handle a failed allocation gracefully
2030 * (like optional page cache readahead) and so an OOM killer
2031 * invocation might not even be necessary.
2032 *
2033 * That's why we don't do anything here except remember the
2034 * OOM context and then deal with it at the end of the page
2035 * fault when the stack is unwound, the locks are released,
2036 * and when we know whether the fault was overall successful.
867578cb 2037 */
49426420
JW
2038 css_get(&memcg->css);
2039 current->memcg_oom.memcg = memcg;
2040 current->memcg_oom.gfp_mask = mask;
2041 current->memcg_oom.order = order;
3812c8c8
JW
2042}
2043
2044/**
2045 * mem_cgroup_oom_synchronize - complete memcg OOM handling
49426420 2046 * @handle: actually kill/wait or just clean up the OOM state
3812c8c8 2047 *
49426420
JW
2048 * This has to be called at the end of a page fault if the memcg OOM
2049 * handler was enabled.
3812c8c8 2050 *
49426420 2051 * Memcg supports userspace OOM handling where failed allocations must
3812c8c8
JW
2052 * sleep on a waitqueue until the userspace task resolves the
2053 * situation. Sleeping directly in the charge context with all kinds
2054 * of locks held is not a good idea, instead we remember an OOM state
2055 * in the task and mem_cgroup_oom_synchronize() has to be called at
49426420 2056 * the end of the page fault to complete the OOM handling.
3812c8c8
JW
2057 *
2058 * Returns %true if an ongoing memcg OOM situation was detected and
49426420 2059 * completed, %false otherwise.
3812c8c8 2060 */
49426420 2061bool mem_cgroup_oom_synchronize(bool handle)
3812c8c8 2062{
49426420 2063 struct mem_cgroup *memcg = current->memcg_oom.memcg;
3812c8c8 2064 struct oom_wait_info owait;
49426420 2065 bool locked;
3812c8c8
JW
2066
2067 /* OOM is global, do not handle */
3812c8c8 2068 if (!memcg)
49426420 2069 return false;
3812c8c8 2070
49426420
JW
2071 if (!handle)
2072 goto cleanup;
3812c8c8
JW
2073
2074 owait.memcg = memcg;
2075 owait.wait.flags = 0;
2076 owait.wait.func = memcg_oom_wake_function;
2077 owait.wait.private = current;
2078 INIT_LIST_HEAD(&owait.wait.task_list);
867578cb 2079
3812c8c8 2080 prepare_to_wait(&memcg_oom_waitq, &owait.wait, TASK_KILLABLE);
49426420
JW
2081 mem_cgroup_mark_under_oom(memcg);
2082
2083 locked = mem_cgroup_oom_trylock(memcg);
2084
2085 if (locked)
2086 mem_cgroup_oom_notify(memcg);
2087
2088 if (locked && !memcg->oom_kill_disable) {
2089 mem_cgroup_unmark_under_oom(memcg);
2090 finish_wait(&memcg_oom_waitq, &owait.wait);
2091 mem_cgroup_out_of_memory(memcg, current->memcg_oom.gfp_mask,
2092 current->memcg_oom.order);
2093 } else {
3812c8c8 2094 schedule();
49426420
JW
2095 mem_cgroup_unmark_under_oom(memcg);
2096 finish_wait(&memcg_oom_waitq, &owait.wait);
2097 }
2098
2099 if (locked) {
fb2a6fc5
JW
2100 mem_cgroup_oom_unlock(memcg);
2101 /*
2102 * There is no guarantee that an OOM-lock contender
2103 * sees the wakeups triggered by the OOM kill
2104 * uncharges. Wake any sleepers explicitly.
2105 */
2106 memcg_oom_recover(memcg);
2107 }
49426420
JW
2108cleanup:
2109 current->memcg_oom.memcg = NULL;
3812c8c8 2110 css_put(&memcg->css);
867578cb 2111 return true;
0b7f569e
KH
2112}
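/*
 * Illustrative sketch (assumption: based on the generic page fault exit
 * paths): once the fault handler has unwound its stack, it completes any
 * recorded memcg OOM roughly like this:
 *
 *	pagefault_out_of_memory():
 *		if (mem_cgroup_oom_synchronize(true))
 *			return;		(memcg OOM handled, nothing global to do)
 *
 * while handle_mm_fault() calls mem_cgroup_oom_synchronize(false) when the
 * fault was handled gracefully and only the recorded state needs cleanup.
 */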
2113
d7365e78
JW
2114/**
2115 * mem_cgroup_begin_page_stat - begin a page state statistics transaction
2116 * @page: page that is going to change accounted state
2117 * @locked: &memcg->move_lock slowpath was taken
2118 * @flags: IRQ-state flags for &memcg->move_lock
32047e2a 2119 *
d7365e78
JW
2120 * This function must mark the beginning of an accounted page state
2121 * change to prevent double accounting when the page is concurrently
2122 * being moved to another memcg:
32047e2a 2123 *
d7365e78
JW
2124 * memcg = mem_cgroup_begin_page_stat(page, &locked, &flags);
2125 * if (TestClearPageState(page))
2126 * mem_cgroup_update_page_stat(memcg, state, -1);
2127 * mem_cgroup_end_page_stat(memcg, locked, flags);
32047e2a 2128 *
d7365e78
JW
2129 * The RCU lock is held throughout the transaction. The fast path can
2130 * get away without acquiring the memcg->move_lock (@locked is false)
2131 * because page moving starts with an RCU grace period.
32047e2a 2132 *
d7365e78
JW
2133 * The RCU lock also protects the memcg from being freed when the page
2134 * state that is going to change is the only thing preventing the page
2135 * from being uncharged. E.g. end-writeback clearing PageWriteback(),
2136 * which allows migration to go ahead and uncharge the page before the
2137 * account transaction might be complete.
d69b042f 2138 */
d7365e78
JW
2139struct mem_cgroup *mem_cgroup_begin_page_stat(struct page *page,
2140 bool *locked,
2141 unsigned long *flags)
89c06bd5
KH
2142{
2143 struct mem_cgroup *memcg;
2144 struct page_cgroup *pc;
2145
d7365e78
JW
2146 rcu_read_lock();
2147
2148 if (mem_cgroup_disabled())
2149 return NULL;
2150
89c06bd5
KH
2151 pc = lookup_page_cgroup(page);
2152again:
2153 memcg = pc->mem_cgroup;
2154 if (unlikely(!memcg || !PageCgroupUsed(pc)))
d7365e78
JW
2155 return NULL;
2156
2157 *locked = false;
bdcbb659 2158 if (atomic_read(&memcg->moving_account) <= 0)
d7365e78 2159 return memcg;
89c06bd5
KH
2160
2161 move_lock_mem_cgroup(memcg, flags);
2162 if (memcg != pc->mem_cgroup || !PageCgroupUsed(pc)) {
2163 move_unlock_mem_cgroup(memcg, flags);
2164 goto again;
2165 }
2166 *locked = true;
d7365e78
JW
2167
2168 return memcg;
89c06bd5
KH
2169}
2170
d7365e78
JW
2171/**
2172 * mem_cgroup_end_page_stat - finish a page state statistics transaction
2173 * @memcg: the memcg that was accounted against
2174 * @locked: value received from mem_cgroup_begin_page_stat()
2175 * @flags: value received from mem_cgroup_begin_page_stat()
2176 */
2177void mem_cgroup_end_page_stat(struct mem_cgroup *memcg, bool locked,
2178 unsigned long flags)
89c06bd5 2179{
d7365e78
JW
2180 if (memcg && locked)
2181 move_unlock_mem_cgroup(memcg, &flags);
89c06bd5 2182
d7365e78 2183 rcu_read_unlock();
89c06bd5
KH
2184}
2185
d7365e78
JW
2186/**
2187 * mem_cgroup_update_page_stat - update page state statistics
2188 * @memcg: memcg to account against
2189 * @idx: page state item to account
2190 * @val: number of pages (positive or negative)
2191 *
2192 * See mem_cgroup_begin_page_stat() for locking requirements.
2193 */
2194void mem_cgroup_update_page_stat(struct mem_cgroup *memcg,
68b4876d 2195 enum mem_cgroup_stat_index idx, int val)
d69b042f 2196{
658b72c5 2197 VM_BUG_ON(!rcu_read_lock_held());
26174efd 2198
d7365e78
JW
2199 if (memcg)
2200 this_cpu_add(memcg->stat->count[idx], val);
d69b042f 2201}
26174efd 2202
cdec2e42
KH
2203/*
2204 * size of first charge trial. "32" comes from vmscan.c's magic value.
2205 * TODO: maybe necessary to use big numbers in big irons.
2206 */
7ec99d62 2207#define CHARGE_BATCH 32U
cdec2e42
KH
2208struct memcg_stock_pcp {
2209 struct mem_cgroup *cached; /* this never be root cgroup */
11c9ea4e 2210 unsigned int nr_pages;
cdec2e42 2211 struct work_struct work;
26fe6168 2212 unsigned long flags;
a0db00fc 2213#define FLUSHING_CACHED_CHARGE 0
cdec2e42
KH
2214};
2215static DEFINE_PER_CPU(struct memcg_stock_pcp, memcg_stock);
9f50fad6 2216static DEFINE_MUTEX(percpu_charge_mutex);
cdec2e42 2217
a0956d54
SS
2218/**
2219 * consume_stock: Try to consume stocked charge on this cpu.
2220 * @memcg: memcg to consume from.
2221 * @nr_pages: how many pages to charge.
2222 *
2223 * The charges will only happen if @memcg matches the current cpu's memcg
2224 * stock, and at least @nr_pages are available in that stock. Failure to
2225 * service an allocation will refill the stock.
2226 *
2227 * returns true if successful, false otherwise.
cdec2e42 2228 */
a0956d54 2229static bool consume_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
cdec2e42
KH
2230{
2231 struct memcg_stock_pcp *stock;
3e32cb2e 2232 bool ret = false;
cdec2e42 2233
a0956d54 2234 if (nr_pages > CHARGE_BATCH)
3e32cb2e 2235 return ret;
a0956d54 2236
cdec2e42 2237 stock = &get_cpu_var(memcg_stock);
3e32cb2e 2238 if (memcg == stock->cached && stock->nr_pages >= nr_pages) {
a0956d54 2239 stock->nr_pages -= nr_pages;
3e32cb2e
JW
2240 ret = true;
2241 }
cdec2e42
KH
2242 put_cpu_var(memcg_stock);
2243 return ret;
2244}
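/*
 * Illustrative sketch (not from the original source): try_charge() below
 * pairs this fast path with refill_stock(), roughly:
 *
 *	if (consume_stock(memcg, nr_pages))
 *		goto done;				(percpu cache hit)
 *	page_counter_try_charge(..., batch, ...);	(charge a full batch)
 *	refill_stock(memcg, batch - nr_pages);		(cache the surplus)
 */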
2245
2246/*
3e32cb2e 2247 * Returns the stocks cached in percpu and resets the cached information.
cdec2e42
KH
2248 */
2249static void drain_stock(struct memcg_stock_pcp *stock)
2250{
2251 struct mem_cgroup *old = stock->cached;
2252
11c9ea4e 2253 if (stock->nr_pages) {
3e32cb2e 2254 page_counter_uncharge(&old->memory, stock->nr_pages);
cdec2e42 2255 if (do_swap_account)
3e32cb2e 2256 page_counter_uncharge(&old->memsw, stock->nr_pages);
e8ea14cc 2257 css_put_many(&old->css, stock->nr_pages);
11c9ea4e 2258 stock->nr_pages = 0;
cdec2e42
KH
2259 }
2260 stock->cached = NULL;
cdec2e42
KH
2261}
2262
2263/*
2264 * This must be called with preemption disabled, or by
2265 * a thread which is pinned to the local cpu.
2266 */
2267static void drain_local_stock(struct work_struct *dummy)
2268{
7c8e0181 2269 struct memcg_stock_pcp *stock = this_cpu_ptr(&memcg_stock);
cdec2e42 2270 drain_stock(stock);
26fe6168 2271 clear_bit(FLUSHING_CACHED_CHARGE, &stock->flags);
cdec2e42
KH
2272}
2273
e4777496
MH
2274static void __init memcg_stock_init(void)
2275{
2276 int cpu;
2277
2278 for_each_possible_cpu(cpu) {
2279 struct memcg_stock_pcp *stock =
2280 &per_cpu(memcg_stock, cpu);
2281 INIT_WORK(&stock->work, drain_local_stock);
2282 }
2283}
2284
cdec2e42 2285/*
3e32cb2e 2286 * Cache charges(val) to local per_cpu area.
320cc51d 2287 * This will be consumed by consume_stock() function, later.
cdec2e42 2288 */
c0ff4b85 2289static void refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
cdec2e42
KH
2290{
2291 struct memcg_stock_pcp *stock = &get_cpu_var(memcg_stock);
2292
c0ff4b85 2293 if (stock->cached != memcg) { /* reset if necessary */
cdec2e42 2294 drain_stock(stock);
c0ff4b85 2295 stock->cached = memcg;
cdec2e42 2296 }
11c9ea4e 2297 stock->nr_pages += nr_pages;
cdec2e42
KH
2298 put_cpu_var(memcg_stock);
2299}
2300
2301/*
c0ff4b85 2302 * Drains all per-CPU charge caches for the given root_memcg, i.e. the subtree
6d3d6aa2 2303 * of the hierarchy under it.
cdec2e42 2304 */
6d3d6aa2 2305static void drain_all_stock(struct mem_cgroup *root_memcg)
cdec2e42 2306{
26fe6168 2307 int cpu, curcpu;
d38144b7 2308
6d3d6aa2
JW
2309 /* If someone's already draining, avoid adding running more workers. */
2310 if (!mutex_trylock(&percpu_charge_mutex))
2311 return;
cdec2e42 2312 /* Notify other cpus that system-wide "drain" is running */
cdec2e42 2313 get_online_cpus();
5af12d0e 2314 curcpu = get_cpu();
cdec2e42
KH
2315 for_each_online_cpu(cpu) {
2316 struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu);
c0ff4b85 2317 struct mem_cgroup *memcg;
26fe6168 2318
c0ff4b85
R
2319 memcg = stock->cached;
2320 if (!memcg || !stock->nr_pages)
26fe6168 2321 continue;
c0ff4b85 2322 if (!mem_cgroup_same_or_subtree(root_memcg, memcg))
3e92041d 2323 continue;
d1a05b69
MH
2324 if (!test_and_set_bit(FLUSHING_CACHED_CHARGE, &stock->flags)) {
2325 if (cpu == curcpu)
2326 drain_local_stock(&stock->work);
2327 else
2328 schedule_work_on(cpu, &stock->work);
2329 }
cdec2e42 2330 }
5af12d0e 2331 put_cpu();
f894ffa8 2332 put_online_cpus();
9f50fad6 2333 mutex_unlock(&percpu_charge_mutex);
cdec2e42
KH
2334}
2335
711d3d2c
KH
2336/*
2337 * This function drains the percpu counter values from a DEAD cpu and
2338 * moves them to the local cpu. Note that this function can be preempted.
2339 */
c0ff4b85 2340static void mem_cgroup_drain_pcp_counter(struct mem_cgroup *memcg, int cpu)
711d3d2c
KH
2341{
2342 int i;
2343
c0ff4b85 2344 spin_lock(&memcg->pcp_counter_lock);
6104621d 2345 for (i = 0; i < MEM_CGROUP_STAT_NSTATS; i++) {
c0ff4b85 2346 long x = per_cpu(memcg->stat->count[i], cpu);
711d3d2c 2347
c0ff4b85
R
2348 per_cpu(memcg->stat->count[i], cpu) = 0;
2349 memcg->nocpu_base.count[i] += x;
711d3d2c 2350 }
e9f8974f 2351 for (i = 0; i < MEM_CGROUP_EVENTS_NSTATS; i++) {
c0ff4b85 2352 unsigned long x = per_cpu(memcg->stat->events[i], cpu);
e9f8974f 2353
c0ff4b85
R
2354 per_cpu(memcg->stat->events[i], cpu) = 0;
2355 memcg->nocpu_base.events[i] += x;
e9f8974f 2356 }
c0ff4b85 2357 spin_unlock(&memcg->pcp_counter_lock);
711d3d2c
KH
2358}
2359
0db0628d 2360static int memcg_cpu_hotplug_callback(struct notifier_block *nb,
cdec2e42
KH
2361 unsigned long action,
2362 void *hcpu)
2363{
2364 int cpu = (unsigned long)hcpu;
2365 struct memcg_stock_pcp *stock;
711d3d2c 2366 struct mem_cgroup *iter;
cdec2e42 2367
619d094b 2368 if (action == CPU_ONLINE)
1489ebad 2369 return NOTIFY_OK;
1489ebad 2370
d833049b 2371 if (action != CPU_DEAD && action != CPU_DEAD_FROZEN)
cdec2e42 2372 return NOTIFY_OK;
711d3d2c 2373
9f3a0d09 2374 for_each_mem_cgroup(iter)
711d3d2c
KH
2375 mem_cgroup_drain_pcp_counter(iter, cpu);
2376
cdec2e42
KH
2377 stock = &per_cpu(memcg_stock, cpu);
2378 drain_stock(stock);
2379 return NOTIFY_OK;
2380}
2381
00501b53
JW
2382static int try_charge(struct mem_cgroup *memcg, gfp_t gfp_mask,
2383 unsigned int nr_pages)
8a9f3ccd 2384{
7ec99d62 2385 unsigned int batch = max(CHARGE_BATCH, nr_pages);
9b130619 2386 int nr_retries = MEM_CGROUP_RECLAIM_RETRIES;
6539cc05 2387 struct mem_cgroup *mem_over_limit;
3e32cb2e 2388 struct page_counter *counter;
6539cc05 2389 unsigned long nr_reclaimed;
b70a2a21
JW
2390 bool may_swap = true;
2391 bool drained = false;
05b84301 2392 int ret = 0;
a636b327 2393
ce00a967
JW
2394 if (mem_cgroup_is_root(memcg))
2395 goto done;
6539cc05 2396retry:
b6b6cc72
MH
2397 if (consume_stock(memcg, nr_pages))
2398 goto done;
8a9f3ccd 2399
3fbe7244 2400 if (!do_swap_account ||
3e32cb2e
JW
2401 !page_counter_try_charge(&memcg->memsw, batch, &counter)) {
2402 if (!page_counter_try_charge(&memcg->memory, batch, &counter))
6539cc05 2403 goto done_restock;
3fbe7244 2404 if (do_swap_account)
3e32cb2e
JW
2405 page_counter_uncharge(&memcg->memsw, batch);
2406 mem_over_limit = mem_cgroup_from_counter(counter, memory);
3fbe7244 2407 } else {
3e32cb2e 2408 mem_over_limit = mem_cgroup_from_counter(counter, memsw);
b70a2a21 2409 may_swap = false;
3fbe7244 2410 }
7a81b88c 2411
6539cc05
JW
2412 if (batch > nr_pages) {
2413 batch = nr_pages;
2414 goto retry;
2415 }
6d61ef40 2416
06b078fc
JW
2417 /*
2418 * Unlike in global OOM situations, memcg is not in a physical
2419 * memory shortage. Allow dying and OOM-killed tasks to
2420 * bypass the last charges so that they can exit quickly and
2421 * free their memory.
2422 */
2423 if (unlikely(test_thread_flag(TIF_MEMDIE) ||
2424 fatal_signal_pending(current) ||
2425 current->flags & PF_EXITING))
2426 goto bypass;
2427
2428 if (unlikely(task_in_memcg_oom(current)))
2429 goto nomem;
2430
6539cc05
JW
2431 if (!(gfp_mask & __GFP_WAIT))
2432 goto nomem;
4b534334 2433
b70a2a21
JW
2434 nr_reclaimed = try_to_free_mem_cgroup_pages(mem_over_limit, nr_pages,
2435 gfp_mask, may_swap);
6539cc05 2436
61e02c74 2437 if (mem_cgroup_margin(mem_over_limit) >= nr_pages)
6539cc05 2438 goto retry;
28c34c29 2439
b70a2a21 2440 if (!drained) {
6d3d6aa2 2441 drain_all_stock(mem_over_limit);
b70a2a21
JW
2442 drained = true;
2443 goto retry;
2444 }
2445
28c34c29
JW
2446 if (gfp_mask & __GFP_NORETRY)
2447 goto nomem;
6539cc05
JW
2448 /*
2449 * Even though the limit is exceeded at this point, reclaim
2450 * may have been able to free some pages. Retry the charge
2451 * before killing the task.
2452 *
2453 * Only for regular pages, though: huge pages are rather
2454 * unlikely to succeed so close to the limit, and we fall back
2455 * to regular pages anyway in case of failure.
2456 */
61e02c74 2457 if (nr_reclaimed && nr_pages <= (1 << PAGE_ALLOC_COSTLY_ORDER))
6539cc05
JW
2458 goto retry;
2459 /*
2460 * At task move, charge accounts can be doubly counted. So, it's
2461 * better to wait until the end of task_move if something is going on.
2462 */
2463 if (mem_cgroup_wait_acct_move(mem_over_limit))
2464 goto retry;
2465
9b130619
JW
2466 if (nr_retries--)
2467 goto retry;
2468
06b078fc
JW
2469 if (gfp_mask & __GFP_NOFAIL)
2470 goto bypass;
2471
6539cc05
JW
2472 if (fatal_signal_pending(current))
2473 goto bypass;
2474
61e02c74 2475 mem_cgroup_oom(mem_over_limit, gfp_mask, get_order(nr_pages));
7a81b88c 2476nomem:
6d1fdc48 2477 if (!(gfp_mask & __GFP_NOFAIL))
3168ecbe 2478 return -ENOMEM;
867578cb 2479bypass:
ce00a967 2480 return -EINTR;
6539cc05
JW
2481
2482done_restock:
e8ea14cc 2483 css_get_many(&memcg->css, batch);
6539cc05
JW
2484 if (batch > nr_pages)
2485 refill_stock(memcg, batch - nr_pages);
2486done:
05b84301 2487 return ret;
7a81b88c 2488}
8a9f3ccd 2489
00501b53 2490static void cancel_charge(struct mem_cgroup *memcg, unsigned int nr_pages)
a3032a2c 2491{
ce00a967
JW
2492 if (mem_cgroup_is_root(memcg))
2493 return;
2494
3e32cb2e 2495 page_counter_uncharge(&memcg->memory, nr_pages);
05b84301 2496 if (do_swap_account)
3e32cb2e 2497 page_counter_uncharge(&memcg->memsw, nr_pages);
e8ea14cc
JW
2498
2499 css_put_many(&memcg->css, nr_pages);
d01dd17f
KH
2500}
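/*
 * Illustrative sketch (assumption: matches the public charge API built on
 * top of try_charge()/commit_charge()/cancel_charge()): a typical caller
 * does roughly
 *
 *	if (mem_cgroup_try_charge(page, mm, gfp_mask, &memcg))
 *		return -ENOMEM;
 *	...install the page in the page table or page cache...
 *	mem_cgroup_commit_charge(page, memcg, false);
 *
 * and calls mem_cgroup_cancel_charge(page, memcg) instead of committing if
 * installing the page fails.
 */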
2501
a3b2d692
KH
2502/*
2503 * A helper function to get a mem_cgroup from an ID. Must be called under
ec903c0c
TH
2504 * rcu_read_lock(). The caller is responsible for calling
2505 * css_tryget_online() if the mem_cgroup is used for charging. (dropping
2506 * refcnt from swap can be called against removed memcg.)
a3b2d692
KH
2507 */
2508static struct mem_cgroup *mem_cgroup_lookup(unsigned short id)
2509{
a3b2d692
KH
2510 /* ID 0 is unused ID */
2511 if (!id)
2512 return NULL;
34c00c31 2513 return mem_cgroup_from_id(id);
a3b2d692
KH
2514}
2515
0a31bc97
JW
2516/*
2517 * try_get_mem_cgroup_from_page - look up page's memcg association
2518 * @page: the page
2519 *
2520 * Look up, get a css reference, and return the memcg that owns @page.
2521 *
2522 * The page must be locked to prevent racing with swap-in and page
2523 * cache charges. If coming from an unlocked page table, the caller
2524 * must ensure the page is on the LRU or this can race with charging.
2525 */
e42d9d5d 2526struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page)
b5a84319 2527{
c0ff4b85 2528 struct mem_cgroup *memcg = NULL;
3c776e64 2529 struct page_cgroup *pc;
a3b2d692 2530 unsigned short id;
b5a84319
KH
2531 swp_entry_t ent;
2532
309381fe 2533 VM_BUG_ON_PAGE(!PageLocked(page), page);
3c776e64 2534
3c776e64 2535 pc = lookup_page_cgroup(page);
a3b2d692 2536 if (PageCgroupUsed(pc)) {
c0ff4b85 2537 memcg = pc->mem_cgroup;
ec903c0c 2538 if (memcg && !css_tryget_online(&memcg->css))
c0ff4b85 2539 memcg = NULL;
e42d9d5d 2540 } else if (PageSwapCache(page)) {
3c776e64 2541 ent.val = page_private(page);
9fb4b7cc 2542 id = lookup_swap_cgroup_id(ent);
a3b2d692 2543 rcu_read_lock();
c0ff4b85 2544 memcg = mem_cgroup_lookup(id);
ec903c0c 2545 if (memcg && !css_tryget_online(&memcg->css))
c0ff4b85 2546 memcg = NULL;
a3b2d692 2547 rcu_read_unlock();
3c776e64 2548 }
c0ff4b85 2549 return memcg;
b5a84319
KH
2550}
2551
0a31bc97
JW
2552static void lock_page_lru(struct page *page, int *isolated)
2553{
2554 struct zone *zone = page_zone(page);
2555
2556 spin_lock_irq(&zone->lru_lock);
2557 if (PageLRU(page)) {
2558 struct lruvec *lruvec;
2559
2560 lruvec = mem_cgroup_page_lruvec(page, zone);
2561 ClearPageLRU(page);
2562 del_page_from_lru_list(page, lruvec, page_lru(page));
2563 *isolated = 1;
2564 } else
2565 *isolated = 0;
2566}
2567
2568static void unlock_page_lru(struct page *page, int isolated)
2569{
2570 struct zone *zone = page_zone(page);
2571
2572 if (isolated) {
2573 struct lruvec *lruvec;
2574
2575 lruvec = mem_cgroup_page_lruvec(page, zone);
2576 VM_BUG_ON_PAGE(PageLRU(page), page);
2577 SetPageLRU(page);
2578 add_page_to_lru_list(page, lruvec, page_lru(page));
2579 }
2580 spin_unlock_irq(&zone->lru_lock);
2581}
2582
00501b53 2583static void commit_charge(struct page *page, struct mem_cgroup *memcg,
6abb5a86 2584 bool lrucare)
7a81b88c 2585{
ce587e65 2586 struct page_cgroup *pc = lookup_page_cgroup(page);
0a31bc97 2587 int isolated;
9ce70c02 2588
309381fe 2589 VM_BUG_ON_PAGE(PageCgroupUsed(pc), page);
ca3e0214
KH
2590 /*
2591 * we don't need page_cgroup_lock for tail pages, because they are not
2592 * accessed by any other context at this point.
2593 */
9ce70c02
HD
2594
2595 /*
2596 * In some cases (SwapCache and FUSE's splice_buf->radixtree), the page
2597 * may already be on some other mem_cgroup's LRU. Take care of it.
2598 */
0a31bc97
JW
2599 if (lrucare)
2600 lock_page_lru(page, &isolated);
9ce70c02 2601
0a31bc97
JW
2602 /*
2603 * Nobody should be changing or seriously looking at
2604 * pc->mem_cgroup and pc->flags at this point:
2605 *
2606 * - the page is uncharged
2607 *
2608 * - the page is off-LRU
2609 *
2610 * - an anonymous fault has exclusive page access, except for
2611 * a locked page table
2612 *
2613 * - a page cache insertion, a swapin fault, or a migration
2614 * have the page locked
2615 */
c0ff4b85 2616 pc->mem_cgroup = memcg;
0a31bc97 2617 pc->flags = PCG_USED | PCG_MEM | (do_swap_account ? PCG_MEMSW : 0);
9ce70c02 2618
0a31bc97
JW
2619 if (lrucare)
2620 unlock_page_lru(page, isolated);
7a81b88c 2621}
66e1707b 2622
7ae1e1d0 2623#ifdef CONFIG_MEMCG_KMEM
bd673145
VD
2624/*
2625 * The memcg_slab_mutex is held whenever a per memcg kmem cache is created or
2626 * destroyed. It protects memcg_caches arrays and memcg_slab_caches lists.
2627 */
2628static DEFINE_MUTEX(memcg_slab_mutex);
2629
d6441637
VD
2630static DEFINE_MUTEX(activate_kmem_mutex);
2631
1f458cbf
GC
2632/*
2633 * This is a bit cumbersome, but it is rarely used and avoids a backpointer
2634 * in the memcg_cache_params struct.
2635 */
2636static struct kmem_cache *memcg_params_to_cache(struct memcg_cache_params *p)
2637{
2638 struct kmem_cache *cachep;
2639
2640 VM_BUG_ON(p->is_root_cache);
2641 cachep = p->root_cache;
7a67d7ab 2642 return cache_from_memcg_idx(cachep, memcg_cache_id(p->memcg));
1f458cbf
GC
2643}
2644
749c5415 2645#ifdef CONFIG_SLABINFO
2da8ca82 2646static int mem_cgroup_slabinfo_read(struct seq_file *m, void *v)
749c5415 2647{
2da8ca82 2648 struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
749c5415
GC
2649 struct memcg_cache_params *params;
2650
cf2b8fbf 2651 if (!memcg_kmem_is_active(memcg))
749c5415
GC
2652 return -EIO;
2653
2654 print_slabinfo_header(m);
2655
bd673145 2656 mutex_lock(&memcg_slab_mutex);
749c5415
GC
2657 list_for_each_entry(params, &memcg->memcg_slab_caches, list)
2658 cache_show(memcg_params_to_cache(params), m);
bd673145 2659 mutex_unlock(&memcg_slab_mutex);
749c5415
GC
2660
2661 return 0;
2662}
2663#endif
2664
3e32cb2e
JW
2665static int memcg_charge_kmem(struct mem_cgroup *memcg, gfp_t gfp,
2666 unsigned long nr_pages)
7ae1e1d0 2667{
3e32cb2e 2668 struct page_counter *counter;
7ae1e1d0 2669 int ret = 0;
7ae1e1d0 2670
3e32cb2e
JW
2671 ret = page_counter_try_charge(&memcg->kmem, nr_pages, &counter);
2672 if (ret < 0)
7ae1e1d0
GC
2673 return ret;
2674
3e32cb2e 2675 ret = try_charge(memcg, gfp, nr_pages);
7ae1e1d0
GC
2676 if (ret == -EINTR) {
2677 /*
00501b53
JW
2678 * try_charge() chose to bypass to root due to OOM kill or
2679 * fatal signal. Since our only options are to either fail
2680 * the allocation or charge it to this cgroup, do it as a
2681 * temporary condition. But we can't fail. From a kmem/slab
2682 * perspective, the cache has already been selected, by
2683 * mem_cgroup_kmem_get_cache(), so it is too late to change
7ae1e1d0
GC
2684 * our minds.
2685 *
2686 * This condition will only trigger if the task entered
00501b53
JW
2687 * memcg_charge_kmem in a sane state, but was OOM-killed
2688 * during try_charge() above. Tasks that were already dying
2689 * when the allocation triggers should have been already
7ae1e1d0
GC
2690 * directed to the root cgroup in memcontrol.h
2691 */
3e32cb2e 2692 page_counter_charge(&memcg->memory, nr_pages);
7ae1e1d0 2693 if (do_swap_account)
3e32cb2e 2694 page_counter_charge(&memcg->memsw, nr_pages);
e8ea14cc 2695 css_get_many(&memcg->css, nr_pages);
7ae1e1d0
GC
2696 ret = 0;
2697 } else if (ret)
3e32cb2e 2698 page_counter_uncharge(&memcg->kmem, nr_pages);
7ae1e1d0
GC
2699
2700 return ret;
2701}
2702
3e32cb2e
JW
2703static void memcg_uncharge_kmem(struct mem_cgroup *memcg,
2704 unsigned long nr_pages)
7ae1e1d0 2705{
3e32cb2e 2706 page_counter_uncharge(&memcg->memory, nr_pages);
7ae1e1d0 2707 if (do_swap_account)
3e32cb2e 2708 page_counter_uncharge(&memcg->memsw, nr_pages);
7de37682 2709
64f21993 2710 page_counter_uncharge(&memcg->kmem, nr_pages);
e8ea14cc
JW
2711
2712 css_put_many(&memcg->css, nr_pages);
7ae1e1d0
GC
2713}
2714
2633d7a0
GC
2715/*
2716 * helper for accessing a memcg's index. It will be used as an index in the
2717 * child cache array in kmem_cache, and also to derive its name. This function
2718 * will return -1 when this is not a kmem-limited memcg.
2719 */
2720int memcg_cache_id(struct mem_cgroup *memcg)
2721{
2722 return memcg ? memcg->kmemcg_id : -1;
2723}
2724
f3bb3043 2725static int memcg_alloc_cache_id(void)
55007d84 2726{
f3bb3043
VD
2727 int id, size;
2728 int err;
2729
2730 id = ida_simple_get(&kmem_limited_groups,
2731 0, MEMCG_CACHES_MAX_SIZE, GFP_KERNEL);
2732 if (id < 0)
2733 return id;
55007d84 2734
f3bb3043
VD
2735 if (id < memcg_limited_groups_array_size)
2736 return id;
2737
2738 /*
2739 * There's no space for the new id in memcg_caches arrays,
2740 * so we have to grow them.
2741 */
2742
2743 size = 2 * (id + 1);
55007d84
GC
2744 if (size < MEMCG_CACHES_MIN_SIZE)
2745 size = MEMCG_CACHES_MIN_SIZE;
2746 else if (size > MEMCG_CACHES_MAX_SIZE)
2747 size = MEMCG_CACHES_MAX_SIZE;
2748
f3bb3043
VD
2749 mutex_lock(&memcg_slab_mutex);
2750 err = memcg_update_all_caches(size);
2751 mutex_unlock(&memcg_slab_mutex);
2752
2753 if (err) {
2754 ida_simple_remove(&kmem_limited_groups, id);
2755 return err;
2756 }
2757 return id;
2758}
2759
2760static void memcg_free_cache_id(int id)
2761{
2762 ida_simple_remove(&kmem_limited_groups, id);
55007d84
GC
2763}
2764
2765/*
2766 * We should update the current array size iff all cache updates succeed. This
2767 * can only be done from the slab side. The slab mutex needs to be held when
2768 * calling this.
2769 */
2770void memcg_update_array_size(int num)
2771{
f3bb3043 2772 memcg_limited_groups_array_size = num;
55007d84
GC
2773}
2774
776ed0f0
VD
2775static void memcg_register_cache(struct mem_cgroup *memcg,
2776 struct kmem_cache *root_cache)
2633d7a0 2777{
93f39eea
VD
2778 static char memcg_name_buf[NAME_MAX + 1]; /* protected by
2779 memcg_slab_mutex */
bd673145 2780 struct kmem_cache *cachep;
d7f25f8a
GC
2781 int id;
2782
bd673145
VD
2783 lockdep_assert_held(&memcg_slab_mutex);
2784
2785 id = memcg_cache_id(memcg);
2786
2787 /*
2788 * Since per-memcg caches are created asynchronously on first
2789 * allocation (see memcg_kmem_get_cache()), several threads can try to
2790 * create the same cache, but only one of them may succeed.
2791 */
2792 if (cache_from_memcg_idx(root_cache, id))
1aa13254
VD
2793 return;
2794
073ee1c6 2795 cgroup_name(memcg->css.cgroup, memcg_name_buf, NAME_MAX + 1);
776ed0f0 2796 cachep = memcg_create_kmem_cache(memcg, root_cache, memcg_name_buf);
2edefe11 2797 /*
bd673145
VD
2798 * If we could not create a memcg cache, do not complain, because
2799 * that's not critical at all as we can always proceed with the root
2800 * cache.
2edefe11 2801 */
bd673145
VD
2802 if (!cachep)
2803 return;
2edefe11 2804
33a690c4 2805 css_get(&memcg->css);
bd673145 2806 list_add(&cachep->memcg_params->list, &memcg->memcg_slab_caches);
1aa13254 2807
d7f25f8a 2808 /*
959c8963
VD
2809 * Since readers won't lock (see cache_from_memcg_idx()), we need a
2810 * barrier here to ensure nobody will see the kmem_cache partially
2811 * initialized.
d7f25f8a 2812 */
959c8963
VD
2813 smp_wmb();
2814
bd673145
VD
2815 BUG_ON(root_cache->memcg_params->memcg_caches[id]);
2816 root_cache->memcg_params->memcg_caches[id] = cachep;
1aa13254 2817}
d7f25f8a 2818
776ed0f0 2819static void memcg_unregister_cache(struct kmem_cache *cachep)
1aa13254 2820{
bd673145 2821 struct kmem_cache *root_cache;
1aa13254
VD
2822 struct mem_cgroup *memcg;
2823 int id;
2824
bd673145 2825 lockdep_assert_held(&memcg_slab_mutex);
d7f25f8a 2826
bd673145 2827 BUG_ON(is_root_cache(cachep));
2edefe11 2828
bd673145
VD
2829 root_cache = cachep->memcg_params->root_cache;
2830 memcg = cachep->memcg_params->memcg;
96403da2 2831 id = memcg_cache_id(memcg);
d7f25f8a 2832
bd673145
VD
2833 BUG_ON(root_cache->memcg_params->memcg_caches[id] != cachep);
2834 root_cache->memcg_params->memcg_caches[id] = NULL;
d7f25f8a 2835
bd673145
VD
2836 list_del(&cachep->memcg_params->list);
2837
2838 kmem_cache_destroy(cachep);
33a690c4
VD
2839
2840 /* drop the reference taken in memcg_register_cache */
2841 css_put(&memcg->css);
2633d7a0
GC
2842}
2843
0e9d92f2
GC
2844/*
2845 * During the creation a new cache, we need to disable our accounting mechanism
2846 * altogether. This is true even if we are not creating, but rather just
2847 * enqueuing new caches to be created.
2848 *
2849 * This is because that process will trigger allocations; some visible, like
2850 * explicit kmallocs to auxiliary data structures, name strings and internal
2851 * cache structures; some well concealed, like INIT_WORK() that can allocate
2852 * objects during debug.
2853 *
2854 * If any allocation happens during memcg_kmem_get_cache, we will recurse back
2855 * to it. This may not be a bounded recursion: since the first cache creation
2856 * failed to complete (waiting on the allocation), we'll just try to create the
2857 * cache again, failing at the same point.
2858 *
2859 * memcg_kmem_get_cache is prepared to abort after seeing a positive count of
2860 * memcg_kmem_skip_account. So we enclose anything that might allocate memory
2861 * inside the following two functions.
2862 */
2863static inline void memcg_stop_kmem_account(void)
2864{
2865 VM_BUG_ON(!current->mm);
2866 current->memcg_kmem_skip_account++;
2867}
2868
2869static inline void memcg_resume_kmem_account(void)
2870{
2871 VM_BUG_ON(!current->mm);
2872 current->memcg_kmem_skip_account--;
2873}
2874
776ed0f0 2875int __memcg_cleanup_cache_params(struct kmem_cache *s)
7cf27982
GC
2876{
2877 struct kmem_cache *c;
b8529907 2878 int i, failed = 0;
7cf27982 2879
bd673145 2880 mutex_lock(&memcg_slab_mutex);
7a67d7ab
QH
2881 for_each_memcg_cache_index(i) {
2882 c = cache_from_memcg_idx(s, i);
7cf27982
GC
2883 if (!c)
2884 continue;
2885
776ed0f0 2886 memcg_unregister_cache(c);
b8529907
VD
2887
2888 if (cache_from_memcg_idx(s, i))
2889 failed++;
7cf27982 2890 }
bd673145 2891 mutex_unlock(&memcg_slab_mutex);
b8529907 2892 return failed;
7cf27982
GC
2893}
2894
776ed0f0 2895static void memcg_unregister_all_caches(struct mem_cgroup *memcg)
1f458cbf
GC
2896{
2897 struct kmem_cache *cachep;
bd673145 2898 struct memcg_cache_params *params, *tmp;
1f458cbf
GC
2899
2900 if (!memcg_kmem_is_active(memcg))
2901 return;
2902
bd673145
VD
2903 mutex_lock(&memcg_slab_mutex);
2904 list_for_each_entry_safe(params, tmp, &memcg->memcg_slab_caches, list) {
1f458cbf 2905 cachep = memcg_params_to_cache(params);
bd673145
VD
2906 kmem_cache_shrink(cachep);
2907 if (atomic_read(&cachep->memcg_params->nr_pages) == 0)
776ed0f0 2908 memcg_unregister_cache(cachep);
1f458cbf 2909 }
bd673145 2910 mutex_unlock(&memcg_slab_mutex);
1f458cbf
GC
2911}
2912
776ed0f0 2913struct memcg_register_cache_work {
5722d094
VD
2914 struct mem_cgroup *memcg;
2915 struct kmem_cache *cachep;
2916 struct work_struct work;
2917};
2918
776ed0f0 2919static void memcg_register_cache_func(struct work_struct *w)
d7f25f8a 2920{
776ed0f0
VD
2921 struct memcg_register_cache_work *cw =
2922 container_of(w, struct memcg_register_cache_work, work);
5722d094
VD
2923 struct mem_cgroup *memcg = cw->memcg;
2924 struct kmem_cache *cachep = cw->cachep;
d7f25f8a 2925
bd673145 2926 mutex_lock(&memcg_slab_mutex);
776ed0f0 2927 memcg_register_cache(memcg, cachep);
bd673145
VD
2928 mutex_unlock(&memcg_slab_mutex);
2929
5722d094 2930 css_put(&memcg->css);
d7f25f8a
GC
2931 kfree(cw);
2932}
2933
2934/*
2935 * Enqueue the creation of a per-memcg kmem_cache.
d7f25f8a 2936 */
776ed0f0
VD
2937static void __memcg_schedule_register_cache(struct mem_cgroup *memcg,
2938 struct kmem_cache *cachep)
d7f25f8a 2939{
776ed0f0 2940 struct memcg_register_cache_work *cw;
d7f25f8a 2941
776ed0f0 2942 cw = kmalloc(sizeof(*cw), GFP_NOWAIT);
ca0dde97
LZ
2943 if (cw == NULL) {
2944 css_put(&memcg->css);
d7f25f8a
GC
2945 return;
2946 }
2947
2948 cw->memcg = memcg;
2949 cw->cachep = cachep;
2950
776ed0f0 2951 INIT_WORK(&cw->work, memcg_register_cache_func);
d7f25f8a
GC
2952 schedule_work(&cw->work);
2953}
2954
776ed0f0
VD
2955static void memcg_schedule_register_cache(struct mem_cgroup *memcg,
2956 struct kmem_cache *cachep)
0e9d92f2
GC
2957{
2958 /*
2959 * We need to stop accounting when we kmalloc, because if the
2960 * corresponding kmalloc cache is not yet created, the first allocation
776ed0f0 2961 * in __memcg_schedule_register_cache will recurse.
0e9d92f2
GC
2962 *
2963 * However, it is better to enclose the whole function. Depending on
2964 * the debugging options enabled, INIT_WORK(), for instance, can
2965 * trigger an allocation. This too, will make us recurse. Because at
2966 * this point we can't allow ourselves back into memcg_kmem_get_cache,
2967 * the safest choice is to do it like this, wrapping the whole function.
2968 */
2969 memcg_stop_kmem_account();
776ed0f0 2970 __memcg_schedule_register_cache(memcg, cachep);
0e9d92f2
GC
2971 memcg_resume_kmem_account();
2972}
c67a8a68
VD
2973
2974int __memcg_charge_slab(struct kmem_cache *cachep, gfp_t gfp, int order)
2975{
3e32cb2e 2976 unsigned int nr_pages = 1 << order;
c67a8a68
VD
2977 int res;
2978
3e32cb2e 2979 res = memcg_charge_kmem(cachep->memcg_params->memcg, gfp, nr_pages);
c67a8a68 2980 if (!res)
3e32cb2e 2981 atomic_add(nr_pages, &cachep->memcg_params->nr_pages);
c67a8a68
VD
2982 return res;
2983}
2984
2985void __memcg_uncharge_slab(struct kmem_cache *cachep, int order)
2986{
3e32cb2e
JW
2987 unsigned int nr_pages = 1 << order;
2988
2989 memcg_uncharge_kmem(cachep->memcg_params->memcg, nr_pages);
2990 atomic_sub(nr_pages, &cachep->memcg_params->nr_pages);
c67a8a68
VD
2991}
2992
d7f25f8a
GC
2993/*
2994 * Return the kmem_cache we're supposed to use for a slab allocation.
2995 * We try to use the current memcg's version of the cache.
2996 *
2997 * If the cache does not exist yet and we are the first user of it,
2998 * we either create it immediately, if possible, or create it asynchronously
2999 * in a workqueue.
3000 * In the latter case, we will let the current allocation go through with
3001 * the original cache.
3002 *
3003 * Can't be called in interrupt context or from kernel threads.
3004 * This function needs to be called with rcu_read_lock() held.
3005 */
3006struct kmem_cache *__memcg_kmem_get_cache(struct kmem_cache *cachep,
3007 gfp_t gfp)
3008{
3009 struct mem_cgroup *memcg;
959c8963 3010 struct kmem_cache *memcg_cachep;
d7f25f8a
GC
3011
3012 VM_BUG_ON(!cachep->memcg_params);
3013 VM_BUG_ON(!cachep->memcg_params->is_root_cache);
3014
0e9d92f2
GC
3015 if (!current->mm || current->memcg_kmem_skip_account)
3016 return cachep;
3017
d7f25f8a
GC
3018 rcu_read_lock();
3019 memcg = mem_cgroup_from_task(rcu_dereference(current->mm->owner));
d7f25f8a 3020
cf2b8fbf 3021 if (!memcg_kmem_is_active(memcg))
ca0dde97 3022 goto out;
d7f25f8a 3023
959c8963
VD
3024 memcg_cachep = cache_from_memcg_idx(cachep, memcg_cache_id(memcg));
3025 if (likely(memcg_cachep)) {
3026 cachep = memcg_cachep;
ca0dde97 3027 goto out;
d7f25f8a
GC
3028 }
3029
ca0dde97 3030 /* The corresponding put will be done in the workqueue. */
ec903c0c 3031 if (!css_tryget_online(&memcg->css))
ca0dde97
LZ
3032 goto out;
3033 rcu_read_unlock();
3034
3035 /*
3036 * If we are in a safe context (can wait, and not in interrupt
3037 * context), we could be predictable and return right away.
3038 * This would guarantee that the allocation being performed
3039 * already belongs in the new cache.
3040 *
3041 * However, there are some clashes that can arrive from locking.
3042 * For instance, because we acquire the slab_mutex while doing
776ed0f0
VD
3043 * memcg_create_kmem_cache, this means no further allocation
3044 * could happen with the slab_mutex held. So it's better to
3045 * defer everything.
ca0dde97 3046 */
776ed0f0 3047 memcg_schedule_register_cache(memcg, cachep);
ca0dde97
LZ
3048 return cachep;
3049out:
3050 rcu_read_unlock();
3051 return cachep;
d7f25f8a 3052}
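/*
 * Illustrative sketch (assumption: the memcg_kmem_get_cache() wrapper in
 * include/linux/memcontrol.h): the slab allocators substitute the per-memcg
 * cache on their allocation hot path, roughly
 *
 *	cachep = memcg_kmem_get_cache(cachep, gfpflags);
 *
 * and then allocate the object from whichever cache was returned, falling
 * back to the root cache whenever the per-memcg copy is not ready yet.
 */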
d7f25f8a 3053
7ae1e1d0
GC
3054/*
3055 * We need to verify if the allocation against current->mm->owner's memcg is
3056 * possible for the given order. But the page is not allocated yet, so we'll
3057 * need a further commit step to do the final arrangements.
3058 *
3059 * It is possible for the task to switch cgroups in the meantime, so at
3060 * commit time, we can't rely on task conversion any longer. We'll then use
3061 * the handle argument to return to the caller which cgroup we should commit
3062 * against. We could also return the memcg directly and avoid the pointer
3063 * passing, but a boolean return value gives better semantics considering
3064 * the compiled-out case as well.
3065 *
3066 * Returning true means the allocation is possible.
3067 */
3068bool
3069__memcg_kmem_newpage_charge(gfp_t gfp, struct mem_cgroup **_memcg, int order)
3070{
3071 struct mem_cgroup *memcg;
3072 int ret;
3073
3074 *_memcg = NULL;
6d42c232
GC
3075
3076 /*
3077 * Disabling accounting is only relevant for some specific memcg
3078 * internal allocations. Therefore we would initially not have such
52383431
VD
3079 * check here, since direct calls to the page allocator that are
3080 * accounted to kmemcg (alloc_kmem_pages and friends) only happen
3081 * outside memcg core. We are mostly concerned with cache allocations,
3082 * and by having this test at memcg_kmem_get_cache, we are already able
3083 * to relay the allocation to the root cache and bypass the memcg cache
3084 * altogether.
6d42c232
GC
3085 *
3086 * There is one exception, though: the SLUB allocator does not create
3087 * large order caches, but rather service large kmallocs directly from
3088 * the page allocator. Therefore, the following sequence when backed by
3089 * the SLUB allocator:
3090 *
f894ffa8
AM
3091 * memcg_stop_kmem_account();
3092 * kmalloc(<large_number>)
3093 * memcg_resume_kmem_account();
6d42c232
GC
3094 *
3095 * would effectively ignore the fact that we should skip accounting,
3096 * since it will drive us directly to this function without passing
3097 * through the cache selector memcg_kmem_get_cache. Such large
3098 * allocations are extremely rare but can happen, for instance, for the
3099 * cache arrays. We bring this test here.
3100 */
3101 if (!current->mm || current->memcg_kmem_skip_account)
3102 return true;
3103
df381975 3104 memcg = get_mem_cgroup_from_mm(current->mm);
7ae1e1d0 3105
cf2b8fbf 3106 if (!memcg_kmem_is_active(memcg)) {
7ae1e1d0
GC
3107 css_put(&memcg->css);
3108 return true;
3109 }
3110
3e32cb2e 3111 ret = memcg_charge_kmem(memcg, gfp, 1 << order);
7ae1e1d0
GC
3112 if (!ret)
3113 *_memcg = memcg;
7ae1e1d0
GC
3114
3115 css_put(&memcg->css);
3116 return (ret == 0);
3117}
3118
3119void __memcg_kmem_commit_charge(struct page *page, struct mem_cgroup *memcg,
3120 int order)
3121{
3122 struct page_cgroup *pc;
3123
3124 VM_BUG_ON(mem_cgroup_is_root(memcg));
3125
3126 /* The page allocation failed. Revert */
3127 if (!page) {
3e32cb2e 3128 memcg_uncharge_kmem(memcg, 1 << order);
7ae1e1d0
GC
3129 return;
3130 }
a840cda6
JW
3131 /*
3132 * The page is freshly allocated and not visible to any
3133 * outside callers yet. Set up pc non-atomically.
3134 */
7ae1e1d0 3135 pc = lookup_page_cgroup(page);
7ae1e1d0 3136 pc->mem_cgroup = memcg;
a840cda6 3137 pc->flags = PCG_USED;
7ae1e1d0
GC
3138}
3139
3140void __memcg_kmem_uncharge_pages(struct page *page, int order)
3141{
3142 struct mem_cgroup *memcg = NULL;
3143 struct page_cgroup *pc;
3144
3145
3146 pc = lookup_page_cgroup(page);
7ae1e1d0
GC
3147 if (!PageCgroupUsed(pc))
3148 return;
3149
a840cda6
JW
3150 memcg = pc->mem_cgroup;
3151 pc->flags = 0;
7ae1e1d0
GC
3152
3153 /*
3154 * We trust that only if there is a memcg associated with the page, it
3155 * is a valid allocation
3156 */
3157 if (!memcg)
3158 return;
3159
309381fe 3160 VM_BUG_ON_PAGE(mem_cgroup_is_root(memcg), page);
3e32cb2e 3161 memcg_uncharge_kmem(memcg, 1 << order);
7ae1e1d0 3162}
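/*
 * Illustrative sketch (assumption: the wrappers in
 * include/linux/memcontrol.h used by the kmem page allocation path): a
 * charged page allocation is bracketed roughly like this:
 *
 *	struct mem_cgroup *memcg = NULL;
 *
 *	if (!memcg_kmem_newpage_charge(gfp, &memcg, order))
 *		return NULL;
 *	page = alloc_pages(gfp, order);
 *	memcg_kmem_commit_charge(page, memcg, order);
 *	...
 *	memcg_kmem_uncharge_pages(page, order);		(at free time)
 */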
1f458cbf 3163#else
776ed0f0 3164static inline void memcg_unregister_all_caches(struct mem_cgroup *memcg)
1f458cbf
GC
3165{
3166}
7ae1e1d0
GC
3167#endif /* CONFIG_MEMCG_KMEM */
3168
ca3e0214
KH
3169#ifdef CONFIG_TRANSPARENT_HUGEPAGE
3170
ca3e0214
KH
3171/*
3172 * Because tail pages are not marked as "used", set it. We're under
e94c8a9c
KH
3173 * zone->lru_lock, 'splitting on pmd' and compound_lock.
3174 * charge/uncharge will be never happen and move_account() is done under
3175 * compound_lock(), so we don't have to take care of races.
ca3e0214 3176 */
e94c8a9c 3177void mem_cgroup_split_huge_fixup(struct page *head)
ca3e0214
KH
3178{
3179 struct page_cgroup *head_pc = lookup_page_cgroup(head);
e94c8a9c 3180 struct page_cgroup *pc;
b070e65c 3181 struct mem_cgroup *memcg;
e94c8a9c 3182 int i;
ca3e0214 3183
3d37c4a9
KH
3184 if (mem_cgroup_disabled())
3185 return;
b070e65c
DR
3186
3187 memcg = head_pc->mem_cgroup;
e94c8a9c
KH
3188 for (i = 1; i < HPAGE_PMD_NR; i++) {
3189 pc = head_pc + i;
b070e65c 3190 pc->mem_cgroup = memcg;
0a31bc97 3191 pc->flags = head_pc->flags;
e94c8a9c 3192 }
b070e65c
DR
3193 __this_cpu_sub(memcg->stat->count[MEM_CGROUP_STAT_RSS_HUGE],
3194 HPAGE_PMD_NR);
ca3e0214 3195}
12d27107 3196#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
ca3e0214 3197
f817ed48 3198/**
de3638d9 3199 * mem_cgroup_move_account - move account of the page
5564e88b 3200 * @page: the page
7ec99d62 3201 * @nr_pages: number of regular pages (>1 for huge pages)
f817ed48
KH
3202 * @pc: page_cgroup of the page.
3203 * @from: mem_cgroup which the page is moved from.
3204 * @to: mem_cgroup which the page is moved to. @from != @to.
3205 *
3206 * The caller must confirm the following:
08e552c6 3207 * - page is not on LRU (isolate_page() is useful.)
7ec99d62 3208 * - compound_lock is held when nr_pages > 1
f817ed48 3209 *
2f3479b1
KH
3210 * This function doesn't do "charge" to the new cgroup and doesn't do "uncharge"
3211 * from the old cgroup.
f817ed48 3212 */
7ec99d62
JW
3213static int mem_cgroup_move_account(struct page *page,
3214 unsigned int nr_pages,
3215 struct page_cgroup *pc,
3216 struct mem_cgroup *from,
2f3479b1 3217 struct mem_cgroup *to)
f817ed48 3218{
de3638d9
JW
3219 unsigned long flags;
3220 int ret;
987eba66 3221
f817ed48 3222 VM_BUG_ON(from == to);
309381fe 3223 VM_BUG_ON_PAGE(PageLRU(page), page);
de3638d9
JW
3224 /*
3225 * The page is isolated from LRU. So, collapse function
3226 * will not handle this page. But page splitting can happen.
3227 * Do this check under compound_page_lock(). The caller should
3228 * hold it.
3229 */
3230 ret = -EBUSY;
7ec99d62 3231 if (nr_pages > 1 && !PageTransHuge(page))
de3638d9
JW
3232 goto out;
3233
0a31bc97
JW
3234 /*
3235 * Prevent mem_cgroup_migrate() from looking at pc->mem_cgroup
3236 * of its source page while we change it: page migration takes
3237 * both pages off the LRU, but page cache replacement doesn't.
3238 */
3239 if (!trylock_page(page))
3240 goto out;
de3638d9
JW
3241
3242 ret = -EINVAL;
3243 if (!PageCgroupUsed(pc) || pc->mem_cgroup != from)
0a31bc97 3244 goto out_unlock;
de3638d9 3245
312734c0 3246 move_lock_mem_cgroup(from, &flags);
f817ed48 3247
0a31bc97 3248 if (!PageAnon(page) && page_mapped(page)) {
59d1d256
JW
3249 __this_cpu_sub(from->stat->count[MEM_CGROUP_STAT_FILE_MAPPED],
3250 nr_pages);
3251 __this_cpu_add(to->stat->count[MEM_CGROUP_STAT_FILE_MAPPED],
3252 nr_pages);
3253 }
3ea67d06 3254
59d1d256
JW
3255 if (PageWriteback(page)) {
3256 __this_cpu_sub(from->stat->count[MEM_CGROUP_STAT_WRITEBACK],
3257 nr_pages);
3258 __this_cpu_add(to->stat->count[MEM_CGROUP_STAT_WRITEBACK],
3259 nr_pages);
3260 }
3ea67d06 3261
0a31bc97
JW
3262 /*
3263 * It is safe to change pc->mem_cgroup here because the page
3264 * is referenced, charged, and isolated - we can't race with
3265 * uncharging, charging, migration, or LRU putback.
3266 */
d69b042f 3267
854ffa8d 3268 /* caller should have done css_get */
08e552c6 3269 pc->mem_cgroup = to;
312734c0 3270 move_unlock_mem_cgroup(from, &flags);
de3638d9 3271 ret = 0;
0a31bc97
JW
3272
3273 local_irq_disable();
3274 mem_cgroup_charge_statistics(to, page, nr_pages);
5564e88b 3275 memcg_check_events(to, page);
0a31bc97 3276 mem_cgroup_charge_statistics(from, page, -nr_pages);
5564e88b 3277 memcg_check_events(from, page);
0a31bc97
JW
3278 local_irq_enable();
3279out_unlock:
3280 unlock_page(page);
de3638d9 3281out:
f817ed48
KH
3282 return ret;
3283}
3284
c255a458 3285#ifdef CONFIG_MEMCG_SWAP
0a31bc97
JW
3286static void mem_cgroup_swap_statistics(struct mem_cgroup *memcg,
3287 bool charge)
d13d1443 3288{
0a31bc97
JW
3289 int val = (charge) ? 1 : -1;
3290 this_cpu_add(memcg->stat->count[MEM_CGROUP_STAT_SWAP], val);
d13d1443 3291}
02491447
DN
3292
3293/**
3294 * mem_cgroup_move_swap_account - move swap charge and swap_cgroup's record.
3295 * @entry: swap entry to be moved
3296 * @from: mem_cgroup which the entry is moved from
3297 * @to: mem_cgroup which the entry is moved to
3298 *
3299 * It succeeds only when the swap_cgroup's record for this entry is the same
3300 * as the mem_cgroup's id of @from.
3301 *
3302 * Returns 0 on success, -EINVAL on failure.
3303 *
3e32cb2e 3304 * The caller must have charged to @to, IOW, called page_counter_charge() for
02491447
DN
3305 * both memory and memsw, and called css_get().
3306 */
3307static int mem_cgroup_move_swap_account(swp_entry_t entry,
e91cbb42 3308 struct mem_cgroup *from, struct mem_cgroup *to)
02491447
DN
3309{
3310 unsigned short old_id, new_id;
3311
34c00c31
LZ
3312 old_id = mem_cgroup_id(from);
3313 new_id = mem_cgroup_id(to);
02491447
DN
3314
3315 if (swap_cgroup_cmpxchg(entry, old_id, new_id) == old_id) {
02491447 3316 mem_cgroup_swap_statistics(from, false);
483c30b5 3317 mem_cgroup_swap_statistics(to, true);
02491447 3318 /*
483c30b5 3319 * This function is only called from task migration context now.
3e32cb2e 3320 * It postpones page_counter and refcount handling till the end
483c30b5 3321 * of task migration(mem_cgroup_clear_mc()) for performance
4050377b
LZ
3322 * improvement. But we cannot postpone css_get(to) because if
3323 * the process that has been moved to @to does swap-in, the
3324 * refcount of @to might be decreased to 0.
3325 *
3326 * We are in attach() phase, so the cgroup is guaranteed to be
3327 * alive, so we can just call css_get().
02491447 3328 */
4050377b 3329 css_get(&to->css);
02491447
DN
3330 return 0;
3331 }
3332 return -EINVAL;
3333}
3334#else
3335static inline int mem_cgroup_move_swap_account(swp_entry_t entry,
e91cbb42 3336 struct mem_cgroup *from, struct mem_cgroup *to)
02491447
DN
3337{
3338 return -EINVAL;
3339}
8c7c6e34 3340#endif
d13d1443 3341
f212ad7c
DN
3342#ifdef CONFIG_DEBUG_VM
3343static struct page_cgroup *lookup_page_cgroup_used(struct page *page)
3344{
3345 struct page_cgroup *pc;
3346
3347 pc = lookup_page_cgroup(page);
cfa44946
JW
3348 /*
3349 * Can be NULL while feeding pages into the page allocator for
3350 * the first time, i.e. during boot or memory hotplug;
3351 * or when mem_cgroup_disabled().
3352 */
f212ad7c
DN
3353 if (likely(pc) && PageCgroupUsed(pc))
3354 return pc;
3355 return NULL;
3356}
3357
3358bool mem_cgroup_bad_page_check(struct page *page)
3359{
3360 if (mem_cgroup_disabled())
3361 return false;
3362
3363 return lookup_page_cgroup_used(page) != NULL;
3364}
3365
3366void mem_cgroup_print_bad_page(struct page *page)
3367{
3368 struct page_cgroup *pc;
3369
3370 pc = lookup_page_cgroup_used(page);
3371 if (pc) {
d045197f
AM
3372 pr_alert("pc:%p pc->flags:%lx pc->mem_cgroup:%p\n",
3373 pc, pc->flags, pc->mem_cgroup);
f212ad7c
DN
3374 }
3375}
3376#endif
3377
3e32cb2e
JW
3378static DEFINE_MUTEX(memcg_limit_mutex);
3379
d38d2a75 3380static int mem_cgroup_resize_limit(struct mem_cgroup *memcg,
3e32cb2e 3381 unsigned long limit)
628f4235 3382{
3e32cb2e
JW
3383 unsigned long curusage;
3384 unsigned long oldusage;
3385 bool enlarge = false;
81d39c20 3386 int retry_count;
3e32cb2e 3387 int ret;
81d39c20
KH
3388
3389 /*
3390 * To keep hierarchical_reclaim simple, how long we retry is up to
3391 * the caller. We set our retry count to be a function of the
3392 * number of children we should visit in this loop.
3393 */
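	/*
	 * Worked example (numbers are illustrative): with
	 * MEM_CGROUP_RECLAIM_RETRIES == 5 and mem_cgroup_count_children()
	 * returning 3, the loop below gives up after 15 iterations in
	 * which usage failed to shrink.
	 */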
3e32cb2e
JW
3394 retry_count = MEM_CGROUP_RECLAIM_RETRIES *
3395 mem_cgroup_count_children(memcg);
81d39c20 3396
3e32cb2e 3397 oldusage = page_counter_read(&memcg->memory);
628f4235 3398
3e32cb2e 3399 do {
628f4235
KH
3400 if (signal_pending(current)) {
3401 ret = -EINTR;
3402 break;
3403 }
3e32cb2e
JW
3404
3405 mutex_lock(&memcg_limit_mutex);
3406 if (limit > memcg->memsw.limit) {
3407 mutex_unlock(&memcg_limit_mutex);
8c7c6e34 3408 ret = -EINVAL;
628f4235
KH
3409 break;
3410 }
3e32cb2e
JW
3411 if (limit > memcg->memory.limit)
3412 enlarge = true;
3413 ret = page_counter_limit(&memcg->memory, limit);
3414 mutex_unlock(&memcg_limit_mutex);
8c7c6e34
KH
3415
3416 if (!ret)
3417 break;
3418
b70a2a21
JW
3419 try_to_free_mem_cgroup_pages(memcg, 1, GFP_KERNEL, true);
3420
3e32cb2e 3421 curusage = page_counter_read(&memcg->memory);
81d39c20 3422 /* Usage is reduced ? */
f894ffa8 3423 if (curusage >= oldusage)
81d39c20
KH
3424 retry_count--;
3425 else
3426 oldusage = curusage;
3e32cb2e
JW
3427 } while (retry_count);
3428
3c11ecf4
KH
3429 if (!ret && enlarge)
3430 memcg_oom_recover(memcg);
14797e23 3431
8c7c6e34
KH
3432 return ret;
3433}
3434
338c8431 3435static int mem_cgroup_resize_memsw_limit(struct mem_cgroup *memcg,
3e32cb2e 3436 unsigned long limit)
8c7c6e34 3437{
3e32cb2e
JW
3438 unsigned long curusage;
3439 unsigned long oldusage;
3440 bool enlarge = false;
81d39c20 3441 int retry_count;
3e32cb2e 3442 int ret;
8c7c6e34 3443
81d39c20 3444 /* see mem_cgroup_resize_limit() */
3e32cb2e
JW
3445 retry_count = MEM_CGROUP_RECLAIM_RETRIES *
3446 mem_cgroup_count_children(memcg);
3447
3448 oldusage = page_counter_read(&memcg->memsw);
3449
3450 do {
8c7c6e34
KH
3451 if (signal_pending(current)) {
3452 ret = -EINTR;
3453 break;
3454 }
3e32cb2e
JW
3455
3456 mutex_lock(&memcg_limit_mutex);
3457 if (limit < memcg->memory.limit) {
3458 mutex_unlock(&memcg_limit_mutex);
8c7c6e34 3459 ret = -EINVAL;
8c7c6e34
KH
3460 break;
3461 }
3e32cb2e
JW
3462 if (limit > memcg->memsw.limit)
3463 enlarge = true;
3464 ret = page_counter_limit(&memcg->memsw, limit);
3465 mutex_unlock(&memcg_limit_mutex);
8c7c6e34
KH
3466
3467 if (!ret)
3468 break;
3469
b70a2a21
JW
3470 try_to_free_mem_cgroup_pages(memcg, 1, GFP_KERNEL, false);
3471
3e32cb2e 3472 curusage = page_counter_read(&memcg->memsw);
81d39c20 3473 /* Usage is reduced ? */
8c7c6e34 3474 if (curusage >= oldusage)
628f4235 3475 retry_count--;
81d39c20
KH
3476 else
3477 oldusage = curusage;
3e32cb2e
JW
3478 } while (retry_count);
3479
3c11ecf4
KH
3480 if (!ret && enlarge)
3481 memcg_oom_recover(memcg);
3e32cb2e 3482
628f4235
KH
3483 return ret;
3484}
3485
0608f43d
AM
3486unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
3487 gfp_t gfp_mask,
3488 unsigned long *total_scanned)
3489{
3490 unsigned long nr_reclaimed = 0;
3491 struct mem_cgroup_per_zone *mz, *next_mz = NULL;
3492 unsigned long reclaimed;
3493 int loop = 0;
3494 struct mem_cgroup_tree_per_zone *mctz;
3e32cb2e 3495 unsigned long excess;
0608f43d
AM
3496 unsigned long nr_scanned;
3497
3498 if (order > 0)
3499 return 0;
3500
3501 mctz = soft_limit_tree_node_zone(zone_to_nid(zone), zone_idx(zone));
3502 /*
3503 * This loop can run for a while, especially if mem_cgroups continuously
3504 * keep exceeding their soft limit and putting the system under
3505 * pressure.
3506 */
3507 do {
3508 if (next_mz)
3509 mz = next_mz;
3510 else
3511 mz = mem_cgroup_largest_soft_limit_node(mctz);
3512 if (!mz)
3513 break;
3514
3515 nr_scanned = 0;
3516 reclaimed = mem_cgroup_soft_reclaim(mz->memcg, zone,
3517 gfp_mask, &nr_scanned);
3518 nr_reclaimed += reclaimed;
3519 *total_scanned += nr_scanned;
0a31bc97 3520 spin_lock_irq(&mctz->lock);
bc2f2e7f 3521 __mem_cgroup_remove_exceeded(mz, mctz);
0608f43d
AM
3522
3523 /*
3524 * If we failed to reclaim anything from this memory cgroup
3525 * it is time to move on to the next cgroup
3526 */
3527 next_mz = NULL;
bc2f2e7f
VD
3528 if (!reclaimed)
3529 next_mz = __mem_cgroup_largest_soft_limit_node(mctz);
3530
3e32cb2e 3531 excess = soft_limit_excess(mz->memcg);
0608f43d
AM
3532 /*
3533 * One school of thought says that we should not add
3534 * back the node to the tree if reclaim returns 0.
3535 * But our reclaim could return 0 simply because, due
3536 * to priority, we are exposing a smaller subset of
3537 * memory to reclaim from. Consider this as a longer
3538 * term TODO.
3539 */
3540 /* If excess == 0, no tree ops */
cf2c8127 3541 __mem_cgroup_insert_exceeded(mz, mctz, excess);
0a31bc97 3542 spin_unlock_irq(&mctz->lock);
0608f43d
AM
3543 css_put(&mz->memcg->css);
3544 loop++;
3545 /*
3546 * Could not reclaim anything and there are no more
3547 * mem cgroups to try or we seem to be looping without
3548 * reclaiming anything.
3549 */
3550 if (!nr_reclaimed &&
3551 (next_mz == NULL ||
3552 loop > MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS))
3553 break;
3554 } while (!nr_reclaimed);
3555 if (next_mz)
3556 css_put(&next_mz->memcg->css);
3557 return nr_reclaimed;
3558}
3559
ea280e7b
TH
3560/*
3561 * Test whether @memcg has children, dead or alive. Note that this
3562 * function doesn't care whether @memcg has use_hierarchy enabled and
3563 * returns %true if there are child csses according to the cgroup
3564 * hierarchy. Testing use_hierarchy is the caller's responsibility.
3565 */
b5f99b53
GC
3566static inline bool memcg_has_children(struct mem_cgroup *memcg)
3567{
ea280e7b
TH
3568 bool ret;
3569
696ac172 3570 /*
ea280e7b
TH
3571 * The lock does not prevent addition or deletion of children, but
3572 * it prevents a new child from being initialized based on this
3573 * parent in css_online(), so it's enough to decide whether
3574 * hierarchically inherited attributes can still be changed or not.
696ac172 3575 */
ea280e7b
TH
3576 lockdep_assert_held(&memcg_create_mutex);
3577
3578 rcu_read_lock();
3579 ret = css_next_child(NULL, &memcg->css);
3580 rcu_read_unlock();
3581 return ret;
b5f99b53
GC
3582}
3583
c26251f9
MH
3584/*
3585 * Reclaims as many pages from the given memcg as possible and moves
3586 * the rest to the parent.
3587 *
3588 * Caller is responsible for holding css reference for memcg.
3589 */
3590static int mem_cgroup_force_empty(struct mem_cgroup *memcg)
3591{
3592 int nr_retries = MEM_CGROUP_RECLAIM_RETRIES;
c26251f9 3593
c1e862c1
KH
3594 /* we call try-to-free pages to make this cgroup empty */
3595 lru_add_drain_all();
f817ed48 3596 /* try to free all pages in this cgroup */
3e32cb2e 3597 while (nr_retries && page_counter_read(&memcg->memory)) {
f817ed48 3598 int progress;
c1e862c1 3599
c26251f9
MH
3600 if (signal_pending(current))
3601 return -EINTR;
3602
b70a2a21
JW
3603 progress = try_to_free_mem_cgroup_pages(memcg, 1,
3604 GFP_KERNEL, true);
c1e862c1 3605 if (!progress) {
f817ed48 3606 nr_retries--;
c1e862c1 3607 /* maybe some writeback is necessary */
8aa7e847 3608 congestion_wait(BLK_RW_ASYNC, HZ/10);
c1e862c1 3609 }
f817ed48
KH
3610
3611 }
ab5196c2
MH
3612
3613 return 0;
cc847582
KH
3614}
3615
6770c64e
TH
3616static ssize_t mem_cgroup_force_empty_write(struct kernfs_open_file *of,
3617 char *buf, size_t nbytes,
3618 loff_t off)
c1e862c1 3619{
6770c64e 3620 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
c26251f9 3621
d8423011
MH
3622 if (mem_cgroup_is_root(memcg))
3623 return -EINVAL;
6770c64e 3624 return mem_cgroup_force_empty(memcg) ?: nbytes;
c1e862c1
KH
3625}
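/*
 * Illustrative user-space sketch (paths and group name are assumptions):
 * any write to the non-root memory.force_empty file reaches the handler
 * above and triggers the reclaim loop; the written value itself is ignored.
 *
 *	int fd = open("/sys/fs/cgroup/memory/mygroup/memory.force_empty",
 *		      O_WRONLY);
 *	if (fd >= 0) {
 *		write(fd, "0", 1);
 *		close(fd);
 *	}
 */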
3626
182446d0
TH
3627static u64 mem_cgroup_hierarchy_read(struct cgroup_subsys_state *css,
3628 struct cftype *cft)
18f59ea7 3629{
182446d0 3630 return mem_cgroup_from_css(css)->use_hierarchy;
18f59ea7
BS
3631}
3632
182446d0
TH
3633static int mem_cgroup_hierarchy_write(struct cgroup_subsys_state *css,
3634 struct cftype *cft, u64 val)
18f59ea7
BS
3635{
3636 int retval = 0;
182446d0 3637 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
5c9d535b 3638 struct mem_cgroup *parent_memcg = mem_cgroup_from_css(memcg->css.parent);
18f59ea7 3639
0999821b 3640 mutex_lock(&memcg_create_mutex);
567fb435
GC
3641
3642 if (memcg->use_hierarchy == val)
3643 goto out;
3644
18f59ea7 3645 /*
af901ca1 3646 * If parent's use_hierarchy is set, we can't make any modifications
18f59ea7
BS
3647 * in the child subtrees. If it is unset, then the change can
3648 * occur, provided the current cgroup has no children.
3649 *
3650 * For the root cgroup, parent_memcg is NULL; we allow the value to
3651 * be set if there are no children.
3652 */
c0ff4b85 3653 if ((!parent_memcg || !parent_memcg->use_hierarchy) &&
18f59ea7 3654 (val == 1 || val == 0)) {
ea280e7b 3655 if (!memcg_has_children(memcg))
c0ff4b85 3656 memcg->use_hierarchy = val;
18f59ea7
BS
3657 else
3658 retval = -EBUSY;
3659 } else
3660 retval = -EINVAL;
567fb435
GC
3661
3662out:
0999821b 3663 mutex_unlock(&memcg_create_mutex);
18f59ea7
BS
3664
3665 return retval;
3666}
3667
3e32cb2e
JW
3668static unsigned long tree_stat(struct mem_cgroup *memcg,
3669 enum mem_cgroup_stat_index idx)
ce00a967
JW
3670{
3671 struct mem_cgroup *iter;
3672 long val = 0;
3673
3674 /* Per-cpu values can be negative, use a signed accumulator */
3675 for_each_mem_cgroup_tree(iter, memcg)
3676 val += mem_cgroup_read_stat(iter, idx);
3677
3678 if (val < 0) /* race ? */
3679 val = 0;
3680 return val;
3681}
3682
3683static inline u64 mem_cgroup_usage(struct mem_cgroup *memcg, bool swap)
3684{
3685 u64 val;
3686
3e32cb2e
JW
3687 if (mem_cgroup_is_root(memcg)) {
3688 val = tree_stat(memcg, MEM_CGROUP_STAT_CACHE);
3689 val += tree_stat(memcg, MEM_CGROUP_STAT_RSS);
3690 if (swap)
3691 val += tree_stat(memcg, MEM_CGROUP_STAT_SWAP);
3692 } else {
ce00a967 3693 if (!swap)
3e32cb2e 3694 val = page_counter_read(&memcg->memory);
ce00a967 3695 else
3e32cb2e 3696 val = page_counter_read(&memcg->memsw);
ce00a967 3697 }
ce00a967
JW
3698 return val << PAGE_SHIFT;
3699}
3700
3e32cb2e
JW
3701enum {
3702 RES_USAGE,
3703 RES_LIMIT,
3704 RES_MAX_USAGE,
3705 RES_FAILCNT,
3706 RES_SOFT_LIMIT,
3707};
ce00a967 3708
791badbd 3709static u64 mem_cgroup_read_u64(struct cgroup_subsys_state *css,
05b84301 3710 struct cftype *cft)
8cdea7c0 3711{
182446d0 3712 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
3e32cb2e 3713 struct page_counter *counter;
af36f906 3714
3e32cb2e 3715 switch (MEMFILE_TYPE(cft->private)) {
8c7c6e34 3716 case _MEM:
3e32cb2e
JW
3717 counter = &memcg->memory;
3718 break;
8c7c6e34 3719 case _MEMSWAP:
3e32cb2e
JW
3720 counter = &memcg->memsw;
3721 break;
510fc4e1 3722 case _KMEM:
3e32cb2e 3723 counter = &memcg->kmem;
510fc4e1 3724 break;
8c7c6e34
KH
3725 default:
3726 BUG();
8c7c6e34 3727 }
3e32cb2e
JW
3728
3729 switch (MEMFILE_ATTR(cft->private)) {
3730 case RES_USAGE:
3731 if (counter == &memcg->memory)
3732 return mem_cgroup_usage(memcg, false);
3733 if (counter == &memcg->memsw)
3734 return mem_cgroup_usage(memcg, true);
3735 return (u64)page_counter_read(counter) * PAGE_SIZE;
3736 case RES_LIMIT:
3737 return (u64)counter->limit * PAGE_SIZE;
3738 case RES_MAX_USAGE:
3739 return (u64)counter->watermark * PAGE_SIZE;
3740 case RES_FAILCNT:
3741 return counter->failcnt;
3742 case RES_SOFT_LIMIT:
3743 return (u64)memcg->soft_limit * PAGE_SIZE;
3744 default:
3745 BUG();
3746 }
8cdea7c0 3747}
510fc4e1 3748
510fc4e1 3749#ifdef CONFIG_MEMCG_KMEM
d6441637
VD
3750/* should be called with activate_kmem_mutex held */
3751static int __memcg_activate_kmem(struct mem_cgroup *memcg,
3e32cb2e 3752 unsigned long nr_pages)
d6441637
VD
3753{
3754 int err = 0;
3755 int memcg_id;
3756
3757 if (memcg_kmem_is_active(memcg))
3758 return 0;
3759
3760 /*
3761 * We are going to allocate memory for data shared by all memory
3762 * cgroups so let's stop accounting here.
3763 */
3764 memcg_stop_kmem_account();
3765
510fc4e1
GC
3766 /*
3767 * For simplicity, we won't allow this to be disabled. It also can't
3768 * be changed if the cgroup has children already, or if tasks had
3769 * already joined.
3770 *
3771 * If tasks join before we set the limit, a person looking at
3772 * kmem.usage_in_bytes will have no way to determine when it took
3773 * place, which makes the value quite meaningless.
3774 *
3775 * After it first became limited, changes in the value of the limit are
3776 * of course permitted.
510fc4e1 3777 */
0999821b 3778 mutex_lock(&memcg_create_mutex);
ea280e7b
TH
3779 if (cgroup_has_tasks(memcg->css.cgroup) ||
3780 (memcg->use_hierarchy && memcg_has_children(memcg)))
d6441637
VD
3781 err = -EBUSY;
3782 mutex_unlock(&memcg_create_mutex);
3783 if (err)
3784 goto out;
510fc4e1 3785
f3bb3043 3786 memcg_id = memcg_alloc_cache_id();
d6441637
VD
3787 if (memcg_id < 0) {
3788 err = memcg_id;
3789 goto out;
3790 }
3791
d6441637
VD
3792 memcg->kmemcg_id = memcg_id;
3793 INIT_LIST_HEAD(&memcg->memcg_slab_caches);
d6441637
VD
3794
3795 /*
3796 * We couldn't have accounted to this cgroup, because it hasn't got the
3797 * active bit set yet, so this should succeed.
3798 */
3e32cb2e 3799 err = page_counter_limit(&memcg->kmem, nr_pages);
d6441637
VD
3800 VM_BUG_ON(err);
3801
3802 static_key_slow_inc(&memcg_kmem_enabled_key);
3803 /*
3804 * Setting the active bit after enabling static branching will
3805 * guarantee no one starts accounting before all call sites are
3806 * patched.
3807 */
3808 memcg_kmem_set_active(memcg);
510fc4e1 3809out:
d6441637
VD
3810 memcg_resume_kmem_account();
3811 return err;
d6441637
VD
3812}
3813
3814static int memcg_activate_kmem(struct mem_cgroup *memcg,
3e32cb2e 3815 unsigned long nr_pages)
d6441637
VD
3816{
3817 int ret;
3818
3819 mutex_lock(&activate_kmem_mutex);
3e32cb2e 3820 ret = __memcg_activate_kmem(memcg, nr_pages);
d6441637
VD
3821 mutex_unlock(&activate_kmem_mutex);
3822 return ret;
3823}
3824
3825static int memcg_update_kmem_limit(struct mem_cgroup *memcg,
3e32cb2e 3826 unsigned long limit)
d6441637
VD
3827{
3828 int ret;
3829
3e32cb2e 3830 mutex_lock(&memcg_limit_mutex);
d6441637 3831 if (!memcg_kmem_is_active(memcg))
3e32cb2e 3832 ret = memcg_activate_kmem(memcg, limit);
d6441637 3833 else
3e32cb2e
JW
3834 ret = page_counter_limit(&memcg->kmem, limit);
3835 mutex_unlock(&memcg_limit_mutex);
510fc4e1
GC
3836 return ret;
3837}
3838
55007d84 3839static int memcg_propagate_kmem(struct mem_cgroup *memcg)
510fc4e1 3840{
55007d84 3841 int ret = 0;
510fc4e1 3842 struct mem_cgroup *parent = parent_mem_cgroup(memcg);
55007d84 3843
d6441637
VD
3844 if (!parent)
3845 return 0;
55007d84 3846
d6441637 3847 mutex_lock(&activate_kmem_mutex);
55007d84 3848 /*
d6441637
VD
3849 * If the parent cgroup is not kmem-active now, it cannot be activated
3850 * after this point, because it has at least one child already.
55007d84 3851 */
d6441637 3852 if (memcg_kmem_is_active(parent))
3e32cb2e 3853 ret = __memcg_activate_kmem(memcg, PAGE_COUNTER_MAX);
d6441637 3854 mutex_unlock(&activate_kmem_mutex);
55007d84 3855 return ret;
510fc4e1 3856}
d6441637
VD
3857#else
3858static int memcg_update_kmem_limit(struct mem_cgroup *memcg,
3e32cb2e 3859 unsigned long limit)
d6441637
VD
3860{
3861 return -EINVAL;
3862}
6d043990 3863#endif /* CONFIG_MEMCG_KMEM */
510fc4e1 3864
628f4235
KH
3865/*
3866 * The users of this function are the writable limit files:
3867 * RES_LIMIT and RES_SOFT_LIMIT.
3868 */
451af504
TH
3869static ssize_t mem_cgroup_write(struct kernfs_open_file *of,
3870 char *buf, size_t nbytes, loff_t off)
8cdea7c0 3871{
451af504 3872 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
3e32cb2e 3873 unsigned long nr_pages;
628f4235
KH
3874 int ret;
3875
451af504 3876 buf = strstrip(buf);
3e32cb2e
JW
3877 ret = page_counter_memparse(buf, &nr_pages);
3878 if (ret)
3879 return ret;
af36f906 3880
3e32cb2e 3881 switch (MEMFILE_ATTR(of_cft(of)->private)) {
628f4235 3882 case RES_LIMIT:
4b3bde4c
BS
3883 if (mem_cgroup_is_root(memcg)) { /* Can't set limit on root */
3884 ret = -EINVAL;
3885 break;
3886 }
3e32cb2e
JW
3887 switch (MEMFILE_TYPE(of_cft(of)->private)) {
3888 case _MEM:
3889 ret = mem_cgroup_resize_limit(memcg, nr_pages);
8c7c6e34 3890 break;
3e32cb2e
JW
3891 case _MEMSWAP:
3892 ret = mem_cgroup_resize_memsw_limit(memcg, nr_pages);
296c81d8 3893 break;
3e32cb2e
JW
3894 case _KMEM:
3895 ret = memcg_update_kmem_limit(memcg, nr_pages);
3896 break;
3897 }
296c81d8 3898 break;
3e32cb2e
JW
3899 case RES_SOFT_LIMIT:
3900 memcg->soft_limit = nr_pages;
3901 ret = 0;
628f4235
KH
3902 break;
3903 }
451af504 3904 return ret ?: nbytes;
8cdea7c0
BS
3905}
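/*
 * Illustrative user-space sketch (not part of this file; paths and the
 * group name are assumptions): the strings parsed above by
 * page_counter_memparse() arrive through writes to files such as
 * memory.limit_in_bytes, and memparse() accepts K/M/G suffixes, so the
 * "64M" below requests a 64 MiB limit.
 */
#include <stdio.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>

int main(void)
{
	const char *limit = "/sys/fs/cgroup/memory/mygroup/memory.limit_in_bytes";
	const char *usage = "/sys/fs/cgroup/memory/mygroup/memory.usage_in_bytes";
	char buf[64];
	ssize_t n;
	int fd;

	fd = open(limit, O_WRONLY);
	if (fd < 0 || write(fd, "64M", strlen("64M")) < 0) {
		perror("memory.limit_in_bytes");
		return 1;
	}
	close(fd);

	fd = open(usage, O_RDONLY);
	if (fd < 0) {
		perror("memory.usage_in_bytes");
		return 1;
	}
	n = read(fd, buf, sizeof(buf) - 1);
	if (n > 0) {
		buf[n] = '\0';
		printf("current usage: %s", buf);	/* bytes, newline included */
	}
	close(fd);
	return 0;
}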
3906
6770c64e
TH
3907static ssize_t mem_cgroup_reset(struct kernfs_open_file *of, char *buf,
3908 size_t nbytes, loff_t off)
c84872e1 3909{
6770c64e 3910 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
3e32cb2e 3911 struct page_counter *counter;
c84872e1 3912
3e32cb2e
JW
3913 switch (MEMFILE_TYPE(of_cft(of)->private)) {
3914 case _MEM:
3915 counter = &memcg->memory;
3916 break;
3917 case _MEMSWAP:
3918 counter = &memcg->memsw;
3919 break;
3920 case _KMEM:
3921 counter = &memcg->kmem;
3922 break;
3923 default:
3924 BUG();
3925 }
af36f906 3926
3e32cb2e 3927 switch (MEMFILE_ATTR(of_cft(of)->private)) {
29f2a4da 3928 case RES_MAX_USAGE:
3e32cb2e 3929 page_counter_reset_watermark(counter);
29f2a4da
PE
3930 break;
3931 case RES_FAILCNT:
3e32cb2e 3932 counter->failcnt = 0;
29f2a4da 3933 break;
3e32cb2e
JW
3934 default:
3935 BUG();
29f2a4da 3936 }
f64c3f54 3937
6770c64e 3938 return nbytes;
c84872e1
PE
3939}
3940
182446d0 3941static u64 mem_cgroup_move_charge_read(struct cgroup_subsys_state *css,
7dc74be0
DN
3942 struct cftype *cft)
3943{
182446d0 3944 return mem_cgroup_from_css(css)->move_charge_at_immigrate;
7dc74be0
DN
3945}
3946
02491447 3947#ifdef CONFIG_MMU
182446d0 3948static int mem_cgroup_move_charge_write(struct cgroup_subsys_state *css,
7dc74be0
DN
3949 struct cftype *cft, u64 val)
3950{
182446d0 3951 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
7dc74be0
DN
3952
3953 if (val >= (1 << NR_MOVE_TYPE))
3954 return -EINVAL;
ee5e8472 3955
7dc74be0 3956 /*
ee5e8472
GC
3957 * No locking is needed here, because ->can_attach() will
3958 * check this value once at the beginning of the process, and then carry
3959 * on with stale data. This means that changes to this value will only
3960 * affect task migrations starting after the change.
7dc74be0 3961 */
c0ff4b85 3962 memcg->move_charge_at_immigrate = val;
7dc74be0
DN
3963 return 0;
3964}
02491447 3965#else
182446d0 3966static int mem_cgroup_move_charge_write(struct cgroup_subsys_state *css,
02491447
DN
3967 struct cftype *cft, u64 val)
3968{
3969 return -ENOSYS;
3970}
3971#endif
7dc74be0 3972
406eb0c9 3973#ifdef CONFIG_NUMA
2da8ca82 3974static int memcg_numa_stat_show(struct seq_file *m, void *v)
406eb0c9 3975{
25485de6
GT
3976 struct numa_stat {
3977 const char *name;
3978 unsigned int lru_mask;
3979 };
3980
3981 static const struct numa_stat stats[] = {
3982 { "total", LRU_ALL },
3983 { "file", LRU_ALL_FILE },
3984 { "anon", LRU_ALL_ANON },
3985 { "unevictable", BIT(LRU_UNEVICTABLE) },
3986 };
3987 const struct numa_stat *stat;
406eb0c9 3988 int nid;
25485de6 3989 unsigned long nr;
2da8ca82 3990 struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
406eb0c9 3991
25485de6
GT
3992 for (stat = stats; stat < stats + ARRAY_SIZE(stats); stat++) {
3993 nr = mem_cgroup_nr_lru_pages(memcg, stat->lru_mask);
3994 seq_printf(m, "%s=%lu", stat->name, nr);
3995 for_each_node_state(nid, N_MEMORY) {
3996 nr = mem_cgroup_node_nr_lru_pages(memcg, nid,
3997 stat->lru_mask);
3998 seq_printf(m, " N%d=%lu", nid, nr);
3999 }
4000 seq_putc(m, '\n');
406eb0c9 4001 }
406eb0c9 4002
071aee13
YH
4003 for (stat = stats; stat < stats + ARRAY_SIZE(stats); stat++) {
4004 struct mem_cgroup *iter;
4005
4006 nr = 0;
4007 for_each_mem_cgroup_tree(iter, memcg)
4008 nr += mem_cgroup_nr_lru_pages(iter, stat->lru_mask);
4009 seq_printf(m, "hierarchical_%s=%lu", stat->name, nr);
4010 for_each_node_state(nid, N_MEMORY) {
4011 nr = 0;
4012 for_each_mem_cgroup_tree(iter, memcg)
4013 nr += mem_cgroup_node_nr_lru_pages(
4014 iter, nid, stat->lru_mask);
4015 seq_printf(m, " N%d=%lu", nid, nr);
4016 }
4017 seq_putc(m, '\n');
406eb0c9 4018 }
406eb0c9 4019
406eb0c9
YH
4020 return 0;
4021}
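/*
 * The resulting memory.numa_stat output is one line per entry in stats[],
 * each followed by per-node counts (node numbers and counts below are
 * placeholders):
 *
 *	total=<pages> N0=<pages> N1=<pages> ...
 *	file=<pages> N0=<pages> ...
 *	anon=<pages> N0=<pages> ...
 *	unevictable=<pages> N0=<pages> ...
 *
 * followed by the same four lines prefixed with "hierarchical_", summed
 * over the whole subtree.
 */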
4022#endif /* CONFIG_NUMA */
4023
af7c4b0e
JW
4024static inline void mem_cgroup_lru_names_not_uptodate(void)
4025{
4026 BUILD_BUG_ON(ARRAY_SIZE(mem_cgroup_lru_names) != NR_LRU_LISTS);
4027}
4028
2da8ca82 4029static int memcg_stat_show(struct seq_file *m, void *v)
d2ceb9b7 4030{
2da8ca82 4031 struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
3e32cb2e 4032 unsigned long memory, memsw;
af7c4b0e
JW
4033 struct mem_cgroup *mi;
4034 unsigned int i;
406eb0c9 4035
af7c4b0e 4036 for (i = 0; i < MEM_CGROUP_STAT_NSTATS; i++) {
bff6bb83 4037 if (i == MEM_CGROUP_STAT_SWAP && !do_swap_account)
1dd3a273 4038 continue;
af7c4b0e
JW
4039 seq_printf(m, "%s %ld\n", mem_cgroup_stat_names[i],
4040 mem_cgroup_read_stat(memcg, i) * PAGE_SIZE);
1dd3a273 4041 }
7b854121 4042
af7c4b0e
JW
4043 for (i = 0; i < MEM_CGROUP_EVENTS_NSTATS; i++)
4044 seq_printf(m, "%s %lu\n", mem_cgroup_events_names[i],
4045 mem_cgroup_read_events(memcg, i));
4046
4047 for (i = 0; i < NR_LRU_LISTS; i++)
4048 seq_printf(m, "%s %lu\n", mem_cgroup_lru_names[i],
4049 mem_cgroup_nr_lru_pages(memcg, BIT(i)) * PAGE_SIZE);
4050
14067bb3 4051 /* Hierarchical information */
3e32cb2e
JW
4052 memory = memsw = PAGE_COUNTER_MAX;
4053 for (mi = memcg; mi; mi = parent_mem_cgroup(mi)) {
4054 memory = min(memory, mi->memory.limit);
4055 memsw = min(memsw, mi->memsw.limit);
fee7b548 4056 }
3e32cb2e
JW
4057 seq_printf(m, "hierarchical_memory_limit %llu\n",
4058 (u64)memory * PAGE_SIZE);
4059 if (do_swap_account)
4060 seq_printf(m, "hierarchical_memsw_limit %llu\n",
4061 (u64)memsw * PAGE_SIZE);
7f016ee8 4062
af7c4b0e
JW
4063 for (i = 0; i < MEM_CGROUP_STAT_NSTATS; i++) {
4064 long long val = 0;
4065
bff6bb83 4066 if (i == MEM_CGROUP_STAT_SWAP && !do_swap_account)
1dd3a273 4067 continue;
af7c4b0e
JW
4068 for_each_mem_cgroup_tree(mi, memcg)
4069 val += mem_cgroup_read_stat(mi, i) * PAGE_SIZE;
4070 seq_printf(m, "total_%s %lld\n", mem_cgroup_stat_names[i], val);
4071 }
4072
4073 for (i = 0; i < MEM_CGROUP_EVENTS_NSTATS; i++) {
4074 unsigned long long val = 0;
4075
4076 for_each_mem_cgroup_tree(mi, memcg)
4077 val += mem_cgroup_read_events(mi, i);
4078 seq_printf(m, "total_%s %llu\n",
4079 mem_cgroup_events_names[i], val);
4080 }
4081
4082 for (i = 0; i < NR_LRU_LISTS; i++) {
4083 unsigned long long val = 0;
4084
4085 for_each_mem_cgroup_tree(mi, memcg)
4086 val += mem_cgroup_nr_lru_pages(mi, BIT(i)) * PAGE_SIZE;
4087 seq_printf(m, "total_%s %llu\n", mem_cgroup_lru_names[i], val);
1dd3a273 4088 }
14067bb3 4089
7f016ee8 4090#ifdef CONFIG_DEBUG_VM
7f016ee8
KM
4091 {
4092 int nid, zid;
4093 struct mem_cgroup_per_zone *mz;
89abfab1 4094 struct zone_reclaim_stat *rstat;
7f016ee8
KM
4095 unsigned long recent_rotated[2] = {0, 0};
4096 unsigned long recent_scanned[2] = {0, 0};
4097
4098 for_each_online_node(nid)
4099 for (zid = 0; zid < MAX_NR_ZONES; zid++) {
e231875b 4100 mz = &memcg->nodeinfo[nid]->zoneinfo[zid];
89abfab1 4101 rstat = &mz->lruvec.reclaim_stat;
7f016ee8 4102
89abfab1
HD
4103 recent_rotated[0] += rstat->recent_rotated[0];
4104 recent_rotated[1] += rstat->recent_rotated[1];
4105 recent_scanned[0] += rstat->recent_scanned[0];
4106 recent_scanned[1] += rstat->recent_scanned[1];
7f016ee8 4107 }
78ccf5b5
JW
4108 seq_printf(m, "recent_rotated_anon %lu\n", recent_rotated[0]);
4109 seq_printf(m, "recent_rotated_file %lu\n", recent_rotated[1]);
4110 seq_printf(m, "recent_scanned_anon %lu\n", recent_scanned[0]);
4111 seq_printf(m, "recent_scanned_file %lu\n", recent_scanned[1]);
7f016ee8
KM
4112 }
4113#endif
4114
d2ceb9b7
KH
4115 return 0;
4116}
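/*
 * Summary of the memory.stat layout produced above: the local counters and
 * events, the per-LRU page counts, hierarchical_memory_limit (plus
 * hierarchical_memsw_limit when swap accounting is enabled), the
 * subtree-wide "total_*" versions of the same counters, and, under
 * CONFIG_DEBUG_VM, the recent_rotated_*/recent_scanned_* reclaim statistics.
 */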
4117
182446d0
TH
4118static u64 mem_cgroup_swappiness_read(struct cgroup_subsys_state *css,
4119 struct cftype *cft)
a7885eb8 4120{
182446d0 4121 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
a7885eb8 4122
1f4c025b 4123 return mem_cgroup_swappiness(memcg);
a7885eb8
KM
4124}
4125
182446d0
TH
4126static int mem_cgroup_swappiness_write(struct cgroup_subsys_state *css,
4127 struct cftype *cft, u64 val)
a7885eb8 4128{
182446d0 4129 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
a7885eb8 4130
3dae7fec 4131 if (val > 100)
a7885eb8
KM
4132 return -EINVAL;
4133
14208b0e 4134 if (css->parent)
3dae7fec
JW
4135 memcg->swappiness = val;
4136 else
4137 vm_swappiness = val;
068b38c1 4138
a7885eb8
KM
4139 return 0;
4140}
4141
2e72b634
KS
4142static void __mem_cgroup_threshold(struct mem_cgroup *memcg, bool swap)
4143{
4144 struct mem_cgroup_threshold_ary *t;
3e32cb2e 4145 unsigned long usage;
2e72b634
KS
4146 int i;
4147
4148 rcu_read_lock();
4149 if (!swap)
2c488db2 4150 t = rcu_dereference(memcg->thresholds.primary);
2e72b634 4151 else
2c488db2 4152 t = rcu_dereference(memcg->memsw_thresholds.primary);
2e72b634
KS
4153
4154 if (!t)
4155 goto unlock;
4156
ce00a967 4157 usage = mem_cgroup_usage(memcg, swap);
2e72b634
KS
4158
4159 /*
748dad36 4160 * current_threshold points to threshold just below or equal to usage.
2e72b634
KS
4161 * If that is not true, a threshold was crossed after the last
4162 * call of __mem_cgroup_threshold().
4163 */
5407a562 4164 i = t->current_threshold;
2e72b634
KS
4165
4166 /*
4167 * Iterate backward over array of thresholds starting from
4168 * current_threshold and check if a threshold is crossed.
4169 * If none of the thresholds below usage is crossed, we read
4170 * only one element of the array here.
4171 */
4172 for (; i >= 0 && unlikely(t->entries[i].threshold > usage); i--)
4173 eventfd_signal(t->entries[i].eventfd, 1);
4174
4175 /* i = current_threshold + 1 */
4176 i++;
4177
4178 /*
4179 * Iterate forward over array of thresholds starting from
4180 * current_threshold+1 and check if a threshold is crossed.
4181 * If none of the thresholds above usage is crossed, we read
4182 * only one element of the array here.
4183 */
4184 for (; i < t->size && unlikely(t->entries[i].threshold <= usage); i++)
4185 eventfd_signal(t->entries[i].eventfd, 1);
4186
4187 /* Update current_threshold */
5407a562 4188 t->current_threshold = i - 1;
2e72b634
KS
4189unlock:
4190 rcu_read_unlock();
4191}
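/*
 * Standalone user-space model (a simplification; no RCU, eventfds or
 * per-memcg state) of the scan above: thresholds are kept sorted,
 * current_threshold indexes the largest threshold <= usage, and every
 * threshold between the old and the new usage is reported exactly once
 * per crossing, in either direction.
 */
#include <stdio.h>

struct model {
	unsigned long entries[8];	/* sorted ascending */
	int size;
	int current_threshold;		/* largest entry <= usage, or -1 */
};

static void model_threshold(struct model *t, unsigned long usage)
{
	int i = t->current_threshold;

	/* usage dropped: report thresholds now above it, highest first */
	for (; i >= 0 && t->entries[i] > usage; i--)
		printf("crossed down through %lu\n", t->entries[i]);
	i++;
	/* usage grew: report thresholds now at or below it, lowest first */
	for (; i < t->size && t->entries[i] <= usage; i++)
		printf("crossed up through %lu\n", t->entries[i]);
	t->current_threshold = i - 1;
}

int main(void)
{
	struct model t = { { 10, 20, 30, 40 }, 4, -1 };

	model_threshold(&t, 25);	/* reports 10 and 20 */
	model_threshold(&t, 15);	/* reports 20 on the way back down */
	return 0;
}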
4192
4193static void mem_cgroup_threshold(struct mem_cgroup *memcg)
4194{
ad4ca5f4
KS
4195 while (memcg) {
4196 __mem_cgroup_threshold(memcg, false);
4197 if (do_swap_account)
4198 __mem_cgroup_threshold(memcg, true);
4199
4200 memcg = parent_mem_cgroup(memcg);
4201 }
2e72b634
KS
4202}
4203
4204static int compare_thresholds(const void *a, const void *b)
4205{
4206 const struct mem_cgroup_threshold *_a = a;
4207 const struct mem_cgroup_threshold *_b = b;
4208
2bff24a3
GT
4209 if (_a->threshold > _b->threshold)
4210 return 1;
4211
4212 if (_a->threshold < _b->threshold)
4213 return -1;
4214
4215 return 0;
2e72b634
KS
4216}
4217
c0ff4b85 4218static int mem_cgroup_oom_notify_cb(struct mem_cgroup *memcg)
9490ff27
KH
4219{
4220 struct mem_cgroup_eventfd_list *ev;
4221
2bcf2e92
MH
4222 spin_lock(&memcg_oom_lock);
4223
c0ff4b85 4224 list_for_each_entry(ev, &memcg->oom_notify, list)
9490ff27 4225 eventfd_signal(ev->eventfd, 1);
2bcf2e92
MH
4226
4227 spin_unlock(&memcg_oom_lock);
9490ff27
KH
4228 return 0;
4229}
4230
c0ff4b85 4231static void mem_cgroup_oom_notify(struct mem_cgroup *memcg)
9490ff27 4232{
7d74b06f
KH
4233 struct mem_cgroup *iter;
4234
c0ff4b85 4235 for_each_mem_cgroup_tree(iter, memcg)
7d74b06f 4236 mem_cgroup_oom_notify_cb(iter);
9490ff27
KH
4237}
4238
59b6f873 4239static int __mem_cgroup_usage_register_event(struct mem_cgroup *memcg,
347c4a87 4240 struct eventfd_ctx *eventfd, const char *args, enum res_type type)
2e72b634 4241{
2c488db2
KS
4242 struct mem_cgroup_thresholds *thresholds;
4243 struct mem_cgroup_threshold_ary *new;
3e32cb2e
JW
4244 unsigned long threshold;
4245 unsigned long usage;
2c488db2 4246 int i, size, ret;
2e72b634 4247
3e32cb2e 4248 ret = page_counter_memparse(args, &threshold);
2e72b634
KS
4249 if (ret)
4250 return ret;
4251
4252 mutex_lock(&memcg->thresholds_lock);
2c488db2 4253
05b84301 4254 if (type == _MEM) {
2c488db2 4255 thresholds = &memcg->thresholds;
ce00a967 4256 usage = mem_cgroup_usage(memcg, false);
05b84301 4257 } else if (type == _MEMSWAP) {
2c488db2 4258 thresholds = &memcg->memsw_thresholds;
ce00a967 4259 usage = mem_cgroup_usage(memcg, true);
05b84301 4260 } else
2e72b634
KS
4261 BUG();
4262
2e72b634 4263 /* Check if a threshold crossed before adding a new one */
2c488db2 4264 if (thresholds->primary)
2e72b634
KS
4265 __mem_cgroup_threshold(memcg, type == _MEMSWAP);
4266
2c488db2 4267 size = thresholds->primary ? thresholds->primary->size + 1 : 1;
2e72b634
KS
4268
4269 /* Allocate memory for new array of thresholds */
2c488db2 4270 new = kmalloc(sizeof(*new) + size * sizeof(struct mem_cgroup_threshold),
2e72b634 4271 GFP_KERNEL);
2c488db2 4272 if (!new) {
2e72b634
KS
4273 ret = -ENOMEM;
4274 goto unlock;
4275 }
2c488db2 4276 new->size = size;
2e72b634
KS
4277
4278 /* Copy thresholds (if any) to new array */
2c488db2
KS
4279 if (thresholds->primary) {
4280 memcpy(new->entries, thresholds->primary->entries, (size - 1) *
2e72b634 4281 sizeof(struct mem_cgroup_threshold));
2c488db2
KS
4282 }
4283
2e72b634 4284 /* Add new threshold */
2c488db2
KS
4285 new->entries[size - 1].eventfd = eventfd;
4286 new->entries[size - 1].threshold = threshold;
2e72b634
KS
4287
4288 /* Sort thresholds. Registering of new threshold isn't time-critical */
2c488db2 4289 sort(new->entries, size, sizeof(struct mem_cgroup_threshold),
2e72b634
KS
4290 compare_thresholds, NULL);
4291
4292 /* Find current threshold */
2c488db2 4293 new->current_threshold = -1;
2e72b634 4294 for (i = 0; i < size; i++) {
748dad36 4295 if (new->entries[i].threshold <= usage) {
2e72b634 4296 /*
2c488db2
KS
4297 * new->current_threshold will not be used until
4298 * rcu_assign_pointer(), so it's safe to increment
2e72b634
KS
4299 * it here.
4300 */
2c488db2 4301 ++new->current_threshold;
748dad36
SZ
4302 } else
4303 break;
2e72b634
KS
4304 }
4305
2c488db2
KS
4306 /* Free old spare buffer and save old primary buffer as spare */
4307 kfree(thresholds->spare);
4308 thresholds->spare = thresholds->primary;
4309
4310 rcu_assign_pointer(thresholds->primary, new);
2e72b634 4311
907860ed 4312 /* To be sure that nobody uses thresholds */
2e72b634
KS
4313 synchronize_rcu();
4314
2e72b634
KS
4315unlock:
4316 mutex_unlock(&memcg->thresholds_lock);
4317
4318 return ret;
4319}
4320
59b6f873 4321static int mem_cgroup_usage_register_event(struct mem_cgroup *memcg,
347c4a87
TH
4322 struct eventfd_ctx *eventfd, const char *args)
4323{
59b6f873 4324 return __mem_cgroup_usage_register_event(memcg, eventfd, args, _MEM);
347c4a87
TH
4325}
4326
59b6f873 4327static int memsw_cgroup_usage_register_event(struct mem_cgroup *memcg,
347c4a87
TH
4328 struct eventfd_ctx *eventfd, const char *args)
4329{
59b6f873 4330 return __mem_cgroup_usage_register_event(memcg, eventfd, args, _MEMSWAP);
347c4a87
TH
4331}
4332
59b6f873 4333static void __mem_cgroup_usage_unregister_event(struct mem_cgroup *memcg,
347c4a87 4334 struct eventfd_ctx *eventfd, enum res_type type)
2e72b634 4335{
2c488db2
KS
4336 struct mem_cgroup_thresholds *thresholds;
4337 struct mem_cgroup_threshold_ary *new;
3e32cb2e 4338 unsigned long usage;
2c488db2 4339 int i, j, size;
2e72b634
KS
4340
4341 mutex_lock(&memcg->thresholds_lock);
05b84301
JW
4342
4343 if (type == _MEM) {
2c488db2 4344 thresholds = &memcg->thresholds;
ce00a967 4345 usage = mem_cgroup_usage(memcg, false);
05b84301 4346 } else if (type == _MEMSWAP) {
2c488db2 4347 thresholds = &memcg->memsw_thresholds;
ce00a967 4348 usage = mem_cgroup_usage(memcg, true);
05b84301 4349 } else
2e72b634
KS
4350 BUG();
4351
371528ca
AV
4352 if (!thresholds->primary)
4353 goto unlock;
4354
2e72b634
KS
4355 /* Check if a threshold crossed before removing */
4356 __mem_cgroup_threshold(memcg, type == _MEMSWAP);
4357
4358 /* Calculate new number of threshold */
2c488db2
KS
4359 size = 0;
4360 for (i = 0; i < thresholds->primary->size; i++) {
4361 if (thresholds->primary->entries[i].eventfd != eventfd)
2e72b634
KS
4362 size++;
4363 }
4364
2c488db2 4365 new = thresholds->spare;
907860ed 4366
2e72b634
KS
4367 /* Set thresholds array to NULL if we don't have thresholds */
4368 if (!size) {
2c488db2
KS
4369 kfree(new);
4370 new = NULL;
907860ed 4371 goto swap_buffers;
2e72b634
KS
4372 }
4373
2c488db2 4374 new->size = size;
2e72b634
KS
4375
4376 /* Copy thresholds and find current threshold */
2c488db2
KS
4377 new->current_threshold = -1;
4378 for (i = 0, j = 0; i < thresholds->primary->size; i++) {
4379 if (thresholds->primary->entries[i].eventfd == eventfd)
2e72b634
KS
4380 continue;
4381
2c488db2 4382 new->entries[j] = thresholds->primary->entries[i];
748dad36 4383 if (new->entries[j].threshold <= usage) {
2e72b634 4384 /*
2c488db2 4385 * new->current_threshold will not be used
2e72b634
KS
4386 * until rcu_assign_pointer(), so it's safe to increment
4387 * it here.
4388 */
2c488db2 4389 ++new->current_threshold;
2e72b634
KS
4390 }
4391 j++;
4392 }
4393
907860ed 4394swap_buffers:
2c488db2
KS
4395 /* Swap primary and spare array */
4396 thresholds->spare = thresholds->primary;
8c757763
SZ
4397 /* If all events are unregistered, free the spare array */
4398 if (!new) {
4399 kfree(thresholds->spare);
4400 thresholds->spare = NULL;
4401 }
4402
2c488db2 4403 rcu_assign_pointer(thresholds->primary, new);
2e72b634 4404
907860ed 4405 /* To be sure that nobody uses thresholds */
2e72b634 4406 synchronize_rcu();
371528ca 4407unlock:
2e72b634 4408 mutex_unlock(&memcg->thresholds_lock);
2e72b634 4409}
c1e862c1 4410
59b6f873 4411static void mem_cgroup_usage_unregister_event(struct mem_cgroup *memcg,
347c4a87
TH
4412 struct eventfd_ctx *eventfd)
4413{
59b6f873 4414 return __mem_cgroup_usage_unregister_event(memcg, eventfd, _MEM);
347c4a87
TH
4415}
4416
59b6f873 4417static void memsw_cgroup_usage_unregister_event(struct mem_cgroup *memcg,
347c4a87
TH
4418 struct eventfd_ctx *eventfd)
4419{
59b6f873 4420 return __mem_cgroup_usage_unregister_event(memcg, eventfd, _MEMSWAP);
347c4a87
TH
4421}
4422
59b6f873 4423static int mem_cgroup_oom_register_event(struct mem_cgroup *memcg,
347c4a87 4424 struct eventfd_ctx *eventfd, const char *args)
9490ff27 4425{
9490ff27 4426 struct mem_cgroup_eventfd_list *event;
9490ff27 4427
9490ff27
KH
4428 event = kmalloc(sizeof(*event), GFP_KERNEL);
4429 if (!event)
4430 return -ENOMEM;
4431
1af8efe9 4432 spin_lock(&memcg_oom_lock);
9490ff27
KH
4433
4434 event->eventfd = eventfd;
4435 list_add(&event->list, &memcg->oom_notify);
4436
4437 /* already in OOM ? */
79dfdacc 4438 if (atomic_read(&memcg->under_oom))
9490ff27 4439 eventfd_signal(eventfd, 1);
1af8efe9 4440 spin_unlock(&memcg_oom_lock);
9490ff27
KH
4441
4442 return 0;
4443}
4444
59b6f873 4445static void mem_cgroup_oom_unregister_event(struct mem_cgroup *memcg,
347c4a87 4446 struct eventfd_ctx *eventfd)
9490ff27 4447{
9490ff27 4448 struct mem_cgroup_eventfd_list *ev, *tmp;
9490ff27 4449
1af8efe9 4450 spin_lock(&memcg_oom_lock);
9490ff27 4451
c0ff4b85 4452 list_for_each_entry_safe(ev, tmp, &memcg->oom_notify, list) {
9490ff27
KH
4453 if (ev->eventfd == eventfd) {
4454 list_del(&ev->list);
4455 kfree(ev);
4456 }
4457 }
4458
1af8efe9 4459 spin_unlock(&memcg_oom_lock);
9490ff27
KH
4460}
4461
2da8ca82 4462static int mem_cgroup_oom_control_read(struct seq_file *sf, void *v)
3c11ecf4 4463{
2da8ca82 4464 struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(sf));
3c11ecf4 4465
791badbd
TH
4466 seq_printf(sf, "oom_kill_disable %d\n", memcg->oom_kill_disable);
4467 seq_printf(sf, "under_oom %d\n", (bool)atomic_read(&memcg->under_oom));
3c11ecf4
KH
4468 return 0;
4469}
4470
182446d0 4471static int mem_cgroup_oom_control_write(struct cgroup_subsys_state *css,
3c11ecf4
KH
4472 struct cftype *cft, u64 val)
4473{
182446d0 4474 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
3c11ecf4
KH
4475
4476 /* cannot set to root cgroup and only 0 and 1 are allowed */
14208b0e 4477 if (!css->parent || !((val == 0) || (val == 1)))
3c11ecf4
KH
4478 return -EINVAL;
4479
c0ff4b85 4480 memcg->oom_kill_disable = val;
4d845ebf 4481 if (!val)
c0ff4b85 4482 memcg_oom_recover(memcg);
3dae7fec 4483
3c11ecf4
KH
4484 return 0;
4485}
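/*
 * Illustrative usage (user space, path is an assumption): reading
 * memory.oom_control returns the two fields printed above, e.g.
 *
 *	oom_kill_disable 0
 *	under_oom 0
 *
 * and writing "1" to it disables the OOM killer for a non-root group, so
 * tasks stall in the cgroup's OOM waitqueue instead of being killed:
 *
 *	int fd = open("/sys/fs/cgroup/memory/mygroup/memory.oom_control",
 *		      O_WRONLY);
 *	write(fd, "1", 1);
 *	close(fd);
 */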
4486
c255a458 4487#ifdef CONFIG_MEMCG_KMEM
cbe128e3 4488static int memcg_init_kmem(struct mem_cgroup *memcg, struct cgroup_subsys *ss)
e5671dfa 4489{
55007d84
GC
4490 int ret;
4491
2633d7a0 4492 memcg->kmemcg_id = -1;
55007d84
GC
4493 ret = memcg_propagate_kmem(memcg);
4494 if (ret)
4495 return ret;
2633d7a0 4496
1d62e436 4497 return mem_cgroup_sockets_init(memcg, ss);
573b400d 4498}
e5671dfa 4499
10d5ebf4 4500static void memcg_destroy_kmem(struct mem_cgroup *memcg)
d1a4c0b3 4501{
1d62e436 4502 mem_cgroup_sockets_destroy(memcg);
10d5ebf4 4503}
e5671dfa 4504#else
cbe128e3 4505static int memcg_init_kmem(struct mem_cgroup *memcg, struct cgroup_subsys *ss)
e5671dfa
GC
4506{
4507 return 0;
4508}
d1a4c0b3 4509
10d5ebf4
LZ
4510static void memcg_destroy_kmem(struct mem_cgroup *memcg)
4511{
4512}
e5671dfa
GC
4513#endif
4514
3bc942f3
TH
4515/*
4516 * DO NOT USE IN NEW FILES.
4517 *
4518 * "cgroup.event_control" implementation.
4519 *
4520 * This is way over-engineered. It tries to support fully configurable
4521 * events for each user. Such a level of flexibility is completely
4522 * unnecessary, especially in light of the planned unified hierarchy.
4523 *
4524 * Please deprecate this and replace with something simpler if at all
4525 * possible.
4526 */
4527
79bd9814
TH
4528/*
4529 * Unregister event and free resources.
4530 *
4531 * Gets called from workqueue.
4532 */
3bc942f3 4533static void memcg_event_remove(struct work_struct *work)
79bd9814 4534{
3bc942f3
TH
4535 struct mem_cgroup_event *event =
4536 container_of(work, struct mem_cgroup_event, remove);
59b6f873 4537 struct mem_cgroup *memcg = event->memcg;
79bd9814
TH
4538
4539 remove_wait_queue(event->wqh, &event->wait);
4540
59b6f873 4541 event->unregister_event(memcg, event->eventfd);
79bd9814
TH
4542
4543 /* Notify userspace the event is going away. */
4544 eventfd_signal(event->eventfd, 1);
4545
4546 eventfd_ctx_put(event->eventfd);
4547 kfree(event);
59b6f873 4548 css_put(&memcg->css);
79bd9814
TH
4549}
4550
4551/*
4552 * Gets called on POLLHUP on eventfd when user closes it.
4553 *
4554 * Called with wqh->lock held and interrupts disabled.
4555 */
3bc942f3
TH
4556static int memcg_event_wake(wait_queue_t *wait, unsigned mode,
4557 int sync, void *key)
79bd9814 4558{
3bc942f3
TH
4559 struct mem_cgroup_event *event =
4560 container_of(wait, struct mem_cgroup_event, wait);
59b6f873 4561 struct mem_cgroup *memcg = event->memcg;
79bd9814
TH
4562 unsigned long flags = (unsigned long)key;
4563
4564 if (flags & POLLHUP) {
4565 /*
4566 * If the event has been detached at cgroup removal, we
4567 * can simply return knowing the other side will cleanup
4568 * for us.
4569 *
4570 * We can't race against event freeing since the other
4571 * side will require wqh->lock via remove_wait_queue(),
4572 * which we hold.
4573 */
fba94807 4574 spin_lock(&memcg->event_list_lock);
79bd9814
TH
4575 if (!list_empty(&event->list)) {
4576 list_del_init(&event->list);
4577 /*
4578 * We are in atomic context, but cgroup_event_remove()
4579 * may sleep, so we have to call it in workqueue.
4580 */
4581 schedule_work(&event->remove);
4582 }
fba94807 4583 spin_unlock(&memcg->event_list_lock);
79bd9814
TH
4584 }
4585
4586 return 0;
4587}
4588
3bc942f3 4589static void memcg_event_ptable_queue_proc(struct file *file,
79bd9814
TH
4590 wait_queue_head_t *wqh, poll_table *pt)
4591{
3bc942f3
TH
4592 struct mem_cgroup_event *event =
4593 container_of(pt, struct mem_cgroup_event, pt);
79bd9814
TH
4594
4595 event->wqh = wqh;
4596 add_wait_queue(wqh, &event->wait);
4597}
4598
4599/*
3bc942f3
TH
4600 * DO NOT USE IN NEW FILES.
4601 *
79bd9814
TH
4602 * Parse input and register a new cgroup event handler.
4603 *
4604 * Input must be in the format '<event_fd> <control_fd> <args>'.
4605 * Interpretation of args is defined by the control file implementation.
4606 */
451af504
TH
4607static ssize_t memcg_write_event_control(struct kernfs_open_file *of,
4608 char *buf, size_t nbytes, loff_t off)
79bd9814 4609{
451af504 4610 struct cgroup_subsys_state *css = of_css(of);
fba94807 4611 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
3bc942f3 4612 struct mem_cgroup_event *event;
79bd9814
TH
4613 struct cgroup_subsys_state *cfile_css;
4614 unsigned int efd, cfd;
4615 struct fd efile;
4616 struct fd cfile;
fba94807 4617 const char *name;
79bd9814
TH
4618 char *endp;
4619 int ret;
4620
451af504
TH
4621 buf = strstrip(buf);
4622
4623 efd = simple_strtoul(buf, &endp, 10);
79bd9814
TH
4624 if (*endp != ' ')
4625 return -EINVAL;
451af504 4626 buf = endp + 1;
79bd9814 4627
451af504 4628 cfd = simple_strtoul(buf, &endp, 10);
79bd9814
TH
4629 if ((*endp != ' ') && (*endp != '\0'))
4630 return -EINVAL;
451af504 4631 buf = endp + 1;
79bd9814
TH
4632
4633 event = kzalloc(sizeof(*event), GFP_KERNEL);
4634 if (!event)
4635 return -ENOMEM;
4636
59b6f873 4637 event->memcg = memcg;
79bd9814 4638 INIT_LIST_HEAD(&event->list);
3bc942f3
TH
4639 init_poll_funcptr(&event->pt, memcg_event_ptable_queue_proc);
4640 init_waitqueue_func_entry(&event->wait, memcg_event_wake);
4641 INIT_WORK(&event->remove, memcg_event_remove);
79bd9814
TH
4642
4643 efile = fdget(efd);
4644 if (!efile.file) {
4645 ret = -EBADF;
4646 goto out_kfree;
4647 }
4648
4649 event->eventfd = eventfd_ctx_fileget(efile.file);
4650 if (IS_ERR(event->eventfd)) {
4651 ret = PTR_ERR(event->eventfd);
4652 goto out_put_efile;
4653 }
4654
4655 cfile = fdget(cfd);
4656 if (!cfile.file) {
4657 ret = -EBADF;
4658 goto out_put_eventfd;
4659 }
4660
4661 /* the process needs read permission on the control file */
4662 /* AV: shouldn't we check that it's been opened for read instead? */
4663 ret = inode_permission(file_inode(cfile.file), MAY_READ);
4664 if (ret < 0)
4665 goto out_put_cfile;
4666
fba94807
TH
4667 /*
4668 * Determine the event callbacks and set them in @event. This used
4669 * to be done via struct cftype but cgroup core no longer knows
4670 * about these events. The following is crude but the whole thing
4671 * is for compatibility anyway.
3bc942f3
TH
4672 *
4673 * DO NOT ADD NEW FILES.
fba94807
TH
4674 */
4675 name = cfile.file->f_dentry->d_name.name;
4676
4677 if (!strcmp(name, "memory.usage_in_bytes")) {
4678 event->register_event = mem_cgroup_usage_register_event;
4679 event->unregister_event = mem_cgroup_usage_unregister_event;
4680 } else if (!strcmp(name, "memory.oom_control")) {
4681 event->register_event = mem_cgroup_oom_register_event;
4682 event->unregister_event = mem_cgroup_oom_unregister_event;
4683 } else if (!strcmp(name, "memory.pressure_level")) {
4684 event->register_event = vmpressure_register_event;
4685 event->unregister_event = vmpressure_unregister_event;
4686 } else if (!strcmp(name, "memory.memsw.usage_in_bytes")) {
347c4a87
TH
4687 event->register_event = memsw_cgroup_usage_register_event;
4688 event->unregister_event = memsw_cgroup_usage_unregister_event;
fba94807
TH
4689 } else {
4690 ret = -EINVAL;
4691 goto out_put_cfile;
4692 }
4693
79bd9814 4694 /*
b5557c4c
TH
4695 * Verify that @cfile belongs to @css. Also, remaining events are
4696 * automatically removed on cgroup destruction but the removal is
4697 * asynchronous, so take an extra ref on @css.
79bd9814 4698 */
ec903c0c
TH
4699 cfile_css = css_tryget_online_from_dir(cfile.file->f_dentry->d_parent,
4700 &memory_cgrp_subsys);
79bd9814 4701 ret = -EINVAL;
5a17f543 4702 if (IS_ERR(cfile_css))
79bd9814 4703 goto out_put_cfile;
5a17f543
TH
4704 if (cfile_css != css) {
4705 css_put(cfile_css);
79bd9814 4706 goto out_put_cfile;
5a17f543 4707 }
79bd9814 4708
451af504 4709 ret = event->register_event(memcg, event->eventfd, buf);
79bd9814
TH
4710 if (ret)
4711 goto out_put_css;
4712
4713 efile.file->f_op->poll(efile.file, &event->pt);
4714
fba94807
TH
4715 spin_lock(&memcg->event_list_lock);
4716 list_add(&event->list, &memcg->event_list);
4717 spin_unlock(&memcg->event_list_lock);
79bd9814
TH
4718
4719 fdput(cfile);
4720 fdput(efile);
4721
451af504 4722 return nbytes;
79bd9814
TH
4723
4724out_put_css:
b5557c4c 4725 css_put(css);
79bd9814
TH
4726out_put_cfile:
4727 fdput(cfile);
4728out_put_eventfd:
4729 eventfd_ctx_put(event->eventfd);
4730out_put_efile:
4731 fdput(efile);
4732out_kfree:
4733 kfree(event);
4734
4735 return ret;
4736}
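/*
 * Illustrative user-space sketch (not part of this file; mount point and
 * group name are assumptions) of the '<event_fd> <control_fd> <args>'
 * registration format parsed above, here arming a usage threshold on
 * memory.usage_in_bytes and waiting for it on an eventfd.
 */
#include <stdio.h>
#include <string.h>
#include <stdint.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/eventfd.h>

int main(void)
{
	const char *dir = "/sys/fs/cgroup/memory/mygroup";
	char path[256], cmd[64];
	int cfd, ecfd, efd;
	uint64_t count;

	snprintf(path, sizeof(path), "%s/memory.usage_in_bytes", dir);
	cfd = open(path, O_RDONLY);
	snprintf(path, sizeof(path), "%s/cgroup.event_control", dir);
	ecfd = open(path, O_WRONLY);
	efd = eventfd(0, 0);
	if (cfd < 0 || ecfd < 0 || efd < 0) {
		perror("setup");
		return 1;
	}

	/* args for usage events is a threshold in bytes; 64M here */
	snprintf(cmd, sizeof(cmd), "%d %d %llu", efd, cfd, 64ULL << 20);
	if (write(ecfd, cmd, strlen(cmd)) < 0) {
		perror("cgroup.event_control");
		return 1;
	}

	/* blocks until usage crosses the threshold in either direction */
	if (read(efd, &count, sizeof(count)) == sizeof(count))
		printf("threshold crossed %llu time(s)\n",
		       (unsigned long long)count);
	return 0;
}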
4737
8cdea7c0
BS
4738static struct cftype mem_cgroup_files[] = {
4739 {
0eea1030 4740 .name = "usage_in_bytes",
8c7c6e34 4741 .private = MEMFILE_PRIVATE(_MEM, RES_USAGE),
791badbd 4742 .read_u64 = mem_cgroup_read_u64,
8cdea7c0 4743 },
c84872e1
PE
4744 {
4745 .name = "max_usage_in_bytes",
8c7c6e34 4746 .private = MEMFILE_PRIVATE(_MEM, RES_MAX_USAGE),
6770c64e 4747 .write = mem_cgroup_reset,
791badbd 4748 .read_u64 = mem_cgroup_read_u64,
c84872e1 4749 },
8cdea7c0 4750 {
0eea1030 4751 .name = "limit_in_bytes",
8c7c6e34 4752 .private = MEMFILE_PRIVATE(_MEM, RES_LIMIT),
451af504 4753 .write = mem_cgroup_write,
791badbd 4754 .read_u64 = mem_cgroup_read_u64,
8cdea7c0 4755 },
296c81d8
BS
4756 {
4757 .name = "soft_limit_in_bytes",
4758 .private = MEMFILE_PRIVATE(_MEM, RES_SOFT_LIMIT),
451af504 4759 .write = mem_cgroup_write,
791badbd 4760 .read_u64 = mem_cgroup_read_u64,
296c81d8 4761 },
8cdea7c0
BS
4762 {
4763 .name = "failcnt",
8c7c6e34 4764 .private = MEMFILE_PRIVATE(_MEM, RES_FAILCNT),
6770c64e 4765 .write = mem_cgroup_reset,
791badbd 4766 .read_u64 = mem_cgroup_read_u64,
8cdea7c0 4767 },
d2ceb9b7
KH
4768 {
4769 .name = "stat",
2da8ca82 4770 .seq_show = memcg_stat_show,
d2ceb9b7 4771 },
c1e862c1
KH
4772 {
4773 .name = "force_empty",
6770c64e 4774 .write = mem_cgroup_force_empty_write,
c1e862c1 4775 },
18f59ea7
BS
4776 {
4777 .name = "use_hierarchy",
4778 .write_u64 = mem_cgroup_hierarchy_write,
4779 .read_u64 = mem_cgroup_hierarchy_read,
4780 },
79bd9814 4781 {
3bc942f3 4782 .name = "cgroup.event_control", /* XXX: for compat */
451af504 4783 .write = memcg_write_event_control,
79bd9814
TH
4784 .flags = CFTYPE_NO_PREFIX,
4785 .mode = S_IWUGO,
4786 },
a7885eb8
KM
4787 {
4788 .name = "swappiness",
4789 .read_u64 = mem_cgroup_swappiness_read,
4790 .write_u64 = mem_cgroup_swappiness_write,
4791 },
7dc74be0
DN
4792 {
4793 .name = "move_charge_at_immigrate",
4794 .read_u64 = mem_cgroup_move_charge_read,
4795 .write_u64 = mem_cgroup_move_charge_write,
4796 },
9490ff27
KH
4797 {
4798 .name = "oom_control",
2da8ca82 4799 .seq_show = mem_cgroup_oom_control_read,
3c11ecf4 4800 .write_u64 = mem_cgroup_oom_control_write,
9490ff27
KH
4801 .private = MEMFILE_PRIVATE(_OOM_TYPE, OOM_CONTROL),
4802 },
70ddf637
AV
4803 {
4804 .name = "pressure_level",
70ddf637 4805 },
406eb0c9
YH
4806#ifdef CONFIG_NUMA
4807 {
4808 .name = "numa_stat",
2da8ca82 4809 .seq_show = memcg_numa_stat_show,
406eb0c9
YH
4810 },
4811#endif
510fc4e1
GC
4812#ifdef CONFIG_MEMCG_KMEM
4813 {
4814 .name = "kmem.limit_in_bytes",
4815 .private = MEMFILE_PRIVATE(_KMEM, RES_LIMIT),
451af504 4816 .write = mem_cgroup_write,
791badbd 4817 .read_u64 = mem_cgroup_read_u64,
510fc4e1
GC
4818 },
4819 {
4820 .name = "kmem.usage_in_bytes",
4821 .private = MEMFILE_PRIVATE(_KMEM, RES_USAGE),
791badbd 4822 .read_u64 = mem_cgroup_read_u64,
510fc4e1
GC
4823 },
4824 {
4825 .name = "kmem.failcnt",
4826 .private = MEMFILE_PRIVATE(_KMEM, RES_FAILCNT),
6770c64e 4827 .write = mem_cgroup_reset,
791badbd 4828 .read_u64 = mem_cgroup_read_u64,
510fc4e1
GC
4829 },
4830 {
4831 .name = "kmem.max_usage_in_bytes",
4832 .private = MEMFILE_PRIVATE(_KMEM, RES_MAX_USAGE),
6770c64e 4833 .write = mem_cgroup_reset,
791badbd 4834 .read_u64 = mem_cgroup_read_u64,
510fc4e1 4835 },
749c5415
GC
4836#ifdef CONFIG_SLABINFO
4837 {
4838 .name = "kmem.slabinfo",
2da8ca82 4839 .seq_show = mem_cgroup_slabinfo_read,
749c5415
GC
4840 },
4841#endif
8c7c6e34 4842#endif
6bc10349 4843 { }, /* terminate */
af36f906 4844};
8c7c6e34 4845
2d11085e
MH
4846#ifdef CONFIG_MEMCG_SWAP
4847static struct cftype memsw_cgroup_files[] = {
4848 {
4849 .name = "memsw.usage_in_bytes",
4850 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_USAGE),
791badbd 4851 .read_u64 = mem_cgroup_read_u64,
2d11085e
MH
4852 },
4853 {
4854 .name = "memsw.max_usage_in_bytes",
4855 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_MAX_USAGE),
6770c64e 4856 .write = mem_cgroup_reset,
791badbd 4857 .read_u64 = mem_cgroup_read_u64,
2d11085e
MH
4858 },
4859 {
4860 .name = "memsw.limit_in_bytes",
4861 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_LIMIT),
451af504 4862 .write = mem_cgroup_write,
791badbd 4863 .read_u64 = mem_cgroup_read_u64,
2d11085e
MH
4864 },
4865 {
4866 .name = "memsw.failcnt",
4867 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_FAILCNT),
6770c64e 4868 .write = mem_cgroup_reset,
791badbd 4869 .read_u64 = mem_cgroup_read_u64,
2d11085e
MH
4870 },
4871 { }, /* terminate */
4872};
4873#endif
c0ff4b85 4874static int alloc_mem_cgroup_per_zone_info(struct mem_cgroup *memcg, int node)
6d12e2d8
KH
4875{
4876 struct mem_cgroup_per_node *pn;
1ecaab2b 4877 struct mem_cgroup_per_zone *mz;
41e3355d 4878 int zone, tmp = node;
1ecaab2b
KH
4879 /*
4880 * This routine is called for each possible node.
4881 * But it is a BUG to call kmalloc() for an offline node.
4882 *
4883 * TODO: this routine can waste a lot of memory for nodes which will
4884 * never be onlined. It would be better to use a memory hotplug
4885 * callback function.
4886 */
41e3355d
KH
4887 if (!node_state(node, N_NORMAL_MEMORY))
4888 tmp = -1;
17295c88 4889 pn = kzalloc_node(sizeof(*pn), GFP_KERNEL, tmp);
6d12e2d8
KH
4890 if (!pn)
4891 return 1;
1ecaab2b 4892
1ecaab2b
KH
4893 for (zone = 0; zone < MAX_NR_ZONES; zone++) {
4894 mz = &pn->zoneinfo[zone];
bea8c150 4895 lruvec_init(&mz->lruvec);
bb4cc1a8
AM
4896 mz->usage_in_excess = 0;
4897 mz->on_tree = false;
d79154bb 4898 mz->memcg = memcg;
1ecaab2b 4899 }
54f72fe0 4900 memcg->nodeinfo[node] = pn;
6d12e2d8
KH
4901 return 0;
4902}
4903
c0ff4b85 4904static void free_mem_cgroup_per_zone_info(struct mem_cgroup *memcg, int node)
1ecaab2b 4905{
54f72fe0 4906 kfree(memcg->nodeinfo[node]);
1ecaab2b
KH
4907}
4908
33327948
KH
4909static struct mem_cgroup *mem_cgroup_alloc(void)
4910{
d79154bb 4911 struct mem_cgroup *memcg;
8ff69e2c 4912 size_t size;
33327948 4913
8ff69e2c
VD
4914 size = sizeof(struct mem_cgroup);
4915 size += nr_node_ids * sizeof(struct mem_cgroup_per_node *);
33327948 4916
8ff69e2c 4917 memcg = kzalloc(size, GFP_KERNEL);
d79154bb 4918 if (!memcg)
e7bbcdf3
DC
4919 return NULL;
4920
d79154bb
HD
4921 memcg->stat = alloc_percpu(struct mem_cgroup_stat_cpu);
4922 if (!memcg->stat)
d2e61b8d 4923 goto out_free;
d79154bb
HD
4924 spin_lock_init(&memcg->pcp_counter_lock);
4925 return memcg;
d2e61b8d
DC
4926
4927out_free:
8ff69e2c 4928 kfree(memcg);
d2e61b8d 4929 return NULL;
33327948
KH
4930}
4931
59927fb9 4932/*
c8b2a36f
GC
4933 * When destroying a mem_cgroup, references from swap_cgroup can remain.
4934 * (scanning them all at force_empty is too costly...)
4935 *
4936 * Instead of clearing all references at force_empty, we remember
4937 * the number of references from swap_cgroup and free the mem_cgroup
4938 * when it goes down to 0.
4939 *
4940 * Removal of cgroup itself succeeds regardless of refs from swap.
59927fb9 4941 */
c8b2a36f
GC
4942
4943static void __mem_cgroup_free(struct mem_cgroup *memcg)
59927fb9 4944{
c8b2a36f 4945 int node;
59927fb9 4946
bb4cc1a8 4947 mem_cgroup_remove_from_trees(memcg);
c8b2a36f
GC
4948
4949 for_each_node(node)
4950 free_mem_cgroup_per_zone_info(memcg, node);
4951
4952 free_percpu(memcg->stat);
4953
3f134619
GC
4954 /*
4955 * We need to make sure that (at least for now), the jump label
4956 * destruction code runs outside of the cgroup lock. This is because
4957 * get_online_cpus(), which is called from the static_branch update,
4958 * can't be called inside the cgroup_lock. cpusets are the ones
4959 * enforcing this dependency, so if they ever change, we might as well.
4960 *
4961 * schedule_work() will guarantee this happens. Be careful if you need
4962 * to move this code around, and make sure it is outside
4963 * the cgroup_lock.
4964 */
a8964b9b 4965 disarm_static_keys(memcg);
8ff69e2c 4966 kfree(memcg);
59927fb9 4967}
3afe36b1 4968
7bcc1bb1
DN
4969/*
4970 * Returns the parent mem_cgroup in memcgroup hierarchy with hierarchy enabled.
4971 */
e1aab161 4972struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg)
7bcc1bb1 4973{
3e32cb2e 4974 if (!memcg->memory.parent)
7bcc1bb1 4975 return NULL;
3e32cb2e 4976 return mem_cgroup_from_counter(memcg->memory.parent, memory);
7bcc1bb1 4977}
e1aab161 4978EXPORT_SYMBOL(parent_mem_cgroup);
33327948 4979
bb4cc1a8
AM
4980static void __init mem_cgroup_soft_limit_tree_init(void)
4981{
4982 struct mem_cgroup_tree_per_node *rtpn;
4983 struct mem_cgroup_tree_per_zone *rtpz;
4984 int tmp, node, zone;
4985
4986 for_each_node(node) {
4987 tmp = node;
4988 if (!node_state(node, N_NORMAL_MEMORY))
4989 tmp = -1;
4990 rtpn = kzalloc_node(sizeof(*rtpn), GFP_KERNEL, tmp);
4991 BUG_ON(!rtpn);
4992
4993 soft_limit_tree.rb_tree_per_node[node] = rtpn;
4994
4995 for (zone = 0; zone < MAX_NR_ZONES; zone++) {
4996 rtpz = &rtpn->rb_tree_per_zone[zone];
4997 rtpz->rb_root = RB_ROOT;
4998 spin_lock_init(&rtpz->lock);
4999 }
5000 }
5001}
5002
0eb253e2 5003static struct cgroup_subsys_state * __ref
eb95419b 5004mem_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
8cdea7c0 5005{
d142e3e6 5006 struct mem_cgroup *memcg;
04046e1a 5007 long error = -ENOMEM;
6d12e2d8 5008 int node;
8cdea7c0 5009
c0ff4b85
R
5010 memcg = mem_cgroup_alloc();
5011 if (!memcg)
04046e1a 5012 return ERR_PTR(error);
78fb7466 5013
3ed28fa1 5014 for_each_node(node)
c0ff4b85 5015 if (alloc_mem_cgroup_per_zone_info(memcg, node))
6d12e2d8 5016 goto free_out;
f64c3f54 5017
c077719b 5018 /* root ? */
eb95419b 5019 if (parent_css == NULL) {
a41c58a6 5020 root_mem_cgroup = memcg;
3e32cb2e
JW
5021 page_counter_init(&memcg->memory, NULL);
5022 page_counter_init(&memcg->memsw, NULL);
5023 page_counter_init(&memcg->kmem, NULL);
18f59ea7 5024 }
28dbc4b6 5025
d142e3e6
GC
5026 memcg->last_scanned_node = MAX_NUMNODES;
5027 INIT_LIST_HEAD(&memcg->oom_notify);
d142e3e6
GC
5028 memcg->move_charge_at_immigrate = 0;
5029 mutex_init(&memcg->thresholds_lock);
5030 spin_lock_init(&memcg->move_lock);
70ddf637 5031 vmpressure_init(&memcg->vmpressure);
fba94807
TH
5032 INIT_LIST_HEAD(&memcg->event_list);
5033 spin_lock_init(&memcg->event_list_lock);
d142e3e6
GC
5034
5035 return &memcg->css;
5036
5037free_out:
5038 __mem_cgroup_free(memcg);
5039 return ERR_PTR(error);
5040}
5041
5042static int
eb95419b 5043mem_cgroup_css_online(struct cgroup_subsys_state *css)
d142e3e6 5044{
eb95419b 5045 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
5c9d535b 5046 struct mem_cgroup *parent = mem_cgroup_from_css(css->parent);
2f7dd7a4 5047 int ret;
d142e3e6 5048
15a4c835 5049 if (css->id > MEM_CGROUP_ID_MAX)
4219b2da
LZ
5050 return -ENOSPC;
5051
63876986 5052 if (!parent)
d142e3e6
GC
5053 return 0;
5054
0999821b 5055 mutex_lock(&memcg_create_mutex);
d142e3e6
GC
5056
5057 memcg->use_hierarchy = parent->use_hierarchy;
5058 memcg->oom_kill_disable = parent->oom_kill_disable;
5059 memcg->swappiness = mem_cgroup_swappiness(parent);
5060
5061 if (parent->use_hierarchy) {
3e32cb2e
JW
5062 page_counter_init(&memcg->memory, &parent->memory);
5063 page_counter_init(&memcg->memsw, &parent->memsw);
5064 page_counter_init(&memcg->kmem, &parent->kmem);
55007d84 5065
7bcc1bb1 5066 /*
8d76a979
LZ
5067 * No need to take a reference to the parent because cgroup
5068 * core guarantees its existence.
7bcc1bb1 5069 */
18f59ea7 5070 } else {
3e32cb2e
JW
5071 page_counter_init(&memcg->memory, NULL);
5072 page_counter_init(&memcg->memsw, NULL);
5073 page_counter_init(&memcg->kmem, NULL);
8c7f6edb
TH
5074 /*
5075 * A deeper hierarchy with use_hierarchy == false doesn't make
5076 * much sense, so let the cgroup subsystem know about this
5077 * unfortunate state in our controller.
5078 */
d142e3e6 5079 if (parent != root_mem_cgroup)
073219e9 5080 memory_cgrp_subsys.broken_hierarchy = true;
18f59ea7 5081 }
0999821b 5082 mutex_unlock(&memcg_create_mutex);
d6441637 5083
2f7dd7a4
JW
5084 ret = memcg_init_kmem(memcg, &memory_cgrp_subsys);
5085 if (ret)
5086 return ret;
5087
5088 /*
5089 * Make sure the memcg is initialized: mem_cgroup_iter()
5090 * orders reading memcg->initialized against its callers'
5091 * reads of the memcg members.
5092 */
5093 smp_store_release(&memcg->initialized, 1);
5094
5095 return 0;
8cdea7c0
BS
5096}
5097
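/*
 * Editor's note (assumption, not code from this file): the
 * smp_store_release() above is expected to pair with an acquire on the
 * iterator side, roughly:
 *
 *	if (css_tryget(css)) {
 *		if (smp_load_acquire(&memcg->initialized))
 *			break;		(fully initialized, safe to hand out)
 *		css_put(css);		(still being set up, skip it)
 *	}
 *
 * so that mem_cgroup_iter() never returns a memcg whose per-zone info and
 * counters are not yet visible to other CPUs.
 */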
eb95419b 5098static void mem_cgroup_css_offline(struct cgroup_subsys_state *css)
df878fb0 5099{
eb95419b 5100 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
3bc942f3 5101 struct mem_cgroup_event *event, *tmp;
79bd9814
TH
5102
5103 /*
5104 * Unregister events and notify userspace.
5105 * Notify userspace about cgroup removing only after rmdir of cgroup
5106 * directory to avoid race between userspace and kernelspace.
5107 */
fba94807
TH
5108 spin_lock(&memcg->event_list_lock);
5109 list_for_each_entry_safe(event, tmp, &memcg->event_list, list) {
79bd9814
TH
5110 list_del_init(&event->list);
5111 schedule_work(&event->remove);
5112 }
fba94807 5113 spin_unlock(&memcg->event_list_lock);
ec64f515 5114
776ed0f0 5115 memcg_unregister_all_caches(memcg);
33cb876e 5116 vmpressure_cleanup(&memcg->vmpressure);
df878fb0
KH
5117}
5118
eb95419b 5119static void mem_cgroup_css_free(struct cgroup_subsys_state *css)
8cdea7c0 5120{
eb95419b 5121 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
c268e994 5122
10d5ebf4 5123 memcg_destroy_kmem(memcg);
465939a1 5124 __mem_cgroup_free(memcg);
8cdea7c0
BS
5125}
5126
1ced953b
TH
5127/**
5128 * mem_cgroup_css_reset - reset the states of a mem_cgroup
5129 * @css: the target css
5130 *
5131 * Reset the states of the mem_cgroup associated with @css. This is
5132 * invoked when the userland requests disabling on the default hierarchy
5133 * but the memcg is pinned through dependency. The memcg should stop
5134 * applying policies and should revert to the vanilla state as it may be
5135 * made visible again.
5136 *
5137 * The current implementation only resets the essential configurations.
5138 * This needs to be expanded to cover all the visible parts.
5139 */
5140static void mem_cgroup_css_reset(struct cgroup_subsys_state *css)
5141{
5142 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
5143
3e32cb2e
JW
5144 mem_cgroup_resize_limit(memcg, PAGE_COUNTER_MAX);
5145 mem_cgroup_resize_memsw_limit(memcg, PAGE_COUNTER_MAX);
5146 memcg_update_kmem_limit(memcg, PAGE_COUNTER_MAX);
5147 memcg->soft_limit = 0;
1ced953b
TH
5148}
5149
02491447 5150#ifdef CONFIG_MMU
7dc74be0 5151/* Handlers for move charge at task migration. */
854ffa8d 5152static int mem_cgroup_do_precharge(unsigned long count)
7dc74be0 5153{
05b84301 5154 int ret;
9476db97
JW
5155
5156 /* Try a single bulk charge without reclaim first */
00501b53 5157 ret = try_charge(mc.to, GFP_KERNEL & ~__GFP_WAIT, count);
9476db97 5158 if (!ret) {
854ffa8d 5159 mc.precharge += count;
854ffa8d
DN
5160 return ret;
5161 }
692e7c45 5162 if (ret == -EINTR) {
00501b53 5163 cancel_charge(root_mem_cgroup, count);
692e7c45
JW
5164 return ret;
5165 }
9476db97
JW
5166
5167 /* Try charges one by one with reclaim */
854ffa8d 5168 while (count--) {
00501b53 5169 ret = try_charge(mc.to, GFP_KERNEL & ~__GFP_NORETRY, 1);
9476db97
JW
5170 /*
5171 * In case of failure, any residual charges against
5172 * mc.to will be dropped by mem_cgroup_clear_mc()
692e7c45
JW
5173 * later on. However, cancel any charges that are
5174 * bypassed to root right away or they'll be lost.
9476db97 5175 */
692e7c45 5176 if (ret == -EINTR)
00501b53 5177 cancel_charge(root_mem_cgroup, 1);
38c5d72f 5178 if (ret)
38c5d72f 5179 return ret;
854ffa8d 5180 mc.precharge++;
9476db97 5181 cond_resched();
854ffa8d 5182 }
9476db97 5183 return 0;
4ffef5fe
DN
5184}
5185
5186/**
8d32ff84 5187 * get_mctgt_type - get target type of moving charge
4ffef5fe
DN
5188 * @vma: the vma the pte to be checked belongs to
5189 * @addr: the address corresponding to the pte to be checked
5190 * @ptent: the pte to be checked
02491447 5191 * @target: the pointer where the target page or swap entry will be stored (can be NULL)
4ffef5fe
DN
5192 *
5193 * Returns
5194 * 0(MC_TARGET_NONE): if the pte is not a target for move charge.
5195 * 1(MC_TARGET_PAGE): if the page corresponding to this pte is a target for
5196 *   move charge. If @target is not NULL, the page is stored in target->page
5197 * with an extra refcount taken (callers should handle it).
02491447
DN
5198 * 2(MC_TARGET_SWAP): if the swap entry corresponding to this pte is a
5199 * target for charge migration. If @target is not NULL, the entry is stored
5200 * in target->ent.
4ffef5fe
DN
5201 *
5202 * Called with pte lock held.
5203 */
4ffef5fe
DN
5204union mc_target {
5205 struct page *page;
02491447 5206 swp_entry_t ent;
4ffef5fe
DN
5207};
5208
4ffef5fe 5209enum mc_target_type {
8d32ff84 5210 MC_TARGET_NONE = 0,
4ffef5fe 5211 MC_TARGET_PAGE,
02491447 5212 MC_TARGET_SWAP,
4ffef5fe
DN
5213};
5214
90254a65
DN
5215static struct page *mc_handle_present_pte(struct vm_area_struct *vma,
5216 unsigned long addr, pte_t ptent)
4ffef5fe 5217{
90254a65 5218 struct page *page = vm_normal_page(vma, addr, ptent);
4ffef5fe 5219
90254a65
DN
5220 if (!page || !page_mapped(page))
5221 return NULL;
5222 if (PageAnon(page)) {
5223 /* we don't move shared anon */
4b91355e 5224 if (!move_anon())
90254a65 5225 return NULL;
87946a72
DN
5226 } else if (!move_file())
5227 /* we ignore mapcount for file pages */
90254a65
DN
5228 return NULL;
5229 if (!get_page_unless_zero(page))
5230 return NULL;
5231
5232 return page;
5233}
5234
4b91355e 5235#ifdef CONFIG_SWAP
90254a65
DN
5236static struct page *mc_handle_swap_pte(struct vm_area_struct *vma,
5237 unsigned long addr, pte_t ptent, swp_entry_t *entry)
5238{
90254a65
DN
5239 struct page *page = NULL;
5240 swp_entry_t ent = pte_to_swp_entry(ptent);
5241
5242 if (!move_anon() || non_swap_entry(ent))
5243 return NULL;
4b91355e
KH
5244 /*
5245 * Because lookup_swap_cache() updates some statistics counters,
5246 * we call find_get_page() with swapper_space directly.
5247 */
33806f06 5248 page = find_get_page(swap_address_space(ent), ent.val);
90254a65
DN
5249 if (do_swap_account)
5250 entry->val = ent.val;
5251
5252 return page;
5253}
4b91355e
KH
5254#else
5255static struct page *mc_handle_swap_pte(struct vm_area_struct *vma,
5256 unsigned long addr, pte_t ptent, swp_entry_t *entry)
5257{
5258 return NULL;
5259}
5260#endif
90254a65 5261
87946a72
DN
5262static struct page *mc_handle_file_pte(struct vm_area_struct *vma,
5263 unsigned long addr, pte_t ptent, swp_entry_t *entry)
5264{
5265 struct page *page = NULL;
87946a72
DN
5266 struct address_space *mapping;
5267 pgoff_t pgoff;
5268
5269 if (!vma->vm_file) /* anonymous vma */
5270 return NULL;
5271 if (!move_file())
5272 return NULL;
5273
87946a72
DN
5274 mapping = vma->vm_file->f_mapping;
5275 if (pte_none(ptent))
5276 pgoff = linear_page_index(vma, addr);
5277 else /* pte_file(ptent) is true */
5278 pgoff = pte_to_pgoff(ptent);
5279
5280 /* The page is moved even if it's not in this task's RSS (not faulted in by it). */
aa3b1895
HD
5281#ifdef CONFIG_SWAP
5282 /* shmem/tmpfs may report page out on swap: account for that too. */
139b6a6f
JW
5283 if (shmem_mapping(mapping)) {
5284 page = find_get_entry(mapping, pgoff);
5285 if (radix_tree_exceptional_entry(page)) {
5286 swp_entry_t swp = radix_to_swp_entry(page);
5287 if (do_swap_account)
5288 *entry = swp;
5289 page = find_get_page(swap_address_space(swp), swp.val);
5290 }
5291 } else
5292 page = find_get_page(mapping, pgoff);
5293#else
5294 page = find_get_page(mapping, pgoff);
aa3b1895 5295#endif
87946a72
DN
5296 return page;
5297}
5298
8d32ff84 5299static enum mc_target_type get_mctgt_type(struct vm_area_struct *vma,
90254a65
DN
5300 unsigned long addr, pte_t ptent, union mc_target *target)
5301{
5302 struct page *page = NULL;
5303 struct page_cgroup *pc;
8d32ff84 5304 enum mc_target_type ret = MC_TARGET_NONE;
90254a65
DN
5305 swp_entry_t ent = { .val = 0 };
5306
5307 if (pte_present(ptent))
5308 page = mc_handle_present_pte(vma, addr, ptent);
5309 else if (is_swap_pte(ptent))
5310 page = mc_handle_swap_pte(vma, addr, ptent, &ent);
87946a72
DN
5311 else if (pte_none(ptent) || pte_file(ptent))
5312 page = mc_handle_file_pte(vma, addr, ptent, &ent);
90254a65
DN
5313
5314 if (!page && !ent.val)
8d32ff84 5315 return ret;
02491447
DN
5316 if (page) {
5317 pc = lookup_page_cgroup(page);
5318 /*
0a31bc97
JW
5319 * Do only a loose check without serialization.
5320 * mem_cgroup_move_account() checks whether the pc is
5321 * valid under LRU exclusion.
02491447
DN
5322 */
5323 if (PageCgroupUsed(pc) && pc->mem_cgroup == mc.from) {
5324 ret = MC_TARGET_PAGE;
5325 if (target)
5326 target->page = page;
5327 }
5328 if (!ret || !target)
5329 put_page(page);
5330 }
90254a65
DN
5331 /* There is a swap entry and the page doesn't exist or isn't charged */
5332 if (ent.val && !ret &&
34c00c31 5333 mem_cgroup_id(mc.from) == lookup_swap_cgroup_id(ent)) {
7f0f1546
KH
5334 ret = MC_TARGET_SWAP;
5335 if (target)
5336 target->ent = ent;
4ffef5fe 5337 }
4ffef5fe
DN
5338 return ret;
5339}
5340
12724850
NH
5341#ifdef CONFIG_TRANSPARENT_HUGEPAGE
5342/*
5343 * We don't consider swapping or file mapped pages because THP does not
5344 * support them for now.
5345 * Caller should make sure that pmd_trans_huge(pmd) is true.
5346 */
5347static enum mc_target_type get_mctgt_type_thp(struct vm_area_struct *vma,
5348 unsigned long addr, pmd_t pmd, union mc_target *target)
5349{
5350 struct page *page = NULL;
5351 struct page_cgroup *pc;
5352 enum mc_target_type ret = MC_TARGET_NONE;
5353
5354 page = pmd_page(pmd);
309381fe 5355 VM_BUG_ON_PAGE(!page || !PageHead(page), page);
12724850
NH
5356 if (!move_anon())
5357 return ret;
5358 pc = lookup_page_cgroup(page);
5359 if (PageCgroupUsed(pc) && pc->mem_cgroup == mc.from) {
5360 ret = MC_TARGET_PAGE;
5361 if (target) {
5362 get_page(page);
5363 target->page = page;
5364 }
5365 }
5366 return ret;
5367}
5368#else
5369static inline enum mc_target_type get_mctgt_type_thp(struct vm_area_struct *vma,
5370 unsigned long addr, pmd_t pmd, union mc_target *target)
5371{
5372 return MC_TARGET_NONE;
5373}
5374#endif
5375
4ffef5fe
DN
5376static int mem_cgroup_count_precharge_pte_range(pmd_t *pmd,
5377 unsigned long addr, unsigned long end,
5378 struct mm_walk *walk)
5379{
5380 struct vm_area_struct *vma = walk->private;
5381 pte_t *pte;
5382 spinlock_t *ptl;
5383
bf929152 5384 if (pmd_trans_huge_lock(pmd, vma, &ptl) == 1) {
12724850
NH
5385 if (get_mctgt_type_thp(vma, addr, *pmd, NULL) == MC_TARGET_PAGE)
5386 mc.precharge += HPAGE_PMD_NR;
bf929152 5387 spin_unlock(ptl);
1a5a9906 5388 return 0;
12724850 5389 }
03319327 5390
45f83cef
AA
5391 if (pmd_trans_unstable(pmd))
5392 return 0;
4ffef5fe
DN
5393 pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
5394 for (; addr != end; pte++, addr += PAGE_SIZE)
8d32ff84 5395 if (get_mctgt_type(vma, addr, *pte, NULL))
4ffef5fe
DN
5396 mc.precharge++; /* increment precharge temporarily */
5397 pte_unmap_unlock(pte - 1, ptl);
5398 cond_resched();
5399
7dc74be0
DN
5400 return 0;
5401}
5402
4ffef5fe
DN
5403static unsigned long mem_cgroup_count_precharge(struct mm_struct *mm)
5404{
5405 unsigned long precharge;
5406 struct vm_area_struct *vma;
5407
dfe076b0 5408 down_read(&mm->mmap_sem);
4ffef5fe
DN
5409 for (vma = mm->mmap; vma; vma = vma->vm_next) {
5410 struct mm_walk mem_cgroup_count_precharge_walk = {
5411 .pmd_entry = mem_cgroup_count_precharge_pte_range,
5412 .mm = mm,
5413 .private = vma,
5414 };
5415 if (is_vm_hugetlb_page(vma))
5416 continue;
4ffef5fe
DN
5417 walk_page_range(vma->vm_start, vma->vm_end,
5418 &mem_cgroup_count_precharge_walk);
5419 }
dfe076b0 5420 up_read(&mm->mmap_sem);
4ffef5fe
DN
5421
5422 precharge = mc.precharge;
5423 mc.precharge = 0;
5424
5425 return precharge;
5426}
5427
4ffef5fe
DN
5428static int mem_cgroup_precharge_mc(struct mm_struct *mm)
5429{
dfe076b0
DN
5430 unsigned long precharge = mem_cgroup_count_precharge(mm);
5431
5432 VM_BUG_ON(mc.moving_task);
5433 mc.moving_task = current;
5434 return mem_cgroup_do_precharge(precharge);
4ffef5fe
DN
5435}
5436
dfe076b0
DN
5437/* cancels all extra charges on mc.from and mc.to, and wakes up all waiters. */
5438static void __mem_cgroup_clear_mc(void)
4ffef5fe 5439{
2bd9bb20
KH
5440 struct mem_cgroup *from = mc.from;
5441 struct mem_cgroup *to = mc.to;
5442
4ffef5fe 5443 /* we must uncharge all the leftover precharges from mc.to */
854ffa8d 5444 if (mc.precharge) {
00501b53 5445 cancel_charge(mc.to, mc.precharge);
854ffa8d
DN
5446 mc.precharge = 0;
5447 }
5448 /*
5449 * we didn't uncharge from mc.from at mem_cgroup_move_account(), so
5450 * we must uncharge here.
5451 */
5452 if (mc.moved_charge) {
00501b53 5453 cancel_charge(mc.from, mc.moved_charge);
854ffa8d 5454 mc.moved_charge = 0;
4ffef5fe 5455 }
483c30b5
DN
5456 /* we must fixup refcnts and charges */
5457 if (mc.moved_swap) {
483c30b5 5458 /* uncharge swap account from the old cgroup */
ce00a967 5459 if (!mem_cgroup_is_root(mc.from))
3e32cb2e 5460 page_counter_uncharge(&mc.from->memsw, mc.moved_swap);
483c30b5 5461
05b84301 5462 /*
3e32cb2e
JW
5463 * we charged both to->memory and to->memsw, so we
5464 * should uncharge to->memory.
05b84301 5465 */
ce00a967 5466 if (!mem_cgroup_is_root(mc.to))
3e32cb2e
JW
5467 page_counter_uncharge(&mc.to->memory, mc.moved_swap);
5468
e8ea14cc 5469 css_put_many(&mc.from->css, mc.moved_swap);
3e32cb2e 5470
4050377b 5471 /* we've already done css_get(mc.to) */
483c30b5
DN
5472 mc.moved_swap = 0;
5473 }
dfe076b0
DN
5474 memcg_oom_recover(from);
5475 memcg_oom_recover(to);
5476 wake_up_all(&mc.waitq);
5477}
5478
5479static void mem_cgroup_clear_mc(void)
5480{
5481 struct mem_cgroup *from = mc.from;
5482
5483 /*
5484 * we must clear moving_task before waking up waiters at the end of
5485 * task migration.
5486 */
5487 mc.moving_task = NULL;
5488 __mem_cgroup_clear_mc();
2bd9bb20 5489 spin_lock(&mc.lock);
4ffef5fe
DN
5490 mc.from = NULL;
5491 mc.to = NULL;
2bd9bb20 5492 spin_unlock(&mc.lock);
32047e2a 5493 mem_cgroup_end_move(from);
4ffef5fe
DN
5494}
5495
eb95419b 5496static int mem_cgroup_can_attach(struct cgroup_subsys_state *css,
761b3ef5 5497 struct cgroup_taskset *tset)
7dc74be0 5498{
2f7ee569 5499 struct task_struct *p = cgroup_taskset_first(tset);
7dc74be0 5500 int ret = 0;
eb95419b 5501 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
ee5e8472 5502 unsigned long move_charge_at_immigrate;
7dc74be0 5503
ee5e8472
GC
5504 /*
5505 * We are now committed to this value whatever it is. Changes in this
5506 * tunable will only affect upcoming migrations, not the current one.
5507 * So we need to save it and stick with it.
5508 */
5509 move_charge_at_immigrate = memcg->move_charge_at_immigrate;
5510 if (move_charge_at_immigrate) {
7dc74be0
DN
5511 struct mm_struct *mm;
5512 struct mem_cgroup *from = mem_cgroup_from_task(p);
5513
c0ff4b85 5514 VM_BUG_ON(from == memcg);
7dc74be0
DN
5515
5516 mm = get_task_mm(p);
5517 if (!mm)
5518 return 0;
7dc74be0 5519 /* We move charges only when we move an owner of the mm */
4ffef5fe
DN
5520 if (mm->owner == p) {
5521 VM_BUG_ON(mc.from);
5522 VM_BUG_ON(mc.to);
5523 VM_BUG_ON(mc.precharge);
854ffa8d 5524 VM_BUG_ON(mc.moved_charge);
483c30b5 5525 VM_BUG_ON(mc.moved_swap);
32047e2a 5526 mem_cgroup_start_move(from);
2bd9bb20 5527 spin_lock(&mc.lock);
4ffef5fe 5528 mc.from = from;
c0ff4b85 5529 mc.to = memcg;
ee5e8472 5530 mc.immigrate_flags = move_charge_at_immigrate;
2bd9bb20 5531 spin_unlock(&mc.lock);
dfe076b0 5532 /* We set mc.moving_task later */
4ffef5fe
DN
5533
5534 ret = mem_cgroup_precharge_mc(mm);
5535 if (ret)
5536 mem_cgroup_clear_mc();
dfe076b0
DN
5537 }
5538 mmput(mm);
7dc74be0
DN
5539 }
5540 return ret;
5541}
5542
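/*
 * Editor's note (admin-facing example, cf. Documentation/cgroups/memory.txt):
 * charge moving is opt-in per destination cgroup. memory.move_charge_at_immigrate
 * is a bitmask - bit 0 moves anonymous pages, bit 1 moves file pages - so
 *
 *	echo 3 > /sys/fs/cgroup/memory/<group>/memory.move_charge_at_immigrate
 *
 * enables both kinds of moving before tasks are migrated into <group>.
 */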
eb95419b 5543static void mem_cgroup_cancel_attach(struct cgroup_subsys_state *css,
761b3ef5 5544 struct cgroup_taskset *tset)
7dc74be0 5545{
4ffef5fe 5546 mem_cgroup_clear_mc();
7dc74be0
DN
5547}
5548
4ffef5fe
DN
5549static int mem_cgroup_move_charge_pte_range(pmd_t *pmd,
5550 unsigned long addr, unsigned long end,
5551 struct mm_walk *walk)
7dc74be0 5552{
4ffef5fe
DN
5553 int ret = 0;
5554 struct vm_area_struct *vma = walk->private;
5555 pte_t *pte;
5556 spinlock_t *ptl;
12724850
NH
5557 enum mc_target_type target_type;
5558 union mc_target target;
5559 struct page *page;
5560 struct page_cgroup *pc;
4ffef5fe 5561
12724850
NH
5562 /*
5563 * We don't take compound_lock() here, but there is no race with thp
5564 * splitting because:
5565 * - if pmd_trans_huge_lock() returns 1, the relevant thp is not
5566 * under splitting, which means there's no concurrent thp split,
5567 * - if another thread runs into split_huge_page() just after we
5568 * entered this if-block, the thread must wait for page table lock
5569 * to be unlocked in __split_huge_page_splitting(), where the main
5570 * part of thp split is not executed yet.
5571 */
bf929152 5572 if (pmd_trans_huge_lock(pmd, vma, &ptl) == 1) {
62ade86a 5573 if (mc.precharge < HPAGE_PMD_NR) {
bf929152 5574 spin_unlock(ptl);
12724850
NH
5575 return 0;
5576 }
5577 target_type = get_mctgt_type_thp(vma, addr, *pmd, &target);
5578 if (target_type == MC_TARGET_PAGE) {
5579 page = target.page;
5580 if (!isolate_lru_page(page)) {
5581 pc = lookup_page_cgroup(page);
5582 if (!mem_cgroup_move_account(page, HPAGE_PMD_NR,
2f3479b1 5583 pc, mc.from, mc.to)) {
12724850
NH
5584 mc.precharge -= HPAGE_PMD_NR;
5585 mc.moved_charge += HPAGE_PMD_NR;
5586 }
5587 putback_lru_page(page);
5588 }
5589 put_page(page);
5590 }
bf929152 5591 spin_unlock(ptl);
1a5a9906 5592 return 0;
12724850
NH
5593 }
5594
45f83cef
AA
5595 if (pmd_trans_unstable(pmd))
5596 return 0;
4ffef5fe
DN
5597retry:
5598 pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
5599 for (; addr != end; addr += PAGE_SIZE) {
5600 pte_t ptent = *(pte++);
02491447 5601 swp_entry_t ent;
4ffef5fe
DN
5602
5603 if (!mc.precharge)
5604 break;
5605
8d32ff84 5606 switch (get_mctgt_type(vma, addr, ptent, &target)) {
4ffef5fe
DN
5607 case MC_TARGET_PAGE:
5608 page = target.page;
5609 if (isolate_lru_page(page))
5610 goto put;
5611 pc = lookup_page_cgroup(page);
7ec99d62 5612 if (!mem_cgroup_move_account(page, 1, pc,
2f3479b1 5613 mc.from, mc.to)) {
4ffef5fe 5614 mc.precharge--;
854ffa8d
DN
5615 /* we uncharge from mc.from later. */
5616 mc.moved_charge++;
4ffef5fe
DN
5617 }
5618 putback_lru_page(page);
8d32ff84 5619put: /* get_mctgt_type() gets the page */
4ffef5fe
DN
5620 put_page(page);
5621 break;
02491447
DN
5622 case MC_TARGET_SWAP:
5623 ent = target.ent;
e91cbb42 5624 if (!mem_cgroup_move_swap_account(ent, mc.from, mc.to)) {
02491447 5625 mc.precharge--;
483c30b5
DN
5626 /* we fixup refcnts and charges later. */
5627 mc.moved_swap++;
5628 }
02491447 5629 break;
4ffef5fe
DN
5630 default:
5631 break;
5632 }
5633 }
5634 pte_unmap_unlock(pte - 1, ptl);
5635 cond_resched();
5636
5637 if (addr != end) {
5638 /*
5639 * We have consumed all precharges we got in can_attach().
5640 * We try charging one by one, but don't do any additional
5641 * charges to mc.to if we have already failed a charge during
5642 * the attach() phase.
5643 */
854ffa8d 5644 ret = mem_cgroup_do_precharge(1);
4ffef5fe
DN
5645 if (!ret)
5646 goto retry;
5647 }
5648
5649 return ret;
5650}
5651
5652static void mem_cgroup_move_charge(struct mm_struct *mm)
5653{
5654 struct vm_area_struct *vma;
5655
5656 lru_add_drain_all();
dfe076b0
DN
5657retry:
5658 if (unlikely(!down_read_trylock(&mm->mmap_sem))) {
5659 /*
5660 * Someone who is holding the mmap_sem might be waiting in
5661 * waitq. So we cancel all extra charges, wake up all waiters,
5662 * and retry. Because we cancel precharges, we might not be able
5663 * to move enough charges, but moving charge is a best-effort
5664 * feature anyway, so it wouldn't be a big problem.
5665 */
5666 __mem_cgroup_clear_mc();
5667 cond_resched();
5668 goto retry;
5669 }
4ffef5fe
DN
5670 for (vma = mm->mmap; vma; vma = vma->vm_next) {
5671 int ret;
5672 struct mm_walk mem_cgroup_move_charge_walk = {
5673 .pmd_entry = mem_cgroup_move_charge_pte_range,
5674 .mm = mm,
5675 .private = vma,
5676 };
5677 if (is_vm_hugetlb_page(vma))
5678 continue;
4ffef5fe
DN
5679 ret = walk_page_range(vma->vm_start, vma->vm_end,
5680 &mem_cgroup_move_charge_walk);
5681 if (ret)
5682 /*
5683 * means we have consumed all precharges and failed
5684 * to do an additional charge. Just abandon here.
5685 */
5686 break;
5687 }
dfe076b0 5688 up_read(&mm->mmap_sem);
7dc74be0
DN
5689}
5690
eb95419b 5691static void mem_cgroup_move_task(struct cgroup_subsys_state *css,
761b3ef5 5692 struct cgroup_taskset *tset)
67e465a7 5693{
2f7ee569 5694 struct task_struct *p = cgroup_taskset_first(tset);
a433658c 5695 struct mm_struct *mm = get_task_mm(p);
dfe076b0 5696
dfe076b0 5697 if (mm) {
a433658c
KM
5698 if (mc.to)
5699 mem_cgroup_move_charge(mm);
dfe076b0
DN
5700 mmput(mm);
5701 }
a433658c
KM
5702 if (mc.to)
5703 mem_cgroup_clear_mc();
67e465a7 5704}
5cfb80a7 5705#else /* !CONFIG_MMU */
eb95419b 5706static int mem_cgroup_can_attach(struct cgroup_subsys_state *css,
761b3ef5 5707 struct cgroup_taskset *tset)
5cfb80a7
DN
5708{
5709 return 0;
5710}
eb95419b 5711static void mem_cgroup_cancel_attach(struct cgroup_subsys_state *css,
761b3ef5 5712 struct cgroup_taskset *tset)
5cfb80a7
DN
5713{
5714}
eb95419b 5715static void mem_cgroup_move_task(struct cgroup_subsys_state *css,
761b3ef5 5716 struct cgroup_taskset *tset)
5cfb80a7
DN
5717{
5718}
5719#endif
67e465a7 5720
f00baae7
TH
5721/*
5722 * Cgroup retains root cgroups across [un]mount cycles, making it necessary
aa6ec29b
TH
5723 * to verify whether we're attached to the default hierarchy on each mount
5724 * attempt.
f00baae7 5725 */
eb95419b 5726static void mem_cgroup_bind(struct cgroup_subsys_state *root_css)
f00baae7
TH
5727{
5728 /*
aa6ec29b 5729 * use_hierarchy is forced on the default hierarchy. cgroup core
f00baae7
TH
5730 * guarantees that @root doesn't have any children, so turning it
5731 * on for the root memcg is enough.
5732 */
aa6ec29b 5733 if (cgroup_on_dfl(root_css->cgroup))
eb95419b 5734 mem_cgroup_from_css(root_css)->use_hierarchy = true;
f00baae7
TH
5735}
5736
073219e9 5737struct cgroup_subsys memory_cgrp_subsys = {
92fb9748 5738 .css_alloc = mem_cgroup_css_alloc,
d142e3e6 5739 .css_online = mem_cgroup_css_online,
92fb9748
TH
5740 .css_offline = mem_cgroup_css_offline,
5741 .css_free = mem_cgroup_css_free,
1ced953b 5742 .css_reset = mem_cgroup_css_reset,
7dc74be0
DN
5743 .can_attach = mem_cgroup_can_attach,
5744 .cancel_attach = mem_cgroup_cancel_attach,
67e465a7 5745 .attach = mem_cgroup_move_task,
f00baae7 5746 .bind = mem_cgroup_bind,
5577964e 5747 .legacy_cftypes = mem_cgroup_files,
6d12e2d8 5748 .early_init = 0,
8cdea7c0 5749};
c077719b 5750
c255a458 5751#ifdef CONFIG_MEMCG_SWAP
a42c390c
MH
5752static int __init enable_swap_account(char *s)
5753{
a2c8990a 5754 if (!strcmp(s, "1"))
a42c390c 5755 really_do_swap_account = 1;
a2c8990a 5756 else if (!strcmp(s, "0"))
a42c390c
MH
5757 really_do_swap_account = 0;
5758 return 1;
5759}
a2c8990a 5760__setup("swapaccount=", enable_swap_account);
c077719b 5761
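/*
 * Editor's note: the boot parameter parsed above is used on the kernel
 * command line, e.g.
 *
 *	swapaccount=0	leave memory+swap accounting disabled
 *	swapaccount=1	enable memory+swap accounting at boot
 */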
2d11085e
MH
5762static void __init memsw_file_init(void)
5763{
2cf669a5
TH
5764 WARN_ON(cgroup_add_legacy_cftypes(&memory_cgrp_subsys,
5765 memsw_cgroup_files));
6acc8b02
MH
5766}
5767
5768static void __init enable_swap_cgroup(void)
5769{
5770 if (!mem_cgroup_disabled() && really_do_swap_account) {
5771 do_swap_account = 1;
5772 memsw_file_init();
5773 }
2d11085e 5774}
6acc8b02 5775
2d11085e 5776#else
6acc8b02 5777static void __init enable_swap_cgroup(void)
2d11085e
MH
5778{
5779}
c077719b 5780#endif
2d11085e 5781
0a31bc97
JW
5782#ifdef CONFIG_MEMCG_SWAP
5783/**
5784 * mem_cgroup_swapout - transfer a memsw charge to swap
5785 * @page: page whose memsw charge to transfer
5786 * @entry: swap entry to move the charge to
5787 *
5788 * Transfer the memsw charge of @page to @entry.
5789 */
5790void mem_cgroup_swapout(struct page *page, swp_entry_t entry)
5791{
5792 struct page_cgroup *pc;
5793 unsigned short oldid;
5794
5795 VM_BUG_ON_PAGE(PageLRU(page), page);
5796 VM_BUG_ON_PAGE(page_count(page), page);
5797
5798 if (!do_swap_account)
5799 return;
5800
5801 pc = lookup_page_cgroup(page);
5802
5803 /* Readahead page, never charged */
5804 if (!PageCgroupUsed(pc))
5805 return;
5806
5807 VM_BUG_ON_PAGE(!(pc->flags & PCG_MEMSW), page);
5808
5809 oldid = swap_cgroup_record(entry, mem_cgroup_id(pc->mem_cgroup));
5810 VM_BUG_ON_PAGE(oldid, page);
5811
5812 pc->flags &= ~PCG_MEMSW;
5813 css_get(&pc->mem_cgroup->css);
5814 mem_cgroup_swap_statistics(pc->mem_cgroup, true);
5815}
5816
5817/**
5818 * mem_cgroup_uncharge_swap - uncharge a swap entry
5819 * @entry: swap entry to uncharge
5820 *
5821 * Drop the memsw charge associated with @entry.
5822 */
5823void mem_cgroup_uncharge_swap(swp_entry_t entry)
5824{
5825 struct mem_cgroup *memcg;
5826 unsigned short id;
5827
5828 if (!do_swap_account)
5829 return;
5830
5831 id = swap_cgroup_record(entry, 0);
5832 rcu_read_lock();
5833 memcg = mem_cgroup_lookup(id);
5834 if (memcg) {
ce00a967 5835 if (!mem_cgroup_is_root(memcg))
3e32cb2e 5836 page_counter_uncharge(&memcg->memsw, 1);
0a31bc97
JW
5837 mem_cgroup_swap_statistics(memcg, false);
5838 css_put(&memcg->css);
5839 }
5840 rcu_read_unlock();
5841}
5842#endif
5843
00501b53
JW
5844/**
5845 * mem_cgroup_try_charge - try charging a page
5846 * @page: page to charge
5847 * @mm: mm context of the victim
5848 * @gfp_mask: reclaim mode
5849 * @memcgp: charged memcg return
5850 *
5851 * Try to charge @page to the memcg that @mm belongs to, reclaiming
5852 * pages according to @gfp_mask if necessary.
5853 *
5854 * Returns 0 on success, with *@memcgp pointing to the charged memcg.
5855 * Otherwise, an error code is returned.
5856 *
5857 * After page->mapping has been set up, the caller must finalize the
5858 * charge with mem_cgroup_commit_charge(), or abort the transaction
5859 * with mem_cgroup_cancel_charge() in case page instantiation fails.
5860 */
5861int mem_cgroup_try_charge(struct page *page, struct mm_struct *mm,
5862 gfp_t gfp_mask, struct mem_cgroup **memcgp)
5863{
5864 struct mem_cgroup *memcg = NULL;
5865 unsigned int nr_pages = 1;
5866 int ret = 0;
5867
5868 if (mem_cgroup_disabled())
5869 goto out;
5870
5871 if (PageSwapCache(page)) {
5872 struct page_cgroup *pc = lookup_page_cgroup(page);
5873 /*
5874 * Every swap fault against a single page tries to charge the
5875 * page, bail as early as possible. shmem_unuse() encounters
5876 * already charged pages, too. The USED bit is protected by
5877 * the page lock, which serializes swap cache removal, which
5878 * in turn serializes uncharging.
5879 */
5880 if (PageCgroupUsed(pc))
5881 goto out;
5882 }
5883
5884 if (PageTransHuge(page)) {
5885 nr_pages <<= compound_order(page);
5886 VM_BUG_ON_PAGE(!PageTransHuge(page), page);
5887 }
5888
5889 if (do_swap_account && PageSwapCache(page))
5890 memcg = try_get_mem_cgroup_from_page(page);
5891 if (!memcg)
5892 memcg = get_mem_cgroup_from_mm(mm);
5893
5894 ret = try_charge(memcg, gfp_mask, nr_pages);
5895
5896 css_put(&memcg->css);
5897
5898 if (ret == -EINTR) {
5899 memcg = root_mem_cgroup;
5900 ret = 0;
5901 }
5902out:
5903 *memcgp = memcg;
5904 return ret;
5905}
5906
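/*
 * Editor's sketch (not part of the original file): the try/commit/cancel
 * protocol described above, as a page-instantiation path would use it.
 * The function and the @setup_failed stand-in are hypothetical; only the
 * three mem_cgroup_*_charge() calls are real.
 */
static int example_instantiate_page(struct page *page, struct mm_struct *mm,
				    gfp_t gfp_mask, bool setup_failed)
{
	struct mem_cgroup *memcg;
	int ret;

	ret = mem_cgroup_try_charge(page, mm, gfp_mask, &memcg);
	if (ret)
		return ret;

	/*
	 * ... here the caller would set up page->mapping / rmap; the
	 * outcome is modelled by the hypothetical @setup_failed flag ...
	 */
	if (setup_failed) {
		mem_cgroup_cancel_charge(page, memcg);
		return -ENOMEM;
	}

	/* page->mapping is set up, finalize the transaction */
	mem_cgroup_commit_charge(page, memcg, false);
	return 0;
}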
5907/**
5908 * mem_cgroup_commit_charge - commit a page charge
5909 * @page: page to charge
5910 * @memcg: memcg to charge the page to
5911 * @lrucare: page might be on LRU already
5912 *
5913 * Finalize a charge transaction started by mem_cgroup_try_charge(),
5914 * after page->mapping has been set up. This must happen atomically
5915 * as part of the page instantiation, i.e. under the page table lock
5916 * for anonymous pages, under the page lock for page and swap cache.
5917 *
5918 * In addition, the page must not be on the LRU during the commit, to
5919 * prevent racing with task migration. If it might be, use @lrucare.
5920 *
5921 * Use mem_cgroup_cancel_charge() to cancel the transaction instead.
5922 */
5923void mem_cgroup_commit_charge(struct page *page, struct mem_cgroup *memcg,
5924 bool lrucare)
5925{
5926 unsigned int nr_pages = 1;
5927
5928 VM_BUG_ON_PAGE(!page->mapping, page);
5929 VM_BUG_ON_PAGE(PageLRU(page) && !lrucare, page);
5930
5931 if (mem_cgroup_disabled())
5932 return;
5933 /*
5934 * Swap faults will attempt to charge the same page multiple
5935 * times. But reuse_swap_page() might have removed the page
5936 * from swapcache already, so we can't check PageSwapCache().
5937 */
5938 if (!memcg)
5939 return;
5940
6abb5a86
JW
5941 commit_charge(page, memcg, lrucare);
5942
00501b53
JW
5943 if (PageTransHuge(page)) {
5944 nr_pages <<= compound_order(page);
5945 VM_BUG_ON_PAGE(!PageTransHuge(page), page);
5946 }
5947
6abb5a86
JW
5948 local_irq_disable();
5949 mem_cgroup_charge_statistics(memcg, page, nr_pages);
5950 memcg_check_events(memcg, page);
5951 local_irq_enable();
00501b53
JW
5952
5953 if (do_swap_account && PageSwapCache(page)) {
5954 swp_entry_t entry = { .val = page_private(page) };
5955 /*
5956 * The swap entry might not get freed for a long time,
5957 * let's not wait for it. The page already received a
5958 * memory+swap charge, drop the swap entry duplicate.
5959 */
5960 mem_cgroup_uncharge_swap(entry);
5961 }
5962}
5963
5964/**
5965 * mem_cgroup_cancel_charge - cancel a page charge
5966 * @page: page to charge
5967 * @memcg: memcg to charge the page to
5968 *
5969 * Cancel a charge transaction started by mem_cgroup_try_charge().
5970 */
5971void mem_cgroup_cancel_charge(struct page *page, struct mem_cgroup *memcg)
5972{
5973 unsigned int nr_pages = 1;
5974
5975 if (mem_cgroup_disabled())
5976 return;
5977 /*
5978 * Swap faults will attempt to charge the same page multiple
5979 * times. But reuse_swap_page() might have removed the page
5980 * from swapcache already, so we can't check PageSwapCache().
5981 */
5982 if (!memcg)
5983 return;
5984
5985 if (PageTransHuge(page)) {
5986 nr_pages <<= compound_order(page);
5987 VM_BUG_ON_PAGE(!PageTransHuge(page), page);
5988 }
5989
5990 cancel_charge(memcg, nr_pages);
5991}
5992
747db954
JW
5993static void uncharge_batch(struct mem_cgroup *memcg, unsigned long pgpgout,
5994 unsigned long nr_mem, unsigned long nr_memsw,
5995 unsigned long nr_anon, unsigned long nr_file,
5996 unsigned long nr_huge, struct page *dummy_page)
5997{
5998 unsigned long flags;
5999
ce00a967
JW
6000 if (!mem_cgroup_is_root(memcg)) {
6001 if (nr_mem)
3e32cb2e 6002 page_counter_uncharge(&memcg->memory, nr_mem);
ce00a967 6003 if (nr_memsw)
3e32cb2e 6004 page_counter_uncharge(&memcg->memsw, nr_memsw);
ce00a967
JW
6005 memcg_oom_recover(memcg);
6006 }
747db954
JW
6007
6008 local_irq_save(flags);
6009 __this_cpu_sub(memcg->stat->count[MEM_CGROUP_STAT_RSS], nr_anon);
6010 __this_cpu_sub(memcg->stat->count[MEM_CGROUP_STAT_CACHE], nr_file);
6011 __this_cpu_sub(memcg->stat->count[MEM_CGROUP_STAT_RSS_HUGE], nr_huge);
6012 __this_cpu_add(memcg->stat->events[MEM_CGROUP_EVENTS_PGPGOUT], pgpgout);
6013 __this_cpu_add(memcg->stat->nr_page_events, nr_anon + nr_file);
6014 memcg_check_events(memcg, dummy_page);
6015 local_irq_restore(flags);
e8ea14cc
JW
6016
6017 if (!mem_cgroup_is_root(memcg))
6018 css_put_many(&memcg->css, max(nr_mem, nr_memsw));
747db954
JW
6019}
6020
6021static void uncharge_list(struct list_head *page_list)
6022{
6023 struct mem_cgroup *memcg = NULL;
6024 unsigned long nr_memsw = 0;
6025 unsigned long nr_anon = 0;
6026 unsigned long nr_file = 0;
6027 unsigned long nr_huge = 0;
6028 unsigned long pgpgout = 0;
6029 unsigned long nr_mem = 0;
6030 struct list_head *next;
6031 struct page *page;
6032
6033 next = page_list->next;
6034 do {
6035 unsigned int nr_pages = 1;
6036 struct page_cgroup *pc;
6037
6038 page = list_entry(next, struct page, lru);
6039 next = page->lru.next;
6040
6041 VM_BUG_ON_PAGE(PageLRU(page), page);
6042 VM_BUG_ON_PAGE(page_count(page), page);
6043
6044 pc = lookup_page_cgroup(page);
6045 if (!PageCgroupUsed(pc))
6046 continue;
6047
6048 /*
6049 * Nobody should be changing or seriously looking at
6050 * pc->mem_cgroup and pc->flags at this point; we have
6051 * fully exclusive access to the page.
6052 */
6053
6054 if (memcg != pc->mem_cgroup) {
6055 if (memcg) {
6056 uncharge_batch(memcg, pgpgout, nr_mem, nr_memsw,
6057 nr_anon, nr_file, nr_huge, page);
6058 pgpgout = nr_mem = nr_memsw = 0;
6059 nr_anon = nr_file = nr_huge = 0;
6060 }
6061 memcg = pc->mem_cgroup;
6062 }
6063
6064 if (PageTransHuge(page)) {
6065 nr_pages <<= compound_order(page);
6066 VM_BUG_ON_PAGE(!PageTransHuge(page), page);
6067 nr_huge += nr_pages;
6068 }
6069
6070 if (PageAnon(page))
6071 nr_anon += nr_pages;
6072 else
6073 nr_file += nr_pages;
6074
6075 if (pc->flags & PCG_MEM)
6076 nr_mem += nr_pages;
6077 if (pc->flags & PCG_MEMSW)
6078 nr_memsw += nr_pages;
6079 pc->flags = 0;
6080
6081 pgpgout++;
6082 } while (next != page_list);
6083
6084 if (memcg)
6085 uncharge_batch(memcg, pgpgout, nr_mem, nr_memsw,
6086 nr_anon, nr_file, nr_huge, page);
6087}
6088
0a31bc97
JW
6089/**
6090 * mem_cgroup_uncharge - uncharge a page
6091 * @page: page to uncharge
6092 *
6093 * Uncharge a page previously charged with mem_cgroup_try_charge() and
6094 * mem_cgroup_commit_charge().
6095 */
6096void mem_cgroup_uncharge(struct page *page)
6097{
0a31bc97 6098 struct page_cgroup *pc;
0a31bc97
JW
6099
6100 if (mem_cgroup_disabled())
6101 return;
6102
747db954 6103 /* Don't touch page->lru of any random page, pre-check: */
0a31bc97 6104 pc = lookup_page_cgroup(page);
0a31bc97
JW
6105 if (!PageCgroupUsed(pc))
6106 return;
6107
747db954
JW
6108 INIT_LIST_HEAD(&page->lru);
6109 uncharge_list(&page->lru);
6110}
0a31bc97 6111
747db954
JW
6112/**
6113 * mem_cgroup_uncharge_list - uncharge a list of pages
6114 * @page_list: list of pages to uncharge
6115 *
6116 * Uncharge a list of pages previously charged with
6117 * mem_cgroup_try_charge() and mem_cgroup_commit_charge().
6118 */
6119void mem_cgroup_uncharge_list(struct list_head *page_list)
6120{
6121 if (mem_cgroup_disabled())
6122 return;
0a31bc97 6123
747db954
JW
6124 if (!list_empty(page_list))
6125 uncharge_list(page_list);
0a31bc97
JW
6126}
6127
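/*
 * Editor's sketch (not part of the original file): batched teardown as done
 * by reclaim-style callers - pages that have dropped their last reference
 * are collected on a list, uncharged in one pass and then returned to the
 * page allocator. free_hot_cold_page_list() is assumed to be the
 * mm/page_alloc.c helper of this kernel generation.
 */
static void example_free_unreferenced_pages(struct list_head *pages)
{
	mem_cgroup_uncharge_list(pages);	/* never-charged pages are skipped */
	free_hot_cold_page_list(pages, true);	/* give the pages back, cold */
}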
6128/**
6129 * mem_cgroup_migrate - migrate a charge to another page
6130 * @oldpage: currently charged page
6131 * @newpage: page to transfer the charge to
6132 * @lrucare: both pages might be on the LRU already
6133 *
6134 * Migrate the charge from @oldpage to @newpage.
6135 *
6136 * Both pages must be locked, @newpage->mapping must be set up.
6137 */
6138void mem_cgroup_migrate(struct page *oldpage, struct page *newpage,
6139 bool lrucare)
6140{
0a31bc97
JW
6141 struct page_cgroup *pc;
6142 int isolated;
6143
6144 VM_BUG_ON_PAGE(!PageLocked(oldpage), oldpage);
6145 VM_BUG_ON_PAGE(!PageLocked(newpage), newpage);
6146 VM_BUG_ON_PAGE(!lrucare && PageLRU(oldpage), oldpage);
6147 VM_BUG_ON_PAGE(!lrucare && PageLRU(newpage), newpage);
6148 VM_BUG_ON_PAGE(PageAnon(oldpage) != PageAnon(newpage), newpage);
6abb5a86
JW
6149 VM_BUG_ON_PAGE(PageTransHuge(oldpage) != PageTransHuge(newpage),
6150 newpage);
0a31bc97
JW
6151
6152 if (mem_cgroup_disabled())
6153 return;
6154
6155 /* Page cache replacement: new page already charged? */
6156 pc = lookup_page_cgroup(newpage);
6157 if (PageCgroupUsed(pc))
6158 return;
6159
7d5e3245
JW
6160 /*
6161 * Swapcache readahead pages can get migrated before being
6162 * charged, and migration from compaction can happen to an
6163 * uncharged page when the PFN walker finds a page that
6164 * reclaim just put back on the LRU but has not released yet.
6165 */
0a31bc97
JW
6166 pc = lookup_page_cgroup(oldpage);
6167 if (!PageCgroupUsed(pc))
6168 return;
6169
6170 VM_BUG_ON_PAGE(!(pc->flags & PCG_MEM), oldpage);
6171 VM_BUG_ON_PAGE(do_swap_account && !(pc->flags & PCG_MEMSW), oldpage);
6172
0a31bc97
JW
6173 if (lrucare)
6174 lock_page_lru(oldpage, &isolated);
6175
6176 pc->flags = 0;
6177
6178 if (lrucare)
6179 unlock_page_lru(oldpage, isolated);
6180
6abb5a86 6181 commit_charge(newpage, pc->mem_cgroup, lrucare);
0a31bc97
JW
6182}
6183
2d11085e 6184/*
1081312f
MH
6185 * subsys_initcall() for memory controller.
6186 *
6187 * Some parts like hotcpu_notifier() have to be initialized from this context
6188 * because of lock dependencies (cgroup_lock -> cpu hotplug) but basically
6189 * everything that doesn't depend on a specific mem_cgroup structure should
6190 * be initialized from here.
2d11085e
MH
6191 */
6192static int __init mem_cgroup_init(void)
6193{
6194 hotcpu_notifier(memcg_cpu_hotplug_callback, 0);
6acc8b02 6195 enable_swap_cgroup();
bb4cc1a8 6196 mem_cgroup_soft_limit_tree_init();
e4777496 6197 memcg_stock_init();
2d11085e
MH
6198 return 0;
6199}
6200subsys_initcall(mem_cgroup_init);