1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /* memcontrol.c - Memory Controller
3  *
4  * Copyright IBM Corporation, 2007
5  * Author Balbir Singh <balbir@linux.vnet.ibm.com>
6  *
7  * Copyright 2007 OpenVZ SWsoft Inc
8  * Author: Pavel Emelianov <xemul@openvz.org>
9  *
10  * Memory thresholds
11  * Copyright (C) 2009 Nokia Corporation
12  * Author: Kirill A. Shutemov
13  *
14  * Kernel Memory Controller
15  * Copyright (C) 2012 Parallels Inc. and Google Inc.
16  * Authors: Glauber Costa and Suleiman Souhlal
17  *
18  * Native page reclaim
19  * Charge lifetime sanitation
20  * Lockless page tracking & accounting
21  * Unified hierarchy configuration model
22  * Copyright (C) 2015 Red Hat, Inc., Johannes Weiner
23  *
24  * Per memcg lru locking
25  * Copyright (C) 2020 Alibaba, Inc, Alex Shi
26  */
27
28 #include <linux/page_counter.h>
29 #include <linux/memcontrol.h>
30 #include <linux/cgroup.h>
31 #include <linux/pagewalk.h>
32 #include <linux/sched/mm.h>
33 #include <linux/shmem_fs.h>
34 #include <linux/hugetlb.h>
35 #include <linux/pagemap.h>
36 #include <linux/vm_event_item.h>
37 #include <linux/smp.h>
38 #include <linux/page-flags.h>
39 #include <linux/backing-dev.h>
40 #include <linux/bit_spinlock.h>
41 #include <linux/rcupdate.h>
42 #include <linux/limits.h>
43 #include <linux/export.h>
44 #include <linux/mutex.h>
45 #include <linux/rbtree.h>
46 #include <linux/slab.h>
47 #include <linux/swap.h>
48 #include <linux/swapops.h>
49 #include <linux/spinlock.h>
50 #include <linux/eventfd.h>
51 #include <linux/poll.h>
52 #include <linux/sort.h>
53 #include <linux/fs.h>
54 #include <linux/seq_file.h>
55 #include <linux/vmpressure.h>
56 #include <linux/memremap.h>
57 #include <linux/mm_inline.h>
58 #include <linux/swap_cgroup.h>
59 #include <linux/cpu.h>
60 #include <linux/oom.h>
61 #include <linux/lockdep.h>
62 #include <linux/file.h>
63 #include <linux/resume_user_mode.h>
64 #include <linux/psi.h>
65 #include <linux/seq_buf.h>
66 #include <linux/parser.h>
67 #include "internal.h"
68 #include <net/sock.h>
69 #include <net/ip.h>
70 #include "slab.h"
71 #include "swap.h"
72
73 #include <linux/uaccess.h>
74
75 #include <trace/events/vmscan.h>
76
77 struct cgroup_subsys memory_cgrp_subsys __read_mostly;
78 EXPORT_SYMBOL(memory_cgrp_subsys);
79
80 struct mem_cgroup *root_mem_cgroup __read_mostly;
81
82 /* Active memory cgroup to use from an interrupt context */
83 DEFINE_PER_CPU(struct mem_cgroup *, int_active_memcg);
84 EXPORT_PER_CPU_SYMBOL_GPL(int_active_memcg);
85
86 /* Socket memory accounting disabled? */
87 static bool cgroup_memory_nosocket __ro_after_init;
88
89 /* Kernel memory accounting disabled? */
90 static bool cgroup_memory_nokmem __ro_after_init;
91
92 #ifdef CONFIG_CGROUP_WRITEBACK
93 static DECLARE_WAIT_QUEUE_HEAD(memcg_cgwb_frn_waitq);
94 #endif
95
96 /* Whether legacy memory+swap accounting is active */
97 static bool do_memsw_account(void)
98 {
99         return !cgroup_subsys_on_dfl(memory_cgrp_subsys);
100 }
101
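/*
 * Number of per-cpu charge/uncharge page events between two consecutive
 * checks of the cgroup1 threshold notifications and of the soft limit
 * tree, see mem_cgroup_event_ratelimit().
 */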
102 #define THRESHOLDS_EVENTS_TARGET 128
103 #define SOFTLIMIT_EVENTS_TARGET 1024
104
105 /*
106  * Cgroups above their limits are maintained in a RB-Tree, independent of
107  * their hierarchy representation
108  */
109
110 struct mem_cgroup_tree_per_node {
111         struct rb_root rb_root;
112         struct rb_node *rb_rightmost;
113         spinlock_t lock;
114 };
115
116 struct mem_cgroup_tree {
117         struct mem_cgroup_tree_per_node *rb_tree_per_node[MAX_NUMNODES];
118 };
119
120 static struct mem_cgroup_tree soft_limit_tree __read_mostly;
121
122 /* for OOM */
123 struct mem_cgroup_eventfd_list {
124         struct list_head list;
125         struct eventfd_ctx *eventfd;
126 };
127
128 /*
129  * cgroup_event represents events which userspace wants to receive.
130  */
131 struct mem_cgroup_event {
132         /*
133          * memcg which the event belongs to.
134          */
135         struct mem_cgroup *memcg;
136         /*
137          * eventfd to signal userspace about the event.
138          */
139         struct eventfd_ctx *eventfd;
140         /*
141          * Each of these is stored in a list by the cgroup.
142          */
143         struct list_head list;
144         /*
145          * register_event() callback will be used to add new userspace
146          * waiter for changes related to this event.  Use eventfd_signal()
147          * on eventfd to send notification to userspace.
148          */
149         int (*register_event)(struct mem_cgroup *memcg,
150                               struct eventfd_ctx *eventfd, const char *args);
151         /*
152          * unregister_event() callback will be called when userspace closes
153          * the eventfd or when the cgroup is removed.  This callback must be
154          * set if you want to provide notification functionality.
155          */
156         void (*unregister_event)(struct mem_cgroup *memcg,
157                                  struct eventfd_ctx *eventfd);
158         /*
159          * All fields below are needed to unregister the event when
160          * userspace closes the eventfd.
161          */
162         poll_table pt;
163         wait_queue_head_t *wqh;
164         wait_queue_entry_t wait;
165         struct work_struct remove;
166 };
167
168 static void mem_cgroup_threshold(struct mem_cgroup *memcg);
169 static void mem_cgroup_oom_notify(struct mem_cgroup *memcg);
170
171 /* Stuff for moving charges at task migration. */
172 /*
173  * Types of charges to be moved.
174  */
175 #define MOVE_ANON       0x1U
176 #define MOVE_FILE       0x2U
177 #define MOVE_MASK       (MOVE_ANON | MOVE_FILE)
178
179 /* "mc" and its members are protected by cgroup_mutex */
180 static struct move_charge_struct {
181         spinlock_t        lock; /* for from, to */
182         struct mm_struct  *mm;
183         struct mem_cgroup *from;
184         struct mem_cgroup *to;
185         unsigned long flags;
186         unsigned long precharge;
187         unsigned long moved_charge;
188         unsigned long moved_swap;
189         struct task_struct *moving_task;        /* a task moving charges */
190         wait_queue_head_t waitq;                /* a waitq for other context */
191 } mc = {
192         .lock = __SPIN_LOCK_UNLOCKED(mc.lock),
193         .waitq = __WAIT_QUEUE_HEAD_INITIALIZER(mc.waitq),
194 };
195
196 /*
197  * Maximum loops in mem_cgroup_hierarchical_reclaim(), used for soft
198  * limit reclaim to prevent infinite loops, if they ever occur.
199  */
200 #define MEM_CGROUP_MAX_RECLAIM_LOOPS            100
201 #define MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS 2
202
203 /* for encoding cft->private value on file */
204 enum res_type {
205         _MEM,
206         _MEMSWAP,
207         _KMEM,
208         _TCP,
209 };
210
211 #define MEMFILE_PRIVATE(x, val) ((x) << 16 | (val))
212 #define MEMFILE_TYPE(val)       ((val) >> 16 & 0xffff)
213 #define MEMFILE_ATTR(val)       ((val) & 0xffff)
214
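/*
 * Illustrative example (not taken from this file): cft->private for a
 * cgroup1 control file may be built as MEMFILE_PRIVATE(_MEMSWAP, RES_LIMIT),
 * which puts the res_type in the upper 16 bits and the attribute in the
 * lower 16 bits; MEMFILE_TYPE() and MEMFILE_ATTR() recover the two halves.
 */
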
215 /*
216  * Iteration constructs for visiting all cgroups (under a tree).  If
217  * loops are exited prematurely (break), mem_cgroup_iter_break() must
218  * be used for reference counting.
219  */
220 #define for_each_mem_cgroup_tree(iter, root)            \
221         for (iter = mem_cgroup_iter(root, NULL, NULL);  \
222              iter != NULL;                              \
223              iter = mem_cgroup_iter(root, iter, NULL))
224
225 #define for_each_mem_cgroup(iter)                       \
226         for (iter = mem_cgroup_iter(NULL, NULL, NULL);  \
227              iter != NULL;                              \
228              iter = mem_cgroup_iter(NULL, iter, NULL))
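
/*
 * Illustrative usage (not from this file): a walk that is cut short must
 * call mem_cgroup_iter_break() to drop the reference on the last returned
 * memcg; should_stop() stands in for any caller-specific condition:
 *
 *	for_each_mem_cgroup_tree(iter, root) {
 *		if (should_stop(iter)) {
 *			mem_cgroup_iter_break(root, iter);
 *			break;
 *		}
 *	}
 */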
229
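/*
 * A task is considered dying when it is an OOM victim, has a fatal signal
 * pending, or is already exiting; charge and OOM paths use this to avoid
 * piling more work on a task that is on its way out.
 */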
230 static inline bool task_is_dying(void)
231 {
232         return tsk_is_oom_victim(current) || fatal_signal_pending(current) ||
233                 (current->flags & PF_EXITING);
234 }
235
236 /* Some nice accessors for the vmpressure. */
237 struct vmpressure *memcg_to_vmpressure(struct mem_cgroup *memcg)
238 {
239         if (!memcg)
240                 memcg = root_mem_cgroup;
241         return &memcg->vmpressure;
242 }
243
244 struct mem_cgroup *vmpressure_to_memcg(struct vmpressure *vmpr)
245 {
246         return container_of(vmpr, struct mem_cgroup, vmpressure);
247 }
248
249 #ifdef CONFIG_MEMCG_KMEM
250 static DEFINE_SPINLOCK(objcg_lock);
251
252 bool mem_cgroup_kmem_disabled(void)
253 {
254         return cgroup_memory_nokmem;
255 }
256
257 static void obj_cgroup_uncharge_pages(struct obj_cgroup *objcg,
258                                       unsigned int nr_pages);
259
260 static void obj_cgroup_release(struct percpu_ref *ref)
261 {
262         struct obj_cgroup *objcg = container_of(ref, struct obj_cgroup, refcnt);
263         unsigned int nr_bytes;
264         unsigned int nr_pages;
265         unsigned long flags;
266
267         /*
268          * At this point all allocated objects are freed, and
269          * objcg->nr_charged_bytes can't have an arbitrary byte value.
270          * However, it can be PAGE_SIZE or (x * PAGE_SIZE).
271          *
272          * The following sequence can lead to it:
273          * 1) CPU0: objcg == stock->cached_objcg
274          * 2) CPU1: we do a small allocation (e.g. 92 bytes),
275          *          PAGE_SIZE bytes are charged
276          * 3) CPU1: a process from another memcg is allocating something,
277          *          the stock is flushed,
278          *          objcg->nr_charged_bytes = PAGE_SIZE - 92
279          * 4) CPU0: we release this object,
280          *          92 bytes are added to stock->nr_bytes
281          * 5) CPU0: the stock is flushed,
282          *          92 bytes are added to objcg->nr_charged_bytes
283          *
284          * As a result, nr_charged_bytes == PAGE_SIZE.
285          * This page is then uncharged below in obj_cgroup_release().
286          */
287         nr_bytes = atomic_read(&objcg->nr_charged_bytes);
288         WARN_ON_ONCE(nr_bytes & (PAGE_SIZE - 1));
289         nr_pages = nr_bytes >> PAGE_SHIFT;
290
291         if (nr_pages)
292                 obj_cgroup_uncharge_pages(objcg, nr_pages);
293
294         spin_lock_irqsave(&objcg_lock, flags);
295         list_del(&objcg->list);
296         spin_unlock_irqrestore(&objcg_lock, flags);
297
298         percpu_ref_exit(ref);
299         kfree_rcu(objcg, rcu);
300 }
301
302 static struct obj_cgroup *obj_cgroup_alloc(void)
303 {
304         struct obj_cgroup *objcg;
305         int ret;
306
307         objcg = kzalloc(sizeof(struct obj_cgroup), GFP_KERNEL);
308         if (!objcg)
309                 return NULL;
310
311         ret = percpu_ref_init(&objcg->refcnt, obj_cgroup_release, 0,
312                               GFP_KERNEL);
313         if (ret) {
314                 kfree(objcg);
315                 return NULL;
316         }
317         INIT_LIST_HEAD(&objcg->list);
318         return objcg;
319 }
320
321 static void memcg_reparent_objcgs(struct mem_cgroup *memcg,
322                                   struct mem_cgroup *parent)
323 {
324         struct obj_cgroup *objcg, *iter;
325
326         objcg = rcu_replace_pointer(memcg->objcg, NULL, true);
327
328         spin_lock_irq(&objcg_lock);
329
330         /* 1) Ready to reparent active objcg. */
331         list_add(&objcg->list, &memcg->objcg_list);
332         /* 2) Reparent active objcg and already reparented objcgs to parent. */
333         list_for_each_entry(iter, &memcg->objcg_list, list)
334                 WRITE_ONCE(iter->memcg, parent);
335         /* 3) Move already reparented objcgs to the parent's list */
336         list_splice(&memcg->objcg_list, &parent->objcg_list);
337
338         spin_unlock_irq(&objcg_lock);
339
340         percpu_ref_kill(&objcg->refcnt);
341 }
342
343 /*
344  * A lot of the calls to the cache allocation functions are expected to be
345  * inlined by the compiler. Since the calls to memcg_slab_pre_alloc_hook() are
346  * conditional to this static branch, we'll have to allow modules that do
347  * kmem_cache_alloc and the like to see this symbol as well.
348  */
349 DEFINE_STATIC_KEY_FALSE(memcg_kmem_enabled_key);
350 EXPORT_SYMBOL(memcg_kmem_enabled_key);
351 #endif
352
353 /**
354  * mem_cgroup_css_from_page - css of the memcg associated with a page
355  * @page: page of interest
356  *
357  * If memcg is bound to the default hierarchy, css of the memcg associated
358  * with @page is returned.  The returned css remains associated with @page
359  * until it is released.
360  *
361  * If memcg is bound to a traditional hierarchy, the css of root_mem_cgroup
362  * is returned.
363  */
364 struct cgroup_subsys_state *mem_cgroup_css_from_page(struct page *page)
365 {
366         struct mem_cgroup *memcg;
367
368         memcg = page_memcg(page);
369
370         if (!memcg || !cgroup_subsys_on_dfl(memory_cgrp_subsys))
371                 memcg = root_mem_cgroup;
372
373         return &memcg->css;
374 }
375
376 /**
377  * page_cgroup_ino - return inode number of the memcg a page is charged to
378  * @page: the page
379  *
380  * Look up the closest online ancestor of the memory cgroup @page is charged to
381  * and return its inode number or 0 if @page is not charged to any cgroup. It
382  * is safe to call this function without holding a reference to @page.
383  *
384  * Note, this function is inherently racy, because there is nothing to prevent
385  * the cgroup inode from getting torn down and potentially reallocated a moment
386  * after page_cgroup_ino() returns, so it only should be used by callers that
387  * do not care (such as procfs interfaces).
388  */
389 ino_t page_cgroup_ino(struct page *page)
390 {
391         struct mem_cgroup *memcg;
392         unsigned long ino = 0;
393
394         rcu_read_lock();
395         memcg = page_memcg_check(page);
396
397         while (memcg && !(memcg->css.flags & CSS_ONLINE))
398                 memcg = parent_mem_cgroup(memcg);
399         if (memcg)
400                 ino = cgroup_ino(memcg->css.cgroup);
401         rcu_read_unlock();
402         return ino;
403 }
404
405 static void __mem_cgroup_insert_exceeded(struct mem_cgroup_per_node *mz,
406                                          struct mem_cgroup_tree_per_node *mctz,
407                                          unsigned long new_usage_in_excess)
408 {
409         struct rb_node **p = &mctz->rb_root.rb_node;
410         struct rb_node *parent = NULL;
411         struct mem_cgroup_per_node *mz_node;
412         bool rightmost = true;
413
414         if (mz->on_tree)
415                 return;
416
417         mz->usage_in_excess = new_usage_in_excess;
418         if (!mz->usage_in_excess)
419                 return;
420         while (*p) {
421                 parent = *p;
422                 mz_node = rb_entry(parent, struct mem_cgroup_per_node,
423                                         tree_node);
424                 if (mz->usage_in_excess < mz_node->usage_in_excess) {
425                         p = &(*p)->rb_left;
426                         rightmost = false;
427                 } else {
428                         p = &(*p)->rb_right;
429                 }
430         }
431
432         if (rightmost)
433                 mctz->rb_rightmost = &mz->tree_node;
434
435         rb_link_node(&mz->tree_node, parent, p);
436         rb_insert_color(&mz->tree_node, &mctz->rb_root);
437         mz->on_tree = true;
438 }
439
440 static void __mem_cgroup_remove_exceeded(struct mem_cgroup_per_node *mz,
441                                          struct mem_cgroup_tree_per_node *mctz)
442 {
443         if (!mz->on_tree)
444                 return;
445
446         if (&mz->tree_node == mctz->rb_rightmost)
447                 mctz->rb_rightmost = rb_prev(&mz->tree_node);
448
449         rb_erase(&mz->tree_node, &mctz->rb_root);
450         mz->on_tree = false;
451 }
452
453 static void mem_cgroup_remove_exceeded(struct mem_cgroup_per_node *mz,
454                                        struct mem_cgroup_tree_per_node *mctz)
455 {
456         unsigned long flags;
457
458         spin_lock_irqsave(&mctz->lock, flags);
459         __mem_cgroup_remove_exceeded(mz, mctz);
460         spin_unlock_irqrestore(&mctz->lock, flags);
461 }
462
463 static unsigned long soft_limit_excess(struct mem_cgroup *memcg)
464 {
465         unsigned long nr_pages = page_counter_read(&memcg->memory);
466         unsigned long soft_limit = READ_ONCE(memcg->soft_limit);
467         unsigned long excess = 0;
468
469         if (nr_pages > soft_limit)
470                 excess = nr_pages - soft_limit;
471
472         return excess;
473 }
474
475 static void mem_cgroup_update_tree(struct mem_cgroup *memcg, int nid)
476 {
477         unsigned long excess;
478         struct mem_cgroup_per_node *mz;
479         struct mem_cgroup_tree_per_node *mctz;
480
481         mctz = soft_limit_tree.rb_tree_per_node[nid];
482         if (!mctz)
483                 return;
484         /*
485          * Necessary to update all ancestors when hierarchy is used,
486          * because their event counters are not touched.
487          */
488         for (; memcg; memcg = parent_mem_cgroup(memcg)) {
489                 mz = memcg->nodeinfo[nid];
490                 excess = soft_limit_excess(memcg);
491                 /*
492                  * We have to update the tree if mz is on the RB-tree or
493                  * the memcg is over its soft limit.
494                  */
495                 if (excess || mz->on_tree) {
496                         unsigned long flags;
497
498                         spin_lock_irqsave(&mctz->lock, flags);
499                         /* if on-tree, remove it */
500                         if (mz->on_tree)
501                                 __mem_cgroup_remove_exceeded(mz, mctz);
502                         /*
503                          * Insert again. mz->usage_in_excess will be updated.
504                          * If excess is 0, no tree ops.
505                          */
506                         __mem_cgroup_insert_exceeded(mz, mctz, excess);
507                         spin_unlock_irqrestore(&mctz->lock, flags);
508                 }
509         }
510 }
511
512 static void mem_cgroup_remove_from_trees(struct mem_cgroup *memcg)
513 {
514         struct mem_cgroup_tree_per_node *mctz;
515         struct mem_cgroup_per_node *mz;
516         int nid;
517
518         for_each_node(nid) {
519                 mz = memcg->nodeinfo[nid];
520                 mctz = soft_limit_tree.rb_tree_per_node[nid];
521                 if (mctz)
522                         mem_cgroup_remove_exceeded(mz, mctz);
523         }
524 }
525
526 static struct mem_cgroup_per_node *
527 __mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_node *mctz)
528 {
529         struct mem_cgroup_per_node *mz;
530
531 retry:
532         mz = NULL;
533         if (!mctz->rb_rightmost)
534                 goto done;              /* Nothing to reclaim from */
535
536         mz = rb_entry(mctz->rb_rightmost,
537                       struct mem_cgroup_per_node, tree_node);
538         /*
539          * Remove the node now but someone else can add it back,
540          * we will add it back at the end of reclaim to its correct
541          * position in the tree.
542          */
543         __mem_cgroup_remove_exceeded(mz, mctz);
544         if (!soft_limit_excess(mz->memcg) ||
545             !css_tryget(&mz->memcg->css))
546                 goto retry;
547 done:
548         return mz;
549 }
550
551 static struct mem_cgroup_per_node *
552 mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_node *mctz)
553 {
554         struct mem_cgroup_per_node *mz;
555
556         spin_lock_irq(&mctz->lock);
557         mz = __mem_cgroup_largest_soft_limit_node(mctz);
558         spin_unlock_irq(&mctz->lock);
559         return mz;
560 }
561
562 /*
563  * memcg and lruvec stats flushing
564  *
565  * Many codepaths leading to stats update or read are performance sensitive and
566  * adding stats flushing in such codepaths is not desirable. So, to optimize the
567  * flushing the kernel does:
568  *
569  * 1) Periodically and asynchronously flush the stats every 2 seconds to not let
570  *    rstat update tree grow unbounded.
571  *
572  * 2) Flush the stats synchronously on reader side only when there are more than
573  *    (MEMCG_CHARGE_BATCH * nr_cpus) update events. This optimization can let
574  *    the stats be out of sync by at most (MEMCG_CHARGE_BATCH * nr_cpus), but
575  *    only for 2 seconds due to (1).
576  */
577 static void flush_memcg_stats_dwork(struct work_struct *w);
578 static DECLARE_DEFERRABLE_WORK(stats_flush_dwork, flush_memcg_stats_dwork);
579 static DEFINE_SPINLOCK(stats_flush_lock);
580 static DEFINE_PER_CPU(unsigned int, stats_updates);
581 static atomic_t stats_flush_threshold = ATOMIC_INIT(0);
582 static u64 flush_next_time;
583
584 #define FLUSH_TIME (2UL*HZ)
585
586 /*
587  * Accessors to ensure that preemption is disabled on PREEMPT_RT, because an
588  * acquired spinlock_t does not imply it there. These functions are never used
589  * in hardirq context on PREEMPT_RT and therefore disabling preemption
590  * is sufficient.
591  */
592 static void memcg_stats_lock(void)
593 {
594         preempt_disable_nested();
595         VM_WARN_ON_IRQS_ENABLED();
596 }
597
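/*
 * Like memcg_stats_lock(), but without asserting that interrupts are
 * disabled; __mod_memcg_lruvec_state() does its own, more fine-grained
 * context checking.
 */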
598 static void __memcg_stats_lock(void)
599 {
600         preempt_disable_nested();
601 }
602
603 static void memcg_stats_unlock(void)
604 {
605         preempt_enable_nested();
606 }
607
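/*
 * Note a stats update of magnitude @val for @memcg: flag the cgroup as
 * having pending rstat updates on this CPU and, once enough per-cpu events
 * have accumulated, raise stats_flush_threshold so that readers know a
 * synchronous flush is worthwhile.
 */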
608 static inline void memcg_rstat_updated(struct mem_cgroup *memcg, int val)
609 {
610         unsigned int x;
611
612         cgroup_rstat_updated(memcg->css.cgroup, smp_processor_id());
613
614         x = __this_cpu_add_return(stats_updates, abs(val));
615         if (x > MEMCG_CHARGE_BATCH) {
616                 /*
617                  * If stats_flush_threshold already exceeds the threshold
618                  * (>num_online_cpus()), a stats flush will be triggered via
619                  * __mem_cgroup_flush_stats(). Increasing this var further
620                  * is redundant and simply adds overhead to the atomic update.
621                  */
622                 if (atomic_read(&stats_flush_threshold) <= num_online_cpus())
623                         atomic_add(x / MEMCG_CHARGE_BATCH, &stats_flush_threshold);
624                 __this_cpu_write(stats_updates, 0);
625         }
626 }
627
628 static void __mem_cgroup_flush_stats(void)
629 {
630         unsigned long flag;
631
632         if (!spin_trylock_irqsave(&stats_flush_lock, flag))
633                 return;
634
635         flush_next_time = jiffies_64 + 2*FLUSH_TIME;
636         cgroup_rstat_flush_irqsafe(root_mem_cgroup->css.cgroup);
637         atomic_set(&stats_flush_threshold, 0);
638         spin_unlock_irqrestore(&stats_flush_lock, flag);
639 }
640
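/*
 * Flush the stats only when enough per-cpu updates have accumulated
 * (roughly more than num_online_cpus() batches); see memcg_rstat_updated().
 */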
641 void mem_cgroup_flush_stats(void)
642 {
643         if (atomic_read(&stats_flush_threshold) > num_online_cpus())
644                 __mem_cgroup_flush_stats();
645 }
646
647 void mem_cgroup_flush_stats_delayed(void)
648 {
649         if (time_after64(jiffies_64, flush_next_time))
650                 mem_cgroup_flush_stats();
651 }
652
653 static void flush_memcg_stats_dwork(struct work_struct *w)
654 {
655         __mem_cgroup_flush_stats();
656         queue_delayed_work(system_unbound_wq, &stats_flush_dwork, FLUSH_TIME);
657 }
658
659 /* Subset of vm_event_item to report for memcg event stats */
660 static const unsigned int memcg_vm_event_stat[] = {
661         PGPGIN,
662         PGPGOUT,
663         PGSCAN_KSWAPD,
664         PGSCAN_DIRECT,
665         PGSCAN_KHUGEPAGED,
666         PGSTEAL_KSWAPD,
667         PGSTEAL_DIRECT,
668         PGSTEAL_KHUGEPAGED,
669         PGFAULT,
670         PGMAJFAULT,
671         PGREFILL,
672         PGACTIVATE,
673         PGDEACTIVATE,
674         PGLAZYFREE,
675         PGLAZYFREED,
676 #if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_ZSWAP)
677         ZSWPIN,
678         ZSWPOUT,
679 #endif
680 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
681         THP_FAULT_ALLOC,
682         THP_COLLAPSE_ALLOC,
683 #endif
684 };
685
686 #define NR_MEMCG_EVENTS ARRAY_SIZE(memcg_vm_event_stat)
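/*
 * Maps each vm_event_item to its slot in memcg_vm_event_stat, offset by one
 * so that the zero-initialized entries mean "not tracked per memcg";
 * memcg_events_index() undoes the offset and returns -1 for untracked items.
 */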
687 static int mem_cgroup_events_index[NR_VM_EVENT_ITEMS] __read_mostly;
688
689 static void init_memcg_events(void)
690 {
691         int i;
692
693         for (i = 0; i < NR_MEMCG_EVENTS; ++i)
694                 mem_cgroup_events_index[memcg_vm_event_stat[i]] = i + 1;
695 }
696
697 static inline int memcg_events_index(enum vm_event_item idx)
698 {
699         return mem_cgroup_events_index[idx] - 1;
700 }
701
702 struct memcg_vmstats_percpu {
703         /* Local (CPU and cgroup) page state & events */
704         long                    state[MEMCG_NR_STAT];
705         unsigned long           events[NR_MEMCG_EVENTS];
706
707         /* Delta calculation for lockless upward propagation */
708         long                    state_prev[MEMCG_NR_STAT];
709         unsigned long           events_prev[NR_MEMCG_EVENTS];
710
711         /* Cgroup1: threshold notifications & softlimit tree updates */
712         unsigned long           nr_page_events;
713         unsigned long           targets[MEM_CGROUP_NTARGETS];
714 };
715
716 struct memcg_vmstats {
717         /* Aggregated (CPU and subtree) page state & events */
718         long                    state[MEMCG_NR_STAT];
719         unsigned long           events[NR_MEMCG_EVENTS];
720
721         /* Pending child counts during tree propagation */
722         long                    state_pending[MEMCG_NR_STAT];
723         unsigned long           events_pending[NR_MEMCG_EVENTS];
724 };
725
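/*
 * Read an aggregated (cpu and subtree) page state counter; transient
 * negative values are reported as zero on SMP.
 */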
726 unsigned long memcg_page_state(struct mem_cgroup *memcg, int idx)
727 {
728         long x = READ_ONCE(memcg->vmstats->state[idx]);
729 #ifdef CONFIG_SMP
730         if (x < 0)
731                 x = 0;
732 #endif
733         return x;
734 }
735
736 /**
737  * __mod_memcg_state - update cgroup memory statistics
738  * @memcg: the memory cgroup
739  * @idx: the stat item - can be enum memcg_stat_item or enum node_stat_item
740  * @val: delta to add to the counter, can be negative
741  */
742 void __mod_memcg_state(struct mem_cgroup *memcg, int idx, int val)
743 {
744         if (mem_cgroup_disabled())
745                 return;
746
747         __this_cpu_add(memcg->vmstats_percpu->state[idx], val);
748         memcg_rstat_updated(memcg, val);
749 }
750
751 /* idx can be of type enum memcg_stat_item or node_stat_item. */
752 static unsigned long memcg_page_state_local(struct mem_cgroup *memcg, int idx)
753 {
754         long x = 0;
755         int cpu;
756
757         for_each_possible_cpu(cpu)
758                 x += per_cpu(memcg->vmstats_percpu->state[idx], cpu);
759 #ifdef CONFIG_SMP
760         if (x < 0)
761                 x = 0;
762 #endif
763         return x;
764 }
765
766 void __mod_memcg_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
767                               int val)
768 {
769         struct mem_cgroup_per_node *pn;
770         struct mem_cgroup *memcg;
771
772         pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
773         memcg = pn->memcg;
774
775         /*
776          * Callers from rmap rely on disabled preemption because they never
777          * update their counters from interrupt context. For these
778          * counters we check that the update is never performed from an
779          * interrupt context, while other callers need to have interrupts disabled.
780          */
781         __memcg_stats_lock();
782         if (IS_ENABLED(CONFIG_DEBUG_VM)) {
783                 switch (idx) {
784                 case NR_ANON_MAPPED:
785                 case NR_FILE_MAPPED:
786                 case NR_ANON_THPS:
787                 case NR_SHMEM_PMDMAPPED:
788                 case NR_FILE_PMDMAPPED:
789                         WARN_ON_ONCE(!in_task());
790                         break;
791                 default:
792                         VM_WARN_ON_IRQS_ENABLED();
793                 }
794         }
795
796         /* Update memcg */
797         __this_cpu_add(memcg->vmstats_percpu->state[idx], val);
798
799         /* Update lruvec */
800         __this_cpu_add(pn->lruvec_stats_percpu->state[idx], val);
801
802         memcg_rstat_updated(memcg, val);
803         memcg_stats_unlock();
804 }
805
806 /**
807  * __mod_lruvec_state - update lruvec memory statistics
808  * @lruvec: the lruvec
809  * @idx: the stat item
810  * @val: delta to add to the counter, can be negative
811  *
812  * The lruvec is the intersection of the NUMA node and a cgroup. This
813  * function updates all three counters that are affected by a
814  * change of state at this level: per-node, per-cgroup, per-lruvec.
815  */
816 void __mod_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
817                         int val)
818 {
819         /* Update node */
820         __mod_node_page_state(lruvec_pgdat(lruvec), idx, val);
821
822         /* Update memcg and lruvec */
823         if (!mem_cgroup_disabled())
824                 __mod_memcg_lruvec_state(lruvec, idx, val);
825 }
826
827 void __mod_lruvec_page_state(struct page *page, enum node_stat_item idx,
828                              int val)
829 {
830         struct page *head = compound_head(page); /* rmap on tail pages */
831         struct mem_cgroup *memcg;
832         pg_data_t *pgdat = page_pgdat(page);
833         struct lruvec *lruvec;
834
835         rcu_read_lock();
836         memcg = page_memcg(head);
837         /* Untracked pages have no memcg, no lruvec. Update only the node */
838         if (!memcg) {
839                 rcu_read_unlock();
840                 __mod_node_page_state(pgdat, idx, val);
841                 return;
842         }
843
844         lruvec = mem_cgroup_lruvec(memcg, pgdat);
845         __mod_lruvec_state(lruvec, idx, val);
846         rcu_read_unlock();
847 }
848 EXPORT_SYMBOL(__mod_lruvec_page_state);
849
850 void __mod_lruvec_kmem_state(void *p, enum node_stat_item idx, int val)
851 {
852         pg_data_t *pgdat = page_pgdat(virt_to_page(p));
853         struct mem_cgroup *memcg;
854         struct lruvec *lruvec;
855
856         rcu_read_lock();
857         memcg = mem_cgroup_from_slab_obj(p);
858
859         /*
860          * Untracked pages have no memcg, no lruvec. Update only the
861          * node. If we reparent the slab objects to the root memcg,
862          * when we free the slab object, we need to update the per-memcg
863          * vmstats to keep it correct for the root memcg.
864          */
865         if (!memcg) {
866                 __mod_node_page_state(pgdat, idx, val);
867         } else {
868                 lruvec = mem_cgroup_lruvec(memcg, pgdat);
869                 __mod_lruvec_state(lruvec, idx, val);
870         }
871         rcu_read_unlock();
872 }
873
874 /**
875  * __count_memcg_events - account VM events in a cgroup
876  * @memcg: the memory cgroup
877  * @idx: the event item
878  * @count: the number of events that occurred
879  */
880 void __count_memcg_events(struct mem_cgroup *memcg, enum vm_event_item idx,
881                           unsigned long count)
882 {
883         int index = memcg_events_index(idx);
884
885         if (mem_cgroup_disabled() || index < 0)
886                 return;
887
888         memcg_stats_lock();
889         __this_cpu_add(memcg->vmstats_percpu->events[index], count);
890         memcg_rstat_updated(memcg, count);
891         memcg_stats_unlock();
892 }
893
894 static unsigned long memcg_events(struct mem_cgroup *memcg, int event)
895 {
896         int index = memcg_events_index(event);
897
898         if (index < 0)
899                 return 0;
900         return READ_ONCE(memcg->vmstats->events[index]);
901 }
902
903 static unsigned long memcg_events_local(struct mem_cgroup *memcg, int event)
904 {
905         long x = 0;
906         int cpu;
907         int index = memcg_events_index(event);
908
909         if (index < 0)
910                 return 0;
911
912         for_each_possible_cpu(cpu)
913                 x += per_cpu(memcg->vmstats_percpu->events[index], cpu);
914         return x;
915 }
916
917 static void mem_cgroup_charge_statistics(struct mem_cgroup *memcg,
918                                          int nr_pages)
919 {
920         /* pagein of a big page is an event. So, ignore page size */
921         if (nr_pages > 0)
922                 __count_memcg_events(memcg, PGPGIN, 1);
923         else {
924                 __count_memcg_events(memcg, PGPGOUT, 1);
925                 nr_pages = -nr_pages; /* for event */
926         }
927
928         __this_cpu_add(memcg->vmstats_percpu->nr_page_events, nr_pages);
929 }
930
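/*
 * Returns true when at least THRESHOLDS_EVENTS_TARGET (or
 * SOFTLIMIT_EVENTS_TARGET) page events have elapsed since the last check
 * of @target, and arms the next check.
 */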
931 static bool mem_cgroup_event_ratelimit(struct mem_cgroup *memcg,
932                                        enum mem_cgroup_events_target target)
933 {
934         unsigned long val, next;
935
936         val = __this_cpu_read(memcg->vmstats_percpu->nr_page_events);
937         next = __this_cpu_read(memcg->vmstats_percpu->targets[target]);
938         /* from time_after() in jiffies.h */
939         if ((long)(next - val) < 0) {
940                 switch (target) {
941                 case MEM_CGROUP_TARGET_THRESH:
942                         next = val + THRESHOLDS_EVENTS_TARGET;
943                         break;
944                 case MEM_CGROUP_TARGET_SOFTLIMIT:
945                         next = val + SOFTLIMIT_EVENTS_TARGET;
946                         break;
947                 default:
948                         break;
949                 }
950                 __this_cpu_write(memcg->vmstats_percpu->targets[target], next);
951                 return true;
952         }
953         return false;
954 }
955
956 /*
957  * Check events in order.
958  *
959  */
960 static void memcg_check_events(struct mem_cgroup *memcg, int nid)
961 {
962         if (IS_ENABLED(CONFIG_PREEMPT_RT))
963                 return;
964
965         /* threshold event is triggered in finer grain than soft limit */
966         if (unlikely(mem_cgroup_event_ratelimit(memcg,
967                                                 MEM_CGROUP_TARGET_THRESH))) {
968                 bool do_softlimit;
969
970                 do_softlimit = mem_cgroup_event_ratelimit(memcg,
971                                                 MEM_CGROUP_TARGET_SOFTLIMIT);
972                 mem_cgroup_threshold(memcg);
973                 if (unlikely(do_softlimit))
974                         mem_cgroup_update_tree(memcg, nid);
975         }
976 }
977
978 struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p)
979 {
980         /*
981          * mm_update_next_owner() may clear mm->owner to NULL
982          * if it races with swapoff, page migration, etc.
983          * So this can be called with p == NULL.
984          */
985         if (unlikely(!p))
986                 return NULL;
987
988         return mem_cgroup_from_css(task_css(p, memory_cgrp_id));
989 }
990 EXPORT_SYMBOL(mem_cgroup_from_task);
991
992 static __always_inline struct mem_cgroup *active_memcg(void)
993 {
994         if (!in_task())
995                 return this_cpu_read(int_active_memcg);
996         else
997                 return current->active_memcg;
998 }
999
1000 /**
1001  * get_mem_cgroup_from_mm: Obtain a reference on a given mm_struct's memcg.
1002  * @mm: mm from which memcg should be extracted. It can be NULL.
1003  *
1004  * Obtain a reference on mm->memcg and return it if successful. If mm
1005  * is NULL, then the memcg is chosen as follows:
1006  * 1) The active memcg, if set.
1007  * 2) current->mm->memcg, if available
1008  * 3) root memcg
1009  * If mem_cgroup is disabled, NULL is returned.
1010  */
1011 struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm)
1012 {
1013         struct mem_cgroup *memcg;
1014
1015         if (mem_cgroup_disabled())
1016                 return NULL;
1017
1018         /*
1019          * Page cache insertions can happen without an
1020          * actual mm context, e.g. during disk probing
1021          * on boot, loopback IO, acct() writes etc.
1022          *
1023          * No need to css_get on root memcg as the reference
1024          * counting is disabled on the root level in the
1025          * cgroup core. See CSS_NO_REF.
1026          */
1027         if (unlikely(!mm)) {
1028                 memcg = active_memcg();
1029                 if (unlikely(memcg)) {
1030                         /* remote memcg must hold a ref */
1031                         css_get(&memcg->css);
1032                         return memcg;
1033                 }
1034                 mm = current->mm;
1035                 if (unlikely(!mm))
1036                         return root_mem_cgroup;
1037         }
1038
1039         rcu_read_lock();
1040         do {
1041                 memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
1042                 if (unlikely(!memcg))
1043                         memcg = root_mem_cgroup;
1044         } while (!css_tryget(&memcg->css));
1045         rcu_read_unlock();
1046         return memcg;
1047 }
1048 EXPORT_SYMBOL(get_mem_cgroup_from_mm);
1049
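/*
 * Kernel memory charging is bypassed when no memcg can reasonably be
 * charged: remote charging via active_memcg is always honoured, otherwise
 * charging is skipped in interrupt context, for kernel threads and for
 * tasks without an mm.
 */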
1050 static __always_inline bool memcg_kmem_bypass(void)
1051 {
1052         /* Allow remote memcg charging from any context. */
1053         if (unlikely(active_memcg()))
1054                 return false;
1055
1056         /* Memcg to charge can't be determined. */
1057         if (!in_task() || !current->mm || (current->flags & PF_KTHREAD))
1058                 return true;
1059
1060         return false;
1061 }
1062
1063 /**
1064  * mem_cgroup_iter - iterate over memory cgroup hierarchy
1065  * @root: hierarchy root
1066  * @prev: previously returned memcg, NULL on first invocation
1067  * @reclaim: cookie for shared reclaim walks, NULL for full walks
1068  *
1069  * Returns references to children of the hierarchy below @root, or
1070  * @root itself, or %NULL after a full round-trip.
1071  *
1072  * Caller must pass the return value in @prev on subsequent
1073  * invocations for reference counting, or use mem_cgroup_iter_break()
1074  * to cancel a hierarchy walk before the round-trip is complete.
1075  *
1076  * Reclaimers can specify a node in @reclaim to divide up the memcgs
1077  * in the hierarchy among all concurrent reclaimers operating on the
1078  * same node.
1079  */
1080 struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *root,
1081                                    struct mem_cgroup *prev,
1082                                    struct mem_cgroup_reclaim_cookie *reclaim)
1083 {
1084         struct mem_cgroup_reclaim_iter *iter;
1085         struct cgroup_subsys_state *css = NULL;
1086         struct mem_cgroup *memcg = NULL;
1087         struct mem_cgroup *pos = NULL;
1088
1089         if (mem_cgroup_disabled())
1090                 return NULL;
1091
1092         if (!root)
1093                 root = root_mem_cgroup;
1094
1095         rcu_read_lock();
1096
1097         if (reclaim) {
1098                 struct mem_cgroup_per_node *mz;
1099
1100                 mz = root->nodeinfo[reclaim->pgdat->node_id];
1101                 iter = &mz->iter;
1102
1103                 /*
1104                  * On start, join the current reclaim iteration cycle.
1105                  * Exit when a concurrent walker completes it.
1106                  */
1107                 if (!prev)
1108                         reclaim->generation = iter->generation;
1109                 else if (reclaim->generation != iter->generation)
1110                         goto out_unlock;
1111
1112                 while (1) {
1113                         pos = READ_ONCE(iter->position);
1114                         if (!pos || css_tryget(&pos->css))
1115                                 break;
1116                         /*
1117                          * css reference reached zero, so iter->position will
1118                          * be cleared by ->css_released. However, we should not
1119                          * rely on this happening soon, because ->css_released
1120                          * is called from a work queue, and by busy-waiting we
1121                          * might block it. So we clear iter->position right
1122                          * away.
1123                          */
1124                         (void)cmpxchg(&iter->position, pos, NULL);
1125                 }
1126         } else if (prev) {
1127                 pos = prev;
1128         }
1129
1130         if (pos)
1131                 css = &pos->css;
1132
1133         for (;;) {
1134                 css = css_next_descendant_pre(css, &root->css);
1135                 if (!css) {
1136                         /*
1137                          * Reclaimers share the hierarchy walk, and a
1138                          * new one might jump in right at the end of
1139                          * the hierarchy - make sure they see at least
1140                          * one group and restart from the beginning.
1141                          */
1142                         if (!prev)
1143                                 continue;
1144                         break;
1145                 }
1146
1147                 /*
1148                  * Verify the css and acquire a reference.  The root
1149                  * is provided by the caller, so we know it's alive
1150                  * and kicking, and don't take an extra reference.
1151                  */
1152                 if (css == &root->css || css_tryget(css)) {
1153                         memcg = mem_cgroup_from_css(css);
1154                         break;
1155                 }
1156         }
1157
1158         if (reclaim) {
1159                 /*
1160                  * The position could have already been updated by a competing
1161                  * thread, so check that the value hasn't changed since we read
1162                  * it to avoid reclaiming from the same cgroup twice.
1163                  */
1164                 (void)cmpxchg(&iter->position, pos, memcg);
1165
1166                 if (pos)
1167                         css_put(&pos->css);
1168
1169                 if (!memcg)
1170                         iter->generation++;
1171         }
1172
1173 out_unlock:
1174         rcu_read_unlock();
1175         if (prev && prev != root)
1176                 css_put(&prev->css);
1177
1178         return memcg;
1179 }
1180
1181 /**
1182  * mem_cgroup_iter_break - abort a hierarchy walk prematurely
1183  * @root: hierarchy root
1184  * @prev: last visited hierarchy member as returned by mem_cgroup_iter()
1185  */
1186 void mem_cgroup_iter_break(struct mem_cgroup *root,
1187                            struct mem_cgroup *prev)
1188 {
1189         if (!root)
1190                 root = root_mem_cgroup;
1191         if (prev && prev != root)
1192                 css_put(&prev->css);
1193 }
1194
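/*
 * Clear @dead_memcg from the cached per-node reclaim iterator positions of
 * @from, so that mem_cgroup_iter() never hands out a pointer to a memcg
 * that is about to be freed.
 */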
1195 static void __invalidate_reclaim_iterators(struct mem_cgroup *from,
1196                                         struct mem_cgroup *dead_memcg)
1197 {
1198         struct mem_cgroup_reclaim_iter *iter;
1199         struct mem_cgroup_per_node *mz;
1200         int nid;
1201
1202         for_each_node(nid) {
1203                 mz = from->nodeinfo[nid];
1204                 iter = &mz->iter;
1205                 cmpxchg(&iter->position, dead_memcg, NULL);
1206         }
1207 }
1208
1209 static void invalidate_reclaim_iterators(struct mem_cgroup *dead_memcg)
1210 {
1211         struct mem_cgroup *memcg = dead_memcg;
1212         struct mem_cgroup *last;
1213
1214         do {
1215                 __invalidate_reclaim_iterators(memcg, dead_memcg);
1216                 last = memcg;
1217         } while ((memcg = parent_mem_cgroup(memcg)));
1218
1219         /*
1220          * When cgroup1 non-hierarchy mode is used,
1221          * parent_mem_cgroup() does not walk all the way up to the
1222          * cgroup root (root_mem_cgroup). So we have to handle
1223          * dead_memcg from cgroup root separately.
1224          */
1225         if (!mem_cgroup_is_root(last))
1226                 __invalidate_reclaim_iterators(root_mem_cgroup,
1227                                                 dead_memcg);
1228 }
1229
1230 /**
1231  * mem_cgroup_scan_tasks - iterate over tasks of a memory cgroup hierarchy
1232  * @memcg: hierarchy root
1233  * @fn: function to call for each task
1234  * @arg: argument passed to @fn
1235  *
1236  * This function iterates over tasks attached to @memcg or to any of its
1237  * descendants and calls @fn for each task. If @fn returns a non-zero
1238  * value, the function breaks the iteration loop and returns the value.
1239  * Otherwise, it will iterate over all tasks and return 0.
1240  *
1241  * This function must not be called for the root memory cgroup.
1242  */
1243 int mem_cgroup_scan_tasks(struct mem_cgroup *memcg,
1244                           int (*fn)(struct task_struct *, void *), void *arg)
1245 {
1246         struct mem_cgroup *iter;
1247         int ret = 0;
1248
1249         BUG_ON(mem_cgroup_is_root(memcg));
1250
1251         for_each_mem_cgroup_tree(iter, memcg) {
1252                 struct css_task_iter it;
1253                 struct task_struct *task;
1254
1255                 css_task_iter_start(&iter->css, CSS_TASK_ITER_PROCS, &it);
1256                 while (!ret && (task = css_task_iter_next(&it)))
1257                         ret = fn(task, arg);
1258                 css_task_iter_end(&it);
1259                 if (ret) {
1260                         mem_cgroup_iter_break(memcg, iter);
1261                         break;
1262                 }
1263         }
1264         return ret;
1265 }
1266
1267 #ifdef CONFIG_DEBUG_VM
1268 void lruvec_memcg_debug(struct lruvec *lruvec, struct folio *folio)
1269 {
1270         struct mem_cgroup *memcg;
1271
1272         if (mem_cgroup_disabled())
1273                 return;
1274
1275         memcg = folio_memcg(folio);
1276
1277         if (!memcg)
1278                 VM_BUG_ON_FOLIO(!mem_cgroup_is_root(lruvec_memcg(lruvec)), folio);
1279         else
1280                 VM_BUG_ON_FOLIO(lruvec_memcg(lruvec) != memcg, folio);
1281 }
1282 #endif
1283
1284 /**
1285  * folio_lruvec_lock - Lock the lruvec for a folio.
1286  * @folio: Pointer to the folio.
1287  *
1288  * These functions are safe to use under any of the following conditions:
1289  * - folio locked
1290  * - folio_test_lru false
1291  * - folio_memcg_lock()
1292  * - folio frozen (refcount of 0)
1293  *
1294  * Return: The lruvec this folio is on with its lock held.
1295  */
1296 struct lruvec *folio_lruvec_lock(struct folio *folio)
1297 {
1298         struct lruvec *lruvec = folio_lruvec(folio);
1299
1300         spin_lock(&lruvec->lru_lock);
1301         lruvec_memcg_debug(lruvec, folio);
1302
1303         return lruvec;
1304 }
1305
1306 /**
1307  * folio_lruvec_lock_irq - Lock the lruvec for a folio.
1308  * @folio: Pointer to the folio.
1309  *
1310  * These functions are safe to use under any of the following conditions:
1311  * - folio locked
1312  * - folio_test_lru false
1313  * - folio_memcg_lock()
1314  * - folio frozen (refcount of 0)
1315  *
1316  * Return: The lruvec this folio is on with its lock held and interrupts
1317  * disabled.
1318  */
1319 struct lruvec *folio_lruvec_lock_irq(struct folio *folio)
1320 {
1321         struct lruvec *lruvec = folio_lruvec(folio);
1322
1323         spin_lock_irq(&lruvec->lru_lock);
1324         lruvec_memcg_debug(lruvec, folio);
1325
1326         return lruvec;
1327 }
1328
1329 /**
1330  * folio_lruvec_lock_irqsave - Lock the lruvec for a folio.
1331  * @folio: Pointer to the folio.
1332  * @flags: Pointer to irqsave flags.
1333  *
1334  * These functions are safe to use under any of the following conditions:
1335  * - folio locked
1336  * - folio_test_lru false
1337  * - folio_memcg_lock()
1338  * - folio frozen (refcount of 0)
1339  *
1340  * Return: The lruvec this folio is on with its lock held and interrupts
1341  * disabled.
1342  */
1343 struct lruvec *folio_lruvec_lock_irqsave(struct folio *folio,
1344                 unsigned long *flags)
1345 {
1346         struct lruvec *lruvec = folio_lruvec(folio);
1347
1348         spin_lock_irqsave(&lruvec->lru_lock, *flags);
1349         lruvec_memcg_debug(lruvec, folio);
1350
1351         return lruvec;
1352 }
1353
1354 /**
1355  * mem_cgroup_update_lru_size - account for adding or removing an lru page
1356  * @lruvec: mem_cgroup per zone lru vector
1357  * @lru: index of lru list the page is sitting on
1358  * @zid: zone id of the accounted pages
1359  * @nr_pages: positive when adding or negative when removing
1360  *
1361  * This function must be called under lru_lock, just before a page is added
1362  * to or just after a page is removed from an lru list.
1363  */
1364 void mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru,
1365                                 int zid, int nr_pages)
1366 {
1367         struct mem_cgroup_per_node *mz;
1368         unsigned long *lru_size;
1369         long size;
1370
1371         if (mem_cgroup_disabled())
1372                 return;
1373
1374         mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
1375         lru_size = &mz->lru_zone_size[zid][lru];
1376
1377         if (nr_pages < 0)
1378                 *lru_size += nr_pages;
1379
1380         size = *lru_size;
1381         if (WARN_ONCE(size < 0,
1382                 "%s(%p, %d, %d): lru_size %ld\n",
1383                 __func__, lruvec, lru, nr_pages, size)) {
1384                 VM_BUG_ON(1);
1385                 *lru_size = 0;
1386         }
1387
1388         if (nr_pages > 0)
1389                 *lru_size += nr_pages;
1390 }
1391
1392 /**
1393  * mem_cgroup_margin - calculate chargeable space of a memory cgroup
1394  * @memcg: the memory cgroup
1395  *
1396  * Returns the maximum amount of memory @memcg can be charged with, in
1397  * pages.
1398  */
1399 static unsigned long mem_cgroup_margin(struct mem_cgroup *memcg)
1400 {
1401         unsigned long margin = 0;
1402         unsigned long count;
1403         unsigned long limit;
1404
1405         count = page_counter_read(&memcg->memory);
1406         limit = READ_ONCE(memcg->memory.max);
1407         if (count < limit)
1408                 margin = limit - count;
1409
1410         if (do_memsw_account()) {
1411                 count = page_counter_read(&memcg->memsw);
1412                 limit = READ_ONCE(memcg->memsw.max);
1413                 if (count < limit)
1414                         margin = min(margin, limit - count);
1415                 else
1416                         margin = 0;
1417         }
1418
1419         return margin;
1420 }
1421
1422 /*
1423  * A routine for checking whether "mem" is under move_account() or not.
1424  *
1425  * Checks whether a cgroup is mc.from, mc.to, or in the hierarchy of the
1426  * moving cgroups. This is used for waiting at high memory pressure
1427  * caused by "move".
1428  */
1429 static bool mem_cgroup_under_move(struct mem_cgroup *memcg)
1430 {
1431         struct mem_cgroup *from;
1432         struct mem_cgroup *to;
1433         bool ret = false;
1434         /*
1435          * Unlike the task_move routines, we access mc.to and mc.from without
1436          * the mutual exclusion of cgroup_mutex. Here, we take the spinlock instead.
1437          */
1438         spin_lock(&mc.lock);
1439         from = mc.from;
1440         to = mc.to;
1441         if (!from)
1442                 goto unlock;
1443
1444         ret = mem_cgroup_is_descendant(from, memcg) ||
1445                 mem_cgroup_is_descendant(to, memcg);
1446 unlock:
1447         spin_unlock(&mc.lock);
1448         return ret;
1449 }
1450
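/*
 * If a charge move is in flight and @memcg is involved in it, wait for the
 * move (or a wakeup) and return true so the caller knows to retry.
 */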
1451 static bool mem_cgroup_wait_acct_move(struct mem_cgroup *memcg)
1452 {
1453         if (mc.moving_task && current != mc.moving_task) {
1454                 if (mem_cgroup_under_move(memcg)) {
1455                         DEFINE_WAIT(wait);
1456                         prepare_to_wait(&mc.waitq, &wait, TASK_INTERRUPTIBLE);
1457                         /* moving charge context might have finished. */
1458                         if (mc.moving_task)
1459                                 schedule();
1460                         finish_wait(&mc.waitq, &wait);
1461                         return true;
1462                 }
1463         }
1464         return false;
1465 }
1466
1467 struct memory_stat {
1468         const char *name;
1469         unsigned int idx;
1470 };
1471
1472 static const struct memory_stat memory_stats[] = {
1473         { "anon",                       NR_ANON_MAPPED                  },
1474         { "file",                       NR_FILE_PAGES                   },
1475         { "kernel",                     MEMCG_KMEM                      },
1476         { "kernel_stack",               NR_KERNEL_STACK_KB              },
1477         { "pagetables",                 NR_PAGETABLE                    },
1478         { "sec_pagetables",             NR_SECONDARY_PAGETABLE          },
1479         { "percpu",                     MEMCG_PERCPU_B                  },
1480         { "sock",                       MEMCG_SOCK                      },
1481         { "vmalloc",                    MEMCG_VMALLOC                   },
1482         { "shmem",                      NR_SHMEM                        },
1483 #if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_ZSWAP)
1484         { "zswap",                      MEMCG_ZSWAP_B                   },
1485         { "zswapped",                   MEMCG_ZSWAPPED                  },
1486 #endif
1487         { "file_mapped",                NR_FILE_MAPPED                  },
1488         { "file_dirty",                 NR_FILE_DIRTY                   },
1489         { "file_writeback",             NR_WRITEBACK                    },
1490 #ifdef CONFIG_SWAP
1491         { "swapcached",                 NR_SWAPCACHE                    },
1492 #endif
1493 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
1494         { "anon_thp",                   NR_ANON_THPS                    },
1495         { "file_thp",                   NR_FILE_THPS                    },
1496         { "shmem_thp",                  NR_SHMEM_THPS                   },
1497 #endif
1498         { "inactive_anon",              NR_INACTIVE_ANON                },
1499         { "active_anon",                NR_ACTIVE_ANON                  },
1500         { "inactive_file",              NR_INACTIVE_FILE                },
1501         { "active_file",                NR_ACTIVE_FILE                  },
1502         { "unevictable",                NR_UNEVICTABLE                  },
1503         { "slab_reclaimable",           NR_SLAB_RECLAIMABLE_B           },
1504         { "slab_unreclaimable",         NR_SLAB_UNRECLAIMABLE_B         },
1505
1506         /* The memory events */
1507         { "workingset_refault_anon",    WORKINGSET_REFAULT_ANON         },
1508         { "workingset_refault_file",    WORKINGSET_REFAULT_FILE         },
1509         { "workingset_activate_anon",   WORKINGSET_ACTIVATE_ANON        },
1510         { "workingset_activate_file",   WORKINGSET_ACTIVATE_FILE        },
1511         { "workingset_restore_anon",    WORKINGSET_RESTORE_ANON         },
1512         { "workingset_restore_file",    WORKINGSET_RESTORE_FILE         },
1513         { "workingset_nodereclaim",     WORKINGSET_NODERECLAIM          },
1514 };
1515
1516 /* Translate stat items to the correct unit for memory.stat output */
1517 static int memcg_page_state_unit(int item)
1518 {
1519         switch (item) {
1520         case MEMCG_PERCPU_B:
1521         case MEMCG_ZSWAP_B:
1522         case NR_SLAB_RECLAIMABLE_B:
1523         case NR_SLAB_UNRECLAIMABLE_B:
1524         case WORKINGSET_REFAULT_ANON:
1525         case WORKINGSET_REFAULT_FILE:
1526         case WORKINGSET_ACTIVATE_ANON:
1527         case WORKINGSET_ACTIVATE_FILE:
1528         case WORKINGSET_RESTORE_ANON:
1529         case WORKINGSET_RESTORE_FILE:
1530         case WORKINGSET_NODERECLAIM:
1531                 return 1;
1532         case NR_KERNEL_STACK_KB:
1533                 return SZ_1K;
1534         default:
1535                 return PAGE_SIZE;
1536         }
1537 }
1538
1539 static inline unsigned long memcg_page_state_output(struct mem_cgroup *memcg,
1540                                                     int item)
1541 {
1542         return memcg_page_state(memcg, item) * memcg_page_state_unit(item);
1543 }
1544
1545 static void memory_stat_format(struct mem_cgroup *memcg, char *buf, int bufsize)
1546 {
1547         struct seq_buf s;
1548         int i;
1549
1550         seq_buf_init(&s, buf, bufsize);
1551
1552         /*
1553          * Provide statistics on the state of the memory subsystem as
1554          * well as cumulative event counters that show past behavior.
1555          *
1556          * This list is ordered following a combination of these gradients:
1557          * 1) generic big picture -> specifics and details
1558          * 2) reflecting userspace activity -> reflecting kernel heuristics
1559          *
1560          * Current memory state:
1561          */
1562         mem_cgroup_flush_stats();
1563
1564         for (i = 0; i < ARRAY_SIZE(memory_stats); i++) {
1565                 u64 size;
1566
1567                 size = memcg_page_state_output(memcg, memory_stats[i].idx);
1568                 seq_buf_printf(&s, "%s %llu\n", memory_stats[i].name, size);
1569
1570                 if (unlikely(memory_stats[i].idx == NR_SLAB_UNRECLAIMABLE_B)) {
1571                         size += memcg_page_state_output(memcg,
1572                                                         NR_SLAB_RECLAIMABLE_B);
1573                         seq_buf_printf(&s, "slab %llu\n", size);
1574                 }
1575         }
1576
1577         /* Accumulated memory events */
1578         seq_buf_printf(&s, "pgscan %lu\n",
1579                        memcg_events(memcg, PGSCAN_KSWAPD) +
1580                        memcg_events(memcg, PGSCAN_DIRECT) +
1581                        memcg_events(memcg, PGSCAN_KHUGEPAGED));
1582         seq_buf_printf(&s, "pgsteal %lu\n",
1583                        memcg_events(memcg, PGSTEAL_KSWAPD) +
1584                        memcg_events(memcg, PGSTEAL_DIRECT) +
1585                        memcg_events(memcg, PGSTEAL_KHUGEPAGED));
1586
1587         for (i = 0; i < ARRAY_SIZE(memcg_vm_event_stat); i++) {
1588                 if (memcg_vm_event_stat[i] == PGPGIN ||
1589                     memcg_vm_event_stat[i] == PGPGOUT)
1590                         continue;
1591
1592                 seq_buf_printf(&s, "%s %lu\n",
1593                                vm_event_name(memcg_vm_event_stat[i]),
1594                                memcg_events(memcg, memcg_vm_event_stat[i]));
1595         }
1596
1597         /* The above should easily fit into one page */
1598         WARN_ON_ONCE(seq_buf_has_overflowed(&s));
1599 }
1600
1601 #define K(x) ((x) << (PAGE_SHIFT-10))
1602 /**
1603  * mem_cgroup_print_oom_context: Print OOM information relevant to
1604  * the memory controller.
1605  * @memcg: The memory cgroup that went over limit
1606  * @p: Task that is going to be killed
1607  *
1608  * NOTE: @memcg and @p's mem_cgroup can be different when hierarchy is
1609  * enabled
1610  */
1611 void mem_cgroup_print_oom_context(struct mem_cgroup *memcg, struct task_struct *p)
1612 {
1613         rcu_read_lock();
1614
1615         if (memcg) {
1616                 pr_cont(",oom_memcg=");
1617                 pr_cont_cgroup_path(memcg->css.cgroup);
1618         } else
1619                 pr_cont(",global_oom");
1620         if (p) {
1621                 pr_cont(",task_memcg=");
1622                 pr_cont_cgroup_path(task_cgroup(p, memory_cgrp_id));
1623         }
1624         rcu_read_unlock();
1625 }
1626
1627 /**
1628  * mem_cgroup_print_oom_meminfo: Print OOM memory information relevant to
1629  * the memory controller.
1630  * @memcg: The memory cgroup that went over limit
1631  */
1632 void mem_cgroup_print_oom_meminfo(struct mem_cgroup *memcg)
1633 {
1634         /* Use a static buffer, since the caller is holding oom_lock. */
1635         static char buf[PAGE_SIZE];
1636
1637         lockdep_assert_held(&oom_lock);
1638
1639         pr_info("memory: usage %llukB, limit %llukB, failcnt %lu\n",
1640                 K((u64)page_counter_read(&memcg->memory)),
1641                 K((u64)READ_ONCE(memcg->memory.max)), memcg->memory.failcnt);
1642         if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
1643                 pr_info("swap: usage %llukB, limit %llukB, failcnt %lu\n",
1644                         K((u64)page_counter_read(&memcg->swap)),
1645                         K((u64)READ_ONCE(memcg->swap.max)), memcg->swap.failcnt);
1646         else {
1647                 pr_info("memory+swap: usage %llukB, limit %llukB, failcnt %lu\n",
1648                         K((u64)page_counter_read(&memcg->memsw)),
1649                         K((u64)memcg->memsw.max), memcg->memsw.failcnt);
1650                 pr_info("kmem: usage %llukB, limit %llukB, failcnt %lu\n",
1651                         K((u64)page_counter_read(&memcg->kmem)),
1652                         K((u64)memcg->kmem.max), memcg->kmem.failcnt);
1653         }
1654
1655         pr_info("Memory cgroup stats for ");
1656         pr_cont_cgroup_path(memcg->css.cgroup);
1657         pr_cont(":");
1658         memory_stat_format(memcg, buf, sizeof(buf));
1659         pr_info("%s", buf);
1660 }
1661
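     /*
      * With values purely for illustration, the resulting dump on cgroup2
      * looks roughly like:
      *
      *   memory: usage 1048576kB, limit 1048576kB, failcnt 42
      *   swap: usage 0kB, limit 2097152kB, failcnt 0
      *   Memory cgroup stats for /workload:
      *   ...
      *   shmem 0
      *   file_mapped 4096
      *   file_dirty 0
      *   ...
      *
      * On cgroup1 the memory+swap and kmem counters are printed instead of
      * the separate swap counter, as per the branches above.
      */
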
1662 /*
1663  * Return the memory (and swap, if configured) limit for a memcg.
1664  */
1665 unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg)
1666 {
1667         unsigned long max = READ_ONCE(memcg->memory.max);
1668
1669         if (do_memsw_account()) {
1670                 if (mem_cgroup_swappiness(memcg)) {
1671                         /* Calculate swap excess capacity from memsw limit */
1672                         unsigned long swap = READ_ONCE(memcg->memsw.max) - max;
1673
1674                         max += min(swap, (unsigned long)total_swap_pages);
1675                 }
1676         } else {
1677                 if (mem_cgroup_swappiness(memcg))
1678                         max += min(READ_ONCE(memcg->swap.max),
1679                                    (unsigned long)total_swap_pages);
1680         }
1681         return max;
1682 }
1683
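     /*
      * For example, on cgroup1 with a 1G memory limit, a 1.5G memsw limit
      * and non-zero swappiness, the value returned above corresponds to
      * 1G plus min(0.5G, total swap).  On cgroup2 the swap limit is a
      * separate counter, so it is memory.max plus min(swap.max, total swap).
      * With swappiness set to zero only the memory limit is counted.
      */
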
1684 unsigned long mem_cgroup_size(struct mem_cgroup *memcg)
1685 {
1686         return page_counter_read(&memcg->memory);
1687 }
1688
1689 static bool mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask,
1690                                      int order)
1691 {
1692         struct oom_control oc = {
1693                 .zonelist = NULL,
1694                 .nodemask = NULL,
1695                 .memcg = memcg,
1696                 .gfp_mask = gfp_mask,
1697                 .order = order,
1698         };
1699         bool ret = true;
1700
1701         if (mutex_lock_killable(&oom_lock))
1702                 return true;
1703
1704         if (mem_cgroup_margin(memcg) >= (1 << order))
1705                 goto unlock;
1706
1707         /*
1708          * A few threads which were not waiting at mutex_lock_killable() can
1709          * fail to bail out. Therefore, check again after holding oom_lock.
1710          */
1711         ret = task_is_dying() || out_of_memory(&oc);
1712
1713 unlock:
1714         mutex_unlock(&oom_lock);
1715         return ret;
1716 }
1717
1718 static int mem_cgroup_soft_reclaim(struct mem_cgroup *root_memcg,
1719                                    pg_data_t *pgdat,
1720                                    gfp_t gfp_mask,
1721                                    unsigned long *total_scanned)
1722 {
1723         struct mem_cgroup *victim = NULL;
1724         int total = 0;
1725         int loop = 0;
1726         unsigned long excess;
1727         unsigned long nr_scanned;
1728         struct mem_cgroup_reclaim_cookie reclaim = {
1729                 .pgdat = pgdat,
1730         };
1731
1732         excess = soft_limit_excess(root_memcg);
1733
1734         while (1) {
1735                 victim = mem_cgroup_iter(root_memcg, victim, &reclaim);
1736                 if (!victim) {
1737                         loop++;
1738                         if (loop >= 2) {
1739                                 /*
1740                                  * If we have not been able to reclaim
1741                                  * anything, it might be because there are
1742                                  * no reclaimable pages under this hierarchy.
1743                                  */
1744                                 if (!total)
1745                                         break;
1746                                 /*
1747                                  * We want to do more targeted reclaim.
1748                                  * excess >> 2 is not so excessive that we
1749                                  * reclaim too much, nor so little that we keep
1750                                  * coming back to reclaim from this cgroup.
1751                                  */
1752                                 if (total >= (excess >> 2) ||
1753                                         (loop > MEM_CGROUP_MAX_RECLAIM_LOOPS))
1754                                         break;
1755                         }
1756                         continue;
1757                 }
1758                 total += mem_cgroup_shrink_node(victim, gfp_mask, false,
1759                                         pgdat, &nr_scanned);
1760                 *total_scanned += nr_scanned;
1761                 if (!soft_limit_excess(root_memcg))
1762                         break;
1763         }
1764         mem_cgroup_iter_break(root_memcg, victim);
1765         return total;
1766 }
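
     /*
      * In short: iterate the subtree below root_memcg round-robin (the
      * reclaim cookie remembers the position per node), shrink each child
      * in turn, and stop once the root is back under its soft limit, once
      * roughly a quarter of the excess has been reclaimed after a couple of
      * full passes, or once there is clearly nothing left to reclaim.
      */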
1767
1768 #ifdef CONFIG_LOCKDEP
1769 static struct lockdep_map memcg_oom_lock_dep_map = {
1770         .name = "memcg_oom_lock",
1771 };
1772 #endif
1773
1774 static DEFINE_SPINLOCK(memcg_oom_lock);
1775
1776 /*
1777  * Check whether the OOM killer is already running under our hierarchy.
1778  * If it is, return false.
1779  */
1780 static bool mem_cgroup_oom_trylock(struct mem_cgroup *memcg)
1781 {
1782         struct mem_cgroup *iter, *failed = NULL;
1783
1784         spin_lock(&memcg_oom_lock);
1785
1786         for_each_mem_cgroup_tree(iter, memcg) {
1787                 if (iter->oom_lock) {
1788                         /*
1789                          * this subtree of our hierarchy is already locked,
1790                          * so we cannot grant the lock.
1791                          */
1792                         failed = iter;
1793                         mem_cgroup_iter_break(memcg, iter);
1794                         break;
1795                 } else
1796                         iter->oom_lock = true;
1797         }
1798
1799         if (failed) {
1800                 /*
1801                  * OK, we failed to lock the whole subtree so we have
1802                  * to clean up what we set up, up to the failing subtree.
1803                  */
1804                 for_each_mem_cgroup_tree(iter, memcg) {
1805                         if (iter == failed) {
1806                                 mem_cgroup_iter_break(memcg, iter);
1807                                 break;
1808                         }
1809                         iter->oom_lock = false;
1810                 }
1811         } else
1812                 mutex_acquire(&memcg_oom_lock_dep_map, 0, 1, _RET_IP_);
1813
1814         spin_unlock(&memcg_oom_lock);
1815
1816         return !failed;
1817 }
1818
1819 static void mem_cgroup_oom_unlock(struct mem_cgroup *memcg)
1820 {
1821         struct mem_cgroup *iter;
1822
1823         spin_lock(&memcg_oom_lock);
1824         mutex_release(&memcg_oom_lock_dep_map, _RET_IP_);
1825         for_each_mem_cgroup_tree(iter, memcg)
1826                 iter->oom_lock = false;
1827         spin_unlock(&memcg_oom_lock);
1828 }
1829
1830 static void mem_cgroup_mark_under_oom(struct mem_cgroup *memcg)
1831 {
1832         struct mem_cgroup *iter;
1833
1834         spin_lock(&memcg_oom_lock);
1835         for_each_mem_cgroup_tree(iter, memcg)
1836                 iter->under_oom++;
1837         spin_unlock(&memcg_oom_lock);
1838 }
1839
1840 static void mem_cgroup_unmark_under_oom(struct mem_cgroup *memcg)
1841 {
1842         struct mem_cgroup *iter;
1843
1844         /*
1845          * Be careful about under_oom underflows because a child memcg
1846          * could have been added after mem_cgroup_mark_under_oom.
1847          */
1848         spin_lock(&memcg_oom_lock);
1849         for_each_mem_cgroup_tree(iter, memcg)
1850                 if (iter->under_oom > 0)
1851                         iter->under_oom--;
1852         spin_unlock(&memcg_oom_lock);
1853 }
1854
1855 static DECLARE_WAIT_QUEUE_HEAD(memcg_oom_waitq);
1856
1857 struct oom_wait_info {
1858         struct mem_cgroup *memcg;
1859         wait_queue_entry_t      wait;
1860 };
1861
1862 static int memcg_oom_wake_function(wait_queue_entry_t *wait,
1863         unsigned mode, int sync, void *arg)
1864 {
1865         struct mem_cgroup *wake_memcg = (struct mem_cgroup *)arg;
1866         struct mem_cgroup *oom_wait_memcg;
1867         struct oom_wait_info *oom_wait_info;
1868
1869         oom_wait_info = container_of(wait, struct oom_wait_info, wait);
1870         oom_wait_memcg = oom_wait_info->memcg;
1871
1872         if (!mem_cgroup_is_descendant(wake_memcg, oom_wait_memcg) &&
1873             !mem_cgroup_is_descendant(oom_wait_memcg, wake_memcg))
1874                 return 0;
1875         return autoremove_wake_function(wait, mode, sync, arg);
1876 }
1877
1878 static void memcg_oom_recover(struct mem_cgroup *memcg)
1879 {
1880         /*
1881          * For the following lockless ->under_oom test, the only required
1882          * guarantee is that it must see the state asserted by an OOM when
1883          * this function is called as a result of userland actions
1884          * triggered by the notification of the OOM.  This is trivially
1885          * achieved by invoking mem_cgroup_mark_under_oom() before
1886          * triggering notification.
1887          */
1888         if (memcg && memcg->under_oom)
1889                 __wake_up(&memcg_oom_waitq, TASK_NORMAL, 0, memcg);
1890 }
1891
1892 /*
1893  * Returns true if successfully killed one or more processes. Though in some
1894  * corner cases it can return true even without killing any process.
1895  */
1896 static bool mem_cgroup_oom(struct mem_cgroup *memcg, gfp_t mask, int order)
1897 {
1898         bool locked, ret;
1899
1900         if (order > PAGE_ALLOC_COSTLY_ORDER)
1901                 return false;
1902
1903         memcg_memory_event(memcg, MEMCG_OOM);
1904
1905         /*
1906          * We are in the middle of the charge context here, so we
1907          * don't want to block when potentially sitting on a callstack
1908          * that holds all kinds of filesystem and mm locks.
1909          *
1910          * cgroup1 allows disabling the OOM killer and waiting for outside
1911          * handling until the charge can succeed; remember the context and put
1912          * the task to sleep at the end of the page fault when all locks are
1913          * released.
1914          *
1915          * On the other hand, in-kernel OOM killer allows for an async victim
1916          * memory reclaim (oom_reaper) and that means that we are not solely
1917          * relying on the oom victim to make a forward progress and we can
1918          * invoke the oom killer here.
1919          *
1920          * Please note that mem_cgroup_out_of_memory might fail to find a
1921          * victim and then we have to bail out from the charge path.
1922          */
1923         if (memcg->oom_kill_disable) {
1924                 if (current->in_user_fault) {
1925                         css_get(&memcg->css);
1926                         current->memcg_in_oom = memcg;
1927                         current->memcg_oom_gfp_mask = mask;
1928                         current->memcg_oom_order = order;
1929                 }
1930                 return false;
1931         }
1932
1933         mem_cgroup_mark_under_oom(memcg);
1934
1935         locked = mem_cgroup_oom_trylock(memcg);
1936
1937         if (locked)
1938                 mem_cgroup_oom_notify(memcg);
1939
1940         mem_cgroup_unmark_under_oom(memcg);
1941         ret = mem_cgroup_out_of_memory(memcg, mask, order);
1942
1943         if (locked)
1944                 mem_cgroup_oom_unlock(memcg);
1945
1946         return ret;
1947 }
1948
1949 /**
1950  * mem_cgroup_oom_synchronize - complete memcg OOM handling
1951  * @handle: actually kill/wait or just clean up the OOM state
1952  *
1953  * This has to be called at the end of a page fault if the memcg OOM
1954  * handler was enabled.
1955  *
1956  * Memcg supports userspace OOM handling where failed allocations must
1957  * sleep on a waitqueue until the userspace task resolves the
1958  * situation.  Sleeping directly in the charge context with all kinds
1959  * of locks held is not a good idea, instead we remember an OOM state
1960  * in the task and mem_cgroup_oom_synchronize() has to be called at
1961  * the end of the page fault to complete the OOM handling.
1962  *
1963  * Returns %true if an ongoing memcg OOM situation was detected and
1964  * completed, %false otherwise.
1965  */
1966 bool mem_cgroup_oom_synchronize(bool handle)
1967 {
1968         struct mem_cgroup *memcg = current->memcg_in_oom;
1969         struct oom_wait_info owait;
1970         bool locked;
1971
1972         /* OOM is global, do not handle */
1973         if (!memcg)
1974                 return false;
1975
1976         if (!handle)
1977                 goto cleanup;
1978
1979         owait.memcg = memcg;
1980         owait.wait.flags = 0;
1981         owait.wait.func = memcg_oom_wake_function;
1982         owait.wait.private = current;
1983         INIT_LIST_HEAD(&owait.wait.entry);
1984
1985         prepare_to_wait(&memcg_oom_waitq, &owait.wait, TASK_KILLABLE);
1986         mem_cgroup_mark_under_oom(memcg);
1987
1988         locked = mem_cgroup_oom_trylock(memcg);
1989
1990         if (locked)
1991                 mem_cgroup_oom_notify(memcg);
1992
1993         if (locked && !memcg->oom_kill_disable) {
1994                 mem_cgroup_unmark_under_oom(memcg);
1995                 finish_wait(&memcg_oom_waitq, &owait.wait);
1996                 mem_cgroup_out_of_memory(memcg, current->memcg_oom_gfp_mask,
1997                                          current->memcg_oom_order);
1998         } else {
1999                 schedule();
2000                 mem_cgroup_unmark_under_oom(memcg);
2001                 finish_wait(&memcg_oom_waitq, &owait.wait);
2002         }
2003
2004         if (locked) {
2005                 mem_cgroup_oom_unlock(memcg);
2006                 /*
2007                  * There is no guarantee that an OOM-lock contender
2008                  * sees the wakeups triggered by the OOM kill
2009                  * uncharges.  Wake any sleepers explicitly.
2010                  */
2011                 memcg_oom_recover(memcg);
2012         }
2013 cleanup:
2014         current->memcg_in_oom = NULL;
2015         css_put(&memcg->css);
2016         return true;
2017 }
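
     /*
      * Putting the two halves together: with cgroup1's oom_kill_disable set,
      * a failing charge in a user fault only records the memcg, gfp mask and
      * order in the task (see mem_cgroup_oom() above) and bails out.  Once
      * the fault path has dropped its locks, mem_cgroup_oom_synchronize(true)
      * re-takes the OOM lock, fires the cgroup1 OOM notifications and either
      * sleeps until userspace resolves the situation or, if OOM killing was
      * re-enabled in the meantime, calls mem_cgroup_out_of_memory() itself.
      */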
2018
2019 /**
2020  * mem_cgroup_get_oom_group - get a memory cgroup to clean up after OOM
2021  * @victim: task to be killed by the OOM killer
2022  * @oom_domain: memcg in case of memcg OOM, NULL in case of system-wide OOM
2023  *
2024  * Returns a pointer to a memory cgroup, which has to be cleaned up
2025  * by killing all belonging OOM-killable tasks.
2026  *
2027  * Caller has to call mem_cgroup_put() on the returned non-NULL memcg.
2028  */
2029 struct mem_cgroup *mem_cgroup_get_oom_group(struct task_struct *victim,
2030                                             struct mem_cgroup *oom_domain)
2031 {
2032         struct mem_cgroup *oom_group = NULL;
2033         struct mem_cgroup *memcg;
2034
2035         if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
2036                 return NULL;
2037
2038         if (!oom_domain)
2039                 oom_domain = root_mem_cgroup;
2040
2041         rcu_read_lock();
2042
2043         memcg = mem_cgroup_from_task(victim);
2044         if (mem_cgroup_is_root(memcg))
2045                 goto out;
2046
2047         /*
2048          * If the victim task has been asynchronously moved to a different
2049          * memory cgroup, we might end up killing tasks outside oom_domain.
2050          * In this case it's better to ignore memory.group.oom.
2051          */
2052         if (unlikely(!mem_cgroup_is_descendant(memcg, oom_domain)))
2053                 goto out;
2054
2055         /*
2056          * Traverse the memory cgroup hierarchy from the victim task's
2057          * cgroup up to the OOMing cgroup (or root) to find the
2058          * highest-level memory cgroup with oom.group set.
2059          */
2060         for (; memcg; memcg = parent_mem_cgroup(memcg)) {
2061                 if (memcg->oom_group)
2062                         oom_group = memcg;
2063
2064                 if (memcg == oom_domain)
2065                         break;
2066         }
2067
2068         if (oom_group)
2069                 css_get(&oom_group->css);
2070 out:
2071         rcu_read_unlock();
2072
2073         return oom_group;
2074 }
2075
2076 void mem_cgroup_print_oom_group(struct mem_cgroup *memcg)
2077 {
2078         pr_info("Tasks in ");
2079         pr_cont_cgroup_path(memcg->css.cgroup);
2080         pr_cont(" are going to be killed due to memory.oom.group set\n");
2081 }
2082
2083 /**
2084  * folio_memcg_lock - Bind a folio to its memcg.
2085  * @folio: The folio.
2086  *
2087  * This function prevents unlocked LRU folios from being moved to
2088  * another cgroup.
2089  *
2090  * It ensures lifetime of the bound memcg.  The caller is responsible
2091  * for the lifetime of the folio.
2092  */
2093 void folio_memcg_lock(struct folio *folio)
2094 {
2095         struct mem_cgroup *memcg;
2096         unsigned long flags;
2097
2098         /*
2099          * The RCU lock is held throughout the transaction.  The fast
2100          * path can get away without acquiring the memcg->move_lock
2101          * because page moving starts with an RCU grace period.
2102          */
2103         rcu_read_lock();
2104
2105         if (mem_cgroup_disabled())
2106                 return;
2107 again:
2108         memcg = folio_memcg(folio);
2109         if (unlikely(!memcg))
2110                 return;
2111
2112 #ifdef CONFIG_PROVE_LOCKING
2113         local_irq_save(flags);
2114         might_lock(&memcg->move_lock);
2115         local_irq_restore(flags);
2116 #endif
2117
2118         if (atomic_read(&memcg->moving_account) <= 0)
2119                 return;
2120
2121         spin_lock_irqsave(&memcg->move_lock, flags);
2122         if (memcg != folio_memcg(folio)) {
2123                 spin_unlock_irqrestore(&memcg->move_lock, flags);
2124                 goto again;
2125         }
2126
2127         /*
2128          * When charge migration first begins, we can have multiple
2129          * critical sections holding the fast-path RCU lock and one
2130          * holding the slowpath move_lock. Track the task that holds the
2131          * move_lock for unlock_page_memcg().
2132          */
2133         memcg->move_lock_task = current;
2134         memcg->move_lock_flags = flags;
2135 }
2136
2137 void lock_page_memcg(struct page *page)
2138 {
2139         folio_memcg_lock(page_folio(page));
2140 }
2141
2142 static void __folio_memcg_unlock(struct mem_cgroup *memcg)
2143 {
2144         if (memcg && memcg->move_lock_task == current) {
2145                 unsigned long flags = memcg->move_lock_flags;
2146
2147                 memcg->move_lock_task = NULL;
2148                 memcg->move_lock_flags = 0;
2149
2150                 spin_unlock_irqrestore(&memcg->move_lock, flags);
2151         }
2152
2153         rcu_read_unlock();
2154 }
2155
2156 /**
2157  * folio_memcg_unlock - Release the binding between a folio and its memcg.
2158  * @folio: The folio.
2159  *
2160  * This releases the binding created by folio_memcg_lock().  This does
2161  * not change the accounting of this folio to its memcg, but it does
2162  * permit others to change it.
2163  */
2164 void folio_memcg_unlock(struct folio *folio)
2165 {
2166         __folio_memcg_unlock(folio_memcg(folio));
2167 }
2168
2169 void unlock_page_memcg(struct page *page)
2170 {
2171         folio_memcg_unlock(page_folio(page));
2172 }
2173
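     /*
      * A typical user brackets statistics updates on a page whose memcg
      * binding could otherwise change under charge migration, roughly:
      *
      *   lock_page_memcg(page);
      *   ... update memcg-accounted page state ...
      *   unlock_page_memcg(page);
      *
      * The lock side pins the binding (taking move_lock only while a charge
      * move is actually in flight); the unlock side releases move_lock if
      * this task took it and closes the RCU read-side section.
      */
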
2174 struct memcg_stock_pcp {
2175         local_lock_t stock_lock;
2176         struct mem_cgroup *cached; /* this is never the root cgroup */
2177         unsigned int nr_pages;
2178
2179 #ifdef CONFIG_MEMCG_KMEM
2180         struct obj_cgroup *cached_objcg;
2181         struct pglist_data *cached_pgdat;
2182         unsigned int nr_bytes;
2183         int nr_slab_reclaimable_b;
2184         int nr_slab_unreclaimable_b;
2185 #endif
2186
2187         struct work_struct work;
2188         unsigned long flags;
2189 #define FLUSHING_CACHED_CHARGE  0
2190 };
2191 static DEFINE_PER_CPU(struct memcg_stock_pcp, memcg_stock) = {
2192         .stock_lock = INIT_LOCAL_LOCK(stock_lock),
2193 };
2194 static DEFINE_MUTEX(percpu_charge_mutex);
2195
2196 #ifdef CONFIG_MEMCG_KMEM
2197 static struct obj_cgroup *drain_obj_stock(struct memcg_stock_pcp *stock);
2198 static bool obj_stock_flush_required(struct memcg_stock_pcp *stock,
2199                                      struct mem_cgroup *root_memcg);
2200 static void memcg_account_kmem(struct mem_cgroup *memcg, int nr_pages);
2201
2202 #else
2203 static inline struct obj_cgroup *drain_obj_stock(struct memcg_stock_pcp *stock)
2204 {
2205         return NULL;
2206 }
2207 static bool obj_stock_flush_required(struct memcg_stock_pcp *stock,
2208                                      struct mem_cgroup *root_memcg)
2209 {
2210         return false;
2211 }
2212 static void memcg_account_kmem(struct mem_cgroup *memcg, int nr_pages)
2213 {
2214 }
2215 #endif
2216
2217 /**
2218  * consume_stock: Try to consume stocked charge on this cpu.
2219  * @memcg: memcg to consume from.
2220  * @nr_pages: how many pages to charge.
2221  *
2222  * The charges will only happen if @memcg matches the current cpu's memcg
2223  * stock, and at least @nr_pages are available in that stock.  Failure to
2224  * stock, and at least @nr_pages are available in that stock.  If the stock
2225  * cannot service the request, the slow path that follows will refill it.
2226  * returns true if successful, false otherwise.
2227  */
2228 static bool consume_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
2229 {
2230         struct memcg_stock_pcp *stock;
2231         unsigned long flags;
2232         bool ret = false;
2233
2234         if (nr_pages > MEMCG_CHARGE_BATCH)
2235                 return ret;
2236
2237         local_lock_irqsave(&memcg_stock.stock_lock, flags);
2238
2239         stock = this_cpu_ptr(&memcg_stock);
2240         if (memcg == stock->cached && stock->nr_pages >= nr_pages) {
2241                 stock->nr_pages -= nr_pages;
2242                 ret = true;
2243         }
2244
2245         local_unlock_irqrestore(&memcg_stock.stock_lock, flags);
2246
2247         return ret;
2248 }
2249
2250 /*
2251  * Return the cached charges from the percpu stock and reset the cached information.
2252  */
2253 static void drain_stock(struct memcg_stock_pcp *stock)
2254 {
2255         struct mem_cgroup *old = stock->cached;
2256
2257         if (!old)
2258                 return;
2259
2260         if (stock->nr_pages) {
2261                 page_counter_uncharge(&old->memory, stock->nr_pages);
2262                 if (do_memsw_account())
2263                         page_counter_uncharge(&old->memsw, stock->nr_pages);
2264                 stock->nr_pages = 0;
2265         }
2266
2267         css_put(&old->css);
2268         stock->cached = NULL;
2269 }
2270
2271 static void drain_local_stock(struct work_struct *dummy)
2272 {
2273         struct memcg_stock_pcp *stock;
2274         struct obj_cgroup *old = NULL;
2275         unsigned long flags;
2276
2277         /*
2278          * The only protection from cpu hotplug (memcg_hotplug_cpu_dead) vs.
2279          * drain_stock races is that we always operate on local CPU stock
2280          * here with IRQ disabled
2281          * here with IRQs disabled.
2282         local_lock_irqsave(&memcg_stock.stock_lock, flags);
2283
2284         stock = this_cpu_ptr(&memcg_stock);
2285         old = drain_obj_stock(stock);
2286         drain_stock(stock);
2287         clear_bit(FLUSHING_CACHED_CHARGE, &stock->flags);
2288
2289         local_unlock_irqrestore(&memcg_stock.stock_lock, flags);
2290         if (old)
2291                 obj_cgroup_put(old);
2292 }
2293
2294 /*
2295  * Cache charges (nr_pages) in the local per-cpu area.
2296  * They will be consumed by consume_stock() later.
2297  */
2298 static void __refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
2299 {
2300         struct memcg_stock_pcp *stock;
2301
2302         stock = this_cpu_ptr(&memcg_stock);
2303         if (stock->cached != memcg) { /* reset if necessary */
2304                 drain_stock(stock);
2305                 css_get(&memcg->css);
2306                 stock->cached = memcg;
2307         }
2308         stock->nr_pages += nr_pages;
2309
2310         if (stock->nr_pages > MEMCG_CHARGE_BATCH)
2311                 drain_stock(stock);
2312 }
2313
2314 static void refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
2315 {
2316         unsigned long flags;
2317
2318         local_lock_irqsave(&memcg_stock.stock_lock, flags);
2319         __refill_stock(memcg, nr_pages);
2320         local_unlock_irqrestore(&memcg_stock.stock_lock, flags);
2321 }
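
     /*
      * Taken together, the stock helpers turn most charges into two plain
      * per-cpu updates: try_charge_memcg() below charges a full batch
      * (MEMCG_CHARGE_BATCH pages, typically 64) against the page counters
      * and parks the surplus here via refill_stock().  Subsequent small
      * charges for the same memcg on this CPU are then served by
      * consume_stock() without touching the shared atomic counters, until
      * the stock is drained or a different memcg gets cached.
      */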
2322
2323 /*
2324  * Drains all per-CPU charge caches for given root_memcg resp. subtree
2325  * of the hierarchy under it.
2326  */
2327 static void drain_all_stock(struct mem_cgroup *root_memcg)
2328 {
2329         int cpu, curcpu;
2330
2331         /* If someone's already draining, avoid running more workers. */
2332         if (!mutex_trylock(&percpu_charge_mutex))
2333                 return;
2334         /*
2335          * Notify other cpus that system-wide "drain" is running
2336          * We do not care about races with the cpu hotplug because cpu down
2337          * as well as workers from this path always operate on the local
2338          * per-cpu data. CPU up doesn't touch memcg_stock at all.
2339          */
2340         migrate_disable();
2341         curcpu = smp_processor_id();
2342         for_each_online_cpu(cpu) {
2343                 struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu);
2344                 struct mem_cgroup *memcg;
2345                 bool flush = false;
2346
2347                 rcu_read_lock();
2348                 memcg = stock->cached;
2349                 if (memcg && stock->nr_pages &&
2350                     mem_cgroup_is_descendant(memcg, root_memcg))
2351                         flush = true;
2352                 else if (obj_stock_flush_required(stock, root_memcg))
2353                         flush = true;
2354                 rcu_read_unlock();
2355
2356                 if (flush &&
2357                     !test_and_set_bit(FLUSHING_CACHED_CHARGE, &stock->flags)) {
2358                         if (cpu == curcpu)
2359                                 drain_local_stock(&stock->work);
2360                         else
2361                                 schedule_work_on(cpu, &stock->work);
2362                 }
2363         }
2364         migrate_enable();
2365         mutex_unlock(&percpu_charge_mutex);
2366 }
2367
2368 static int memcg_hotplug_cpu_dead(unsigned int cpu)
2369 {
2370         struct memcg_stock_pcp *stock;
2371
2372         stock = &per_cpu(memcg_stock, cpu);
2373         drain_stock(stock);
2374
2375         return 0;
2376 }
2377
2378 static unsigned long reclaim_high(struct mem_cgroup *memcg,
2379                                   unsigned int nr_pages,
2380                                   gfp_t gfp_mask)
2381 {
2382         unsigned long nr_reclaimed = 0;
2383
2384         do {
2385                 unsigned long pflags;
2386
2387                 if (page_counter_read(&memcg->memory) <=
2388                     READ_ONCE(memcg->memory.high))
2389                         continue;
2390
2391                 memcg_memory_event(memcg, MEMCG_HIGH);
2392
2393                 psi_memstall_enter(&pflags);
2394                 nr_reclaimed += try_to_free_mem_cgroup_pages(memcg, nr_pages,
2395                                                         gfp_mask,
2396                                                         MEMCG_RECLAIM_MAY_SWAP,
2397                                                         NULL);
2398                 psi_memstall_leave(&pflags);
2399         } while ((memcg = parent_mem_cgroup(memcg)) &&
2400                  !mem_cgroup_is_root(memcg));
2401
2402         return nr_reclaimed;
2403 }
2404
2405 static void high_work_func(struct work_struct *work)
2406 {
2407         struct mem_cgroup *memcg;
2408
2409         memcg = container_of(work, struct mem_cgroup, high_work);
2410         reclaim_high(memcg, MEMCG_CHARGE_BATCH, GFP_KERNEL);
2411 }
2412
2413 /*
2414  * Clamp the maximum sleep time per allocation batch to 2 seconds. This is
2415  * enough to cause a significant slowdown in most cases, while still
2416  * allowing diagnostics and tracing to proceed without becoming stuck.
2417  */
2418 #define MEMCG_MAX_HIGH_DELAY_JIFFIES (2UL*HZ)
2419
2420 /*
2421  * When calculating the delay, we use these on either side of the exponentiation to
2422  * maintain precision and scale to a reasonable number of jiffies (see the table
2423  * below).
2424  *
2425  * - MEMCG_DELAY_PRECISION_SHIFT: Extra precision bits while translating the
2426  *   overage ratio to a delay.
2427  * - MEMCG_DELAY_SCALING_SHIFT: The number of bits to scale down the
2428  *   proposed penalty in order to reduce to a reasonable number of jiffies, and
2429  *   to produce a reasonable delay curve.
2430  *
2431  * MEMCG_DELAY_SCALING_SHIFT just happens to be a number that produces a
2432  * reasonable delay curve compared to precision-adjusted overage, not
2433  * limit penalises misbehaving cgroups by slowing them down exponentially. For
2434  * limit penalises misbehaviour cgroups by slowing them down exponentially. For
2435  * example, with a high of 100 megabytes:
2436  *
2437  *  +-------+------------------------+
2438  *  | usage | time to allocate in ms |
2439  *  +-------+------------------------+
2440  *  | 100M  |                      0 |
2441  *  | 101M  |                      6 |
2442  *  | 102M  |                     25 |
2443  *  | 103M  |                     57 |
2444  *  | 104M  |                    102 |
2445  *  | 105M  |                    159 |
2446  *  | 106M  |                    230 |
2447  *  | 107M  |                    313 |
2448  *  | 108M  |                    409 |
2449  *  | 109M  |                    518 |
2450  *  | 110M  |                    639 |
2451  *  | 111M  |                    774 |
2452  *  | 112M  |                    921 |
2453  *  | 113M  |                   1081 |
2454  *  | 114M  |                   1254 |
2455  *  | 115M  |                   1439 |
2456  *  | 116M  |                   1638 |
2457  *  | 117M  |                   1849 |
2458  *  | 118M  |                   2000 |
2459  *  | 119M  |                   2000 |
2460  *  | 120M  |                   2000 |
2461  *  +-------+------------------------+
2462  */
2463  #define MEMCG_DELAY_PRECISION_SHIFT 20
2464  #define MEMCG_DELAY_SCALING_SHIFT 14
2465
2466 static u64 calculate_overage(unsigned long usage, unsigned long high)
2467 {
2468         u64 overage;
2469
2470         if (usage <= high)
2471                 return 0;
2472
2473         /*
2474          * Prevent division by 0 in overage calculation by acting as if
2475          * it was a threshold of 1 page
2476          */
2477         high = max(high, 1UL);
2478
2479         overage = usage - high;
2480         overage <<= MEMCG_DELAY_PRECISION_SHIFT;
2481         return div64_u64(overage, high);
2482 }
2483
2484 static u64 mem_find_max_overage(struct mem_cgroup *memcg)
2485 {
2486         u64 overage, max_overage = 0;
2487
2488         do {
2489                 overage = calculate_overage(page_counter_read(&memcg->memory),
2490                                             READ_ONCE(memcg->memory.high));
2491                 max_overage = max(overage, max_overage);
2492         } while ((memcg = parent_mem_cgroup(memcg)) &&
2493                  !mem_cgroup_is_root(memcg));
2494
2495         return max_overage;
2496 }
2497
2498 static u64 swap_find_max_overage(struct mem_cgroup *memcg)
2499 {
2500         u64 overage, max_overage = 0;
2501
2502         do {
2503                 overage = calculate_overage(page_counter_read(&memcg->swap),
2504                                             READ_ONCE(memcg->swap.high));
2505                 if (overage)
2506                         memcg_memory_event(memcg, MEMCG_SWAP_HIGH);
2507                 max_overage = max(overage, max_overage);
2508         } while ((memcg = parent_mem_cgroup(memcg)) &&
2509                  !mem_cgroup_is_root(memcg));
2510
2511         return max_overage;
2512 }
2513
2514 /*
2515  * Get the number of jiffies that we should penalise a mischievous cgroup which
2516  * is exceeding its memory.high by checking both it and its ancestors.
2517  */
2518 static unsigned long calculate_high_delay(struct mem_cgroup *memcg,
2519                                           unsigned int nr_pages,
2520                                           u64 max_overage)
2521 {
2522         unsigned long penalty_jiffies;
2523
2524         if (!max_overage)
2525                 return 0;
2526
2527         /*
2528          * We use overage compared to memory.high to calculate the number of
2529          * jiffies to sleep (penalty_jiffies). Ideally this value should be
2530          * fairly lenient on small overages, and increasingly harsh when the
2531          * memcg in question makes it clear that it has no intention of stopping
2532          * its crazy behaviour, so we exponentially increase the delay based on
2533          * overage amount.
2534          */
2535         penalty_jiffies = max_overage * max_overage * HZ;
2536         penalty_jiffies >>= MEMCG_DELAY_PRECISION_SHIFT;
2537         penalty_jiffies >>= MEMCG_DELAY_SCALING_SHIFT;
2538
2539         /*
2540          * Factor in the task's own contribution to the overage, such that four
2541          * N-sized allocations are throttled approximately the same as one
2542          * 4N-sized allocation.
2543          *
2544          * MEMCG_CHARGE_BATCH pages is nominal, so work out how much smaller or
2545  * larger the current charge batch is than that.
2546          */
2547         return penalty_jiffies * nr_pages / MEMCG_CHARGE_BATCH;
2548 }
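
     /*
      * Worked example for the table above, assuming 4KiB pages and HZ=1000:
      * with memory.high at 100M and usage at 101M, calculate_overage()
      * yields (1M << MEMCG_DELAY_PRECISION_SHIFT) / 100M ~= 10485, and
      * 10485^2 * HZ >> 20 >> 14 ~= 6 jiffies, i.e. the 6ms shown for 101M.
      * The final nr_pages / MEMCG_CHARGE_BATCH scaling then means a task
      * charging a full batch pays the whole penalty, while smaller charges
      * pay proportionally less.
      */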
2549
2550 /*
2551  * Scheduled by try_charge() to be executed from the userland return path
2552  * and reclaims memory over the high limit.
2553  */
2554 void mem_cgroup_handle_over_high(void)
2555 {
2556         unsigned long penalty_jiffies;
2557         unsigned long pflags;
2558         unsigned long nr_reclaimed;
2559         unsigned int nr_pages = current->memcg_nr_pages_over_high;
2560         int nr_retries = MAX_RECLAIM_RETRIES;
2561         struct mem_cgroup *memcg;
2562         bool in_retry = false;
2563
2564         if (likely(!nr_pages))
2565                 return;
2566
2567         memcg = get_mem_cgroup_from_mm(current->mm);
2568         current->memcg_nr_pages_over_high = 0;
2569
2570 retry_reclaim:
2571         /*
2572          * The allocating task should reclaim at least the batch size, but for
2573          * subsequent retries we only want to do what's necessary to prevent oom
2574          * or breaching resource isolation.
2575          *
2576          * This is distinct from memory.max or page allocator behaviour because
2577          * memory.high is currently batched, whereas memory.max and the page
2578          * allocator run every time an allocation is made.
2579          */
2580         nr_reclaimed = reclaim_high(memcg,
2581                                     in_retry ? SWAP_CLUSTER_MAX : nr_pages,
2582                                     GFP_KERNEL);
2583
2584         /*
2585          * memory.high is breached and reclaim is unable to keep up. Throttle
2586          * allocators proactively to slow down excessive growth.
2587          */
2588         penalty_jiffies = calculate_high_delay(memcg, nr_pages,
2589                                                mem_find_max_overage(memcg));
2590
2591         penalty_jiffies += calculate_high_delay(memcg, nr_pages,
2592                                                 swap_find_max_overage(memcg));
2593
2594         /*
2595          * Clamp the max delay per usermode return so as to still keep the
2596          * application moving forwards and also permit diagnostics, albeit
2597          * extremely slowly.
2598          */
2599         penalty_jiffies = min(penalty_jiffies, MEMCG_MAX_HIGH_DELAY_JIFFIES);
2600
2601         /*
2602          * Don't sleep if the amount of jiffies this memcg owes us is so low
2603          * that it's not even worth doing, in an attempt to be nice to those who
2604          * go only a small amount over their memory.high value and maybe haven't
2605          * been aggressively reclaimed enough yet.
2606          */
2607         if (penalty_jiffies <= HZ / 100)
2608                 goto out;
2609
2610         /*
2611          * If reclaim is making forward progress but we're still over
2612          * memory.high, we want to encourage that rather than doing allocator
2613          * throttling.
2614          */
2615         if (nr_reclaimed || nr_retries--) {
2616                 in_retry = true;
2617                 goto retry_reclaim;
2618         }
2619
2620         /*
2621          * If we exit early, we're guaranteed to die (since
2622          * schedule_timeout_killable sets TASK_KILLABLE). This means we don't
2623          * need to account for any ill-begotten jiffies to pay them off later.
2624          */
2625         psi_memstall_enter(&pflags);
2626         schedule_timeout_killable(penalty_jiffies);
2627         psi_memstall_leave(&pflags);
2628
2629 out:
2630         css_put(&memcg->css);
2631 }
2632
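     /*
      * The slow path below proceeds roughly as follows: consume the per-cpu
      * stock, then try the page counters with a full batch and fall back to
      * the exact request; on failure run direct reclaim (accounted as a psi
      * memstall), drain the per-cpu stocks once, retry up to
      * MAX_RECLAIM_RETRIES times, and finally fall back to the memcg OOM
      * killer.  PF_MEMALLOC and __GFP_NOFAIL/__GFP_HIGH requests are force
      * charged over the limit rather than failed.
      */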
2633 static int try_charge_memcg(struct mem_cgroup *memcg, gfp_t gfp_mask,
2634                         unsigned int nr_pages)
2635 {
2636         unsigned int batch = max(MEMCG_CHARGE_BATCH, nr_pages);
2637         int nr_retries = MAX_RECLAIM_RETRIES;
2638         struct mem_cgroup *mem_over_limit;
2639         struct page_counter *counter;
2640         unsigned long nr_reclaimed;
2641         bool passed_oom = false;
2642         unsigned int reclaim_options = MEMCG_RECLAIM_MAY_SWAP;
2643         bool drained = false;
2644         bool raised_max_event = false;
2645         unsigned long pflags;
2646
2647 retry:
2648         if (consume_stock(memcg, nr_pages))
2649                 return 0;
2650
2651         if (!do_memsw_account() ||
2652             page_counter_try_charge(&memcg->memsw, batch, &counter)) {
2653                 if (page_counter_try_charge(&memcg->memory, batch, &counter))
2654                         goto done_restock;
2655                 if (do_memsw_account())
2656                         page_counter_uncharge(&memcg->memsw, batch);
2657                 mem_over_limit = mem_cgroup_from_counter(counter, memory);
2658         } else {
2659                 mem_over_limit = mem_cgroup_from_counter(counter, memsw);
2660                 reclaim_options &= ~MEMCG_RECLAIM_MAY_SWAP;
2661         }
2662
2663         if (batch > nr_pages) {
2664                 batch = nr_pages;
2665                 goto retry;
2666         }
2667
2668         /*
2669          * Prevent unbounded recursion when reclaim operations need to
2670          * allocate memory. This might exceed the limits temporarily,
2671          * but we prefer facilitating memory reclaim and getting back
2672          * under the limit over triggering OOM kills in these cases.
2673          */
2674         if (unlikely(current->flags & PF_MEMALLOC))
2675                 goto force;
2676
2677         if (unlikely(task_in_memcg_oom(current)))
2678                 goto nomem;
2679
2680         if (!gfpflags_allow_blocking(gfp_mask))
2681                 goto nomem;
2682
2683         memcg_memory_event(mem_over_limit, MEMCG_MAX);
2684         raised_max_event = true;
2685
2686         psi_memstall_enter(&pflags);
2687         nr_reclaimed = try_to_free_mem_cgroup_pages(mem_over_limit, nr_pages,
2688                                                     gfp_mask, reclaim_options,
2689                                                     NULL);
2690         psi_memstall_leave(&pflags);
2691
2692         if (mem_cgroup_margin(mem_over_limit) >= nr_pages)
2693                 goto retry;
2694
2695         if (!drained) {
2696                 drain_all_stock(mem_over_limit);
2697                 drained = true;
2698                 goto retry;
2699         }
2700
2701         if (gfp_mask & __GFP_NORETRY)
2702                 goto nomem;
2703         /*
2704          * Even though the limit is exceeded at this point, reclaim
2705          * may have been able to free some pages.  Retry the charge
2706          * before killing the task.
2707          *
2708          * Only for regular pages, though: huge pages are rather
2709          * unlikely to succeed so close to the limit, and we fall back
2710          * to regular pages anyway in case of failure.
2711          */
2712         if (nr_reclaimed && nr_pages <= (1 << PAGE_ALLOC_COSTLY_ORDER))
2713                 goto retry;
2714         /*
2715          * During a task move, charges can be double counted. So, it's
2716          * better to wait until the end of the task move if one is in progress.
2717          */
2718         if (mem_cgroup_wait_acct_move(mem_over_limit))
2719                 goto retry;
2720
2721         if (nr_retries--)
2722                 goto retry;
2723
2724         if (gfp_mask & __GFP_RETRY_MAYFAIL)
2725                 goto nomem;
2726
2727         /* Avoid endless loop for tasks bypassed by the oom killer */
2728         if (passed_oom && task_is_dying())
2729                 goto nomem;
2730
2731         /*
2732          * keep retrying as long as the memcg oom killer is able to make
2733          * forward progress, or bypass the charge if the oom killer
2734          * couldn't make any progress.
2735          */
2736         if (mem_cgroup_oom(mem_over_limit, gfp_mask,
2737                            get_order(nr_pages * PAGE_SIZE))) {
2738                 passed_oom = true;
2739                 nr_retries = MAX_RECLAIM_RETRIES;
2740                 goto retry;
2741         }
2742 nomem:
2743         /*
2744          * Memcg doesn't have a dedicated reserve for atomic
2745          * allocations. But like the global atomic pool, we need to
2746          * put the burden of reclaim on regular allocation requests
2747          * and let these go through as privileged allocations.
2748          */
2749         if (!(gfp_mask & (__GFP_NOFAIL | __GFP_HIGH)))
2750                 return -ENOMEM;
2751 force:
2752         /*
2753          * If the allocation has to be enforced, don't forget to raise
2754          * a MEMCG_MAX event.
2755          */
2756         if (!raised_max_event)
2757                 memcg_memory_event(mem_over_limit, MEMCG_MAX);
2758
2759         /*
2760          * The allocation either can't fail or will lead to more memory
2761          * being freed very soon.  Allow memory usage to go over the limit
2762          * temporarily by force charging it.
2763          */
2764         page_counter_charge(&memcg->memory, nr_pages);
2765         if (do_memsw_account())
2766                 page_counter_charge(&memcg->memsw, nr_pages);
2767
2768         return 0;
2769
2770 done_restock:
2771         if (batch > nr_pages)
2772                 refill_stock(memcg, batch - nr_pages);
2773
2774         /*
2775          * If the hierarchy is above the normal consumption range, schedule
2776          * reclaim on returning to userland.  We can perform reclaim here
2777          * if __GFP_RECLAIM but let's always punt for simplicity and so that
2778          * GFP_KERNEL can consistently be used during reclaim.  @memcg is
2779          * not recorded as it most likely matches current's and won't
2780          * change in the meantime.  As high limit is checked again before
2781          * reclaim, the cost of mismatch is negligible.
2782          */
2783         do {
2784                 bool mem_high, swap_high;
2785
2786                 mem_high = page_counter_read(&memcg->memory) >
2787                         READ_ONCE(memcg->memory.high);
2788                 swap_high = page_counter_read(&memcg->swap) >
2789                         READ_ONCE(memcg->swap.high);
2790
2791                 /* Don't bother a random interrupted task */
2792                 if (!in_task()) {
2793                         if (mem_high) {
2794                                 schedule_work(&memcg->high_work);
2795                                 break;
2796                         }
2797                         continue;
2798                 }
2799
2800                 if (mem_high || swap_high) {
2801                         /*
2802                          * The allocating tasks in this cgroup will need to do
2803                          * reclaim or be throttled to prevent further growth
2804                          * of the memory or swap footprints.
2805                          *
2806                          * Target some best-effort fairness between the tasks,
2807                          * and distribute reclaim work and delay penalties
2808                          * based on how much each task is actually allocating.
2809                          */
2810                         current->memcg_nr_pages_over_high += batch;
2811                         set_notify_resume(current);
2812                         break;
2813                 }
2814         } while ((memcg = parent_mem_cgroup(memcg)));
2815
2816         if (current->memcg_nr_pages_over_high > MEMCG_CHARGE_BATCH &&
2817             !(current->flags & PF_MEMALLOC) &&
2818             gfpflags_allow_blocking(gfp_mask)) {
2819                 mem_cgroup_handle_over_high();
2820         }
2821         return 0;
2822 }
2823
2824 static inline int try_charge(struct mem_cgroup *memcg, gfp_t gfp_mask,
2825                              unsigned int nr_pages)
2826 {
2827         if (mem_cgroup_is_root(memcg))
2828                 return 0;
2829
2830         return try_charge_memcg(memcg, gfp_mask, nr_pages);
2831 }
2832
2833 static inline void cancel_charge(struct mem_cgroup *memcg, unsigned int nr_pages)
2834 {
2835         if (mem_cgroup_is_root(memcg))
2836                 return;
2837
2838         page_counter_uncharge(&memcg->memory, nr_pages);
2839         if (do_memsw_account())
2840                 page_counter_uncharge(&memcg->memsw, nr_pages);
2841 }
2842
2843 static void commit_charge(struct folio *folio, struct mem_cgroup *memcg)
2844 {
2845         VM_BUG_ON_FOLIO(folio_memcg(folio), folio);
2846         /*
2847          * Any of the following ensures page's memcg stability:
2848          *
2849          * - the page lock
2850          * - LRU isolation
2851          * - lock_page_memcg()
2852          * - exclusive reference
2853          * - mem_cgroup_trylock_pages()
2854          */
2855         folio->memcg_data = (unsigned long)memcg;
2856 }
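
     /*
      * Charging a folio is therefore a two-step affair, along the lines of
      * (illustrative sketch only):
      *
      *   ret = try_charge(memcg, gfp, folio_nr_pages(folio));
      *   if (!ret) {
      *           css_get(&memcg->css);
      *           commit_charge(folio, memcg);
      *   }
      *
      * try_charge() reserves the pages against the counters and
      * commit_charge() publishes the binding in folio->memcg_data under one
      * of the stability conditions listed above.
      */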
2857
2858 #ifdef CONFIG_MEMCG_KMEM
2859 /*
2860  * The allocated objcg pointers array is not accounted directly.
2861  * Moreover, it should not come from a DMA buffer and is not readily
2862  * reclaimable. So those GFP bits should be masked off.
2863  */
2864 #define OBJCGS_CLEAR_MASK       (__GFP_DMA | __GFP_RECLAIMABLE | __GFP_ACCOUNT)
2865
2866 /*
2867  * mod_objcg_mlstate() may be called with irq enabled, so
2868  * mod_memcg_lruvec_state() should be used.
2869  */
2870 static inline void mod_objcg_mlstate(struct obj_cgroup *objcg,
2871                                      struct pglist_data *pgdat,
2872                                      enum node_stat_item idx, int nr)
2873 {
2874         struct mem_cgroup *memcg;
2875         struct lruvec *lruvec;
2876
2877         rcu_read_lock();
2878         memcg = obj_cgroup_memcg(objcg);
2879         lruvec = mem_cgroup_lruvec(memcg, pgdat);
2880         mod_memcg_lruvec_state(lruvec, idx, nr);
2881         rcu_read_unlock();
2882 }
2883
2884 int memcg_alloc_slab_cgroups(struct slab *slab, struct kmem_cache *s,
2885                                  gfp_t gfp, bool new_slab)
2886 {
2887         unsigned int objects = objs_per_slab(s, slab);
2888         unsigned long memcg_data;
2889         void *vec;
2890
2891         gfp &= ~OBJCGS_CLEAR_MASK;
2892         vec = kcalloc_node(objects, sizeof(struct obj_cgroup *), gfp,
2893                            slab_nid(slab));
2894         if (!vec)
2895                 return -ENOMEM;
2896
2897         memcg_data = (unsigned long) vec | MEMCG_DATA_OBJCGS;
2898         if (new_slab) {
2899                 /*
2900                  * If the slab is brand new and nobody can yet access its
2901                  * memcg_data, no synchronization is required and memcg_data can
2902                  * be simply assigned.
2903                  */
2904                 slab->memcg_data = memcg_data;
2905         } else if (cmpxchg(&slab->memcg_data, 0, memcg_data)) {
2906                 /*
2907                  * If the slab is already in use, somebody can allocate and
2908                  * assign obj_cgroups in parallel. In this case the existing
2909                  * objcg vector should be reused.
2910                  */
2911                 kfree(vec);
2912                 return 0;
2913         }
2914
2915         kmemleak_not_leak(vec);
2916         return 0;
2917 }
2918
2919 static __always_inline
2920 struct mem_cgroup *mem_cgroup_from_obj_folio(struct folio *folio, void *p)
2921 {
2922         /*
2923          * Slab objects are accounted individually, not per-page.
2924          * Memcg membership data for each individual object is saved in
2925          * slab->memcg_data.
2926          */
2927         if (folio_test_slab(folio)) {
2928                 struct obj_cgroup **objcgs;
2929                 struct slab *slab;
2930                 unsigned int off;
2931
2932                 slab = folio_slab(folio);
2933                 objcgs = slab_objcgs(slab);
2934                 if (!objcgs)
2935                         return NULL;
2936
2937                 off = obj_to_index(slab->slab_cache, slab, p);
2938                 if (objcgs[off])
2939                         return obj_cgroup_memcg(objcgs[off]);
2940
2941                 return NULL;
2942         }
2943
2944         /*
2945          * page_memcg_check() is used here, because in theory we can encounter
2946          * a folio where the slab flag has been cleared already, but
2947          * slab->memcg_data has not been freed yet.
2948          * page_memcg_check(page) will guarantee that a proper memory
2949          * cgroup pointer or NULL will be returned.
2950          */
2951         return page_memcg_check(folio_page(folio, 0));
2952 }
2953
2954 /*
2955  * Returns a pointer to the memory cgroup to which the kernel object is charged.
2956  *
2957  * A passed kernel object can be a slab object, vmalloc object or a generic
2958  * kernel page, so different mechanisms for getting the memory cgroup pointer
2959  * should be used.
2960  *
2961  * In certain cases (e.g. kernel stacks or large kmallocs with SLUB) the caller
2962  * cannot know for sure how the kernel object is implemented.
2963  * mem_cgroup_from_obj() can be safely used in such cases.
2964  *
2965  * The caller must ensure the memcg lifetime, e.g. by taking rcu_read_lock(),
2966  * cgroup_mutex, etc.
2967  */
2968 struct mem_cgroup *mem_cgroup_from_obj(void *p)
2969 {
2970         struct folio *folio;
2971
2972         if (mem_cgroup_disabled())
2973                 return NULL;
2974
2975         if (unlikely(is_vmalloc_addr(p)))
2976                 folio = page_folio(vmalloc_to_page(p));
2977         else
2978                 folio = virt_to_folio(p);
2979
2980         return mem_cgroup_from_obj_folio(folio, p);
2981 }
2982
2983 /*
2984  * Returns a pointer to the memory cgroup to which the kernel object is charged.
2985  * Similar to mem_cgroup_from_obj(), but faster and not suitable for objects
2986  * allocated using vmalloc().
2987  *
2988  * A passed kernel object must be a slab object or a generic kernel page.
2989  *
2990  * The caller must ensure the memcg lifetime, e.g. by taking rcu_read_lock(),
2991  * cgroup_mutex, etc.
2992  */
2993 struct mem_cgroup *mem_cgroup_from_slab_obj(void *p)
2994 {
2995         if (mem_cgroup_disabled())
2996                 return NULL;
2997
2998         return mem_cgroup_from_obj_folio(virt_to_folio(p), p);
2999 }
3000
3001 static struct obj_cgroup *__get_obj_cgroup_from_memcg(struct mem_cgroup *memcg)
3002 {
3003         struct obj_cgroup *objcg = NULL;
3004
3005         for (; !mem_cgroup_is_root(memcg); memcg = parent_mem_cgroup(memcg)) {
3006                 objcg = rcu_dereference(memcg->objcg);
3007                 if (objcg && obj_cgroup_tryget(objcg))
3008                         break;
3009                 objcg = NULL;
3010         }
3011         return objcg;
3012 }
3013
3014 __always_inline struct obj_cgroup *get_obj_cgroup_from_current(void)
3015 {
3016         struct obj_cgroup *objcg = NULL;
3017         struct mem_cgroup *memcg;
3018
3019         if (memcg_kmem_bypass())
3020                 return NULL;
3021
3022         rcu_read_lock();
3023         if (unlikely(active_memcg()))
3024                 memcg = active_memcg();
3025         else
3026                 memcg = mem_cgroup_from_task(current);
3027         objcg = __get_obj_cgroup_from_memcg(memcg);
3028         rcu_read_unlock();
3029         return objcg;
3030 }
3031
3032 struct obj_cgroup *get_obj_cgroup_from_page(struct page *page)
3033 {
3034         struct obj_cgroup *objcg;
3035
3036         if (!memcg_kmem_enabled())
3037                 return NULL;
3038
3039         if (PageMemcgKmem(page)) {
3040                 objcg = __folio_objcg(page_folio(page));
3041                 obj_cgroup_get(objcg);
3042         } else {
3043                 struct mem_cgroup *memcg;
3044
3045                 rcu_read_lock();
3046                 memcg = __folio_memcg(page_folio(page));
3047                 if (memcg)
3048                         objcg = __get_obj_cgroup_from_memcg(memcg);
3049                 else
3050                         objcg = NULL;
3051                 rcu_read_unlock();
3052         }
3053         return objcg;
3054 }
3055
3056 static void memcg_account_kmem(struct mem_cgroup *memcg, int nr_pages)
3057 {
3058         mod_memcg_state(memcg, MEMCG_KMEM, nr_pages);
3059         if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) {
3060                 if (nr_pages > 0)
3061                         page_counter_charge(&memcg->kmem, nr_pages);
3062                 else
3063                         page_counter_uncharge(&memcg->kmem, -nr_pages);
3064         }
3065 }
3066
3067
3068 /*
3069  * obj_cgroup_uncharge_pages: uncharge a number of kernel pages from an objcg
3070  * @objcg: object cgroup to uncharge
3071  * @nr_pages: number of pages to uncharge
3072  */
3073 static void obj_cgroup_uncharge_pages(struct obj_cgroup *objcg,
3074                                       unsigned int nr_pages)
3075 {
3076         struct mem_cgroup *memcg;
3077
3078         memcg = get_mem_cgroup_from_objcg(objcg);
3079
3080         memcg_account_kmem(memcg, -nr_pages);
3081         refill_stock(memcg, nr_pages);
3082
3083         css_put(&memcg->css);
3084 }
3085
3086 /*
3087  * obj_cgroup_charge_pages: charge a number of kernel pages to an objcg
3088  * @objcg: object cgroup to charge
3089  * @gfp: reclaim mode
3090  * @nr_pages: number of pages to charge
3091  *
3092  * Returns 0 on success, an error code on failure.
3093  */
3094 static int obj_cgroup_charge_pages(struct obj_cgroup *objcg, gfp_t gfp,
3095                                    unsigned int nr_pages)
3096 {
3097         struct mem_cgroup *memcg;
3098         int ret;
3099
3100         memcg = get_mem_cgroup_from_objcg(objcg);
3101
3102         ret = try_charge_memcg(memcg, gfp, nr_pages);
3103         if (ret)
3104                 goto out;
3105
3106         memcg_account_kmem(memcg, nr_pages);
3107 out:
3108         css_put(&memcg->css);
3109
3110         return ret;
3111 }
3112
3113 /**
3114  * __memcg_kmem_charge_page: charge a kmem page to the current memory cgroup
3115  * @page: page to charge
3116  * @gfp: reclaim mode
3117  * @order: allocation order
3118  *
3119  * Returns 0 on success, an error code on failure.
3120  */
3121 int __memcg_kmem_charge_page(struct page *page, gfp_t gfp, int order)
3122 {
3123         struct obj_cgroup *objcg;
3124         int ret = 0;
3125
3126         objcg = get_obj_cgroup_from_current();
3127         if (objcg) {
3128                 ret = obj_cgroup_charge_pages(objcg, gfp, 1 << order);
3129                 if (!ret) {
3130                         page->memcg_data = (unsigned long)objcg |
3131                                 MEMCG_DATA_KMEM;
3132                         return 0;
3133                 }
3134                 obj_cgroup_put(objcg);
3135         }
3136         return ret;
3137 }
3138
3139 /**
3140  * __memcg_kmem_uncharge_page: uncharge a kmem page
3141  * @page: page to uncharge
3142  * @order: allocation order
3143  */
3144 void __memcg_kmem_uncharge_page(struct page *page, int order)
3145 {
3146         struct folio *folio = page_folio(page);
3147         struct obj_cgroup *objcg;
3148         unsigned int nr_pages = 1 << order;
3149
3150         if (!folio_memcg_kmem(folio))
3151                 return;
3152
3153         objcg = __folio_objcg(folio);
3154         obj_cgroup_uncharge_pages(objcg, nr_pages);
3155         folio->memcg_data = 0;
3156         obj_cgroup_put(objcg);
3157 }
3158
3159 void mod_objcg_state(struct obj_cgroup *objcg, struct pglist_data *pgdat,
3160                      enum node_stat_item idx, int nr)
3161 {
3162         struct memcg_stock_pcp *stock;
3163         struct obj_cgroup *old = NULL;
3164         unsigned long flags;
3165         int *bytes;
3166
3167         local_lock_irqsave(&memcg_stock.stock_lock, flags);
3168         stock = this_cpu_ptr(&memcg_stock);
3169
3170         /*
3171          * Save vmstat data in stock and skip vmstat array update unless
3172          * accumulating over a page of vmstat data or when pgdat or idx
3173          * changes.
3174          */
3175         if (stock->cached_objcg != objcg) {
3176                 old = drain_obj_stock(stock);
3177                 obj_cgroup_get(objcg);
3178                 stock->nr_bytes = atomic_read(&objcg->nr_charged_bytes)
3179                                 ? atomic_xchg(&objcg->nr_charged_bytes, 0) : 0;
3180                 stock->cached_objcg = objcg;
3181                 stock->cached_pgdat = pgdat;
3182         } else if (stock->cached_pgdat != pgdat) {
3183                 /* Flush the existing cached vmstat data */
3184                 struct pglist_data *oldpg = stock->cached_pgdat;
3185
3186                 if (stock->nr_slab_reclaimable_b) {
3187                         mod_objcg_mlstate(objcg, oldpg, NR_SLAB_RECLAIMABLE_B,
3188                                           stock->nr_slab_reclaimable_b);
3189                         stock->nr_slab_reclaimable_b = 0;
3190                 }
3191                 if (stock->nr_slab_unreclaimable_b) {
3192                         mod_objcg_mlstate(objcg, oldpg, NR_SLAB_UNRECLAIMABLE_B,
3193                                           stock->nr_slab_unreclaimable_b);
3194                         stock->nr_slab_unreclaimable_b = 0;
3195                 }
3196                 stock->cached_pgdat = pgdat;
3197         }
3198
3199         bytes = (idx == NR_SLAB_RECLAIMABLE_B) ? &stock->nr_slab_reclaimable_b
3200                                                : &stock->nr_slab_unreclaimable_b;
3201         /*
3202          * Even for a large object >= PAGE_SIZE, the vmstat data will still be
3203          * cached locally at least once before being pushed out.
3204          */
3205         if (!*bytes) {
3206                 *bytes = nr;
3207                 nr = 0;
3208         } else {
3209                 *bytes += nr;
3210                 if (abs(*bytes) > PAGE_SIZE) {
3211                         nr = *bytes;
3212                         *bytes = 0;
3213                 } else {
3214                         nr = 0;
3215                 }
3216         }
3217         if (nr)
3218                 mod_objcg_mlstate(objcg, pgdat, idx, nr);
3219
3220         local_unlock_irqrestore(&memcg_stock.stock_lock, flags);
3221         if (old)
3222                 obj_cgroup_put(old);
3223 }
3224
3225 static bool consume_obj_stock(struct obj_cgroup *objcg, unsigned int nr_bytes)
3226 {
3227         struct memcg_stock_pcp *stock;
3228         unsigned long flags;
3229         bool ret = false;
3230
3231         local_lock_irqsave(&memcg_stock.stock_lock, flags);
3232
3233         stock = this_cpu_ptr(&memcg_stock);
3234         if (objcg == stock->cached_objcg && stock->nr_bytes >= nr_bytes) {
3235                 stock->nr_bytes -= nr_bytes;
3236                 ret = true;
3237         }
3238
3239         local_unlock_irqrestore(&memcg_stock.stock_lock, flags);
3240
3241         return ret;
3242 }
3243
3244 static struct obj_cgroup *drain_obj_stock(struct memcg_stock_pcp *stock)
3245 {
3246         struct obj_cgroup *old = stock->cached_objcg;
3247
3248         if (!old)
3249                 return NULL;
3250
3251         if (stock->nr_bytes) {
3252                 unsigned int nr_pages = stock->nr_bytes >> PAGE_SHIFT;
3253                 unsigned int nr_bytes = stock->nr_bytes & (PAGE_SIZE - 1);
3254
3255                 if (nr_pages) {
3256                         struct mem_cgroup *memcg;
3257
3258                         memcg = get_mem_cgroup_from_objcg(old);
3259
3260                         memcg_account_kmem(memcg, -nr_pages);
3261                         __refill_stock(memcg, nr_pages);
3262
3263                         css_put(&memcg->css);
3264                 }
3265
3266                 /*
3267                  * The leftover is flushed to the centralized per-memcg value.
3268                  * On the next attempt to refill obj stock it will be moved
3269          * to a per-cpu stock (probably on another CPU), see
3270                  * refill_obj_stock().
3271                  *
3272                  * How often it's flushed is a trade-off between the memory
3273                  * limit enforcement accuracy and potential CPU contention,
3274                  * so it might be changed in the future.
3275                  */
3276                 atomic_add(nr_bytes, &old->nr_charged_bytes);
3277                 stock->nr_bytes = 0;
3278         }
3279
3280         /*
3281          * Flush the vmstat data in current stock
3282          */
3283         if (stock->nr_slab_reclaimable_b || stock->nr_slab_unreclaimable_b) {
3284                 if (stock->nr_slab_reclaimable_b) {
3285                         mod_objcg_mlstate(old, stock->cached_pgdat,
3286                                           NR_SLAB_RECLAIMABLE_B,
3287                                           stock->nr_slab_reclaimable_b);
3288                         stock->nr_slab_reclaimable_b = 0;
3289                 }
3290                 if (stock->nr_slab_unreclaimable_b) {
3291                         mod_objcg_mlstate(old, stock->cached_pgdat,
3292                                           NR_SLAB_UNRECLAIMABLE_B,
3293                                           stock->nr_slab_unreclaimable_b);
3294                         stock->nr_slab_unreclaimable_b = 0;
3295                 }
3296                 stock->cached_pgdat = NULL;
3297         }
3298
3299         stock->cached_objcg = NULL;
3300         /*
3301          * The `old' objcg needs to be released by the caller via
3302          * obj_cgroup_put() outside of memcg_stock_pcp::stock_lock.
3303          */
3304         return old;
3305 }
3306
3307 static bool obj_stock_flush_required(struct memcg_stock_pcp *stock,
3308                                      struct mem_cgroup *root_memcg)
3309 {
3310         struct mem_cgroup *memcg;
3311
3312         if (stock->cached_objcg) {
3313                 memcg = obj_cgroup_memcg(stock->cached_objcg);
3314                 if (memcg && mem_cgroup_is_descendant(memcg, root_memcg))
3315                         return true;
3316         }
3317
3318         return false;
3319 }
3320
3321 static void refill_obj_stock(struct obj_cgroup *objcg, unsigned int nr_bytes,
3322                              bool allow_uncharge)
3323 {
3324         struct memcg_stock_pcp *stock;
3325         struct obj_cgroup *old = NULL;
3326         unsigned long flags;
3327         unsigned int nr_pages = 0;
3328
3329         local_lock_irqsave(&memcg_stock.stock_lock, flags);
3330
3331         stock = this_cpu_ptr(&memcg_stock);
3332         if (stock->cached_objcg != objcg) { /* reset if necessary */
3333                 old = drain_obj_stock(stock);
3334                 obj_cgroup_get(objcg);
3335                 stock->cached_objcg = objcg;
3336                 stock->nr_bytes = atomic_read(&objcg->nr_charged_bytes)
3337                                 ? atomic_xchg(&objcg->nr_charged_bytes, 0) : 0;
3338                 allow_uncharge = true;  /* Allow uncharge when objcg changes */
3339         }
3340         stock->nr_bytes += nr_bytes;
3341
3342         if (allow_uncharge && (stock->nr_bytes > PAGE_SIZE)) {
3343                 nr_pages = stock->nr_bytes >> PAGE_SHIFT;
3344                 stock->nr_bytes &= (PAGE_SIZE - 1);
3345         }
3346
3347         local_unlock_irqrestore(&memcg_stock.stock_lock, flags);
3348         if (old)
3349                 obj_cgroup_put(old);
3350
3351         if (nr_pages)
3352                 obj_cgroup_uncharge_pages(objcg, nr_pages);
3353 }
3354
3355 int obj_cgroup_charge(struct obj_cgroup *objcg, gfp_t gfp, size_t size)
3356 {
3357         unsigned int nr_pages, nr_bytes;
3358         int ret;
3359
3360         if (consume_obj_stock(objcg, size))
3361                 return 0;
3362
3363         /*
3364          * In theory, objcg->nr_charged_bytes can have enough
3365          * pre-charged bytes to satisfy the allocation. However,
3366          * flushing objcg->nr_charged_bytes requires two atomic
3367          * operations, and objcg->nr_charged_bytes can't be big.
3368          * The shared objcg->nr_charged_bytes can also become a
3369          * performance bottleneck if all tasks of the same memcg are
3370          * trying to update it. So it's better to ignore it and try to
3371          * grab some new pages. The stock's nr_bytes will be flushed to
3372          * objcg->nr_charged_bytes later on when objcg changes.
3373          *
3374          * The stock's nr_bytes may contain enough pre-charged bytes
3375          * to allow one less page to be charged, but we can't rely
3376          * on the pre-charged bytes not being changed outside of
3377          * consume_obj_stock() or refill_obj_stock(). So ignore those
3378          * pre-charged bytes as well when charging pages. To avoid a
3379          * page uncharge right after a page charge, we set the
3380          * allow_uncharge flag to false when calling refill_obj_stock()
3381          * to temporarily allow the pre-charged bytes to exceed the page
3382          * size limit. The maximum reachable value of the pre-charged
3383          * bytes is (sizeof(object) + PAGE_SIZE - 2) if there is no data
3384          * race.
3385          */
3386         nr_pages = size >> PAGE_SHIFT;
3387         nr_bytes = size & (PAGE_SIZE - 1);
3388
3389         if (nr_bytes)
3390                 nr_pages += 1;
3391
3392         ret = obj_cgroup_charge_pages(objcg, gfp, nr_pages);
3393         if (!ret && nr_bytes)
3394                 refill_obj_stock(objcg, PAGE_SIZE - nr_bytes, false);
3395
3396         return ret;
3397 }
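
The page/byte split described in the comment above amounts to the following
arithmetic, shown here as a small standalone sketch (a PAGE_SHIFT of 12 is assumed
purely for illustration): the request is rounded up to whole pages, and the unused
remainder of the last page is what gets handed back to the local stock with
allow_uncharge disabled:

/* charge_split_demo.c - the page/byte split used by obj_cgroup_charge(), as plain arithmetic */
#include <stdio.h>

#define PAGE_SHIFT 12                          /* assumed for illustration */
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

int main(void)
{
        unsigned long sizes[] = { 8, 192, PAGE_SIZE, PAGE_SIZE + 1, 3 * PAGE_SIZE + 200 };

        for (int i = 0; i < 5; i++) {
                unsigned long size = sizes[i];
                unsigned long nr_pages = size >> PAGE_SHIFT;
                unsigned long nr_bytes = size & (PAGE_SIZE - 1);

                if (nr_bytes)
                        nr_pages++;            /* round the charge up to whole pages */

                printf("size=%5lu -> charge %lu page(s), refill stock with %lu byte(s)\n",
                       size, nr_pages, nr_bytes ? PAGE_SIZE - nr_bytes : 0);
        }
        return 0;
}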
3398
3399 void obj_cgroup_uncharge(struct obj_cgroup *objcg, size_t size)
3400 {
3401         refill_obj_stock(objcg, size, true);
3402 }
3403
3404 #endif /* CONFIG_MEMCG_KMEM */
3405
3406 /*
3407  * Because page_memcg(head) is not set on tails, set it now.
3408  */
3409 void split_page_memcg(struct page *head, unsigned int nr)
3410 {
3411         struct folio *folio = page_folio(head);
3412         struct mem_cgroup *memcg = folio_memcg(folio);
3413         int i;
3414
3415         if (mem_cgroup_disabled() || !memcg)
3416                 return;
3417
3418         for (i = 1; i < nr; i++)
3419                 folio_page(folio, i)->memcg_data = folio->memcg_data;
3420
3421         if (folio_memcg_kmem(folio))
3422                 obj_cgroup_get_many(__folio_objcg(folio), nr - 1);
3423         else
3424                 css_get_many(&memcg->css, nr - 1);
3425 }
3426
3427 #ifdef CONFIG_SWAP
3428 /**
3429  * mem_cgroup_move_swap_account - move swap charge and swap_cgroup's record.
3430  * @entry: swap entry to be moved
3431  * @from:  mem_cgroup which the entry is moved from
3432  * @to:  mem_cgroup which the entry is moved to
3433  *
3434  * It succeeds only when the swap_cgroup's record for this entry is the same
3435  * as the mem_cgroup's id of @from.
3436  *
3437  * Returns 0 on success, -EINVAL on failure.
3438  *
3439  * The caller must have charged to @to, IOW, called page_counter_charge() for
3440  * both res and memsw, and called css_get().
3441  */
3442 static int mem_cgroup_move_swap_account(swp_entry_t entry,
3443                                 struct mem_cgroup *from, struct mem_cgroup *to)
3444 {
3445         unsigned short old_id, new_id;
3446
3447         old_id = mem_cgroup_id(from);
3448         new_id = mem_cgroup_id(to);
3449
3450         if (swap_cgroup_cmpxchg(entry, old_id, new_id) == old_id) {
3451                 mod_memcg_state(from, MEMCG_SWAP, -1);
3452                 mod_memcg_state(to, MEMCG_SWAP, 1);
3453                 return 0;
3454         }
3455         return -EINVAL;
3456 }
3457 #else
3458 static inline int mem_cgroup_move_swap_account(swp_entry_t entry,
3459                                 struct mem_cgroup *from, struct mem_cgroup *to)
3460 {
3461         return -EINVAL;
3462 }
3463 #endif
3464
3465 static DEFINE_MUTEX(memcg_max_mutex);
3466
3467 static int mem_cgroup_resize_max(struct mem_cgroup *memcg,
3468                                  unsigned long max, bool memsw)
3469 {
3470         bool enlarge = false;
3471         bool drained = false;
3472         int ret;
3473         bool limits_invariant;
3474         struct page_counter *counter = memsw ? &memcg->memsw : &memcg->memory;
3475
3476         do {
3477                 if (signal_pending(current)) {
3478                         ret = -EINTR;
3479                         break;
3480                 }
3481
3482                 mutex_lock(&memcg_max_mutex);
3483                 /*
3484                  * Make sure that the new limit (memsw or memory limit) doesn't
3485                  * break our basic invariant rule memory.max <= memsw.max.
3486                  */
3487                 limits_invariant = memsw ? max >= READ_ONCE(memcg->memory.max) :
3488                                            max <= memcg->memsw.max;
3489                 if (!limits_invariant) {
3490                         mutex_unlock(&memcg_max_mutex);
3491                         ret = -EINVAL;
3492                         break;
3493                 }
3494                 if (max > counter->max)
3495                         enlarge = true;
3496                 ret = page_counter_set_max(counter, max);
3497                 mutex_unlock(&memcg_max_mutex);
3498
3499                 if (!ret)
3500                         break;
3501
3502                 if (!drained) {
3503                         drain_all_stock(memcg);
3504                         drained = true;
3505                         continue;
3506                 }
3507
3508                 if (!try_to_free_mem_cgroup_pages(memcg, 1, GFP_KERNEL,
3509                                         memsw ? 0 : MEMCG_RECLAIM_MAY_SWAP,
3510                                         NULL)) {
3511                         ret = -EBUSY;
3512                         break;
3513                 }
3514         } while (true);
3515
3516         if (!ret && enlarge)
3517                 memcg_oom_recover(memcg);
3518
3519         return ret;
3520 }
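
From userspace, the invariant checked above is what a writer of the cgroup v1 limit
files runs into. A minimal sketch follows; the mount point and group name are
assumptions (a typical v1 mount), and memory.memsw.* knobs exist only when swap
accounting is enabled. To raise both limits without tripping the
memory.max <= memsw.max check, grow the memsw limit first:

/* set_limits_demo.c - sketch only; paths are assumed, not taken from this file */
#include <stdio.h>
#include <string.h>
#include <errno.h>

static int write_knob(const char *path, const char *val)
{
        FILE *f = fopen(path, "w");
        int ret = 0;

        if (!f) {
                fprintf(stderr, "open %s: %s\n", path, strerror(errno));
                return -1;
        }
        if (fputs(val, f) == EOF)
                ret = -1;
        if (fclose(f) == EOF)
                ret = -1;
        if (ret)
                fprintf(stderr, "write %s failed\n", path);
        return ret;
}

int main(void)
{
        const char *grp = "/sys/fs/cgroup/memory/demo";   /* assumed group */
        char path[256];

        /* Grow memsw first so memory.max <= memsw.max holds at every step. */
        snprintf(path, sizeof(path), "%s/memory.memsw.limit_in_bytes", grp);
        write_knob(path, "1G");
        snprintf(path, sizeof(path), "%s/memory.limit_in_bytes", grp);
        write_knob(path, "512M");
        return 0;
}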
3521
3522 unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order,
3523                                             gfp_t gfp_mask,
3524                                             unsigned long *total_scanned)
3525 {
3526         unsigned long nr_reclaimed = 0;
3527         struct mem_cgroup_per_node *mz, *next_mz = NULL;
3528         unsigned long reclaimed;
3529         int loop = 0;
3530         struct mem_cgroup_tree_per_node *mctz;
3531         unsigned long excess;
3532
3533         if (order > 0)
3534                 return 0;
3535
3536         mctz = soft_limit_tree.rb_tree_per_node[pgdat->node_id];
3537
3538         /*
3539          * Do not even bother to check the largest node if the root
3540          * is empty. Do it lockless to prevent lock bouncing. Races
3541          * are acceptable as soft limit is best effort anyway.
3542          */
3543         if (!mctz || RB_EMPTY_ROOT(&mctz->rb_root))
3544                 return 0;
3545
3546         /*
3547          * This loop can run for a while, especially if mem_cgroups continuously
3548          * keep exceeding their soft limit and putting the system under
3549          * pressure.
3550          */
3551         do {
3552                 if (next_mz)
3553                         mz = next_mz;
3554                 else
3555                         mz = mem_cgroup_largest_soft_limit_node(mctz);
3556                 if (!mz)
3557                         break;
3558
3559                 reclaimed = mem_cgroup_soft_reclaim(mz->memcg, pgdat,
3560                                                     gfp_mask, total_scanned);
3561                 nr_reclaimed += reclaimed;
3562                 spin_lock_irq(&mctz->lock);
3563
3564                 /*
3565                  * If we failed to reclaim anything from this memory cgroup
3566                  * it is time to move on to the next cgroup
3567                  */
3568                 next_mz = NULL;
3569                 if (!reclaimed)
3570                         next_mz = __mem_cgroup_largest_soft_limit_node(mctz);
3571
3572                 excess = soft_limit_excess(mz->memcg);
3573                 /*
3574                  * One school of thought says that we should not add
3575                  * back the node to the tree if reclaim returns 0.
3576                  * But our reclaim could return 0, simply because due
3577          * But our reclaim could return 0 simply because, due
3578          * to the priority, we are exposing a smaller subset of
3579                  * term TODO.
3580                  */
3581                 /* If excess == 0, no tree ops */
3582                 __mem_cgroup_insert_exceeded(mz, mctz, excess);
3583                 spin_unlock_irq(&mctz->lock);
3584                 css_put(&mz->memcg->css);
3585                 loop++;
3586                 /*
3587                  * Could not reclaim anything and there are no more
3588                  * mem cgroups to try or we seem to be looping without
3589                  * reclaiming anything.
3590                  */
3591                 if (!nr_reclaimed &&
3592                         (next_mz == NULL ||
3593                         loop > MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS))
3594                         break;
3595         } while (!nr_reclaimed);
3596         if (next_mz)
3597                 css_put(&next_mz->memcg->css);
3598         return nr_reclaimed;
3599 }
3600
3601 /*
3602  * Reclaims as many pages from the given memcg as possible.
3603  *
3604  * Caller is responsible for holding css reference for memcg.
3605  */
3606 static int mem_cgroup_force_empty(struct mem_cgroup *memcg)
3607 {
3608         int nr_retries = MAX_RECLAIM_RETRIES;
3609
3610         /* we call try-to-free pages to make this cgroup empty */
3611         lru_add_drain_all();
3612
3613         drain_all_stock(memcg);
3614
3615         /* try to free all pages in this cgroup */
3616         while (nr_retries && page_counter_read(&memcg->memory)) {
3617                 if (signal_pending(current))
3618                         return -EINTR;
3619
3620                 if (!try_to_free_mem_cgroup_pages(memcg, 1, GFP_KERNEL,
3621                                                   MEMCG_RECLAIM_MAY_SWAP,
3622                                                   NULL))
3623                         nr_retries--;
3624         }
3625
3626         return 0;
3627 }
3628
3629 static ssize_t mem_cgroup_force_empty_write(struct kernfs_open_file *of,
3630                                             char *buf, size_t nbytes,
3631                                             loff_t off)
3632 {
3633         struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
3634
3635         if (mem_cgroup_is_root(memcg))
3636                 return -EINVAL;
3637         return mem_cgroup_force_empty(memcg) ?: nbytes;
3638 }
3639
3640 static u64 mem_cgroup_hierarchy_read(struct cgroup_subsys_state *css,
3641                                      struct cftype *cft)
3642 {
3643         return 1;
3644 }
3645
3646 static int mem_cgroup_hierarchy_write(struct cgroup_subsys_state *css,
3647                                       struct cftype *cft, u64 val)
3648 {
3649         if (val == 1)
3650                 return 0;
3651
3652         pr_warn_once("Non-hierarchical mode is deprecated. "
3653                      "Please report your usecase to linux-mm@kvack.org if you "
3654                      "depend on this functionality.\n");
3655
3656         return -EINVAL;
3657 }
3658
3659 static unsigned long mem_cgroup_usage(struct mem_cgroup *memcg, bool swap)
3660 {
3661         unsigned long val;
3662
3663         if (mem_cgroup_is_root(memcg)) {
3664                 mem_cgroup_flush_stats();
3665                 val = memcg_page_state(memcg, NR_FILE_PAGES) +
3666                         memcg_page_state(memcg, NR_ANON_MAPPED);
3667                 if (swap)
3668                         val += memcg_page_state(memcg, MEMCG_SWAP);
3669         } else {
3670                 if (!swap)
3671                         val = page_counter_read(&memcg->memory);
3672                 else
3673                         val = page_counter_read(&memcg->memsw);
3674         }
3675         return val;
3676 }
3677
3678 enum {
3679         RES_USAGE,
3680         RES_LIMIT,
3681         RES_MAX_USAGE,
3682         RES_FAILCNT,
3683         RES_SOFT_LIMIT,
3684 };
3685
3686 static u64 mem_cgroup_read_u64(struct cgroup_subsys_state *css,
3687                                struct cftype *cft)
3688 {
3689         struct mem_cgroup *memcg = mem_cgroup_from_css(css);
3690         struct page_counter *counter;
3691
3692         switch (MEMFILE_TYPE(cft->private)) {
3693         case _MEM:
3694                 counter = &memcg->memory;
3695                 break;
3696         case _MEMSWAP:
3697                 counter = &memcg->memsw;
3698                 break;
3699         case _KMEM:
3700                 counter = &memcg->kmem;
3701                 break;
3702         case _TCP:
3703                 counter = &memcg->tcpmem;
3704                 break;
3705         default:
3706                 BUG();
3707         }
3708
3709         switch (MEMFILE_ATTR(cft->private)) {
3710         case RES_USAGE:
3711                 if (counter == &memcg->memory)
3712                         return (u64)mem_cgroup_usage(memcg, false) * PAGE_SIZE;
3713                 if (counter == &memcg->memsw)
3714                         return (u64)mem_cgroup_usage(memcg, true) * PAGE_SIZE;
3715                 return (u64)page_counter_read(counter) * PAGE_SIZE;
3716         case RES_LIMIT:
3717                 return (u64)counter->max * PAGE_SIZE;
3718         case RES_MAX_USAGE:
3719                 return (u64)counter->watermark * PAGE_SIZE;
3720         case RES_FAILCNT:
3721                 return counter->failcnt;
3722         case RES_SOFT_LIMIT:
3723                 return (u64)memcg->soft_limit * PAGE_SIZE;
3724         default:
3725                 BUG();
3726         }
3727 }
3728
3729 #ifdef CONFIG_MEMCG_KMEM
3730 static int memcg_online_kmem(struct mem_cgroup *memcg)
3731 {
3732         struct obj_cgroup *objcg;
3733
3734         if (mem_cgroup_kmem_disabled())
3735                 return 0;
3736
3737         if (unlikely(mem_cgroup_is_root(memcg)))
3738                 return 0;
3739
3740         objcg = obj_cgroup_alloc();
3741         if (!objcg)
3742                 return -ENOMEM;
3743
3744         objcg->memcg = memcg;
3745         rcu_assign_pointer(memcg->objcg, objcg);
3746
3747         static_branch_enable(&memcg_kmem_enabled_key);
3748
3749         memcg->kmemcg_id = memcg->id.id;
3750
3751         return 0;
3752 }
3753
3754 static void memcg_offline_kmem(struct mem_cgroup *memcg)
3755 {
3756         struct mem_cgroup *parent;
3757
3758         if (mem_cgroup_kmem_disabled())
3759                 return;
3760
3761         if (unlikely(mem_cgroup_is_root(memcg)))
3762                 return;
3763
3764         parent = parent_mem_cgroup(memcg);
3765         if (!parent)
3766                 parent = root_mem_cgroup;
3767
3768         memcg_reparent_objcgs(memcg, parent);
3769
3770         /*
3771          * After we have finished memcg_reparent_objcgs(), all list_lrus
3772          * corresponding to this cgroup are guaranteed to remain empty.
3773          * The ordering is imposed by list_lru_node->lock taken by
3774          * memcg_reparent_list_lrus().
3775          */
3776         memcg_reparent_list_lrus(memcg, parent);
3777 }
3778 #else
3779 static int memcg_online_kmem(struct mem_cgroup *memcg)
3780 {
3781         return 0;
3782 }
3783 static void memcg_offline_kmem(struct mem_cgroup *memcg)
3784 {
3785 }
3786 #endif /* CONFIG_MEMCG_KMEM */
3787
3788 static int memcg_update_tcp_max(struct mem_cgroup *memcg, unsigned long max)
3789 {
3790         int ret;
3791
3792         mutex_lock(&memcg_max_mutex);
3793
3794         ret = page_counter_set_max(&memcg->tcpmem, max);
3795         if (ret)
3796                 goto out;
3797
3798         if (!memcg->tcpmem_active) {
3799                 /*
3800                  * The active flag needs to be written after the static_key
3801                  * update. This is what guarantees that the socket activation
3802                  * function is the last one to run. See mem_cgroup_sk_alloc()
3803                  * for details, and note that we don't mark any socket as
3804                  * belonging to this memcg until that flag is up.
3805                  *
3806                  * We need to do this, because static_keys will span multiple
3807                  * sites, but we can't control their order. If we mark a socket
3808                  * as accounted, but the accounting functions are not patched in
3809                  * yet, we'll lose accounting.
3810                  *
3811                  * We never race with the readers in mem_cgroup_sk_alloc(),
3812          * because when this value changes, the code to process it is not
3813                  * patched in yet.
3814                  */
3815                 static_branch_inc(&memcg_sockets_enabled_key);
3816                 memcg->tcpmem_active = true;
3817         }
3818 out:
3819         mutex_unlock(&memcg_max_mutex);
3820         return ret;
3821 }
3822
3823 /*
3824  * The user of this function is...
3825  * RES_LIMIT.
3826  */
3827 static ssize_t mem_cgroup_write(struct kernfs_open_file *of,
3828                                 char *buf, size_t nbytes, loff_t off)
3829 {
3830         struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
3831         unsigned long nr_pages;
3832         int ret;
3833
3834         buf = strstrip(buf);
3835         ret = page_counter_memparse(buf, "-1", &nr_pages);
3836         if (ret)
3837                 return ret;
3838
3839         switch (MEMFILE_ATTR(of_cft(of)->private)) {
3840         case RES_LIMIT:
3841                 if (mem_cgroup_is_root(memcg)) { /* Can't set limit on root */
3842                         ret = -EINVAL;
3843                         break;
3844                 }
3845                 switch (MEMFILE_TYPE(of_cft(of)->private)) {
3846                 case _MEM:
3847                         ret = mem_cgroup_resize_max(memcg, nr_pages, false);
3848                         break;
3849                 case _MEMSWAP:
3850                         ret = mem_cgroup_resize_max(memcg, nr_pages, true);
3851                         break;
3852                 case _KMEM:
3853                         /* kmem.limit_in_bytes is deprecated. */
3854                         ret = -EOPNOTSUPP;
3855                         break;
3856                 case _TCP:
3857                         ret = memcg_update_tcp_max(memcg, nr_pages);
3858                         break;
3859                 }
3860                 break;
3861         case RES_SOFT_LIMIT:
3862                 if (IS_ENABLED(CONFIG_PREEMPT_RT)) {
3863                         ret = -EOPNOTSUPP;
3864                 } else {
3865                         memcg->soft_limit = nr_pages;
3866                         ret = 0;
3867                 }
3868                 break;
3869         }
3870         return ret ?: nbytes;
3871 }
3872
3873 static ssize_t mem_cgroup_reset(struct kernfs_open_file *of, char *buf,
3874                                 size_t nbytes, loff_t off)
3875 {
3876         struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
3877         struct page_counter *counter;
3878
3879         switch (MEMFILE_TYPE(of_cft(of)->private)) {
3880         case _MEM:
3881                 counter = &memcg->memory;
3882                 break;
3883         case _MEMSWAP:
3884                 counter = &memcg->memsw;
3885                 break;
3886         case _KMEM:
3887                 counter = &memcg->kmem;
3888                 break;
3889         case _TCP:
3890                 counter = &memcg->tcpmem;
3891                 break;
3892         default:
3893                 BUG();
3894         }
3895
3896         switch (MEMFILE_ATTR(of_cft(of)->private)) {
3897         case RES_MAX_USAGE:
3898                 page_counter_reset_watermark(counter);
3899                 break;
3900         case RES_FAILCNT:
3901                 counter->failcnt = 0;
3902                 break;
3903         default:
3904                 BUG();
3905         }
3906
3907         return nbytes;
3908 }
3909
3910 static u64 mem_cgroup_move_charge_read(struct cgroup_subsys_state *css,
3911                                         struct cftype *cft)
3912 {
3913         return mem_cgroup_from_css(css)->move_charge_at_immigrate;
3914 }
3915
3916 #ifdef CONFIG_MMU
3917 static int mem_cgroup_move_charge_write(struct cgroup_subsys_state *css,
3918                                         struct cftype *cft, u64 val)
3919 {
3920         struct mem_cgroup *memcg = mem_cgroup_from_css(css);
3921
3922         if (val & ~MOVE_MASK)
3923                 return -EINVAL;
3924
3925         /*
3926          * No kind of locking is needed here, because ->can_attach() will
3927          * check this value once at the beginning of the process, and then carry
3928          * on with stale data. This means that changes to this value will only
3929          * affect task migrations starting after the change.
3930          */
3931         memcg->move_charge_at_immigrate = val;
3932         return 0;
3933 }
3934 #else
3935 static int mem_cgroup_move_charge_write(struct cgroup_subsys_state *css,
3936                                         struct cftype *cft, u64 val)
3937 {
3938         return -ENOSYS;
3939 }
3940 #endif
3941
3942 #ifdef CONFIG_NUMA
3943
3944 #define LRU_ALL_FILE (BIT(LRU_INACTIVE_FILE) | BIT(LRU_ACTIVE_FILE))
3945 #define LRU_ALL_ANON (BIT(LRU_INACTIVE_ANON) | BIT(LRU_ACTIVE_ANON))
3946 #define LRU_ALL      ((1 << NR_LRU_LISTS) - 1)
3947
3948 static unsigned long mem_cgroup_node_nr_lru_pages(struct mem_cgroup *memcg,
3949                                 int nid, unsigned int lru_mask, bool tree)
3950 {
3951         struct lruvec *lruvec = mem_cgroup_lruvec(memcg, NODE_DATA(nid));
3952         unsigned long nr = 0;
3953         enum lru_list lru;
3954
3955         VM_BUG_ON((unsigned)nid >= nr_node_ids);
3956
3957         for_each_lru(lru) {
3958                 if (!(BIT(lru) & lru_mask))
3959                         continue;
3960                 if (tree)
3961                         nr += lruvec_page_state(lruvec, NR_LRU_BASE + lru);
3962                 else
3963                         nr += lruvec_page_state_local(lruvec, NR_LRU_BASE + lru);
3964         }
3965         return nr;
3966 }
3967
3968 static unsigned long mem_cgroup_nr_lru_pages(struct mem_cgroup *memcg,
3969                                              unsigned int lru_mask,
3970                                              bool tree)
3971 {
3972         unsigned long nr = 0;
3973         enum lru_list lru;
3974
3975         for_each_lru(lru) {
3976                 if (!(BIT(lru) & lru_mask))
3977                         continue;
3978                 if (tree)
3979                         nr += memcg_page_state(memcg, NR_LRU_BASE + lru);
3980                 else
3981                         nr += memcg_page_state_local(memcg, NR_LRU_BASE + lru);
3982         }
3983         return nr;
3984 }
3985
3986 static int memcg_numa_stat_show(struct seq_file *m, void *v)
3987 {
3988         struct numa_stat {
3989                 const char *name;
3990                 unsigned int lru_mask;
3991         };
3992
3993         static const struct numa_stat stats[] = {
3994                 { "total", LRU_ALL },
3995                 { "file", LRU_ALL_FILE },
3996                 { "anon", LRU_ALL_ANON },
3997                 { "unevictable", BIT(LRU_UNEVICTABLE) },
3998         };
3999         const struct numa_stat *stat;
4000         int nid;
4001         struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
4002
4003         mem_cgroup_flush_stats();
4004
4005         for (stat = stats; stat < stats + ARRAY_SIZE(stats); stat++) {
4006                 seq_printf(m, "%s=%lu", stat->name,
4007                            mem_cgroup_nr_lru_pages(memcg, stat->lru_mask,
4008                                                    false));
4009                 for_each_node_state(nid, N_MEMORY)
4010                         seq_printf(m, " N%d=%lu", nid,
4011                                    mem_cgroup_node_nr_lru_pages(memcg, nid,
4012                                                         stat->lru_mask, false));
4013                 seq_putc(m, '\n');
4014         }
4015
4016         for (stat = stats; stat < stats + ARRAY_SIZE(stats); stat++) {
4017
4018                 seq_printf(m, "hierarchical_%s=%lu", stat->name,
4019                            mem_cgroup_nr_lru_pages(memcg, stat->lru_mask,
4020                                                    true));
4021                 for_each_node_state(nid, N_MEMORY)
4022                         seq_printf(m, " N%d=%lu", nid,
4023                                    mem_cgroup_node_nr_lru_pages(memcg, nid,
4024                                                         stat->lru_mask, true));
4025                 seq_putc(m, '\n');
4026         }
4027
4028         return 0;
4029 }
4030 #endif /* CONFIG_NUMA */
4031
4032 static const unsigned int memcg1_stats[] = {
4033         NR_FILE_PAGES,
4034         NR_ANON_MAPPED,
4035 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
4036         NR_ANON_THPS,
4037 #endif
4038         NR_SHMEM,
4039         NR_FILE_MAPPED,
4040         NR_FILE_DIRTY,
4041         NR_WRITEBACK,
4042         WORKINGSET_REFAULT_ANON,
4043         WORKINGSET_REFAULT_FILE,
4044         MEMCG_SWAP,
4045 };
4046
4047 static const char *const memcg1_stat_names[] = {
4048         "cache",
4049         "rss",
4050 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
4051         "rss_huge",
4052 #endif
4053         "shmem",
4054         "mapped_file",
4055         "dirty",
4056         "writeback",
4057         "workingset_refault_anon",
4058         "workingset_refault_file",
4059         "swap",
4060 };
4061
4062 /* Universal VM events cgroup1 shows, original sort order */
4063 static const unsigned int memcg1_events[] = {
4064         PGPGIN,
4065         PGPGOUT,
4066         PGFAULT,
4067         PGMAJFAULT,
4068 };
4069
4070 static int memcg_stat_show(struct seq_file *m, void *v)
4071 {
4072         struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
4073         unsigned long memory, memsw;
4074         struct mem_cgroup *mi;
4075         unsigned int i;
4076
4077         BUILD_BUG_ON(ARRAY_SIZE(memcg1_stat_names) != ARRAY_SIZE(memcg1_stats));
4078
4079         mem_cgroup_flush_stats();
4080
4081         for (i = 0; i < ARRAY_SIZE(memcg1_stats); i++) {
4082                 unsigned long nr;
4083
4084                 if (memcg1_stats[i] == MEMCG_SWAP && !do_memsw_account())
4085                         continue;
4086                 nr = memcg_page_state_local(memcg, memcg1_stats[i]);
4087                 seq_printf(m, "%s %lu\n", memcg1_stat_names[i],
4088                            nr * memcg_page_state_unit(memcg1_stats[i]));
4089         }
4090
4091         for (i = 0; i < ARRAY_SIZE(memcg1_events); i++)
4092                 seq_printf(m, "%s %lu\n", vm_event_name(memcg1_events[i]),
4093                            memcg_events_local(memcg, memcg1_events[i]));
4094
4095         for (i = 0; i < NR_LRU_LISTS; i++)
4096                 seq_printf(m, "%s %lu\n", lru_list_name(i),
4097                            memcg_page_state_local(memcg, NR_LRU_BASE + i) *
4098                            PAGE_SIZE);
4099
4100         /* Hierarchical information */
4101         memory = memsw = PAGE_COUNTER_MAX;
4102         for (mi = memcg; mi; mi = parent_mem_cgroup(mi)) {
4103                 memory = min(memory, READ_ONCE(mi->memory.max));
4104                 memsw = min(memsw, READ_ONCE(mi->memsw.max));
4105         }
4106         seq_printf(m, "hierarchical_memory_limit %llu\n",
4107                    (u64)memory * PAGE_SIZE);
4108         if (do_memsw_account())
4109                 seq_printf(m, "hierarchical_memsw_limit %llu\n",
4110                            (u64)memsw * PAGE_SIZE);
4111
4112         for (i = 0; i < ARRAY_SIZE(memcg1_stats); i++) {
4113                 unsigned long nr;
4114
4115                 if (memcg1_stats[i] == MEMCG_SWAP && !do_memsw_account())
4116                         continue;
4117                 nr = memcg_page_state(memcg, memcg1_stats[i]);
4118                 seq_printf(m, "total_%s %llu\n", memcg1_stat_names[i],
4119                            (u64)nr * memcg_page_state_unit(memcg1_stats[i]));
4120         }
4121
4122         for (i = 0; i < ARRAY_SIZE(memcg1_events); i++)
4123                 seq_printf(m, "total_%s %llu\n",
4124                            vm_event_name(memcg1_events[i]),
4125                            (u64)memcg_events(memcg, memcg1_events[i]));
4126
4127         for (i = 0; i < NR_LRU_LISTS; i++)
4128                 seq_printf(m, "total_%s %llu\n", lru_list_name(i),
4129                            (u64)memcg_page_state(memcg, NR_LRU_BASE + i) *
4130                            PAGE_SIZE);
4131
4132 #ifdef CONFIG_DEBUG_VM
4133         {
4134                 pg_data_t *pgdat;
4135                 struct mem_cgroup_per_node *mz;
4136                 unsigned long anon_cost = 0;
4137                 unsigned long file_cost = 0;
4138
4139                 for_each_online_pgdat(pgdat) {
4140                         mz = memcg->nodeinfo[pgdat->node_id];
4141
4142                         anon_cost += mz->lruvec.anon_cost;
4143                         file_cost += mz->lruvec.file_cost;
4144                 }
4145                 seq_printf(m, "anon_cost %lu\n", anon_cost);
4146                 seq_printf(m, "file_cost %lu\n", file_cost);
4147         }
4148 #endif
4149
4150         return 0;
4151 }
4152
4153 static u64 mem_cgroup_swappiness_read(struct cgroup_subsys_state *css,
4154                                       struct cftype *cft)
4155 {
4156         struct mem_cgroup *memcg = mem_cgroup_from_css(css);
4157
4158         return mem_cgroup_swappiness(memcg);
4159 }
4160
4161 static int mem_cgroup_swappiness_write(struct cgroup_subsys_state *css,
4162                                        struct cftype *cft, u64 val)
4163 {
4164         struct mem_cgroup *memcg = mem_cgroup_from_css(css);
4165
4166         if (val > 200)
4167                 return -EINVAL;
4168
4169         if (!mem_cgroup_is_root(memcg))
4170                 memcg->swappiness = val;
4171         else
4172                 vm_swappiness = val;
4173
4174         return 0;
4175 }
4176
4177 static void __mem_cgroup_threshold(struct mem_cgroup *memcg, bool swap)
4178 {
4179         struct mem_cgroup_threshold_ary *t;
4180         unsigned long usage;
4181         int i;
4182
4183         rcu_read_lock();
4184         if (!swap)
4185                 t = rcu_dereference(memcg->thresholds.primary);
4186         else
4187                 t = rcu_dereference(memcg->memsw_thresholds.primary);
4188
4189         if (!t)
4190                 goto unlock;
4191
4192         usage = mem_cgroup_usage(memcg, swap);
4193
4194         /*
4195          * current_threshold points to the threshold just below or equal to usage.
4196          * If that's not true, a threshold was crossed after the last
4197          * call of __mem_cgroup_threshold().
4198          */
4199         i = t->current_threshold;
4200
4201         /*
4202          * Iterate backward over the array of thresholds starting from
4203          * current_threshold and check if a threshold is crossed.
4204          * If none of the thresholds below usage is crossed, we read
4205          * only one element of the array here.
4206          */
4207         for (; i >= 0 && unlikely(t->entries[i].threshold > usage); i--)
4208                 eventfd_signal(t->entries[i].eventfd, 1);
4209
4210         /* i = current_threshold + 1 */
4211         i++;
4212
4213         /*
4214          * Iterate forward over the array of thresholds starting from
4215          * current_threshold+1 and check if a threshold is crossed.
4216          * If none of the thresholds above usage is crossed, we read
4217          * only one element of the array here.
4218          */
4219         for (; i < t->size && unlikely(t->entries[i].threshold <= usage); i++)
4220                 eventfd_signal(t->entries[i].eventfd, 1);
4221
4222         /* Update current_threshold */
4223         t->current_threshold = i - 1;
4224 unlock:
4225         rcu_read_unlock();
4226 }
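
The two scans above are easier to see on a plain sorted array. A minimal userspace
sketch, with eventfd_signal() replaced by printf() and made-up numbers used purely
for illustration:

/* threshold_scan_demo.c - the two-direction scan from __mem_cgroup_threshold() */
#include <stdio.h>

struct threshold {
        unsigned long threshold;
};

static struct threshold entries[] = { {100}, {200}, {400}, {800} };
static int size = 4;
static int current_threshold = 1;       /* last usage was between 200 and 399 */

static void check_thresholds(unsigned long usage)
{
        int i = current_threshold;

        /* Thresholds we dropped back below since the last check. */
        for (; i >= 0 && entries[i].threshold > usage; i--)
                printf("crossed down through %lu\n", entries[i].threshold);

        /* Thresholds we climbed past since the last check. */
        for (i++; i < size && entries[i].threshold <= usage; i++)
                printf("crossed up through %lu\n", entries[i].threshold);

        current_threshold = i - 1;
}

int main(void)
{
        check_thresholds(850);          /* signals 400 and 800 */
        check_thresholds(150);          /* signals 800, 400 and 200 */
        return 0;
}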
4227
4228 static void mem_cgroup_threshold(struct mem_cgroup *memcg)
4229 {
4230         while (memcg) {
4231                 __mem_cgroup_threshold(memcg, false);
4232                 if (do_memsw_account())
4233                         __mem_cgroup_threshold(memcg, true);
4234
4235                 memcg = parent_mem_cgroup(memcg);
4236         }
4237 }
4238
4239 static int compare_thresholds(const void *a, const void *b)
4240 {
4241         const struct mem_cgroup_threshold *_a = a;
4242         const struct mem_cgroup_threshold *_b = b;
4243
4244         if (_a->threshold > _b->threshold)
4245                 return 1;
4246
4247         if (_a->threshold < _b->threshold)
4248                 return -1;
4249
4250         return 0;
4251 }
4252
4253 static int mem_cgroup_oom_notify_cb(struct mem_cgroup *memcg)
4254 {
4255         struct mem_cgroup_eventfd_list *ev;
4256
4257         spin_lock(&memcg_oom_lock);
4258
4259         list_for_each_entry(ev, &memcg->oom_notify, list)
4260                 eventfd_signal(ev->eventfd, 1);
4261
4262         spin_unlock(&memcg_oom_lock);
4263         return 0;
4264 }
4265
4266 static void mem_cgroup_oom_notify(struct mem_cgroup *memcg)
4267 {
4268         struct mem_cgroup *iter;
4269
4270         for_each_mem_cgroup_tree(iter, memcg)
4271                 mem_cgroup_oom_notify_cb(iter);
4272 }
4273
4274 static int __mem_cgroup_usage_register_event(struct mem_cgroup *memcg,
4275         struct eventfd_ctx *eventfd, const char *args, enum res_type type)
4276 {
4277         struct mem_cgroup_thresholds *thresholds;
4278         struct mem_cgroup_threshold_ary *new;
4279         unsigned long threshold;
4280         unsigned long usage;
4281         int i, size, ret;
4282
4283         ret = page_counter_memparse(args, "-1", &threshold);
4284         if (ret)
4285                 return ret;
4286
4287         mutex_lock(&memcg->thresholds_lock);
4288
4289         if (type == _MEM) {
4290                 thresholds = &memcg->thresholds;
4291                 usage = mem_cgroup_usage(memcg, false);
4292         } else if (type == _MEMSWAP) {
4293                 thresholds = &memcg->memsw_thresholds;
4294                 usage = mem_cgroup_usage(memcg, true);
4295         } else
4296                 BUG();
4297
4298         /* Check if a threshold crossed before adding a new one */
4299         if (thresholds->primary)
4300                 __mem_cgroup_threshold(memcg, type == _MEMSWAP);
4301
4302         size = thresholds->primary ? thresholds->primary->size + 1 : 1;
4303
4304         /* Allocate memory for new array of thresholds */
4305         new = kmalloc(struct_size(new, entries, size), GFP_KERNEL);
4306         if (!new) {
4307                 ret = -ENOMEM;
4308                 goto unlock;
4309         }
4310         new->size = size;
4311
4312         /* Copy thresholds (if any) to new array */
4313         if (thresholds->primary)
4314                 memcpy(new->entries, thresholds->primary->entries,
4315                        flex_array_size(new, entries, size - 1));
4316
4317         /* Add new threshold */
4318         new->entries[size - 1].eventfd = eventfd;
4319         new->entries[size - 1].threshold = threshold;
4320
4321         /* Sort thresholds. Registering of new threshold isn't time-critical */
4322         sort(new->entries, size, sizeof(*new->entries),
4323                         compare_thresholds, NULL);
4324
4325         /* Find current threshold */
4326         new->current_threshold = -1;
4327         for (i = 0; i < size; i++) {
4328                 if (new->entries[i].threshold <= usage) {
4329                         /*
4330                          * new->current_threshold will not be used until
4331                          * rcu_assign_pointer(), so it's safe to increment
4332                          * it here.
4333                          */
4334                         ++new->current_threshold;
4335                 } else
4336                         break;
4337         }
4338
4339         /* Free old spare buffer and save old primary buffer as spare */
4340         kfree(thresholds->spare);
4341         thresholds->spare = thresholds->primary;
4342
4343         rcu_assign_pointer(thresholds->primary, new);
4344
4345         /* To be sure that nobody uses thresholds */
4346         synchronize_rcu();
4347
4348 unlock:
4349         mutex_unlock(&memcg->thresholds_lock);
4350
4351         return ret;
4352 }
4353
4354 static int mem_cgroup_usage_register_event(struct mem_cgroup *memcg,
4355         struct eventfd_ctx *eventfd, const char *args)
4356 {
4357         return __mem_cgroup_usage_register_event(memcg, eventfd, args, _MEM);
4358 }
4359
4360 static int memsw_cgroup_usage_register_event(struct mem_cgroup *memcg,
4361         struct eventfd_ctx *eventfd, const char *args)
4362 {
4363         return __mem_cgroup_usage_register_event(memcg, eventfd, args, _MEMSWAP);
4364 }
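
For reference, these handlers are reached from userspace through the cgroup v1 event
interface: create an eventfd, open memory.usage_in_bytes (or
memory.memsw.usage_in_bytes), and write "<event_fd> <usage_fd> <threshold>" to
cgroup.event_control. A minimal sketch, where the cgroup path is an assumption:

/* threshold_register_demo.c - registering a usage threshold from userspace (cgroup v1) */
#include <stdio.h>
#include <string.h>
#include <stdint.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/eventfd.h>

int main(void)
{
        const char *grp = "/sys/fs/cgroup/memory/demo";   /* assumed path */
        char buf[64], path[256];
        int efd, usage_fd, ctrl_fd;
        uint64_t count;

        efd = eventfd(0, 0);
        snprintf(path, sizeof(path), "%s/memory.usage_in_bytes", grp);
        usage_fd = open(path, O_RDONLY);
        snprintf(path, sizeof(path), "%s/cgroup.event_control", grp);
        ctrl_fd = open(path, O_WRONLY);
        if (efd < 0 || usage_fd < 0 || ctrl_fd < 0) {
                perror("setup");
                return 1;
        }

        /* "<eventfd> <fd of memory.usage_in_bytes> <threshold in bytes>" */
        snprintf(buf, sizeof(buf), "%d %d %llu", efd, usage_fd, 64ULL << 20);
        if (write(ctrl_fd, buf, strlen(buf)) < 0) {
                perror("register threshold");
                return 1;
        }

        /* Blocks until usage crosses the 64M threshold in either direction. */
        if (read(efd, &count, sizeof(count)) == (ssize_t)sizeof(count))
                printf("threshold crossed (%llu event(s))\n",
                       (unsigned long long)count);
        return 0;
}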
4365
4366 static void __mem_cgroup_usage_unregister_event(struct mem_cgroup *memcg,
4367         struct eventfd_ctx *eventfd, enum res_type type)
4368 {
4369         struct mem_cgroup_thresholds *thresholds;
4370         struct mem_cgroup_threshold_ary *new;
4371         unsigned long usage;
4372         int i, j, size, entries;
4373
4374         mutex_lock(&memcg->thresholds_lock);
4375
4376         if (type == _MEM) {
4377                 thresholds = &memcg->thresholds;
4378                 usage = mem_cgroup_usage(memcg, false);
4379         } else if (type == _MEMSWAP) {
4380                 thresholds = &memcg->memsw_thresholds;
4381                 usage = mem_cgroup_usage(memcg, true);
4382         } else
4383                 BUG();
4384
4385         if (!thresholds->primary)
4386                 goto unlock;
4387
4388         /* Check if a threshold crossed before removing */
4389         __mem_cgroup_threshold(memcg, type == _MEMSWAP);
4390
4391         /* Calculate the new number of thresholds */
4392         size = entries = 0;
4393         for (i = 0; i < thresholds->primary->size; i++) {
4394                 if (thresholds->primary->entries[i].eventfd != eventfd)
4395                         size++;
4396                 else
4397                         entries++;
4398         }
4399
4400         new = thresholds->spare;
4401
4402         /* If no items related to eventfd have been cleared, nothing to do */
4403         if (!entries)
4404                 goto unlock;
4405
4406         /* Set thresholds array to NULL if we don't have thresholds */
4407         if (!size) {
4408                 kfree(new);
4409                 new = NULL;
4410                 goto swap_buffers;
4411         }
4412
4413         new->size = size;
4414
4415         /* Copy thresholds and find current threshold */
4416         new->current_threshold = -1;
4417         for (i = 0, j = 0; i < thresholds->primary->size; i++) {
4418                 if (thresholds->primary->entries[i].eventfd == eventfd)
4419                         continue;
4420
4421                 new->entries[j] = thresholds->primary->entries[i];
4422                 if (new->entries[j].threshold <= usage) {
4423                         /*
4424                          * new->current_threshold will not be used
4425                          * until rcu_assign_pointer(), so it's safe to increment
4426                          * it here.
4427                          */
4428                         ++new->current_threshold;
4429                 }
4430                 j++;
4431         }
4432
4433 swap_buffers:
4434         /* Swap primary and spare array */
4435         thresholds->spare = thresholds->primary;
4436
4437         rcu_assign_pointer(thresholds->primary, new);
4438
4439         /* To be sure that nobody uses thresholds */
4440         synchronize_rcu();
4441
4442         /* If all events are unregistered, free the spare array */
4443         if (!new) {
4444                 kfree(thresholds->spare);
4445                 thresholds->spare = NULL;
4446         }
4447 unlock:
4448         mutex_unlock(&memcg->thresholds_lock);
4449 }
4450
4451 static void mem_cgroup_usage_unregister_event(struct mem_cgroup *memcg,
4452         struct eventfd_ctx *eventfd)
4453 {
4454         return __mem_cgroup_usage_unregister_event(memcg, eventfd, _MEM);
4455 }
4456
4457 static void memsw_cgroup_usage_unregister_event(struct mem_cgroup *memcg,
4458         struct eventfd_ctx *eventfd)
4459 {
4460         return __mem_cgroup_usage_unregister_event(memcg, eventfd, _MEMSWAP);
4461 }
4462
4463 static int mem_cgroup_oom_register_event(struct mem_cgroup *memcg,
4464         struct eventfd_ctx *eventfd, const char *args)
4465 {
4466         struct mem_cgroup_eventfd_list *event;
4467
4468         event = kmalloc(sizeof(*event), GFP_KERNEL);
4469         if (!event)
4470                 return -ENOMEM;
4471
4472         spin_lock(&memcg_oom_lock);
4473
4474         event->eventfd = eventfd;
4475         list_add(&event->list, &memcg->oom_notify);
4476
4477         /* already in OOM ? */
4478         if (memcg->under_oom)
4479                 eventfd_signal(eventfd, 1);
4480         spin_unlock(&memcg_oom_lock);
4481
4482         return 0;
4483 }
4484
4485 static void mem_cgroup_oom_unregister_event(struct mem_cgroup *memcg,
4486         struct eventfd_ctx *eventfd)
4487 {
4488         struct mem_cgroup_eventfd_list *ev, *tmp;
4489
4490         spin_lock(&memcg_oom_lock);
4491
4492         list_for_each_entry_safe(ev, tmp, &memcg->oom_notify, list) {
4493                 if (ev->eventfd == eventfd) {
4494                         list_del(&ev->list);
4495                         kfree(ev);
4496                 }
4497         }
4498
4499         spin_unlock(&memcg_oom_lock);
4500 }
4501
4502 static int mem_cgroup_oom_control_read(struct seq_file *sf, void *v)
4503 {
4504         struct mem_cgroup *memcg = mem_cgroup_from_seq(sf);
4505
4506         seq_printf(sf, "oom_kill_disable %d\n", memcg->oom_kill_disable);
4507         seq_printf(sf, "under_oom %d\n", (bool)memcg->under_oom);
4508         seq_printf(sf, "oom_kill %lu\n",
4509                    atomic_long_read(&memcg->memory_events[MEMCG_OOM_KILL]));
4510         return 0;
4511 }
4512
4513 static int mem_cgroup_oom_control_write(struct cgroup_subsys_state *css,
4514         struct cftype *cft, u64 val)
4515 {
4516         struct mem_cgroup *memcg = mem_cgroup_from_css(css);
4517
4518         /* cannot set to root cgroup and only 0 and 1 are allowed */
4519         if (mem_cgroup_is_root(memcg) || !((val == 0) || (val == 1)))
4520                 return -EINVAL;
4521
4522         memcg->oom_kill_disable = val;
4523         if (!val)
4524                 memcg_oom_recover(memcg);
4525
4526         return 0;
4527 }
4528
4529 #ifdef CONFIG_CGROUP_WRITEBACK
4530
4531 #include <trace/events/writeback.h>
4532
4533 static int memcg_wb_domain_init(struct mem_cgroup *memcg, gfp_t gfp)
4534 {
4535         return wb_domain_init(&memcg->cgwb_domain, gfp);
4536 }
4537
4538 static void memcg_wb_domain_exit(struct mem_cgroup *memcg)
4539 {
4540         wb_domain_exit(&memcg->cgwb_domain);
4541 }
4542
4543 static void memcg_wb_domain_size_changed(struct mem_cgroup *memcg)
4544 {
4545         wb_domain_size_changed(&memcg->cgwb_domain);
4546 }
4547
4548 struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb)
4549 {
4550         struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css);
4551
4552         if (!memcg->css.parent)
4553                 return NULL;
4554
4555         return &memcg->cgwb_domain;
4556 }
4557
4558 /**
4559  * mem_cgroup_wb_stats - retrieve writeback related stats from its memcg
4560  * @wb: bdi_writeback in question
4561  * @pfilepages: out parameter for number of file pages
4562  * @pheadroom: out parameter for number of allocatable pages according to memcg
4563  * @pdirty: out parameter for number of dirty pages
4564  * @pwriteback: out parameter for number of pages under writeback
4565  *
4566  * Determine the numbers of file, headroom, dirty, and writeback pages in
4567  * @wb's memcg.  File, dirty and writeback are self-explanatory.  Headroom
4568  * is a bit more involved.
4569  *
4570  * A memcg's headroom is "min(max, high) - used".  In the hierarchy, the
4571  * headroom is calculated as the lowest headroom of itself and the
4572  * ancestors.  Note that this doesn't consider the actual amount of
4573  * available memory in the system.  The caller should further cap
4574  * *@pheadroom accordingly.
4575  */
4576 void mem_cgroup_wb_stats(struct bdi_writeback *wb, unsigned long *pfilepages,
4577                          unsigned long *pheadroom, unsigned long *pdirty,
4578                          unsigned long *pwriteback)
4579 {
4580         struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css);
4581         struct mem_cgroup *parent;
4582
4583         mem_cgroup_flush_stats();
4584
4585         *pdirty = memcg_page_state(memcg, NR_FILE_DIRTY);
4586         *pwriteback = memcg_page_state(memcg, NR_WRITEBACK);
4587         *pfilepages = memcg_page_state(memcg, NR_INACTIVE_FILE) +
4588                         memcg_page_state(memcg, NR_ACTIVE_FILE);
4589
4590         *pheadroom = PAGE_COUNTER_MAX;
4591         while ((parent = parent_mem_cgroup(memcg))) {
4592                 unsigned long ceiling = min(READ_ONCE(memcg->memory.max),
4593                                             READ_ONCE(memcg->memory.high));
4594                 unsigned long used = page_counter_read(&memcg->memory);
4595
4596                 *pheadroom = min(*pheadroom, ceiling - min(ceiling, used));
4597                 memcg = parent;
4598         }
4599 }
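/*
 * Worked example for the headroom walk above (illustrative numbers
 * only): a memcg with memory.max = 1G, memory.high = 512M and 200M of
 * usage has a local ceiling of min(1G, 512M) = 512M and therefore 312M
 * of headroom.  If an ancestor is capped at 400M with 350M in use, its
 * headroom is just 50M, so *pheadroom ends up as min(312M, 50M) = 50M
 * after the loop.
 */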
4600
4601 /*
4602  * Foreign dirty flushing
4603  *
4604  * There's an inherent mismatch between memcg and writeback.  The former
4605  * tracks ownership per-page while the latter per-inode.  This was a
4606  * deliberate design decision because honoring per-page ownership in the
4607  * writeback path is complicated, may lead to higher CPU and IO overheads
4608  * and was deemed unnecessary given that write-sharing an inode across
4609  * different cgroups isn't a common use-case.
4610  *
4611  * Combined with inode majority-writer ownership switching, this works well
4612  * enough in most cases but there are some pathological cases.  For
4613  * example, let's say there are two cgroups A and B which keep writing to
4614  * different but confined parts of the same inode.  B owns the inode and
4615  * A's memory is limited far below B's.  A's dirty ratio can rise enough to
4616  * trigger balance_dirty_pages() sleeps but B's can be low enough to avoid
4617  * triggering background writeback.  A will be slowed down without a way to
4618  * make writeback of the dirty pages happen.
4619  *
4620  * Conditions like the above can lead to a cgroup getting repeatedly and
4621  * severely throttled after making some progress after each
4622  * dirty_expire_interval while the underlying IO device is almost
4623  * completely idle.
4624  *
4625  * Solving this problem completely requires matching the ownership tracking
4626  * granularities between memcg and writeback in either direction.  However,
4627  * the more egregious behaviors can be avoided by simply remembering the
4628  * most recent foreign dirtying events and initiating remote flushes on
4629  * them when local writeback isn't enough to keep the memory clean enough.
4630  *
4631  * The following two functions implement such mechanism.  When a foreign
4632  * page - a page whose memcg and writeback ownerships don't match - is
4633  * dirtied, mem_cgroup_track_foreign_dirty() records the inode-owning
4634  * bdi_writeback in the page-owning memcg.  When balance_dirty_pages()
4635  * decides that the memcg needs to sleep due to high dirty ratio, it calls
4636  * mem_cgroup_flush_foreign() which queues writeback on the recorded
4637  * foreign bdi_writebacks which haven't expired.  Both the numbers of
4638  * recorded bdi_writebacks and concurrent in-flight foreign writebacks are
4639  * limited to MEMCG_CGWB_FRN_CNT.
4640  *
4641  * The mechanism only remembers IDs and doesn't hold any object references.
4642  * As being wrong occasionally doesn't matter, updates and accesses to the
4643  * records are lockless and racy.
4644  */
4645 void mem_cgroup_track_foreign_dirty_slowpath(struct folio *folio,
4646                                              struct bdi_writeback *wb)
4647 {
4648         struct mem_cgroup *memcg = folio_memcg(folio);
4649         struct memcg_cgwb_frn *frn;
4650         u64 now = get_jiffies_64();
4651         u64 oldest_at = now;
4652         int oldest = -1;
4653         int i;
4654
4655         trace_track_foreign_dirty(folio, wb);
4656
4657         /*
4658          * Pick the slot to use.  If there is already a slot for @wb, keep
4659          * using it.  If not, replace the oldest one which isn't being
4660          * written out.
4661          */
4662         for (i = 0; i < MEMCG_CGWB_FRN_CNT; i++) {
4663                 frn = &memcg->cgwb_frn[i];
4664                 if (frn->bdi_id == wb->bdi->id &&
4665                     frn->memcg_id == wb->memcg_css->id)
4666                         break;
4667                 if (time_before64(frn->at, oldest_at) &&
4668                     atomic_read(&frn->done.cnt) == 1) {
4669                         oldest = i;
4670                         oldest_at = frn->at;
4671                 }
4672         }
4673
4674         if (i < MEMCG_CGWB_FRN_CNT) {
4675                 /*
4676                  * Re-using an existing one.  Update timestamp lazily to
4677                  * avoid making the cacheline hot.  We want them to be
4678                  * reasonably up-to-date and significantly shorter than
4679                  * dirty_expire_interval as that's what expires the record.
4680                  * Use the shorter of 1s and dirty_expire_interval / 8.
4681                  */
4682                 unsigned long update_intv =
4683                         min_t(unsigned long, HZ,
4684                               msecs_to_jiffies(dirty_expire_interval * 10) / 8);
4685
4686                 if (time_before64(frn->at, now - update_intv))
4687                         frn->at = now;
4688         } else if (oldest >= 0) {
4689                 /* replace the oldest free one */
4690                 frn = &memcg->cgwb_frn[oldest];
4691                 frn->bdi_id = wb->bdi->id;
4692                 frn->memcg_id = wb->memcg_css->id;
4693                 frn->at = now;
4694         }
4695 }
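/*
 * With the default dirty_expire_interval of 3000 centiseconds (30s),
 * the lazy update interval computed above works out to
 * min(HZ, msecs_to_jiffies(30000) / 8), i.e. min(1s, 3.75s) = 1s, so a
 * reused record's timestamp is refreshed at most about once per second
 * (an illustrative calculation; the interval is a runtime-tunable
 * sysctl, which shifts the numbers accordingly).
 */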
4696
4697 /* issue foreign writeback flushes for recorded foreign dirtying events */
4698 void mem_cgroup_flush_foreign(struct bdi_writeback *wb)
4699 {
4700         struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css);
4701         unsigned long intv = msecs_to_jiffies(dirty_expire_interval * 10);
4702         u64 now = jiffies_64;
4703         int i;
4704
4705         for (i = 0; i < MEMCG_CGWB_FRN_CNT; i++) {
4706                 struct memcg_cgwb_frn *frn = &memcg->cgwb_frn[i];
4707
4708                 /*
4709                  * If the record is older than dirty_expire_interval,
4710                  * writeback on it has already started.  No need to kick it
4711                  * off again.  Also, don't start a new one if there's
4712                  * already one in flight.
4713                  */
4714                 if (time_after64(frn->at, now - intv) &&
4715                     atomic_read(&frn->done.cnt) == 1) {
4716                         frn->at = 0;
4717                         trace_flush_foreign(wb, frn->bdi_id, frn->memcg_id);
4718                         cgroup_writeback_by_id(frn->bdi_id, frn->memcg_id,
4719                                                WB_REASON_FOREIGN_FLUSH,
4720                                                &frn->done);
4721                 }
4722         }
4723 }
4724
4725 #else   /* CONFIG_CGROUP_WRITEBACK */
4726
4727 static int memcg_wb_domain_init(struct mem_cgroup *memcg, gfp_t gfp)
4728 {
4729         return 0;
4730 }
4731
4732 static void memcg_wb_domain_exit(struct mem_cgroup *memcg)
4733 {
4734 }
4735
4736 static void memcg_wb_domain_size_changed(struct mem_cgroup *memcg)
4737 {
4738 }
4739
4740 #endif  /* CONFIG_CGROUP_WRITEBACK */
4741
4742 /*
4743  * DO NOT USE IN NEW FILES.
4744  *
4745  * "cgroup.event_control" implementation.
4746  *
4747  * This is way over-engineered.  It tries to support fully configurable
4748  * events for each user.  Such a level of flexibility is completely
4749  * unnecessary, especially in light of the planned unified hierarchy.
4750  *
4751  * Please deprecate this and replace with something simpler if at all
4752  * possible.
4753  */
4754
4755 /*
4756  * Unregister event and free resources.
4757  *
4758  * Gets called from workqueue.
4759  */
4760 static void memcg_event_remove(struct work_struct *work)
4761 {
4762         struct mem_cgroup_event *event =
4763                 container_of(work, struct mem_cgroup_event, remove);
4764         struct mem_cgroup *memcg = event->memcg;
4765
4766         remove_wait_queue(event->wqh, &event->wait);
4767
4768         event->unregister_event(memcg, event->eventfd);
4769
4770         /* Notify userspace the event is going away. */
4771         eventfd_signal(event->eventfd, 1);
4772
4773         eventfd_ctx_put(event->eventfd);
4774         kfree(event);
4775         css_put(&memcg->css);
4776 }
4777
4778 /*
4779  * Gets called on EPOLLHUP on eventfd when user closes it.
4780  *
4781  * Called with wqh->lock held and interrupts disabled.
4782  */
4783 static int memcg_event_wake(wait_queue_entry_t *wait, unsigned mode,
4784                             int sync, void *key)
4785 {
4786         struct mem_cgroup_event *event =
4787                 container_of(wait, struct mem_cgroup_event, wait);
4788         struct mem_cgroup *memcg = event->memcg;
4789         __poll_t flags = key_to_poll(key);
4790
4791         if (flags & EPOLLHUP) {
4792                 /*
4793                  * If the event has been detached at cgroup removal, we
4794                  * can simply return knowing the other side will cleanup
4795                  * for us.
4796                  *
4797                  * We can't race against event freeing since the other
4798                  * side will require wqh->lock via remove_wait_queue(),
4799                  * which we hold.
4800                  */
4801                 spin_lock(&memcg->event_list_lock);
4802                 if (!list_empty(&event->list)) {
4803                         list_del_init(&event->list);
4804                         /*
4805                          * We are in atomic context, but memcg_event_remove()
4806                          * may sleep, so we have to call it from a workqueue.
4807                          */
4808                         schedule_work(&event->remove);
4809                 }
4810                 spin_unlock(&memcg->event_list_lock);
4811         }
4812
4813         return 0;
4814 }
4815
4816 static void memcg_event_ptable_queue_proc(struct file *file,
4817                 wait_queue_head_t *wqh, poll_table *pt)
4818 {
4819         struct mem_cgroup_event *event =
4820                 container_of(pt, struct mem_cgroup_event, pt);
4821
4822         event->wqh = wqh;
4823         add_wait_queue(wqh, &event->wait);
4824 }
4825
4826 /*
4827  * DO NOT USE IN NEW FILES.
4828  *
4829  * Parse input and register new cgroup event handler.
4830  *
4831  * Input must be in format '<event_fd> <control_fd> <args>'.
4832  * Interpretation of args is defined by control file implementation.
4833  */
4834 static ssize_t memcg_write_event_control(struct kernfs_open_file *of,
4835                                          char *buf, size_t nbytes, loff_t off)
4836 {
4837         struct cgroup_subsys_state *css = of_css(of);
4838         struct mem_cgroup *memcg = mem_cgroup_from_css(css);
4839         struct mem_cgroup_event *event;
4840         struct cgroup_subsys_state *cfile_css;
4841         unsigned int efd, cfd;
4842         struct fd efile;
4843         struct fd cfile;
4844         struct dentry *cdentry;
4845         const char *name;
4846         char *endp;
4847         int ret;
4848
4849         if (IS_ENABLED(CONFIG_PREEMPT_RT))
4850                 return -EOPNOTSUPP;
4851
4852         buf = strstrip(buf);
4853
4854         efd = simple_strtoul(buf, &endp, 10);
4855         if (*endp != ' ')
4856                 return -EINVAL;
4857         buf = endp + 1;
4858
4859         cfd = simple_strtoul(buf, &endp, 10);
4860         if ((*endp != ' ') && (*endp != '\0'))
4861                 return -EINVAL;
4862         buf = endp + 1;
4863
4864         event = kzalloc(sizeof(*event), GFP_KERNEL);
4865         if (!event)
4866                 return -ENOMEM;
4867
4868         event->memcg = memcg;
4869         INIT_LIST_HEAD(&event->list);
4870         init_poll_funcptr(&event->pt, memcg_event_ptable_queue_proc);
4871         init_waitqueue_func_entry(&event->wait, memcg_event_wake);
4872         INIT_WORK(&event->remove, memcg_event_remove);
4873
4874         efile = fdget(efd);
4875         if (!efile.file) {
4876                 ret = -EBADF;
4877                 goto out_kfree;
4878         }
4879
4880         event->eventfd = eventfd_ctx_fileget(efile.file);
4881         if (IS_ERR(event->eventfd)) {
4882                 ret = PTR_ERR(event->eventfd);
4883                 goto out_put_efile;
4884         }
4885
4886         cfile = fdget(cfd);
4887         if (!cfile.file) {
4888                 ret = -EBADF;
4889                 goto out_put_eventfd;
4890         }
4891
4892         /* the process needs read permission on the control file */
4893         /* AV: shouldn't we check that it's been opened for read instead? */
4894         ret = file_permission(cfile.file, MAY_READ);
4895         if (ret < 0)
4896                 goto out_put_cfile;
4897
4898         /*
4899          * The control file must be a regular cgroup1 file. As a regular cgroup
4900          * file can't be renamed, it's safe to access its name afterwards.
4901          */
4902         cdentry = cfile.file->f_path.dentry;
4903         if (cdentry->d_sb->s_type != &cgroup_fs_type || !d_is_reg(cdentry)) {
4904                 ret = -EINVAL;
4905                 goto out_put_cfile;
4906         }
4907
4908         /*
4909          * Determine the event callbacks and set them in @event.  This used
4910          * to be done via struct cftype but cgroup core no longer knows
4911          * about these events.  The following is crude but the whole thing
4912          * is for compatibility anyway.
4913          *
4914          * DO NOT ADD NEW FILES.
4915          */
4916         name = cdentry->d_name.name;
4917
4918         if (!strcmp(name, "memory.usage_in_bytes")) {
4919                 event->register_event = mem_cgroup_usage_register_event;
4920                 event->unregister_event = mem_cgroup_usage_unregister_event;
4921         } else if (!strcmp(name, "memory.oom_control")) {
4922                 event->register_event = mem_cgroup_oom_register_event;
4923                 event->unregister_event = mem_cgroup_oom_unregister_event;
4924         } else if (!strcmp(name, "memory.pressure_level")) {
4925                 event->register_event = vmpressure_register_event;
4926                 event->unregister_event = vmpressure_unregister_event;
4927         } else if (!strcmp(name, "memory.memsw.usage_in_bytes")) {
4928                 event->register_event = memsw_cgroup_usage_register_event;
4929                 event->unregister_event = memsw_cgroup_usage_unregister_event;
4930         } else {
4931                 ret = -EINVAL;
4932                 goto out_put_cfile;
4933         }
4934
4935         /*
4936          * Verify that @cfile belongs to @css.  Also, remaining events are
4937          * automatically removed on cgroup destruction but the removal is
4938          * asynchronous, so take an extra ref on @css.
4939          */
4940         cfile_css = css_tryget_online_from_dir(cdentry->d_parent,
4941                                                &memory_cgrp_subsys);
4942         ret = -EINVAL;
4943         if (IS_ERR(cfile_css))
4944                 goto out_put_cfile;
4945         if (cfile_css != css) {
4946                 css_put(cfile_css);
4947                 goto out_put_cfile;
4948         }
4949
4950         ret = event->register_event(memcg, event->eventfd, buf);
4951         if (ret)
4952                 goto out_put_css;
4953
4954         vfs_poll(efile.file, &event->pt);
4955
4956         spin_lock_irq(&memcg->event_list_lock);
4957         list_add(&event->list, &memcg->event_list);
4958         spin_unlock_irq(&memcg->event_list_lock);
4959
4960         fdput(cfile);
4961         fdput(efile);
4962
4963         return nbytes;
4964
4965 out_put_css:
4966         css_put(css);
4967 out_put_cfile:
4968         fdput(cfile);
4969 out_put_eventfd:
4970         eventfd_ctx_put(event->eventfd);
4971 out_put_efile:
4972         fdput(efile);
4973 out_kfree:
4974         kfree(event);
4975
4976         return ret;
4977 }
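/*
 * Userspace side of the parsing above, as a minimal sketch (not part
 * of this file; the cgroup path and the "50M" threshold are examples
 * only, and the usual error handling is omitted):
 *
 *	int efd = eventfd(0, 0);
 *	int cfd = open("/sys/fs/cgroup/memory/foo/memory.usage_in_bytes",
 *		       O_RDONLY);
 *	int ecfd = open("/sys/fs/cgroup/memory/foo/cgroup.event_control",
 *			O_WRONLY);
 *	uint64_t cnt;
 *
 *	dprintf(ecfd, "%d %d 50M", efd, cfd);
 *	read(efd, &cnt, sizeof(cnt));
 *
 * The write follows the "<event_fd> <control_fd> <args>" format parsed
 * above; the read() then blocks until the usage threshold is crossed
 * and the registered eventfd is signalled.  memory.oom_control and
 * memory.pressure_level registrations go through the same file, each
 * with its own interpretation of <args>.
 */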
4978
4979 #if defined(CONFIG_MEMCG_KMEM) && (defined(CONFIG_SLAB) || defined(CONFIG_SLUB_DEBUG))
4980 static int mem_cgroup_slab_show(struct seq_file *m, void *p)
4981 {
4982         /*
4983          * Deprecated.
4984          * Please, take a look at tools/cgroup/memcg_slabinfo.py .
4985          */
4986         return 0;
4987 }
4988 #endif
4989
4990 static struct cftype mem_cgroup_legacy_files[] = {
4991         {
4992                 .name = "usage_in_bytes",
4993                 .private = MEMFILE_PRIVATE(_MEM, RES_USAGE),
4994                 .read_u64 = mem_cgroup_read_u64,
4995         },
4996         {
4997                 .name = "max_usage_in_bytes",
4998                 .private = MEMFILE_PRIVATE(_MEM, RES_MAX_USAGE),
4999                 .write = mem_cgroup_reset,
5000                 .read_u64 = mem_cgroup_read_u64,
5001         },
5002         {
5003                 .name = "limit_in_bytes",
5004                 .private = MEMFILE_PRIVATE(_MEM, RES_LIMIT),
5005                 .write = mem_cgroup_write,
5006                 .read_u64 = mem_cgroup_read_u64,
5007         },
5008         {
5009                 .name = "soft_limit_in_bytes",
5010                 .private = MEMFILE_PRIVATE(_MEM, RES_SOFT_LIMIT),
5011                 .write = mem_cgroup_write,
5012                 .read_u64 = mem_cgroup_read_u64,
5013         },
5014         {
5015                 .name = "failcnt",
5016                 .private = MEMFILE_PRIVATE(_MEM, RES_FAILCNT),
5017                 .write = mem_cgroup_reset,
5018                 .read_u64 = mem_cgroup_read_u64,
5019         },
5020         {
5021                 .name = "stat",
5022                 .seq_show = memcg_stat_show,
5023         },
5024         {
5025                 .name = "force_empty",
5026                 .write = mem_cgroup_force_empty_write,
5027         },
5028         {
5029                 .name = "use_hierarchy",
5030                 .write_u64 = mem_cgroup_hierarchy_write,
5031                 .read_u64 = mem_cgroup_hierarchy_read,
5032         },
5033         {
5034                 .name = "cgroup.event_control",         /* XXX: for compat */
5035                 .write = memcg_write_event_control,
5036                 .flags = CFTYPE_NO_PREFIX | CFTYPE_WORLD_WRITABLE,
5037         },
5038         {
5039                 .name = "swappiness",
5040                 .read_u64 = mem_cgroup_swappiness_read,
5041                 .write_u64 = mem_cgroup_swappiness_write,
5042         },
5043         {
5044                 .name = "move_charge_at_immigrate",
5045                 .read_u64 = mem_cgroup_move_charge_read,
5046                 .write_u64 = mem_cgroup_move_charge_write,
5047         },
5048         {
5049                 .name = "oom_control",
5050                 .seq_show = mem_cgroup_oom_control_read,
5051                 .write_u64 = mem_cgroup_oom_control_write,
5052         },
5053         {
5054                 .name = "pressure_level",
5055         },
5056 #ifdef CONFIG_NUMA
5057         {
5058                 .name = "numa_stat",
5059                 .seq_show = memcg_numa_stat_show,
5060         },
5061 #endif
5062         {
5063                 .name = "kmem.limit_in_bytes",
5064                 .private = MEMFILE_PRIVATE(_KMEM, RES_LIMIT),
5065                 .write = mem_cgroup_write,
5066                 .read_u64 = mem_cgroup_read_u64,
5067         },
5068         {
5069                 .name = "kmem.usage_in_bytes",
5070                 .private = MEMFILE_PRIVATE(_KMEM, RES_USAGE),
5071                 .read_u64 = mem_cgroup_read_u64,
5072         },
5073         {
5074                 .name = "kmem.failcnt",
5075                 .private = MEMFILE_PRIVATE(_KMEM, RES_FAILCNT),
5076                 .write = mem_cgroup_reset,
5077                 .read_u64 = mem_cgroup_read_u64,
5078         },
5079         {
5080                 .name = "kmem.max_usage_in_bytes",
5081                 .private = MEMFILE_PRIVATE(_KMEM, RES_MAX_USAGE),
5082                 .write = mem_cgroup_reset,
5083                 .read_u64 = mem_cgroup_read_u64,
5084         },
5085 #if defined(CONFIG_MEMCG_KMEM) && \
5086         (defined(CONFIG_SLAB) || defined(CONFIG_SLUB_DEBUG))
5087         {
5088                 .name = "kmem.slabinfo",
5089                 .seq_show = mem_cgroup_slab_show,
5090         },
5091 #endif
5092         {
5093                 .name = "kmem.tcp.limit_in_bytes",
5094                 .private = MEMFILE_PRIVATE(_TCP, RES_LIMIT),
5095                 .write = mem_cgroup_write,
5096                 .read_u64 = mem_cgroup_read_u64,
5097         },
5098         {
5099                 .name = "kmem.tcp.usage_in_bytes",
5100                 .private = MEMFILE_PRIVATE(_TCP, RES_USAGE),
5101                 .read_u64 = mem_cgroup_read_u64,
5102         },
5103         {
5104                 .name = "kmem.tcp.failcnt",
5105                 .private = MEMFILE_PRIVATE(_TCP, RES_FAILCNT),
5106                 .write = mem_cgroup_reset,
5107                 .read_u64 = mem_cgroup_read_u64,
5108         },
5109         {
5110                 .name = "kmem.tcp.max_usage_in_bytes",
5111                 .private = MEMFILE_PRIVATE(_TCP, RES_MAX_USAGE),
5112                 .write = mem_cgroup_reset,
5113                 .read_u64 = mem_cgroup_read_u64,
5114         },
5115         { },    /* terminate */
5116 };
5117
5118 /*
5119  * Private memory cgroup IDR
5120  *
5121  * Swap-out records and page cache shadow entries need to store memcg
5122  * references in constrained space, so we maintain an ID space that is
5123  * limited to 16 bit (MEM_CGROUP_ID_MAX), limiting the total number of
5124  * memory-controlled cgroups to 64k.
5125  *
5126  * However, there usually are many references to the offline CSS after
5127  * the cgroup has been destroyed, such as page cache or reclaimable
5128  * slab objects, that don't need to hang on to the ID. We want to keep
5129  * those dead CSS from occupying IDs, or we might quickly exhaust the
5130  * relatively small ID space and prevent the creation of new cgroups
5131  * even when there are much fewer than 64k cgroups - possibly none.
5132  *
5133  * Maintain a private 16-bit ID space for memcg, and allow the ID to
5134  * be freed and recycled when it's no longer needed, which is usually
5135  * when the CSS is offlined.
5136  *
5137  * The only exception to that are records of swapped out tmpfs/shmem
5138  * pages that need to be attributed to live ancestors on swapin. But
5139  * those references are manageable from userspace.
5140  */
5141
5142 static DEFINE_IDR(mem_cgroup_idr);
5143
5144 static void mem_cgroup_id_remove(struct mem_cgroup *memcg)
5145 {
5146         if (memcg->id.id > 0) {
5147                 idr_remove(&mem_cgroup_idr, memcg->id.id);
5148                 memcg->id.id = 0;
5149         }
5150 }
5151
5152 static void __maybe_unused mem_cgroup_id_get_many(struct mem_cgroup *memcg,
5153                                                   unsigned int n)
5154 {
5155         refcount_add(n, &memcg->id.ref);
5156 }
5157
5158 static void mem_cgroup_id_put_many(struct mem_cgroup *memcg, unsigned int n)
5159 {
5160         if (refcount_sub_and_test(n, &memcg->id.ref)) {
5161                 mem_cgroup_id_remove(memcg);
5162
5163                 /* Memcg ID pins CSS */
5164                 css_put(&memcg->css);
5165         }
5166 }
5167
5168 static inline void mem_cgroup_id_put(struct mem_cgroup *memcg)
5169 {
5170         mem_cgroup_id_put_many(memcg, 1);
5171 }
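/*
 * Reference-count lifecycle of the private ID (a descriptive summary,
 * derived from the code below and elsewhere in this file):
 * mem_cgroup_css_online() sets id.ref to 1, mem_cgroup_css_offline()
 * drops that reference again, and swap-out records pin the ID with
 * extra references via mem_cgroup_id_get_many()/mem_cgroup_id_put_many(),
 * so the ID is only recycled once the memcg is offline and no swap
 * records refer to it anymore.
 */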
5172
5173 /**
5174  * mem_cgroup_from_id - look up a memcg from a memcg id
5175  * @id: the memcg id to look up
5176  *
5177  * Caller must hold rcu_read_lock().
5178  */
5179 struct mem_cgroup *mem_cgroup_from_id(unsigned short id)
5180 {
5181         WARN_ON_ONCE(!rcu_read_lock_held());
5182         return idr_find(&mem_cgroup_idr, id);
5183 }
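/*
 * A typical lookup pattern for callers (a sketch; since the ID may be
 * recycled and the memcg may be going away, a tryget is needed before
 * using the result outside the RCU section):
 *
 *	rcu_read_lock();
 *	memcg = mem_cgroup_from_id(id);
 *	if (memcg && !css_tryget_online(&memcg->css))
 *		memcg = NULL;
 *	rcu_read_unlock();
 */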
5184
5185 #ifdef CONFIG_SHRINKER_DEBUG
5186 struct mem_cgroup *mem_cgroup_get_from_ino(unsigned long ino)
5187 {
5188         struct cgroup *cgrp;
5189         struct cgroup_subsys_state *css;
5190         struct mem_cgroup *memcg;
5191
5192         cgrp = cgroup_get_from_id(ino);
5193         if (IS_ERR(cgrp))
5194                 return ERR_CAST(cgrp);
5195
5196         css = cgroup_get_e_css(cgrp, &memory_cgrp_subsys);
5197         if (css)
5198                 memcg = container_of(css, struct mem_cgroup, css);
5199         else
5200                 memcg = ERR_PTR(-ENOENT);
5201
5202         cgroup_put(cgrp);
5203
5204         return memcg;
5205 }
5206 #endif
5207
5208 static int alloc_mem_cgroup_per_node_info(struct mem_cgroup *memcg, int node)
5209 {
5210         struct mem_cgroup_per_node *pn;
5211
5212         pn = kzalloc_node(sizeof(*pn), GFP_KERNEL, node);
5213         if (!pn)
5214                 return 1;
5215
5216         pn->lruvec_stats_percpu = alloc_percpu_gfp(struct lruvec_stats_percpu,
5217                                                    GFP_KERNEL_ACCOUNT);
5218         if (!pn->lruvec_stats_percpu) {
5219                 kfree(pn);
5220                 return 1;
5221         }
5222
5223         lruvec_init(&pn->lruvec);
5224         pn->memcg = memcg;
5225
5226         memcg->nodeinfo[node] = pn;
5227         return 0;
5228 }
5229
5230 static void free_mem_cgroup_per_node_info(struct mem_cgroup *memcg, int node)
5231 {
5232         struct mem_cgroup_per_node *pn = memcg->nodeinfo[node];
5233
5234         if (!pn)
5235                 return;
5236
5237         free_percpu(pn->lruvec_stats_percpu);
5238         kfree(pn);
5239 }
5240
5241 static void __mem_cgroup_free(struct mem_cgroup *memcg)
5242 {
5243         int node;
5244
5245         for_each_node(node)
5246                 free_mem_cgroup_per_node_info(memcg, node);
5247         kfree(memcg->vmstats);
5248         free_percpu(memcg->vmstats_percpu);
5249         kfree(memcg);
5250 }
5251
5252 static void mem_cgroup_free(struct mem_cgroup *memcg)
5253 {
5254         lru_gen_exit_memcg(memcg);
5255         memcg_wb_domain_exit(memcg);
5256         __mem_cgroup_free(memcg);
5257 }
5258
5259 static struct mem_cgroup *mem_cgroup_alloc(void)
5260 {
5261         struct mem_cgroup *memcg;
5262         int node;
5263         int __maybe_unused i;
5264         long error = -ENOMEM;
5265
5266         memcg = kzalloc(struct_size(memcg, nodeinfo, nr_node_ids), GFP_KERNEL);
5267         if (!memcg)
5268                 return ERR_PTR(error);
5269
5270         memcg->id.id = idr_alloc(&mem_cgroup_idr, NULL,
5271                                  1, MEM_CGROUP_ID_MAX + 1, GFP_KERNEL);
5272         if (memcg->id.id < 0) {
5273                 error = memcg->id.id;
5274                 goto fail;
5275         }
5276
5277         memcg->vmstats = kzalloc(sizeof(struct memcg_vmstats), GFP_KERNEL);
5278         if (!memcg->vmstats)
5279                 goto fail;
5280
5281         memcg->vmstats_percpu = alloc_percpu_gfp(struct memcg_vmstats_percpu,
5282                                                  GFP_KERNEL_ACCOUNT);
5283         if (!memcg->vmstats_percpu)
5284                 goto fail;
5285
5286         for_each_node(node)
5287                 if (alloc_mem_cgroup_per_node_info(memcg, node))
5288                         goto fail;
5289
5290         if (memcg_wb_domain_init(memcg, GFP_KERNEL))
5291                 goto fail;
5292
5293         INIT_WORK(&memcg->high_work, high_work_func);
5294         INIT_LIST_HEAD(&memcg->oom_notify);
5295         mutex_init(&memcg->thresholds_lock);
5296         spin_lock_init(&memcg->move_lock);
5297         vmpressure_init(&memcg->vmpressure);
5298         INIT_LIST_HEAD(&memcg->event_list);
5299         spin_lock_init(&memcg->event_list_lock);
5300         memcg->socket_pressure = jiffies;
5301 #ifdef CONFIG_MEMCG_KMEM
5302         memcg->kmemcg_id = -1;
5303         INIT_LIST_HEAD(&memcg->objcg_list);
5304 #endif
5305 #ifdef CONFIG_CGROUP_WRITEBACK
5306         INIT_LIST_HEAD(&memcg->cgwb_list);
5307         for (i = 0; i < MEMCG_CGWB_FRN_CNT; i++)
5308                 memcg->cgwb_frn[i].done =
5309                         __WB_COMPLETION_INIT(&memcg_cgwb_frn_waitq);
5310 #endif
5311 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
5312         spin_lock_init(&memcg->deferred_split_queue.split_queue_lock);
5313         INIT_LIST_HEAD(&memcg->deferred_split_queue.split_queue);
5314         memcg->deferred_split_queue.split_queue_len = 0;
5315 #endif
5316         idr_replace(&mem_cgroup_idr, memcg, memcg->id.id);
5317         lru_gen_init_memcg(memcg);
5318         return memcg;
5319 fail:
5320         mem_cgroup_id_remove(memcg);
5321         __mem_cgroup_free(memcg);
5322         return ERR_PTR(error);
5323 }
5324
5325 static struct cgroup_subsys_state * __ref
5326 mem_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
5327 {
5328         struct mem_cgroup *parent = mem_cgroup_from_css(parent_css);
5329         struct mem_cgroup *memcg, *old_memcg;
5330
5331         old_memcg = set_active_memcg(parent);
5332         memcg = mem_cgroup_alloc();
5333         set_active_memcg(old_memcg);
5334         if (IS_ERR(memcg))
5335                 return ERR_CAST(memcg);
5336
5337         page_counter_set_high(&memcg->memory, PAGE_COUNTER_MAX);
5338         memcg->soft_limit = PAGE_COUNTER_MAX;
5339 #if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_ZSWAP)
5340         memcg->zswap_max = PAGE_COUNTER_MAX;
5341 #endif
5342         page_counter_set_high(&memcg->swap, PAGE_COUNTER_MAX);
5343         if (parent) {
5344                 memcg->swappiness = mem_cgroup_swappiness(parent);
5345                 memcg->oom_kill_disable = parent->oom_kill_disable;
5346
5347                 page_counter_init(&memcg->memory, &parent->memory);
5348                 page_counter_init(&memcg->swap, &parent->swap);
5349                 page_counter_init(&memcg->kmem, &parent->kmem);
5350                 page_counter_init(&memcg->tcpmem, &parent->tcpmem);
5351         } else {
5352                 init_memcg_events();
5353                 page_counter_init(&memcg->memory, NULL);
5354                 page_counter_init(&memcg->swap, NULL);
5355                 page_counter_init(&memcg->kmem, NULL);
5356                 page_counter_init(&memcg->tcpmem, NULL);
5357
5358                 root_mem_cgroup = memcg;
5359                 return &memcg->css;
5360         }
5361
5362         if (cgroup_subsys_on_dfl(memory_cgrp_subsys) && !cgroup_memory_nosocket)
5363                 static_branch_inc(&memcg_sockets_enabled_key);
5364
5365         return &memcg->css;
5366 }
5367
5368 static int mem_cgroup_css_online(struct cgroup_subsys_state *css)
5369 {
5370         struct mem_cgroup *memcg = mem_cgroup_from_css(css);
5371
5372         if (memcg_online_kmem(memcg))
5373                 goto remove_id;
5374
5375         /*
5376          * A memcg must be visible for expand_shrinker_info()
5377          * by the time the maps are allocated. So, we allocate maps
5378          * here, when for_each_mem_cgroup() can't skip it.
5379          */
5380         if (alloc_shrinker_info(memcg))
5381                 goto offline_kmem;
5382
5383         /* Online state pins memcg ID, memcg ID pins CSS */
5384         refcount_set(&memcg->id.ref, 1);
5385         css_get(css);
5386
5387         if (unlikely(mem_cgroup_is_root(memcg)))
5388                 queue_delayed_work(system_unbound_wq, &stats_flush_dwork,
5389                                    2UL*HZ);
5390         return 0;
5391 offline_kmem:
5392         memcg_offline_kmem(memcg);
5393 remove_id:
5394         mem_cgroup_id_remove(memcg);
5395         return -ENOMEM;
5396 }
5397
5398 static void mem_cgroup_css_offline(struct cgroup_subsys_state *css)
5399 {
5400         struct mem_cgroup *memcg = mem_cgroup_from_css(css);
5401         struct mem_cgroup_event *event, *tmp;
5402
5403         /*
5404          * Unregister events and notify userspace.
5405          * Notify userspace about cgroup removing only after rmdir of cgroup
5406          * directory to avoid race between userspace and kernelspace.
5407          */
5408         spin_lock_irq(&memcg->event_list_lock);
5409         list_for_each_entry_safe(event, tmp, &memcg->event_list, list) {
5410                 list_del_init(&event->list);
5411                 schedule_work(&event->remove);
5412         }
5413         spin_unlock_irq(&memcg->event_list_lock);
5414
5415         page_counter_set_min(&memcg->memory, 0);
5416         page_counter_set_low(&memcg->memory, 0);
5417
5418         memcg_offline_kmem(memcg);
5419         reparent_shrinker_deferred(memcg);
5420         wb_memcg_offline(memcg);
5421
5422         drain_all_stock(memcg);
5423
5424         mem_cgroup_id_put(memcg);
5425 }
5426
5427 static void mem_cgroup_css_released(struct cgroup_subsys_state *css)
5428 {
5429         struct mem_cgroup *memcg = mem_cgroup_from_css(css);
5430
5431         invalidate_reclaim_iterators(memcg);
5432 }
5433
5434 static void mem_cgroup_css_free(struct cgroup_subsys_state *css)
5435 {
5436         struct mem_cgroup *memcg = mem_cgroup_from_css(css);
5437         int __maybe_unused i;
5438
5439 #ifdef CONFIG_CGROUP_WRITEBACK
5440         for (i = 0; i < MEMCG_CGWB_FRN_CNT; i++)
5441                 wb_wait_for_completion(&memcg->cgwb_frn[i].done);
5442 #endif
5443         if (cgroup_subsys_on_dfl(memory_cgrp_subsys) && !cgroup_memory_nosocket)
5444                 static_branch_dec(&memcg_sockets_enabled_key);
5445
5446         if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && memcg->tcpmem_active)
5447                 static_branch_dec(&memcg_sockets_enabled_key);
5448
5449         vmpressure_cleanup(&memcg->vmpressure);
5450         cancel_work_sync(&memcg->high_work);
5451         mem_cgroup_remove_from_trees(memcg);
5452         free_shrinker_info(memcg);
5453         mem_cgroup_free(memcg);
5454 }
5455
5456 /**
5457  * mem_cgroup_css_reset - reset the states of a mem_cgroup
5458  * @css: the target css
5459  *
5460  * Reset the states of the mem_cgroup associated with @css.  This is
5461  * invoked when the userland requests disabling on the default hierarchy
5462  * but the memcg is pinned through dependency.  The memcg should stop
5463  * applying policies and should revert to the vanilla state as it may be
5464  * made visible again.
5465  *
5466  * The current implementation only resets the essential configurations.
5467  * This needs to be expanded to cover all the visible parts.
5468  */
5469 static void mem_cgroup_css_reset(struct cgroup_subsys_state *css)
5470 {
5471         struct mem_cgroup *memcg = mem_cgroup_from_css(css);
5472
5473         page_counter_set_max(&memcg->memory, PAGE_COUNTER_MAX);
5474         page_counter_set_max(&memcg->swap, PAGE_COUNTER_MAX);
5475         page_counter_set_max(&memcg->kmem, PAGE_COUNTER_MAX);
5476         page_counter_set_max(&memcg->tcpmem, PAGE_COUNTER_MAX);
5477         page_counter_set_min(&memcg->memory, 0);
5478         page_counter_set_low(&memcg->memory, 0);
5479         page_counter_set_high(&memcg->memory, PAGE_COUNTER_MAX);
5480         memcg->soft_limit = PAGE_COUNTER_MAX;
5481         page_counter_set_high(&memcg->swap, PAGE_COUNTER_MAX);
5482         memcg_wb_domain_size_changed(memcg);
5483 }
5484
5485 static void mem_cgroup_css_rstat_flush(struct cgroup_subsys_state *css, int cpu)
5486 {
5487         struct mem_cgroup *memcg = mem_cgroup_from_css(css);
5488         struct mem_cgroup *parent = parent_mem_cgroup(memcg);
5489         struct memcg_vmstats_percpu *statc;
5490         long delta, v;
5491         int i, nid;
5492
5493         statc = per_cpu_ptr(memcg->vmstats_percpu, cpu);
5494
5495         for (i = 0; i < MEMCG_NR_STAT; i++) {
5496                 /*
5497                  * Collect the aggregated propagation counts of groups
5498                  * below us. We're in a per-cpu loop here and this is
5499                  * a global counter, so the first cycle will get them.
5500                  */
5501                 delta = memcg->vmstats->state_pending[i];
5502                 if (delta)
5503                         memcg->vmstats->state_pending[i] = 0;
5504
5505                 /* Add CPU changes on this level since the last flush */
5506                 v = READ_ONCE(statc->state[i]);
5507                 if (v != statc->state_prev[i]) {
5508                         delta += v - statc->state_prev[i];
5509                         statc->state_prev[i] = v;
5510                 }
5511
5512                 if (!delta)
5513                         continue;
5514
5515                 /* Aggregate counts on this level and propagate upwards */
5516                 memcg->vmstats->state[i] += delta;
5517                 if (parent)
5518                         parent->vmstats->state_pending[i] += delta;
5519         }
5520
5521         for (i = 0; i < NR_MEMCG_EVENTS; i++) {
5522                 delta = memcg->vmstats->events_pending[i];
5523                 if (delta)
5524                         memcg->vmstats->events_pending[i] = 0;
5525
5526                 v = READ_ONCE(statc->events[i]);
5527                 if (v != statc->events_prev[i]) {
5528                         delta += v - statc->events_prev[i];
5529                         statc->events_prev[i] = v;
5530                 }
5531
5532                 if (!delta)
5533                         continue;
5534
5535                 memcg->vmstats->events[i] += delta;
5536                 if (parent)
5537                         parent->vmstats->events_pending[i] += delta;
5538         }
5539
5540         for_each_node_state(nid, N_MEMORY) {
5541                 struct mem_cgroup_per_node *pn = memcg->nodeinfo[nid];
5542                 struct mem_cgroup_per_node *ppn = NULL;
5543                 struct lruvec_stats_percpu *lstatc;
5544
5545                 if (parent)
5546                         ppn = parent->nodeinfo[nid];
5547
5548                 lstatc = per_cpu_ptr(pn->lruvec_stats_percpu, cpu);
5549
5550                 for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++) {
5551                         delta = pn->lruvec_stats.state_pending[i];
5552                         if (delta)
5553                                 pn->lruvec_stats.state_pending[i] = 0;
5554
5555                         v = READ_ONCE(lstatc->state[i]);
5556                         if (v != lstatc->state_prev[i]) {
5557                                 delta += v - lstatc->state_prev[i];
5558                                 lstatc->state_prev[i] = v;
5559                         }
5560
5561                         if (!delta)
5562                                 continue;
5563
5564                         pn->lruvec_stats.state[i] += delta;
5565                         if (ppn)
5566                                 ppn->lruvec_stats.state_pending[i] += delta;
5567                 }
5568         }
5569 }
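/*
 * Worked example for the flush above (illustrative): if CPU0 added 3
 * and CPU1 added 2 to one of a leaf memcg's per-cpu stat counters
 * since the last flush, then across the per-CPU flush passes a total
 * delta of 5 is folded into the leaf's vmstats->state[] and the same 5
 * is staged in the parent's state_pending[], which the parent picks up
 * when its own CSS is flushed.
 */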
5570
5571 #ifdef CONFIG_MMU
5572 /* Handlers for move charge at task migration. */
5573 static int mem_cgroup_do_precharge(unsigned long count)
5574 {
5575         int ret;
5576
5577         /* Try a single bulk charge without reclaim first, kswapd may wake */
5578         ret = try_charge(mc.to, GFP_KERNEL & ~__GFP_DIRECT_RECLAIM, count);
5579         if (!ret) {
5580                 mc.precharge += count;
5581                 return ret;
5582         }
5583
5584         /* Try charges one by one with reclaim, but do not retry */
5585         while (count--) {
5586                 ret = try_charge(mc.to, GFP_KERNEL | __GFP_NORETRY, 1);
5587                 if (ret)
5588                         return ret;
5589                 mc.precharge++;
5590                 cond_resched();
5591         }
5592         return 0;
5593 }
5594
5595 union mc_target {
5596         struct page     *page;
5597         swp_entry_t     ent;
5598 };
5599
5600 enum mc_target_type {
5601         MC_TARGET_NONE = 0,
5602         MC_TARGET_PAGE,
5603         MC_TARGET_SWAP,
5604         MC_TARGET_DEVICE,
5605 };
5606
5607 static struct page *mc_handle_present_pte(struct vm_area_struct *vma,
5608                                                 unsigned long addr, pte_t ptent)
5609 {
5610         struct page *page = vm_normal_page(vma, addr, ptent);
5611
5612         if (!page || !page_mapped(page))
5613                 return NULL;
5614         if (PageAnon(page)) {
5615                 if (!(mc.flags & MOVE_ANON))
5616                         return NULL;
5617         } else {
5618                 if (!(mc.flags & MOVE_FILE))
5619                         return NULL;
5620         }
5621         if (!get_page_unless_zero(page))
5622                 return NULL;
5623
5624         return page;
5625 }
5626
5627 #if defined(CONFIG_SWAP) || defined(CONFIG_DEVICE_PRIVATE)
5628 static struct page *mc_handle_swap_pte(struct vm_area_struct *vma,
5629                         pte_t ptent, swp_entry_t *entry)
5630 {
5631         struct page *page = NULL;
5632         swp_entry_t ent = pte_to_swp_entry(ptent);
5633
5634         if (!(mc.flags & MOVE_ANON))
5635                 return NULL;
5636
5637         /*
5638          * Handle device private pages that are not accessible by the CPU, but
5639          * stored as special swap entries in the page table.
5640          */
5641         if (is_device_private_entry(ent)) {
5642                 page = pfn_swap_entry_to_page(ent);
5643                 if (!get_page_unless_zero(page))
5644                         return NULL;
5645                 return page;
5646         }
5647
5648         if (non_swap_entry(ent))
5649                 return NULL;
5650
5651         /*
5652          * Because swap_cache_get_folio() updates some statistics counter,
5653          * we call find_get_page() with swapper_space directly.
5654          */
5655         page = find_get_page(swap_address_space(ent), swp_offset(ent));
5656         entry->val = ent.val;
5657
5658         return page;
5659 }
5660 #else
5661 static struct page *mc_handle_swap_pte(struct vm_area_struct *vma,
5662                         pte_t ptent, swp_entry_t *entry)
5663 {
5664         return NULL;
5665 }
5666 #endif
5667
5668 static struct page *mc_handle_file_pte(struct vm_area_struct *vma,
5669                         unsigned long addr, pte_t ptent)
5670 {
5671         unsigned long index;
5672         struct folio *folio;
5673
5674         if (!vma->vm_file) /* anonymous vma */
5675                 return NULL;
5676         if (!(mc.flags & MOVE_FILE))
5677                 return NULL;
5678
5679         /* The folio is moved even if it's not RSS of this task (page-faulted). */
5680         /* shmem/tmpfs may report page out on swap: account for that too. */
5681         index = linear_page_index(vma, addr);
5682         folio = filemap_get_incore_folio(vma->vm_file->f_mapping, index);
5683         if (!folio)
5684                 return NULL;
5685         return folio_file_page(folio, index);
5686 }
5687
5688 /**
5689  * mem_cgroup_move_account - move account of the page
5690  * @page: the page
5691  * @compound: charge the page as compound or small page
5692  * @from: mem_cgroup which the page is moved from.
5693  * @to: mem_cgroup which the page is moved to. @from != @to.
5694  *
5695  * The caller must make sure the page is not on LRU (isolate_page() is useful.)
5696  *
5697  * This function doesn't do "charge" to new cgroup and doesn't do "uncharge"
5698  * from old cgroup.
5699  */
5700 static int mem_cgroup_move_account(struct page *page,
5701                                    bool compound,
5702                                    struct mem_cgroup *from,
5703                                    struct mem_cgroup *to)
5704 {
5705         struct folio *folio = page_folio(page);
5706         struct lruvec *from_vec, *to_vec;
5707         struct pglist_data *pgdat;
5708         unsigned int nr_pages = compound ? folio_nr_pages(folio) : 1;
5709         int nid, ret;
5710
5711         VM_BUG_ON(from == to);
5712         VM_BUG_ON_FOLIO(folio_test_lru(folio), folio);
5713         VM_BUG_ON(compound && !folio_test_large(folio));
5714
5715         /*
5716          * Prevent mem_cgroup_migrate() from looking at
5717          * page's memory cgroup of its source page while we change it.
5718          */
5719         ret = -EBUSY;
5720         if (!folio_trylock(folio))
5721                 goto out;
5722
5723         ret = -EINVAL;
5724         if (folio_memcg(folio) != from)
5725                 goto out_unlock;
5726
5727         pgdat = folio_pgdat(folio);
5728         from_vec = mem_cgroup_lruvec(from, pgdat);
5729         to_vec = mem_cgroup_lruvec(to, pgdat);
5730
5731         folio_memcg_lock(folio);
5732
5733         if (folio_test_anon(folio)) {
5734                 if (folio_mapped(folio)) {
5735                         __mod_lruvec_state(from_vec, NR_ANON_MAPPED, -nr_pages);
5736                         __mod_lruvec_state(to_vec, NR_ANON_MAPPED, nr_pages);
5737                         if (folio_test_transhuge(folio)) {
5738                                 __mod_lruvec_state(from_vec, NR_ANON_THPS,
5739                                                    -nr_pages);
5740                                 __mod_lruvec_state(to_vec, NR_ANON_THPS,
5741                                                    nr_pages);
5742                         }
5743                 }
5744         } else {
5745                 __mod_lruvec_state(from_vec, NR_FILE_PAGES, -nr_pages);
5746                 __mod_lruvec_state(to_vec, NR_FILE_PAGES, nr_pages);
5747
5748                 if (folio_test_swapbacked(folio)) {
5749                         __mod_lruvec_state(from_vec, NR_SHMEM, -nr_pages);
5750                         __mod_lruvec_state(to_vec, NR_SHMEM, nr_pages);
5751                 }
5752
5753                 if (folio_mapped(folio)) {
5754                         __mod_lruvec_state(from_vec, NR_FILE_MAPPED, -nr_pages);
5755                         __mod_lruvec_state(to_vec, NR_FILE_MAPPED, nr_pages);
5756                 }
5757
5758                 if (folio_test_dirty(folio)) {
5759                         struct address_space *mapping = folio_mapping(folio);
5760
5761                         if (mapping_can_writeback(mapping)) {
5762                                 __mod_lruvec_state(from_vec, NR_FILE_DIRTY,
5763                                                    -nr_pages);
5764                                 __mod_lruvec_state(to_vec, NR_FILE_DIRTY,
5765                                                    nr_pages);
5766                         }
5767                 }
5768         }
5769
5770 #ifdef CONFIG_SWAP
5771         if (folio_test_swapcache(folio)) {
5772                 __mod_lruvec_state(from_vec, NR_SWAPCACHE, -nr_pages);
5773                 __mod_lruvec_state(to_vec, NR_SWAPCACHE, nr_pages);
5774         }
5775 #endif
5776         if (folio_test_writeback(folio)) {
5777                 __mod_lruvec_state(from_vec, NR_WRITEBACK, -nr_pages);
5778                 __mod_lruvec_state(to_vec, NR_WRITEBACK, nr_pages);
5779         }
5780
5781         /*
5782          * All state has been migrated, let's switch to the new memcg.
5783          *
5784          * It is safe to change page's memcg here because the page
5785          * is referenced, charged, isolated, and locked: we can't race
5786          * with (un)charging, migration, LRU putback, or anything else
5787          * that would rely on a stable page's memory cgroup.
5788          *
5789          * Note that folio_memcg_lock is a memcg lock, not a page lock,
5790          * to save space. As soon as we switch page's memory cgroup to a
5791          * new memcg that isn't locked, the above state can change
5792          * concurrently again. Make sure we're truly done with it.
5793          */
5794         smp_mb();
5795
5796         css_get(&to->css);
5797         css_put(&from->css);
5798
5799         folio->memcg_data = (unsigned long)to;
5800
5801         __folio_memcg_unlock(from);
5802
5803         ret = 0;
5804         nid = folio_nid(folio);
5805
5806         local_irq_disable();
5807         mem_cgroup_charge_statistics(to, nr_pages);
5808         memcg_check_events(to, nid);
5809         mem_cgroup_charge_statistics(from, -nr_pages);
5810         memcg_check_events(from, nid);
5811         local_irq_enable();
5812 out_unlock:
5813         folio_unlock(folio);
5814 out:
5815         return ret;
5816 }
5817
5818 /**
5819  * get_mctgt_type - get target type of moving charge
5820  * @vma: the vma the pte to be checked belongs to
5821  * @addr: the address corresponding to the pte to be checked
5822  * @ptent: the pte to be checked
5823  * @target: the pointer where the target page or swap entry is stored (can be NULL)
5824  *
5825  * Returns
5826  *   0(MC_TARGET_NONE): if the pte is not a target for move charge.
5827  *   1(MC_TARGET_PAGE): if the page corresponding to this pte is a target for
5828  *     move charge. If @target is not NULL, the page is stored in target->page
5829  *     with an extra refcount taken (callers should handle it).
5830  *   2(MC_TARGET_SWAP): if the swap entry corresponding to this pte is a
5831  *     target for charge migration. If @target is not NULL, the entry is stored
5832  *     in target->ent.
5833  *   3(MC_TARGET_DEVICE): like MC_TARGET_PAGE but the page is device memory and
5834  *   thus not on the lru.
5835  *     For now such a page is charged like a regular page would be, as for all
5836  *     intents and purposes it is just special memory taking the place of a
5837  *     regular page.
5838  *
5839  *     See Documentation/vm/hmm.txt and include/linux/hmm.h
5840  *
5841  * Called with pte lock held.
5842  */
5843
5844 static enum mc_target_type get_mctgt_type(struct vm_area_struct *vma,
5845                 unsigned long addr, pte_t ptent, union mc_target *target)
5846 {
5847         struct page *page = NULL;
5848         enum mc_target_type ret = MC_TARGET_NONE;
5849         swp_entry_t ent = { .val = 0 };
5850
5851         if (pte_present(ptent))
5852                 page = mc_handle_present_pte(vma, addr, ptent);
5853         else if (pte_none_mostly(ptent))
5854                 /*
5855                  * PTE markers should be treated as a none pte here, separated
5856                  * from other swap handling below.
5857                  */
5858                 page = mc_handle_file_pte(vma, addr, ptent);
5859         else if (is_swap_pte(ptent))
5860                 page = mc_handle_swap_pte(vma, ptent, &ent);
5861
5862         if (!page && !ent.val)
5863                 return ret;
5864         if (page) {
5865                 /*
5866                  * Do only loose check w/o serialization.
5867                  * mem_cgroup_move_account() checks the page is valid or
5868                  * not under LRU exclusion.
5869                  */
5870                 if (page_memcg(page) == mc.from) {
5871                         ret = MC_TARGET_PAGE;
5872                         if (is_device_private_page(page) ||
5873                             is_device_coherent_page(page))
5874                                 ret = MC_TARGET_DEVICE;
5875                         if (target)
5876                                 target->page = page;
5877                 }
5878                 if (!ret || !target)
5879                         put_page(page);
5880         }
5881         /*
5882          * There is a swap entry and a page doesn't exist or isn't charged.
5883          * But we cannot move a tail-page in a THP.
5884          */
5885         if (ent.val && !ret && (!page || !PageTransCompound(page)) &&
5886             mem_cgroup_id(mc.from) == lookup_swap_cgroup_id(ent)) {
5887                 ret = MC_TARGET_SWAP;
5888                 if (target)
5889                         target->ent = ent;
5890         }
5891         return ret;
5892 }
5893
5894 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
5895 /*
5896  * We don't consider PMD mapped swapping or file mapped pages because THP does
5897  * not support them for now.
5898  * Caller should make sure that pmd_trans_huge(pmd) is true.
5899  */
5900 static enum mc_target_type get_mctgt_type_thp(struct vm_area_struct *vma,
5901                 unsigned long addr, pmd_t pmd, union mc_target *target)
5902 {
5903         struct page *page = NULL;
5904         enum mc_target_type ret = MC_TARGET_NONE;
5905
5906         if (unlikely(is_swap_pmd(pmd))) {
5907                 VM_BUG_ON(thp_migration_supported() &&
5908                                   !is_pmd_migration_entry(pmd));
5909                 return ret;
5910         }
5911         page = pmd_page(pmd);
5912         VM_BUG_ON_PAGE(!page || !PageHead(page), page);
5913         if (!(mc.flags & MOVE_ANON))
5914                 return ret;
5915         if (page_memcg(page) == mc.from) {
5916                 ret = MC_TARGET_PAGE;
5917                 if (target) {
5918                         get_page(page);
5919                         target->page = page;
5920                 }
5921         }
5922         return ret;
5923 }
5924 #else
5925 static inline enum mc_target_type get_mctgt_type_thp(struct vm_area_struct *vma,
5926                 unsigned long addr, pmd_t pmd, union mc_target *target)
5927 {
5928         return MC_TARGET_NONE;
5929 }
5930 #endif
5931
5932 static int mem_cgroup_count_precharge_pte_range(pmd_t *pmd,
5933                                         unsigned long addr, unsigned long end,
5934                                         struct mm_walk *walk)
5935 {
5936         struct vm_area_struct *vma = walk->vma;
5937         pte_t *pte;
5938         spinlock_t *ptl;
5939
5940         ptl = pmd_trans_huge_lock(pmd, vma);
5941         if (ptl) {
5942                 /*
5943                  * Note there can not be MC_TARGET_DEVICE for now as we do not
5944                  * support transparent huge pages with MEMORY_DEVICE_PRIVATE but
5945                  * this might change.
5945                  * this might change.
5946                  */
5947                 if (get_mctgt_type_thp(vma, addr, *pmd, NULL) == MC_TARGET_PAGE)
5948                         mc.precharge += HPAGE_PMD_NR;
5949                 spin_unlock(ptl);
5950                 return 0;
5951         }
5952
5953         if (pmd_trans_unstable(pmd))
5954                 return 0;
5955         pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
5956         for (; addr != end; pte++, addr += PAGE_SIZE)
5957                 if (get_mctgt_type(vma, addr, *pte, NULL))
5958                         mc.precharge++; /* increment precharge temporarily */
5959         pte_unmap_unlock(pte - 1, ptl);
5960         cond_resched();
5961
5962         return 0;
5963 }
5964
5965 static const struct mm_walk_ops precharge_walk_ops = {
5966         .pmd_entry      = mem_cgroup_count_precharge_pte_range,
5967 };
5968
5969 static unsigned long mem_cgroup_count_precharge(struct mm_struct *mm)
5970 {
5971         unsigned long precharge;
5972
5973         mmap_read_lock(mm);
5974         walk_page_range(mm, 0, ULONG_MAX, &precharge_walk_ops, NULL);
5975         mmap_read_unlock(mm);
5976
5977         precharge = mc.precharge;
5978         mc.precharge = 0;
5979
5980         return precharge;
5981 }
5982
5983 static int mem_cgroup_precharge_mc(struct mm_struct *mm)
5984 {
5985         unsigned long precharge = mem_cgroup_count_precharge(mm);
5986
5987         VM_BUG_ON(mc.moving_task);
5988         mc.moving_task = current;
5989         return mem_cgroup_do_precharge(precharge);
5990 }
5991
5992 /* cancels all extra charges on mc.from and mc.to, and wakes up all waiters. */
5993 static void __mem_cgroup_clear_mc(void)
5994 {
5995         struct mem_cgroup *from = mc.from;
5996         struct mem_cgroup *to = mc.to;
5997
5998         /* we must uncharge all the leftover precharges from mc.to */
5999         if (mc.precharge) {
6000                 cancel_charge(mc.to, mc.precharge);
6001                 mc.precharge = 0;
6002         }
6003         /*
6004          * we didn't uncharge from mc.from at mem_cgroup_move_account(), so
6005          * we must uncharge here.
6006          */
6007         if (mc.moved_charge) {
6008                 cancel_charge(mc.from, mc.moved_charge);
6009                 mc.moved_charge = 0;
6010         }
6011         /* we must fixup refcnts and charges */
6012         if (mc.moved_swap) {
6013                 /* uncharge swap account from the old cgroup */
6014                 if (!mem_cgroup_is_root(mc.from))
6015                         page_counter_uncharge(&mc.from->memsw, mc.moved_swap);
6016
6017                 mem_cgroup_id_put_many(mc.from, mc.moved_swap);
6018
6019                 /*
6020                  * we charged both to->memory and to->memsw, so we
6021                  * should uncharge to->memory.
6022                  */
6023                 if (!mem_cgroup_is_root(mc.to))
6024                         page_counter_uncharge(&mc.to->memory, mc.moved_swap);
6025
6026                 mc.moved_swap = 0;
6027         }
6028         memcg_oom_recover(from);
6029         memcg_oom_recover(to);
6030         wake_up_all(&mc.waitq);
6031 }
6032
6033 static void mem_cgroup_clear_mc(void)
6034 {
6035         struct mm_struct *mm = mc.mm;
6036
6037         /*
6038          * we must clear moving_task before waking up waiters at the end of
6039          * task migration.
6040          */
6041         mc.moving_task = NULL;
6042         __mem_cgroup_clear_mc();
6043         spin_lock(&mc.lock);
6044         mc.from = NULL;
6045         mc.to = NULL;
6046         mc.mm = NULL;
6047         spin_unlock(&mc.lock);
6048
6049         mmput(mm);
6050 }
6051
6052 static int mem_cgroup_can_attach(struct cgroup_taskset *tset)
6053 {
6054         struct cgroup_subsys_state *css;
6055         struct mem_cgroup *memcg = NULL; /* unneeded init to make gcc happy */
6056         struct mem_cgroup *from;
6057         struct task_struct *leader, *p;
6058         struct mm_struct *mm;
6059         unsigned long move_flags;
6060         int ret = 0;
6061
6062         /* charge immigration isn't supported on the default hierarchy */
6063         if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
6064                 return 0;
6065
6066         /*
6067          * Multi-process migrations only happen on the default hierarchy
6068          * where charge immigration is not used.  Perform charge
6069          * immigration if @tset contains a leader and whine if there are
6070          * multiple.
6071          */
6072         p = NULL;
6073         cgroup_taskset_for_each_leader(leader, css, tset) {
6074                 WARN_ON_ONCE(p);
6075                 p = leader;
6076                 memcg = mem_cgroup_from_css(css);
6077         }
6078         if (!p)
6079                 return 0;
6080
6081         /*
6082          * We are now committed to this value whatever it is. Changes in this
6083          * tunable will only affect upcoming migrations, not the current one.
6084          * So we need to save it and keep using it for this migration.
6085          */
6086         move_flags = READ_ONCE(memcg->move_charge_at_immigrate);
6087         if (!move_flags)
6088                 return 0;
6089
6090         from = mem_cgroup_from_task(p);
6091
6092         VM_BUG_ON(from == memcg);
6093
6094         mm = get_task_mm(p);
6095         if (!mm)
6096                 return 0;
6097         /* We move charges only when we move an owner of the mm */
6098         if (mm->owner == p) {
6099                 VM_BUG_ON(mc.from);
6100                 VM_BUG_ON(mc.to);
6101                 VM_BUG_ON(mc.precharge);
6102                 VM_BUG_ON(mc.moved_charge);
6103                 VM_BUG_ON(mc.moved_swap);
6104
6105                 spin_lock(&mc.lock);
6106                 mc.mm = mm;
6107                 mc.from = from;
6108                 mc.to = memcg;
6109                 mc.flags = move_flags;
6110                 spin_unlock(&mc.lock);
6111                 /* We set mc.moving_task later */
6112
6113                 ret = mem_cgroup_precharge_mc(mm);
6114                 if (ret)
6115                         mem_cgroup_clear_mc();
6116         } else {
6117                 mmput(mm);
6118         }
6119         return ret;
6120 }
6121
6122 static void mem_cgroup_cancel_attach(struct cgroup_taskset *tset)
6123 {
6124         if (mc.to)
6125                 mem_cgroup_clear_mc();
6126 }
6127
6128 static int mem_cgroup_move_charge_pte_range(pmd_t *pmd,
6129                                 unsigned long addr, unsigned long end,
6130                                 struct mm_walk *walk)
6131 {
6132         int ret = 0;
6133         struct vm_area_struct *vma = walk->vma;
6134         pte_t *pte;
6135         spinlock_t *ptl;
6136         enum mc_target_type target_type;
6137         union mc_target target;
6138         struct page *page;
6139
6140         ptl = pmd_trans_huge_lock(pmd, vma);
6141         if (ptl) {
6142                 if (mc.precharge < HPAGE_PMD_NR) {
6143                         spin_unlock(ptl);
6144                         return 0;
6145                 }
6146                 target_type = get_mctgt_type_thp(vma, addr, *pmd, &target);
6147                 if (target_type == MC_TARGET_PAGE) {
6148                         page = target.page;
6149                         if (!isolate_lru_page(page)) {
6150                                 if (!mem_cgroup_move_account(page, true,
6151                                                              mc.from, mc.to)) {
6152                                         mc.precharge -= HPAGE_PMD_NR;
6153                                         mc.moved_charge += HPAGE_PMD_NR;
6154                                 }
6155                                 putback_lru_page(page);
6156                         }
6157                         put_page(page);
6158                 } else if (target_type == MC_TARGET_DEVICE) {
6159                         page = target.page;
6160                         if (!mem_cgroup_move_account(page, true,
6161                                                      mc.from, mc.to)) {
6162                                 mc.precharge -= HPAGE_PMD_NR;
6163                                 mc.moved_charge += HPAGE_PMD_NR;
6164                         }
6165                         put_page(page);
6166                 }
6167                 spin_unlock(ptl);
6168                 return 0;
6169         }
6170
6171         if (pmd_trans_unstable(pmd))
6172                 return 0;
6173 retry:
6174         pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
6175         for (; addr != end; addr += PAGE_SIZE) {
6176                 pte_t ptent = *(pte++);
6177                 bool device = false;
6178                 swp_entry_t ent;
6179
6180                 if (!mc.precharge)
6181                         break;
6182
6183                 switch (get_mctgt_type(vma, addr, ptent, &target)) {
6184                 case MC_TARGET_DEVICE:
6185                         device = true;
6186                         fallthrough;
6187                 case MC_TARGET_PAGE:
6188                         page = target.page;
6189                         /*
6190                          * We can have a part of a split pmd here. Moving it
6191                          * could be done, but it would be too convoluted, so simply
6192                          * ignore such a partial THP and keep it in the original
6193                          * memcg. There should be somebody mapping the head.
6194                          */
6195                         if (PageTransCompound(page))
6196                                 goto put;
6197                         if (!device && isolate_lru_page(page))
6198                                 goto put;
6199                         if (!mem_cgroup_move_account(page, false,
6200                                                 mc.from, mc.to)) {
6201                                 mc.precharge--;
6202                                 /* we uncharge from mc.from later. */
6203                                 mc.moved_charge++;
6204                         }
6205                         if (!device)
6206                                 putback_lru_page(page);
6207 put:                    /* get_mctgt_type() gets the page */
6208                         put_page(page);
6209                         break;
6210                 case MC_TARGET_SWAP:
6211                         ent = target.ent;
6212                         if (!mem_cgroup_move_swap_account(ent, mc.from, mc.to)) {
6213                                 mc.precharge--;
6214                                 mem_cgroup_id_get_many(mc.to, 1);
6215                                 /* we fixup other refcnts and charges later. */
6216                                 mc.moved_swap++;
6217                         }
6218                         break;
6219                 default:
6220                         break;
6221                 }
6222         }
6223         pte_unmap_unlock(pte - 1, ptl);
6224         cond_resched();
6225
6226         if (addr != end) {
6227                 /*
6228                  * We have consumed all precharges we got in can_attach().
6229                  * We try charging one by one, but don't do any additional
6230                  * charges to mc.to if we have already failed to charge once
6231                  * during the attach() phase.
6232                  */
6233                 ret = mem_cgroup_do_precharge(1);
6234                 if (!ret)
6235                         goto retry;
6236         }
6237
6238         return ret;
6239 }
6240
6241 static const struct mm_walk_ops charge_walk_ops = {
6242         .pmd_entry      = mem_cgroup_move_charge_pte_range,
6243 };
6244
6245 static void mem_cgroup_move_charge(void)
6246 {
6247         lru_add_drain_all();
6248         /*
6249          * Signal lock_page_memcg() to take the memcg's move_lock
6250          * while we're moving its pages to another memcg. Then wait
6251          * for already started RCU-only updates to finish.
6252          */
6253         atomic_inc(&mc.from->moving_account);
6254         synchronize_rcu();
6255 retry:
6256         if (unlikely(!mmap_read_trylock(mc.mm))) {
6257                 /*
6258                  * Someone who is holding the mmap_lock might be waiting in the
6259                  * waitq. So we cancel all extra charges, wake up all waiters,
6260                  * and retry. Because we cancel precharges, we might not be able
6261                  * to move enough charges, but moving charge is a best-effort
6262                  * feature anyway, so it wouldn't be a big problem.
6263                  */
6264                 __mem_cgroup_clear_mc();
6265                 cond_resched();
6266                 goto retry;
6267         }
6268         /*
6269          * When we have consumed all precharges and failed to do an
6270          * additional charge, the page walk just aborts.
6271          */
6272         walk_page_range(mc.mm, 0, ULONG_MAX, &charge_walk_ops, NULL);
6273         mmap_read_unlock(mc.mm);
6274         atomic_dec(&mc.from->moving_account);
6275 }
6276
6277 static void mem_cgroup_move_task(void)
6278 {
6279         if (mc.to) {
6280                 mem_cgroup_move_charge();
6281                 mem_cgroup_clear_mc();
6282         }
6283 }
6284 #else   /* !CONFIG_MMU */
6285 static int mem_cgroup_can_attach(struct cgroup_taskset *tset)
6286 {
6287         return 0;
6288 }
6289 static void mem_cgroup_cancel_attach(struct cgroup_taskset *tset)
6290 {
6291 }
6292 static void mem_cgroup_move_task(void)
6293 {
6294 }
6295 #endif
6296
6297 #ifdef CONFIG_LRU_GEN
6298 static void mem_cgroup_attach(struct cgroup_taskset *tset)
6299 {
6300         struct task_struct *task;
6301         struct cgroup_subsys_state *css;
6302
6303         /* find the first leader if there is any */
6304         cgroup_taskset_for_each_leader(task, css, tset)
6305                 break;
6306
6307         if (!task)
6308                 return;
6309
6310         task_lock(task);
6311         if (task->mm && READ_ONCE(task->mm->owner) == task)
6312                 lru_gen_migrate_mm(task->mm);
6313         task_unlock(task);
6314 }
6315 #else
6316 static void mem_cgroup_attach(struct cgroup_taskset *tset)
6317 {
6318 }
6319 #endif /* CONFIG_LRU_GEN */
6320
6321 static int seq_puts_memcg_tunable(struct seq_file *m, unsigned long value)
6322 {
6323         if (value == PAGE_COUNTER_MAX)
6324                 seq_puts(m, "max\n");
6325         else
6326                 seq_printf(m, "%llu\n", (u64)value * PAGE_SIZE);
6327
6328         return 0;
6329 }
6330
6331 static u64 memory_current_read(struct cgroup_subsys_state *css,
6332                                struct cftype *cft)
6333 {
6334         struct mem_cgroup *memcg = mem_cgroup_from_css(css);
6335
6336         return (u64)page_counter_read(&memcg->memory) * PAGE_SIZE;
6337 }
6338
6339 static u64 memory_peak_read(struct cgroup_subsys_state *css,
6340                             struct cftype *cft)
6341 {
6342         struct mem_cgroup *memcg = mem_cgroup_from_css(css);
6343
6344         return (u64)memcg->memory.watermark * PAGE_SIZE;
6345 }
6346
6347 static int memory_min_show(struct seq_file *m, void *v)
6348 {
6349         return seq_puts_memcg_tunable(m,
6350                 READ_ONCE(mem_cgroup_from_seq(m)->memory.min));
6351 }
6352
6353 static ssize_t memory_min_write(struct kernfs_open_file *of,
6354                                 char *buf, size_t nbytes, loff_t off)
6355 {
6356         struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
6357         unsigned long min;
6358         int err;
6359
6360         buf = strstrip(buf);
6361         err = page_counter_memparse(buf, "max", &min);
6362         if (err)
6363                 return err;
6364
6365         page_counter_set_min(&memcg->memory, min);
6366
6367         return nbytes;
6368 }
6369
6370 static int memory_low_show(struct seq_file *m, void *v)
6371 {
6372         return seq_puts_memcg_tunable(m,
6373                 READ_ONCE(mem_cgroup_from_seq(m)->memory.low));
6374 }
6375
6376 static ssize_t memory_low_write(struct kernfs_open_file *of,
6377                                 char *buf, size_t nbytes, loff_t off)
6378 {
6379         struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
6380         unsigned long low;
6381         int err;
6382
6383         buf = strstrip(buf);
6384         err = page_counter_memparse(buf, "max", &low);
6385         if (err)
6386                 return err;
6387
6388         page_counter_set_low(&memcg->memory, low);
6389
6390         return nbytes;
6391 }
6392
6393 static int memory_high_show(struct seq_file *m, void *v)
6394 {
6395         return seq_puts_memcg_tunable(m,
6396                 READ_ONCE(mem_cgroup_from_seq(m)->memory.high));
6397 }
6398
6399 static ssize_t memory_high_write(struct kernfs_open_file *of,
6400                                  char *buf, size_t nbytes, loff_t off)
6401 {
6402         struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
6403         unsigned int nr_retries = MAX_RECLAIM_RETRIES;
6404         bool drained = false;
6405         unsigned long high;
6406         int err;
6407
6408         buf = strstrip(buf);
6409         err = page_counter_memparse(buf, "max", &high);
6410         if (err)
6411                 return err;
6412
6413         page_counter_set_high(&memcg->memory, high);
6414
6415         for (;;) {
6416                 unsigned long nr_pages = page_counter_read(&memcg->memory);
6417                 unsigned long reclaimed;
6418
6419                 if (nr_pages <= high)
6420                         break;
6421
6422                 if (signal_pending(current))
6423                         break;
6424
6425                 if (!drained) {
6426                         drain_all_stock(memcg);
6427                         drained = true;
6428                         continue;
6429                 }
6430
6431                 reclaimed = try_to_free_mem_cgroup_pages(memcg, nr_pages - high,
6432                                         GFP_KERNEL, MEMCG_RECLAIM_MAY_SWAP,
6433                                         NULL);
6434
6435                 if (!reclaimed && !nr_retries--)
6436                         break;
6437         }
6438
6439         memcg_wb_domain_size_changed(memcg);
6440         return nbytes;
6441 }
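/*
 * Example (illustrative): lowering memory.high, e.g. by writing "1G" to the
 * file, makes the write handler above reclaim the cgroup down toward the new
 * value before returning. Unlike memory.max it never invokes the OOM killer,
 * and writing "max" effectively removes the throttle threshold again.
 */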
6442
6443 static int memory_max_show(struct seq_file *m, void *v)
6444 {
6445         return seq_puts_memcg_tunable(m,
6446                 READ_ONCE(mem_cgroup_from_seq(m)->memory.max));
6447 }
6448
6449 static ssize_t memory_max_write(struct kernfs_open_file *of,
6450                                 char *buf, size_t nbytes, loff_t off)
6451 {
6452         struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
6453         unsigned int nr_reclaims = MAX_RECLAIM_RETRIES;
6454         bool drained = false;
6455         unsigned long max;
6456         int err;
6457
6458         buf = strstrip(buf);
6459         err = page_counter_memparse(buf, "max", &max);
6460         if (err)
6461                 return err;
6462
6463         xchg(&memcg->memory.max, max);
6464
6465         for (;;) {
6466                 unsigned long nr_pages = page_counter_read(&memcg->memory);
6467
6468                 if (nr_pages <= max)
6469                         break;
6470
6471                 if (signal_pending(current))
6472                         break;
6473
6474                 if (!drained) {
6475                         drain_all_stock(memcg);
6476                         drained = true;
6477                         continue;
6478                 }
6479
6480                 if (nr_reclaims) {
6481                         if (!try_to_free_mem_cgroup_pages(memcg, nr_pages - max,
6482                                         GFP_KERNEL, MEMCG_RECLAIM_MAY_SWAP,
6483                                         NULL))
6484                                 nr_reclaims--;
6485                         continue;
6486                 }
6487
6488                 memcg_memory_event(memcg, MEMCG_OOM);
6489                 if (!mem_cgroup_out_of_memory(memcg, GFP_KERNEL, 0))
6490                         break;
6491         }
6492
6493         memcg_wb_domain_size_changed(memcg);
6494         return nbytes;
6495 }
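/*
 * Example (illustrative): writing "512M" to memory.max sets the hard limit
 * and the loop above reclaims the excess, falling back to the OOM killer if
 * reclaim cannot get usage below the new limit; writing "max" lifts the
 * limit again (PAGE_COUNTER_MAX).
 */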
6496
6497 static void __memory_events_show(struct seq_file *m, atomic_long_t *events)
6498 {
6499         seq_printf(m, "low %lu\n", atomic_long_read(&events[MEMCG_LOW]));
6500         seq_printf(m, "high %lu\n", atomic_long_read(&events[MEMCG_HIGH]));
6501         seq_printf(m, "max %lu\n", atomic_long_read(&events[MEMCG_MAX]));
6502         seq_printf(m, "oom %lu\n", atomic_long_read(&events[MEMCG_OOM]));
6503         seq_printf(m, "oom_kill %lu\n",
6504                    atomic_long_read(&events[MEMCG_OOM_KILL]));
6505         seq_printf(m, "oom_group_kill %lu\n",
6506                    atomic_long_read(&events[MEMCG_OOM_GROUP_KILL]));
6507 }
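/*
 * Example output (illustrative) as read from a cgroup's memory.events file,
 * matching the format printed above:
 *
 *	low 0
 *	high 37
 *	max 5
 *	oom 1
 *	oom_kill 1
 *	oom_group_kill 0
 */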
6508
6509 static int memory_events_show(struct seq_file *m, void *v)
6510 {
6511         struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
6512
6513         __memory_events_show(m, memcg->memory_events);
6514         return 0;
6515 }
6516
6517 static int memory_events_local_show(struct seq_file *m, void *v)
6518 {
6519         struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
6520
6521         __memory_events_show(m, memcg->memory_events_local);
6522         return 0;
6523 }
6524
6525 static int memory_stat_show(struct seq_file *m, void *v)
6526 {
6527         struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
6528         char *buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
6529
6530         if (!buf)
6531                 return -ENOMEM;
6532         memory_stat_format(memcg, buf, PAGE_SIZE);
6533         seq_puts(m, buf);
6534         kfree(buf);
6535         return 0;
6536 }
6537
6538 #ifdef CONFIG_NUMA
6539 static inline unsigned long lruvec_page_state_output(struct lruvec *lruvec,
6540                                                      int item)
6541 {
6542         return lruvec_page_state(lruvec, item) * memcg_page_state_unit(item);
6543 }
6544
6545 static int memory_numa_stat_show(struct seq_file *m, void *v)
6546 {
6547         int i;
6548         struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
6549
6550         mem_cgroup_flush_stats();
6551
6552         for (i = 0; i < ARRAY_SIZE(memory_stats); i++) {
6553                 int nid;
6554
6555                 if (memory_stats[i].idx >= NR_VM_NODE_STAT_ITEMS)
6556                         continue;
6557
6558                 seq_printf(m, "%s", memory_stats[i].name);
6559                 for_each_node_state(nid, N_MEMORY) {
6560                         u64 size;
6561                         struct lruvec *lruvec;
6562
6563                         lruvec = mem_cgroup_lruvec(memcg, NODE_DATA(nid));
6564                         size = lruvec_page_state_output(lruvec,
6565                                                         memory_stats[i].idx);
6566                         seq_printf(m, " N%d=%llu", nid, size);
6567                 }
6568                 seq_putc(m, '\n');
6569         }
6570
6571         return 0;
6572 }
6573 #endif
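/*
 * Example output line (illustrative) from memory.numa_stat on a two-node
 * system; the stat names come from the memory_stats[] table defined earlier
 * in this file, and "anon" is just one of them:
 *
 *	anon N0=1146880 N1=65536
 */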
6574
6575 static int memory_oom_group_show(struct seq_file *m, void *v)
6576 {
6577         struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
6578
6579         seq_printf(m, "%d\n", memcg->oom_group);
6580
6581         return 0;
6582 }
6583
6584 static ssize_t memory_oom_group_write(struct kernfs_open_file *of,
6585                                       char *buf, size_t nbytes, loff_t off)
6586 {
6587         struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
6588         int ret, oom_group;
6589
6590         buf = strstrip(buf);
6591         if (!buf)
6592                 return -EINVAL;
6593
6594         ret = kstrtoint(buf, 0, &oom_group);
6595         if (ret)
6596                 return ret;
6597
6598         if (oom_group != 0 && oom_group != 1)
6599                 return -EINVAL;
6600
6601         memcg->oom_group = oom_group;
6602
6603         return nbytes;
6604 }
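/*
 * Example (illustrative): writing "1" to memory.oom.group marks the cgroup
 * as an indivisible workload for the OOM killer, so an OOM kill targeting it
 * takes out the whole group; writing "0" restores per-task kills. Only the
 * values 0 and 1 are accepted, as checked above.
 */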
6605
6606 enum {
6607         MEMORY_RECLAIM_NODES = 0,
6608         MEMORY_RECLAIM_NULL,
6609 };
6610
6611 static const match_table_t if_tokens = {
6612         { MEMORY_RECLAIM_NODES, "nodes=%s" },
6613         { MEMORY_RECLAIM_NULL, NULL },
6614 };
6615
6616 static ssize_t memory_reclaim(struct kernfs_open_file *of, char *buf,
6617                               size_t nbytes, loff_t off)
6618 {
6619         struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
6620         unsigned int nr_retries = MAX_RECLAIM_RETRIES;
6621         unsigned long nr_to_reclaim, nr_reclaimed = 0;
6622         unsigned int reclaim_options = MEMCG_RECLAIM_MAY_SWAP |
6623                                        MEMCG_RECLAIM_PROACTIVE;
6624         char *old_buf, *start;
6625         substring_t args[MAX_OPT_ARGS];
6626         int token;
6627         char value[256];
6628         nodemask_t nodemask = NODE_MASK_ALL;
6629
6630         buf = strstrip(buf);
6631
6632         old_buf = buf;
6633         nr_to_reclaim = memparse(buf, &buf) / PAGE_SIZE;
6634         if (buf == old_buf)
6635                 return -EINVAL;
6636
6637         buf = strstrip(buf);
6638
6639         while ((start = strsep(&buf, " ")) != NULL) {
6640                 if (!strlen(start))
6641                         continue;
6642                 token = match_token(start, if_tokens, args);
6643                 match_strlcpy(value, args, sizeof(value));
6644                 switch (token) {
6645                 case MEMORY_RECLAIM_NODES:
6646                         if (nodelist_parse(value, nodemask) < 0)
6647                                 return -EINVAL;
6648                         break;
6649                 default:
6650                         return -EINVAL;
6651                 }
6652         }
6653
6654         while (nr_reclaimed < nr_to_reclaim) {
6655                 unsigned long reclaimed;
6656
6657                 if (signal_pending(current))
6658                         return -EINTR;
6659
6660                 /*
6661                  * This is the final attempt; drain percpu lru caches in the
6662                  * hope of introducing more evictable pages for
6663                  * try_to_free_mem_cgroup_pages().
6664                  */
6665                 if (!nr_retries)
6666                         lru_add_drain_all();
6667
6668                 reclaimed = try_to_free_mem_cgroup_pages(memcg,
6669                                                 nr_to_reclaim - nr_reclaimed,
6670                                                 GFP_KERNEL, reclaim_options,
6671                                                 &nodemask);
6672
6673                 if (!reclaimed && !nr_retries--)
6674                         return -EAGAIN;
6675
6676                 nr_reclaimed += reclaimed;
6677         }
6678
6679         return nbytes;
6680 }
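/*
 * Illustrative userspace sketch (not part of this kernel file): triggering
 * proactive reclaim of 256 MiB from a cgroup, restricted to node 0, through
 * the memory.reclaim interface implemented above. The cgroup path, function
 * name and request string are examples only, and error handling is minimal.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static int proactive_reclaim(const char *cgroup_dir, const char *request)
{
	char path[4096];
	int fd, ret = 0;

	snprintf(path, sizeof(path), "%s/memory.reclaim", cgroup_dir);
	fd = open(path, O_WRONLY);
	if (fd < 0)
		return -1;
	/*
	 * e.g. request = "256M nodes=0"; a failed reclaim (such as -EAGAIN
	 * from the kernel) shows up as a failed write here.
	 */
	if (write(fd, request, strlen(request)) < 0)
		ret = -1;
	close(fd);
	return ret;
}

int main(void)
{
	/* "/sys/fs/cgroup/example" is a placeholder cgroup directory */
	return proactive_reclaim("/sys/fs/cgroup/example", "256M nodes=0") ? 1 : 0;
}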
6681
6682 static struct cftype memory_files[] = {
6683         {
6684                 .name = "current",
6685                 .flags = CFTYPE_NOT_ON_ROOT,
6686                 .read_u64 = memory_current_read,
6687         },
6688         {
6689                 .name = "peak",
6690                 .flags = CFTYPE_NOT_ON_ROOT,
6691                 .read_u64 = memory_peak_read,
6692         },
6693         {
6694                 .name = "min",
6695                 .flags = CFTYPE_NOT_ON_ROOT,
6696                 .seq_show = memory_min_show,
6697                 .write = memory_min_write,
6698         },
6699         {
6700                 .name = "low",
6701                 .flags = CFTYPE_NOT_ON_ROOT,
6702                 .seq_show = memory_low_show,
6703                 .write = memory_low_write,
6704         },
6705         {
6706                 .name = "high",
6707                 .flags = CFTYPE_NOT_ON_ROOT,
6708                 .seq_show = memory_high_show,
6709                 .write = memory_high_write,
6710         },
6711         {
6712                 .name = "max",
6713                 .flags = CFTYPE_NOT_ON_ROOT,
6714                 .seq_show = memory_max_show,
6715                 .write = memory_max_write,
6716         },
6717         {
6718                 .name = "events",
6719                 .flags = CFTYPE_NOT_ON_ROOT,
6720                 .file_offset = offsetof(struct mem_cgroup, events_file),
6721                 .seq_show = memory_events_show,
6722         },
6723         {
6724                 .name = "events.local",
6725                 .flags = CFTYPE_NOT_ON_ROOT,
6726                 .file_offset = offsetof(struct mem_cgroup, events_local_file),
6727                 .seq_show = memory_events_local_show,
6728         },
6729         {
6730                 .name = "stat",
6731                 .seq_show = memory_stat_show,
6732         },
6733 #ifdef CONFIG_NUMA
6734         {
6735                 .name = "numa_stat",
6736                 .seq_show = memory_numa_stat_show,
6737         },
6738 #endif
6739         {
6740                 .name = "oom.group",
6741                 .flags = CFTYPE_NOT_ON_ROOT | CFTYPE_NS_DELEGATABLE,
6742                 .seq_show = memory_oom_group_show,
6743                 .write = memory_oom_group_write,
6744         },
6745         {
6746                 .name = "reclaim",
6747                 .flags = CFTYPE_NS_DELEGATABLE,
6748                 .write = memory_reclaim,
6749         },
6750         { }     /* terminate */
6751 };
6752
6753 struct cgroup_subsys memory_cgrp_subsys = {
6754         .css_alloc = mem_cgroup_css_alloc,
6755         .css_online = mem_cgroup_css_online,
6756         .css_offline = mem_cgroup_css_offline,
6757         .css_released = mem_cgroup_css_released,
6758         .css_free = mem_cgroup_css_free,
6759         .css_reset = mem_cgroup_css_reset,
6760         .css_rstat_flush = mem_cgroup_css_rstat_flush,
6761         .can_attach = mem_cgroup_can_attach,
6762         .attach = mem_cgroup_attach,
6763         .cancel_attach = mem_cgroup_cancel_attach,
6764         .post_attach = mem_cgroup_move_task,
6765         .dfl_cftypes = memory_files,
6766         .legacy_cftypes = mem_cgroup_legacy_files,
6767         .early_init = 0,
6768 };
6769
6770 /*
6771  * This function calculates an individual cgroup's effective
6772  * protection which is derived from its own memory.min/low, its
6773  * parent's and siblings' settings, as well as the actual memory
6774  * distribution in the tree.
6775  *
6776  * The following rules apply to the effective protection values:
6777  *
6778  * 1. At the first level of reclaim, effective protection is equal to
6779  *    the declared protection in memory.min and memory.low.
6780  *
6781  * 2. To enable safe delegation of the protection configuration, at
6782  *    subsequent levels the effective protection is capped to the
6783  *    parent's effective protection.
6784  *
6785  * 3. To make complex and dynamic subtrees easier to configure, the
6786  *    user is allowed to overcommit the declared protection at a given
6787  *    level. If that is the case, the parent's effective protection is
6788  *    distributed to the children in proportion to how much protection
6789  *    they have declared and how much of it they are utilizing.
6790  *
6791  *    This makes distribution proportional, but also work-conserving:
6792  *    if one cgroup claims much more protection than it actually uses,
6793  *    the unused remainder is available to its siblings.
6794  *
6795  * 4. Conversely, when the declared protection is undercommitted at a
6796  *    given level, the distribution of the larger parental protection
6797  *    budget is NOT proportional. A cgroup's protection from a sibling
6798  *    is capped to its own memory.min/low setting.
6799  *
6800  * 5. However, to allow protecting recursive subtrees from each other
6801  *    without having to declare each individual cgroup's fixed share
6802  *    of the ancestor's claim to protection, any unutilized -
6803  *    "floating" - protection from up the tree is distributed in
6804  *    proportion to each cgroup's *usage*. This makes the protection
6805  *    neutral wrt sibling cgroups and lets them compete freely over
6806  *    the shared parental protection budget, but it protects the
6807  *    subtree as a whole from neighboring subtrees.
6808  *
6809  * Note that 4. and 5. are not in conflict: 4. is about protecting
6810  * against immediate siblings whereas 5. is about protecting against
6811  * neighboring subtrees.
6812  */
6813 static unsigned long effective_protection(unsigned long usage,
6814                                           unsigned long parent_usage,
6815                                           unsigned long setting,
6816                                           unsigned long parent_effective,
6817                                           unsigned long siblings_protected)
6818 {
6819         unsigned long protected;
6820         unsigned long ep;
6821
6822         protected = min(usage, setting);
6823         /*
6824          * If all cgroups at this level combined claim and use more
6825          * protection than what the parent affords them, distribute
6826          * shares in proportion to utilization.
6827          *
6828          * We are using actual utilization rather than the statically
6829          * claimed protection in order to be work-conserving: claimed
6830          * but unused protection is available to siblings that would
6831          * otherwise get a smaller chunk than what they claimed.
6832          */
6833         if (siblings_protected > parent_effective)
6834                 return protected * parent_effective / siblings_protected;
6835
6836         /*
6837          * Ok, utilized protection of all children is within what the
6838          * parent affords them, so we know whatever this child claims
6839          * and utilizes is effectively protected.
6840          *
6841          * If there is unprotected usage beyond this value, reclaim
6842          * will apply pressure in proportion to that amount.
6843          *
6844          * If there is unutilized protection, the cgroup will be fully
6845          * shielded from reclaim, but we do return a smaller value for
6846          * protection than what the group could enjoy in theory. This
6847          * is okay. With the overcommit distribution above, effective
6848          * protection is always dependent on how memory is actually
6849          * consumed among the siblings anyway.
6850          */
6851         ep = protected;
6852
6853         /*
6854          * If the children aren't claiming (all of) the protection
6855          * afforded to them by the parent, distribute the remainder in
6856          * proportion to the (unprotected) memory of each cgroup. That
6857          * way, cgroups that aren't explicitly prioritized wrt each
6858          * other compete freely over the allowance, but they are
6859          * collectively protected from neighboring trees.
6860          *
6861          * We're using unprotected memory for the weight so that if
6862          * some cgroups DO claim explicit protection, we don't protect
6863          * the same bytes twice.
6864          *
6865          * Check both usage and parent_usage against the respective
6866          * protected values. One should imply the other, but they
6867          * aren't read atomically - make sure the division is sane.
6868          */
6869         if (!(cgrp_dfl_root.flags & CGRP_ROOT_MEMORY_RECURSIVE_PROT))
6870                 return ep;
6871         if (parent_effective > siblings_protected &&
6872             parent_usage > siblings_protected &&
6873             usage > protected) {
6874                 unsigned long unclaimed;
6875
6876                 unclaimed = parent_effective - siblings_protected;
6877                 unclaimed *= usage - protected;
6878                 unclaimed /= parent_usage - siblings_protected;
6879
6880                 ep += unclaimed;
6881         }
6882
6883         return ep;
6884 }
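/*
 * Worked example (illustrative) for the overcommit branch above: suppose the
 * parent's effective protection is 200M while its children combined claim
 * and use 300M (siblings_protected > parent_effective). A child that claims
 * and uses 150M then ends up with
 *
 *	protected * parent_effective / siblings_protected
 *	= 150M * 200M / 300M = 100M
 *
 * of effective protection, i.e. the parent's budget is split in proportion
 * to each child's utilized claim.
 */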
6885
6886 /**
6887  * mem_cgroup_calculate_protection - check if memory consumption is in the normal range
6888  * @root: the top ancestor of the sub-tree being checked
6889  * @memcg: the memory cgroup to check
6890  *
6891  * WARNING: This function is not stateless! It can only be used as part
6892  *          of a top-down tree iteration, not for isolated queries.
6893  */
6894 void mem_cgroup_calculate_protection(struct mem_cgroup *root,
6895                                      struct mem_cgroup *memcg)
6896 {
6897         unsigned long usage, parent_usage;
6898         struct mem_cgroup *parent;
6899
6900         if (mem_cgroup_disabled())
6901                 return;
6902
6903         if (!root)
6904                 root = root_mem_cgroup;
6905
6906         /*
6907          * Effective values of the reclaim targets are ignored so they
6908          * can be stale. Have a look at mem_cgroup_protection for more
6909          * details.
6910          * TODO: calculation should be more robust so that we do not need
6911          * that special casing.
6912          */
6913         if (memcg == root)
6914                 return;
6915
6916         usage = page_counter_read(&memcg->memory);
6917         if (!usage)
6918                 return;
6919
6920         parent = parent_mem_cgroup(memcg);
6921
6922         if (parent == root) {
6923                 memcg->memory.emin = READ_ONCE(memcg->memory.min);
6924                 memcg->memory.elow = READ_ONCE(memcg->memory.low);
6925                 return;
6926         }
6927
6928         parent_usage = page_counter_read(&parent->memory);
6929
6930         WRITE_ONCE(memcg->memory.emin, effective_protection(usage, parent_usage,
6931                         READ_ONCE(memcg->memory.min),
6932                         READ_ONCE(parent->memory.emin),
6933                         atomic_long_read(&parent->memory.children_min_usage)));
6934
6935         WRITE_ONCE(memcg->memory.elow, effective_protection(usage, parent_usage,
6936                         READ_ONCE(memcg->memory.low),
6937                         READ_ONCE(parent->memory.elow),
6938                         atomic_long_read(&parent->memory.children_low_usage)));
6939 }
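/*
 * Illustrative sketch (not part of this file): the top-down iteration the
 * WARNING above refers to, loosely modeled on how reclaim walks a subtree.
 * mem_cgroup_iter() visits parents before their children, so each parent's
 * emin/elow are up to date by the time a child is computed. The function
 * name is a placeholder.
 */
static void example_calculate_subtree_protection(struct mem_cgroup *root)
{
	struct mem_cgroup *memcg;

	for (memcg = mem_cgroup_iter(root, NULL, NULL); memcg;
	     memcg = mem_cgroup_iter(root, memcg, NULL))
		mem_cgroup_calculate_protection(root, memcg);
}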
6940
6941 static int charge_memcg(struct folio *folio, struct mem_cgroup *memcg,
6942                         gfp_t gfp)
6943 {
6944         long nr_pages = folio_nr_pages(folio);
6945         int ret;
6946
6947         ret = try_charge(memcg, gfp, nr_pages);
6948         if (ret)
6949                 goto out;
6950
6951         css_get(&memcg->css);
6952         commit_charge(folio, memcg);
6953
6954         local_irq_disable();
6955         mem_cgroup_charge_statistics(memcg, nr_pages);
6956         memcg_check_events(memcg, folio_nid(folio));
6957         local_irq_enable();
6958 out:
6959         return ret;
6960 }
6961
6962 int __mem_cgroup_charge(struct folio *folio, struct mm_struct *mm, gfp_t gfp)
6963 {
6964         struct mem_cgroup *memcg;
6965         int ret;
6966
6967         memcg = get_mem_cgroup_from_mm(mm);
6968         ret = charge_memcg(folio, memcg, gfp);
6969         css_put(&memcg->css);
6970
6971         return ret;
6972 }
6973
6974 /**
6975  * mem_cgroup_swapin_charge_folio - Charge a newly allocated folio for swapin.
6976  * @folio: folio to charge.
6977  * @mm: mm context of the victim
6978  * @gfp: reclaim mode
6979  * @entry: swap entry for which the folio is allocated
6980  *
6981  * This function charges a folio allocated for swapin. Please call this before
6982  * adding the folio to the swapcache.
6983  *
6984  * Returns 0 on success. Otherwise, an error code is returned.
6985  */
6986 int mem_cgroup_swapin_charge_folio(struct folio *folio, struct mm_struct *mm,
6987                                   gfp_t gfp, swp_entry_t entry)
6988 {
6989         struct mem_cgroup *memcg;
6990         unsigned short id;
6991         int ret;
6992
6993         if (mem_cgroup_disabled())
6994                 return 0;
6995
6996         id = lookup_swap_cgroup_id(entry);
6997         rcu_read_lock();
6998         memcg = mem_cgroup_from_id(id);
6999         if (!memcg || !css_tryget_online(&memcg->css))
7000                 memcg = get_mem_cgroup_from_mm(mm);
7001         rcu_read_unlock();
7002
7003         ret = charge_memcg(folio, memcg, gfp);
7004
7005         css_put(&memcg->css);
7006         return ret;
7007 }
7008
7009 /*
7010  * mem_cgroup_swapin_uncharge_swap - uncharge swap slot
7011  * @entry: swap entry for which the page is charged
7012  *
7013  * Call this function after successfully adding the charged page to the swapcache.
7014  *
7015  * Note: This function assumes the page for which the swap slot is being
7016  * uncharged is an order-0 page.
7017  */
7018 void mem_cgroup_swapin_uncharge_swap(swp_entry_t entry)
7019 {
7020         /*
7021          * Cgroup1's unified memory+swap counter has been charged with the
7022          * new swapcache page, finish the transfer by uncharging the swap
7023          * slot. The swap slot would also get uncharged when it dies, but
7024          * it can stick around indefinitely and we'd count the page twice
7025          * the entire time.
7026          *
7027          * Cgroup2 has separate resource counters for memory and swap,
7028          * so this is a non-issue here. Memory and swap charge lifetimes
7029          * correspond 1:1 to page and swap slot lifetimes: we charge the
7030          * page to memory here, and uncharge swap when the slot is freed.
7031          */
7032         if (!mem_cgroup_disabled() && do_memsw_account()) {
7033                 /*
7034                  * The swap entry might not get freed for a long time,
7035                  * let's not wait for it.  The page already received a
7036                  * memory+swap charge, drop the swap entry duplicate.
7037                  */
7038                 mem_cgroup_uncharge_swap(entry, 1);
7039         }
7040 }
7041
7042 struct uncharge_gather {
7043         struct mem_cgroup *memcg;
7044         unsigned long nr_memory;
7045         unsigned long pgpgout;
7046         unsigned long nr_kmem;
7047         int nid;
7048 };
7049
7050 static inline void uncharge_gather_clear(struct uncharge_gather *ug)
7051 {
7052         memset(ug, 0, sizeof(*ug));
7053 }
7054
7055 static void uncharge_batch(const struct uncharge_gather *ug)
7056 {
7057         unsigned long flags;
7058
7059         if (ug->nr_memory) {
7060                 page_counter_uncharge(&ug->memcg->memory, ug->nr_memory);
7061                 if (do_memsw_account())
7062                         page_counter_uncharge(&ug->memcg->memsw, ug->nr_memory);
7063                 if (ug->nr_kmem)
7064                         memcg_account_kmem(ug->memcg, -ug->nr_kmem);
7065                 memcg_oom_recover(ug->memcg);
7066         }
7067
7068         local_irq_save(flags);
7069         __count_memcg_events(ug->memcg, PGPGOUT, ug->pgpgout);
7070         __this_cpu_add(ug->memcg->vmstats_percpu->nr_page_events, ug->nr_memory);
7071         memcg_check_events(ug->memcg, ug->nid);
7072         local_irq_restore(flags);
7073
7074         /* drop reference from uncharge_folio */
7075         css_put(&ug->memcg->css);
7076 }
7077
7078 static void uncharge_folio(struct folio *folio, struct uncharge_gather *ug)
7079 {
7080         long nr_pages;
7081         struct mem_cgroup *memcg;
7082         struct obj_cgroup *objcg;
7083
7084         VM_BUG_ON_FOLIO(folio_test_lru(folio), folio);
7085
7086         /*
7087          * Nobody should be changing or seriously looking at
7088          * folio memcg or objcg at this point; we have fully
7089          * exclusive access to the folio.
7090          */
7091         if (folio_memcg_kmem(folio)) {
7092                 objcg = __folio_objcg(folio);
7093                 /*
7094                  * This get matches the put at the end of the function and
7095                  * kmem pages do not hold memcg references anymore.
7096                  */
7097                 memcg = get_mem_cgroup_from_objcg(objcg);
7098         } else {
7099                 memcg = __folio_memcg(folio);
7100         }
7101
7102         if (!memcg)
7103                 return;
7104
7105         if (ug->memcg != memcg) {
7106                 if (ug->memcg) {
7107                         uncharge_batch(ug);
7108                         uncharge_gather_clear(ug);
7109                 }
7110                 ug->memcg = memcg;
7111                 ug->nid = folio_nid(folio);
7112
7113                 /* pairs with css_put in uncharge_batch */
7114                 css_get(&memcg->css);
7115         }
7116
7117         nr_pages = folio_nr_pages(folio);
7118
7119         if (folio_memcg_kmem(folio)) {
7120                 ug->nr_memory += nr_pages;
7121                 ug->nr_kmem += nr_pages;
7122
7123                 folio->memcg_data = 0;
7124                 obj_cgroup_put(objcg);
7125         } else {
7126                 /* LRU pages aren't accounted at the root level */
7127                 if (!mem_cgroup_is_root(memcg))
7128                         ug->nr_memory += nr_pages;
7129                 ug->pgpgout++;
7130
7131                 folio->memcg_data = 0;
7132         }
7133
7134         css_put(&memcg->css);
7135 }
7136
7137 void __mem_cgroup_uncharge(struct folio *folio)
7138 {
7139         struct uncharge_gather ug;
7140
7141         /* Don't touch folio->lru of any random page, pre-check: */
7142         if (!folio_memcg(folio))
7143                 return;
7144
7145         uncharge_gather_clear(&ug);
7146         uncharge_folio(folio, &ug);
7147         uncharge_batch(&ug);
7148 }
7149
7150 /**
7151  * __mem_cgroup_uncharge_list - uncharge a list of pages
7152  * @page_list: list of pages to uncharge
7153  *
7154  * Uncharge a list of pages previously charged with
7155  * __mem_cgroup_charge().
7156  */
7157 void __mem_cgroup_uncharge_list(struct list_head *page_list)
7158 {
7159         struct uncharge_gather ug;
7160         struct folio *folio;
7161
7162         uncharge_gather_clear(&ug);
7163         list_for_each_entry(folio, page_list, lru)
7164                 uncharge_folio(folio, &ug);
7165         if (ug.memcg)
7166                 uncharge_batch(&ug);
7167 }
7168
7169 /**
7170  * mem_cgroup_migrate - Charge a folio's replacement.
7171  * @old: Currently circulating folio.
7172  * @new: Replacement folio.
7173  *
7174  * Charge @new as a replacement folio for @old. @old will
7175  * be uncharged upon free.
7176  *
7177  * Both folios must be locked, @new->mapping must be set up.
7178  */
7179 void mem_cgroup_migrate(struct folio *old, struct folio *new)
7180 {
7181         struct mem_cgroup *memcg;
7182         long nr_pages = folio_nr_pages(new);
7183         unsigned long flags;
7184
7185         VM_BUG_ON_FOLIO(!folio_test_locked(old), old);
7186         VM_BUG_ON_FOLIO(!folio_test_locked(new), new);
7187         VM_BUG_ON_FOLIO(folio_test_anon(old) != folio_test_anon(new), new);
7188         VM_BUG_ON_FOLIO(folio_nr_pages(old) != nr_pages, new);
7189
7190         if (mem_cgroup_disabled())
7191                 return;
7192
7193         /* Page cache replacement: new folio already charged? */
7194         if (folio_memcg(new))
7195                 return;
7196
7197         memcg = folio_memcg(old);
7198         VM_WARN_ON_ONCE_FOLIO(!memcg, old);
7199         if (!memcg)
7200                 return;
7201
7202         /* Force-charge the new folio. The old one will be freed soon */
7203         if (!mem_cgroup_is_root(memcg)) {
7204                 page_counter_charge(&memcg->memory, nr_pages);
7205                 if (do_memsw_account())
7206                         page_counter_charge(&memcg->memsw, nr_pages);
7207         }
7208
7209         css_get(&memcg->css);
7210         commit_charge(new, memcg);
7211
7212         local_irq_save(flags);
7213         mem_cgroup_charge_statistics(memcg, nr_pages);
7214         memcg_check_events(memcg, folio_nid(new));
7215         local_irq_restore(flags);
7216 }
7217
7218 DEFINE_STATIC_KEY_FALSE(memcg_sockets_enabled_key);
7219 EXPORT_SYMBOL(memcg_sockets_enabled_key);
7220
7221 void mem_cgroup_sk_alloc(struct sock *sk)
7222 {
7223         struct mem_cgroup *memcg;
7224
7225         if (!mem_cgroup_sockets_enabled)
7226                 return;
7227
7228         /* Do not associate the sock with an unrelated interrupted task's memcg. */
7229         if (!in_task())
7230                 return;
7231
7232         rcu_read_lock();
7233         memcg = mem_cgroup_from_task(current);
7234         if (mem_cgroup_is_root(memcg))
7235                 goto out;
7236         if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && !memcg->tcpmem_active)
7237                 goto out;
7238         if (css_tryget(&memcg->css))
7239                 sk->sk_memcg = memcg;
7240 out:
7241         rcu_read_unlock();
7242 }
7243
7244 void mem_cgroup_sk_free(struct sock *sk)
7245 {
7246         if (sk->sk_memcg)
7247                 css_put(&sk->sk_memcg->css);
7248 }
7249
7250 /**
7251  * mem_cgroup_charge_skmem - charge socket memory
7252  * @memcg: memcg to charge
7253  * @nr_pages: number of pages to charge
7254  * @gfp_mask: reclaim mode
7255  *
7256  * Charges @nr_pages to @memcg. Returns %true if the charge fit within
7257  * @memcg's configured limit, %false if it doesn't.
7258  */
7259 bool mem_cgroup_charge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages,
7260                              gfp_t gfp_mask)
7261 {
7262         if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) {
7263                 struct page_counter *fail;
7264
7265                 if (page_counter_try_charge(&memcg->tcpmem, nr_pages, &fail)) {
7266                         memcg->tcpmem_pressure = 0;
7267                         return true;
7268                 }
7269                 memcg->tcpmem_pressure = 1;
7270                 if (gfp_mask & __GFP_NOFAIL) {
7271                         page_counter_charge(&memcg->tcpmem, nr_pages);
7272                         return true;
7273                 }
7274                 return false;
7275         }
7276
7277         if (try_charge(memcg, gfp_mask, nr_pages) == 0) {
7278                 mod_memcg_state(memcg, MEMCG_SOCK, nr_pages);
7279                 return true;
7280         }
7281
7282         return false;
7283 }
7284
7285 /**
7286  * mem_cgroup_uncharge_skmem - uncharge socket memory
7287  * @memcg: memcg to uncharge
7288  * @nr_pages: number of pages to uncharge
7289  */
7290 void mem_cgroup_uncharge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages)
7291 {
7292         if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) {
7293                 page_counter_uncharge(&memcg->tcpmem, nr_pages);
7294                 return;
7295         }
7296
7297         mod_memcg_state(memcg, MEMCG_SOCK, -nr_pages);
7298
7299         refill_stock(memcg, nr_pages);
7300 }
7301
7302 static int __init cgroup_memory(char *s)
7303 {
7304         char *token;
7305
7306         while ((token = strsep(&s, ",")) != NULL) {
7307                 if (!*token)
7308                         continue;
7309                 if (!strcmp(token, "nosocket"))
7310                         cgroup_memory_nosocket = true;
7311                 if (!strcmp(token, "nokmem"))
7312                         cgroup_memory_nokmem = true;
7313         }
7314         return 1;
7315 }
7316 __setup("cgroup.memory=", cgroup_memory);
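/*
 * Example (illustrative): booting with "cgroup.memory=nosocket,nokmem" on
 * the kernel command line disables both socket memory accounting and kernel
 * memory accounting, per the token parsing above.
 */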
7317
7318 /*
7319  * subsys_initcall() for memory controller.
7320  *
7321  * Some parts like memcg_hotplug_cpu_dead() have to be initialized from this
7322  * context because of lock dependencies (cgroup_lock -> cpu hotplug) but
7323  * basically everything that doesn't depend on a specific mem_cgroup structure
7324  * should be initialized from here.
7325  */
7326 static int __init mem_cgroup_init(void)
7327 {
7328         int cpu, node;
7329
7330         /*
7331          * Currently an s32 type (see struct batched_lruvec_stat) is used for
7332          * per-memcg-per-cpu caching of per-node statistics. For this to work,
7333          * we must make sure that the overfill threshold can't exceed
7334          * S32_MAX / PAGE_SIZE.
7335          */
7336         BUILD_BUG_ON(MEMCG_CHARGE_BATCH > S32_MAX / PAGE_SIZE);
7337
7338         cpuhp_setup_state_nocalls(CPUHP_MM_MEMCQ_DEAD, "mm/memctrl:dead", NULL,
7339                                   memcg_hotplug_cpu_dead);
7340
7341         for_each_possible_cpu(cpu)
7342                 INIT_WORK(&per_cpu_ptr(&memcg_stock, cpu)->work,
7343                           drain_local_stock);
7344
7345         for_each_node(node) {
7346                 struct mem_cgroup_tree_per_node *rtpn;
7347
7348                 rtpn = kzalloc_node(sizeof(*rtpn), GFP_KERNEL,
7349                                     node_online(node) ? node : NUMA_NO_NODE);
7350
7351                 rtpn->rb_root = RB_ROOT;
7352                 rtpn->rb_rightmost = NULL;
7353                 spin_lock_init(&rtpn->lock);
7354                 soft_limit_tree.rb_tree_per_node[node] = rtpn;
7355         }
7356
7357         return 0;
7358 }
7359 subsys_initcall(mem_cgroup_init);
7360
7361 #ifdef CONFIG_SWAP
7362 static struct mem_cgroup *mem_cgroup_id_get_online(struct mem_cgroup *memcg)
7363 {
7364         while (!refcount_inc_not_zero(&memcg->id.ref)) {
7365                 /*
7366                  * The root cgroup cannot be destroyed, so its refcount must
7367                  * always be >= 1.
7368                  */
7369                 if (WARN_ON_ONCE(mem_cgroup_is_root(memcg))) {
7370                         VM_BUG_ON(1);
7371                         break;
7372                 }
7373                 memcg = parent_mem_cgroup(memcg);
7374                 if (!memcg)
7375                         memcg = root_mem_cgroup;
7376         }
7377         return memcg;
7378 }
7379
7380 /**
7381  * mem_cgroup_swapout - transfer a memsw charge to swap
7382  * @folio: folio whose memsw charge to transfer
7383  * @entry: swap entry to move the charge to
7384  *
7385  * Transfer the memsw charge of @folio to @entry.
7386  */
7387 void mem_cgroup_swapout(struct folio *folio, swp_entry_t entry)
7388 {
7389         struct mem_cgroup *memcg, *swap_memcg;
7390         unsigned int nr_entries;
7391         unsigned short oldid;
7392
7393         VM_BUG_ON_FOLIO(folio_test_lru(folio), folio);
7394         VM_BUG_ON_FOLIO(folio_ref_count(folio), folio);
7395
7396         if (mem_cgroup_disabled())
7397                 return;
7398
7399         if (!do_memsw_account())
7400                 return;
7401
7402         memcg = folio_memcg(folio);
7403
7404         VM_WARN_ON_ONCE_FOLIO(!memcg, folio);
7405         if (!memcg)
7406                 return;
7407
7408         /*
7409          * In case the memcg owning these pages has been offlined and doesn't
7410          * have an ID allocated to it anymore, charge the closest online
7411          * ancestor for the swap instead and transfer the memory+swap charge.
7412          */
7413         swap_memcg = mem_cgroup_id_get_online(memcg);
7414         nr_entries = folio_nr_pages(folio);
7415         /* Get references for the tail pages, too */
7416         if (nr_entries > 1)
7417                 mem_cgroup_id_get_many(swap_memcg, nr_entries - 1);
7418         oldid = swap_cgroup_record(entry, mem_cgroup_id(swap_memcg),
7419                                    nr_entries);
7420         VM_BUG_ON_FOLIO(oldid, folio);
7421         mod_memcg_state(swap_memcg, MEMCG_SWAP, nr_entries);
7422
7423         folio->memcg_data = 0;
7424
7425         if (!mem_cgroup_is_root(memcg))
7426                 page_counter_uncharge(&memcg->memory, nr_entries);
7427
7428         if (memcg != swap_memcg) {
7429                 if (!mem_cgroup_is_root(swap_memcg))
7430                         page_counter_charge(&swap_memcg->memsw, nr_entries);
7431                 page_counter_uncharge(&memcg->memsw, nr_entries);
7432         }
7433
7434         /*
7435          * Interrupts should be disabled here because the caller holds the
7436          * i_pages lock which is taken with interrupts-off. It is
7437          * important here to have the interrupts disabled because it is the
7438          * only synchronisation we have for updating the per-CPU variables.
7439          */
7440         memcg_stats_lock();
7441         mem_cgroup_charge_statistics(memcg, -nr_entries);
7442         memcg_stats_unlock();
7443         memcg_check_events(memcg, folio_nid(folio));
7444
7445         css_put(&memcg->css);
7446 }
7447
7448 /**
7449  * __mem_cgroup_try_charge_swap - try charging swap space for a folio
7450  * @folio: folio being added to swap
7451  * @entry: swap entry to charge
7452  *
7453  * Try to charge @folio's memcg for the swap space at @entry.
7454  *
7455  * Returns 0 on success, -ENOMEM on failure.
7456  */
7457 int __mem_cgroup_try_charge_swap(struct folio *folio, swp_entry_t entry)
7458 {
7459         unsigned int nr_pages = folio_nr_pages(folio);
7460         struct page_counter *counter;
7461         struct mem_cgroup *memcg;
7462         unsigned short oldid;
7463
7464         if (do_memsw_account())
7465                 return 0;
7466
7467         memcg = folio_memcg(folio);
7468
7469         VM_WARN_ON_ONCE_FOLIO(!memcg, folio);
7470         if (!memcg)
7471                 return 0;
7472
7473         if (!entry.val) {
7474                 memcg_memory_event(memcg, MEMCG_SWAP_FAIL);
7475                 return 0;
7476         }
7477
7478         memcg = mem_cgroup_id_get_online(memcg);
7479
7480         if (!mem_cgroup_is_root(memcg) &&
7481             !page_counter_try_charge(&memcg->swap, nr_pages, &counter)) {
7482                 memcg_memory_event(memcg, MEMCG_SWAP_MAX);
7483                 memcg_memory_event(memcg, MEMCG_SWAP_FAIL);
7484                 mem_cgroup_id_put(memcg);
7485                 return -ENOMEM;
7486         }
7487
7488         /* Get references for the tail pages, too */
7489         if (nr_pages > 1)
7490                 mem_cgroup_id_get_many(memcg, nr_pages - 1);
7491         oldid = swap_cgroup_record(entry, mem_cgroup_id(memcg), nr_pages);
7492         VM_BUG_ON_FOLIO(oldid, folio);
7493         mod_memcg_state(memcg, MEMCG_SWAP, nr_pages);
7494
7495         return 0;
7496 }
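/*
 * Editor's note: callers normally go through the mem_cgroup_try_charge_swap()
 * wrapper (declared alongside this prototype in include/linux/swap.h), which
 * short-circuits when the memory controller is disabled. Sketch, assumed
 * rather than quoted:
 */
#if 0	/* illustrative only */
static inline int mem_cgroup_try_charge_swap(struct folio *folio,
					     swp_entry_t entry)
{
	if (mem_cgroup_disabled())
		return 0;
	return __mem_cgroup_try_charge_swap(folio, entry);
}
#endif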
7497
7498 /**
7499  * __mem_cgroup_uncharge_swap - uncharge swap space
7500  * @entry: swap entry to uncharge
7501  * @nr_pages: the amount of swap space to uncharge
7502  */
7503 void __mem_cgroup_uncharge_swap(swp_entry_t entry, unsigned int nr_pages)
7504 {
7505         struct mem_cgroup *memcg;
7506         unsigned short id;
7507
7508         if (mem_cgroup_disabled())
7509                 return;
7510
7511         id = swap_cgroup_record(entry, 0, nr_pages);
7512         rcu_read_lock();
7513         memcg = mem_cgroup_from_id(id);
7514         if (memcg) {
7515                 if (!mem_cgroup_is_root(memcg)) {
7516                         if (do_memsw_account())
7517                                 page_counter_uncharge(&memcg->memsw, nr_pages);
7518                         else
7519                                 page_counter_uncharge(&memcg->swap, nr_pages);
7520                 }
7521                 mod_memcg_state(memcg, MEMCG_SWAP, -nr_pages);
7522                 mem_cgroup_id_put_many(memcg, nr_pages);
7523         }
7524         rcu_read_unlock();
7525 }
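/*
 * Editor's note: likewise, the usual entry point is the
 * mem_cgroup_uncharge_swap() wrapper, which only calls the __ variant
 * when the memory controller is enabled (sketch, assumed):
 */
#if 0	/* illustrative only */
static inline void mem_cgroup_uncharge_swap(swp_entry_t entry,
					    unsigned int nr_pages)
{
	if (mem_cgroup_disabled())
		return;
	__mem_cgroup_uncharge_swap(entry, nr_pages);
}
#endif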
7526
7527 long mem_cgroup_get_nr_swap_pages(struct mem_cgroup *memcg)
7528 {
7529         long nr_swap_pages = get_nr_swap_pages();
7530
7531         if (mem_cgroup_disabled() || do_memsw_account())
7532                 return nr_swap_pages;
7533         for (; !mem_cgroup_is_root(memcg); memcg = parent_mem_cgroup(memcg))
7534                 nr_swap_pages = min_t(long, nr_swap_pages,
7535                                       READ_ONCE(memcg->swap.max) -
7536                                       page_counter_read(&memcg->swap));
7537         return nr_swap_pages;
7538 }
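/*
 * Editor's note, a worked example of the walk above: with 10 GiB of free
 * swap globally, a leaf memcg whose swap.max is 1 GiB with 768 MiB already
 * charged, and unrestricted ancestors, the result is
 * min(10 GiB, 1 GiB - 768 MiB) = 256 MiB worth of pages.
 */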
7539
7540 bool mem_cgroup_swap_full(struct folio *folio)
7541 {
7542         struct mem_cgroup *memcg;
7543
7544         VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
7545
7546         if (vm_swap_full())
7547                 return true;
7548         if (do_memsw_account())
7549                 return false;
7550
7551         memcg = folio_memcg(folio);
7552         if (!memcg)
7553                 return false;
7554
7555         for (; !mem_cgroup_is_root(memcg); memcg = parent_mem_cgroup(memcg)) {
7556                 unsigned long usage = page_counter_read(&memcg->swap);
7557
7558                 if (usage * 2 >= READ_ONCE(memcg->swap.high) ||
7559                     usage * 2 >= READ_ONCE(memcg->swap.max))
7560                         return true;
7561         }
7562
7563         return false;
7564 }
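/*
 * Editor's note: "full" here means at or above half of the nearest
 * swap.high or swap.max in the hierarchy, e.g. 100 pages of swap usage
 * against a swap.max of 200 pages. Callers use this to treat swap as
 * effectively scarce for that cgroup.
 */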
7565
7566 static int __init setup_swap_account(char *s)
7567 {
7568         pr_warn_once("The swapaccount= commandline option is deprecated. "
7569                      "Please report your usecase to linux-mm@kvack.org if you "
7570                      "depend on this functionality.\n");
7571         return 1;
7572 }
7573 __setup("swapaccount=", setup_swap_account);
7574
7575 static u64 swap_current_read(struct cgroup_subsys_state *css,
7576                              struct cftype *cft)
7577 {
7578         struct mem_cgroup *memcg = mem_cgroup_from_css(css);
7579
7580         return (u64)page_counter_read(&memcg->swap) * PAGE_SIZE;
7581 }
7582
7583 static int swap_high_show(struct seq_file *m, void *v)
7584 {
7585         return seq_puts_memcg_tunable(m,
7586                 READ_ONCE(mem_cgroup_from_seq(m)->swap.high));
7587 }
7588
7589 static ssize_t swap_high_write(struct kernfs_open_file *of,
7590                                char *buf, size_t nbytes, loff_t off)
7591 {
7592         struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
7593         unsigned long high;
7594         int err;
7595
7596         buf = strstrip(buf);
7597         err = page_counter_memparse(buf, "max", &high);
7598         if (err)
7599                 return err;
7600
7601         page_counter_set_high(&memcg->swap, high);
7602
7603         return nbytes;
7604 }
7605
7606 static int swap_max_show(struct seq_file *m, void *v)
7607 {
7608         return seq_puts_memcg_tunable(m,
7609                 READ_ONCE(mem_cgroup_from_seq(m)->swap.max));
7610 }
7611
7612 static ssize_t swap_max_write(struct kernfs_open_file *of,
7613                               char *buf, size_t nbytes, loff_t off)
7614 {
7615         struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
7616         unsigned long max;
7617         int err;
7618
7619         buf = strstrip(buf);
7620         err = page_counter_memparse(buf, "max", &max);
7621         if (err)
7622                 return err;
7623
7624         xchg(&memcg->swap.max, max);
7625
7626         return nbytes;
7627 }
7628
7629 static int swap_events_show(struct seq_file *m, void *v)
7630 {
7631         struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
7632
7633         seq_printf(m, "high %lu\n",
7634                    atomic_long_read(&memcg->memory_events[MEMCG_SWAP_HIGH]));
7635         seq_printf(m, "max %lu\n",
7636                    atomic_long_read(&memcg->memory_events[MEMCG_SWAP_MAX]));
7637         seq_printf(m, "fail %lu\n",
7638                    atomic_long_read(&memcg->memory_events[MEMCG_SWAP_FAIL]));
7639
7640         return 0;
7641 }
7642
7643 static struct cftype swap_files[] = {
7644         {
7645                 .name = "swap.current",
7646                 .flags = CFTYPE_NOT_ON_ROOT,
7647                 .read_u64 = swap_current_read,
7648         },
7649         {
7650                 .name = "swap.high",
7651                 .flags = CFTYPE_NOT_ON_ROOT,
7652                 .seq_show = swap_high_show,
7653                 .write = swap_high_write,
7654         },
7655         {
7656                 .name = "swap.max",
7657                 .flags = CFTYPE_NOT_ON_ROOT,
7658                 .seq_show = swap_max_show,
7659                 .write = swap_max_write,
7660         },
7661         {
7662                 .name = "swap.events",
7663                 .flags = CFTYPE_NOT_ON_ROOT,
7664                 .file_offset = offsetof(struct mem_cgroup, swap_events_file),
7665                 .seq_show = swap_events_show,
7666         },
7667         { }     /* terminate */
7668 };
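/*
 * Editor's note: on the cgroup2 hierarchy these entries surface in every
 * non-root cgroup directory as memory.swap.current, memory.swap.high,
 * memory.swap.max and memory.swap.events, e.g. (illustrative shell, paths
 * and the "mygroup" name assumed):
 *
 *	echo 512M > /sys/fs/cgroup/mygroup/memory.swap.max
 *	cat /sys/fs/cgroup/mygroup/memory.swap.events
 */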
7669
7670 static struct cftype memsw_files[] = {
7671         {
7672                 .name = "memsw.usage_in_bytes",
7673                 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_USAGE),
7674                 .read_u64 = mem_cgroup_read_u64,
7675         },
7676         {
7677                 .name = "memsw.max_usage_in_bytes",
7678                 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_MAX_USAGE),
7679                 .write = mem_cgroup_reset,
7680                 .read_u64 = mem_cgroup_read_u64,
7681         },
7682         {
7683                 .name = "memsw.limit_in_bytes",
7684                 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_LIMIT),
7685                 .write = mem_cgroup_write,
7686                 .read_u64 = mem_cgroup_read_u64,
7687         },
7688         {
7689                 .name = "memsw.failcnt",
7690                 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_FAILCNT),
7691                 .write = mem_cgroup_reset,
7692                 .read_u64 = mem_cgroup_read_u64,
7693         },
7694         { },    /* terminate */
7695 };
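/*
 * Editor's note: these legacy entries are registered only on the v1
 * hierarchy, where they appear as memory.memsw.* and expose the combined
 * memory+swap counter used when do_memsw_account() is true.
 */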
7696
7697 #if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_ZSWAP)
7698 /**
7699  * obj_cgroup_may_zswap - check if this cgroup can zswap
7700  * @objcg: the object cgroup
7701  *
7702  * Check if the hierarchical zswap limit has been reached.
7703  *
7704  * This doesn't check for specific headroom, and it is not atomic
7705  * either. But with zswap, the size of the allocation is only known
7706  * once compression has occurred, and this optimistic pre-check avoids
7707  * spending cycles on compression when there is already no room left
7708  * or zswap is disabled altogether somewhere in the hierarchy.
7709  */
7710 bool obj_cgroup_may_zswap(struct obj_cgroup *objcg)
7711 {
7712         struct mem_cgroup *memcg, *original_memcg;
7713         bool ret = true;
7714
7715         if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
7716                 return true;
7717
7718         original_memcg = get_mem_cgroup_from_objcg(objcg);
7719         for (memcg = original_memcg; !mem_cgroup_is_root(memcg);
7720              memcg = parent_mem_cgroup(memcg)) {
7721                 unsigned long max = READ_ONCE(memcg->zswap_max);
7722                 unsigned long pages;
7723
7724                 if (max == PAGE_COUNTER_MAX)
7725                         continue;
7726                 if (max == 0) {
7727                         ret = false;
7728                         break;
7729                 }
7730
7731                 cgroup_rstat_flush(memcg->css.cgroup);
7732                 pages = memcg_page_state(memcg, MEMCG_ZSWAP_B) / PAGE_SIZE;
7733                 if (pages < max)
7734                         continue;
7735                 ret = false;
7736                 break;
7737         }
7738         mem_cgroup_put(original_memcg);
7739         return ret;
7740 }
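/*
 * Editor's note: the intended call pattern (roughly what the zswap store
 * path in mm/zswap.c does; simplified, variable names assumed) is an
 * optimistic check before compressing, then an unconditional charge of the
 * compressed size:
 */
#if 0	/* illustrative only */
	if (objcg && !obj_cgroup_may_zswap(objcg))
		goto reject;			/* over a zswap limit somewhere */

	/* ... compress the page, producing dlen bytes ... */

	if (objcg)
		obj_cgroup_charge_zswap(objcg, dlen);	/* PF_MEMALLOC, must succeed */
#endif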
7741
7742 /**
7743  * obj_cgroup_charge_zswap - charge compression backend memory
7744  * @objcg: the object cgroup
7745  * @size: size of compressed object
7746  *
7747  * This forces the charge after obj_cgroup_may_zswap() allowed
7748  * compression and storage in zswap for this cgroup to go ahead.
7749  */
7750 void obj_cgroup_charge_zswap(struct obj_cgroup *objcg, size_t size)
7751 {
7752         struct mem_cgroup *memcg;
7753
7754         if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
7755                 return;
7756
7757         VM_WARN_ON_ONCE(!(current->flags & PF_MEMALLOC));
7758
7759         /* PF_MEMALLOC context, charging must succeed */
7760         if (obj_cgroup_charge(objcg, GFP_KERNEL, size))
7761                 VM_WARN_ON_ONCE(1);
7762
7763         rcu_read_lock();
7764         memcg = obj_cgroup_memcg(objcg);
7765         mod_memcg_state(memcg, MEMCG_ZSWAP_B, size);
7766         mod_memcg_state(memcg, MEMCG_ZSWAPPED, 1);
7767         rcu_read_unlock();
7768 }
7769
7770 /**
7771  * obj_cgroup_uncharge_zswap - uncharge compression backend memory
7772  * @objcg: the object cgroup
7773  * @size: size of compressed object
7774  *
7775  * Uncharges zswap memory on page in.
7776  */
7777 void obj_cgroup_uncharge_zswap(struct obj_cgroup *objcg, size_t size)
7778 {
7779         struct mem_cgroup *memcg;
7780
7781         if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
7782                 return;
7783
7784         obj_cgroup_uncharge(objcg, size);
7785
7786         rcu_read_lock();
7787         memcg = obj_cgroup_memcg(objcg);
7788         mod_memcg_state(memcg, MEMCG_ZSWAP_B, -size);
7789         mod_memcg_state(memcg, MEMCG_ZSWAPPED, -1);
7790         rcu_read_unlock();
7791 }
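/*
 * Editor's note: the matching uncharge is issued when a zswap entry is
 * freed (swapin, invalidation or writeback), using the same stored
 * compressed size, so MEMCG_ZSWAP_B and MEMCG_ZSWAPPED stay balanced.
 */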
7792
7793 static u64 zswap_current_read(struct cgroup_subsys_state *css,
7794                               struct cftype *cft)
7795 {
7796         cgroup_rstat_flush(css->cgroup);
7797         return memcg_page_state(mem_cgroup_from_css(css), MEMCG_ZSWAP_B);
7798 }
7799
7800 static int zswap_max_show(struct seq_file *m, void *v)
7801 {
7802         return seq_puts_memcg_tunable(m,
7803                 READ_ONCE(mem_cgroup_from_seq(m)->zswap_max));
7804 }
7805
7806 static ssize_t zswap_max_write(struct kernfs_open_file *of,
7807                                char *buf, size_t nbytes, loff_t off)
7808 {
7809         struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
7810         unsigned long max;
7811         int err;
7812
7813         buf = strstrip(buf);
7814         err = page_counter_memparse(buf, "max", &max);
7815         if (err)
7816                 return err;
7817
7818         xchg(&memcg->zswap_max, max);
7819
7820         return nbytes;
7821 }
7822
7823 static struct cftype zswap_files[] = {
7824         {
7825                 .name = "zswap.current",
7826                 .flags = CFTYPE_NOT_ON_ROOT,
7827                 .read_u64 = zswap_current_read,
7828         },
7829         {
7830                 .name = "zswap.max",
7831                 .flags = CFTYPE_NOT_ON_ROOT,
7832                 .seq_show = zswap_max_show,
7833                 .write = zswap_max_write,
7834         },
7835         { }     /* terminate */
7836 };
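/*
 * Editor's note: with CONFIG_MEMCG_KMEM and CONFIG_ZSWAP both enabled,
 * these appear as memory.zswap.current and memory.zswap.max on cgroup2;
 * writing the literal string "max" clears the limit, as with the other
 * page_counter_memparse()-backed knobs above.
 */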
7837 #endif /* CONFIG_MEMCG_KMEM && CONFIG_ZSWAP */
7838
7839 static int __init mem_cgroup_swap_init(void)
7840 {
7841         if (mem_cgroup_disabled())
7842                 return 0;
7843
7844         WARN_ON(cgroup_add_dfl_cftypes(&memory_cgrp_subsys, swap_files));
7845         WARN_ON(cgroup_add_legacy_cftypes(&memory_cgrp_subsys, memsw_files));
7846 #if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_ZSWAP)
7847         WARN_ON(cgroup_add_dfl_cftypes(&memory_cgrp_subsys, zswap_files));
7848 #endif
7849         return 0;
7850 }
7851 subsys_initcall(mem_cgroup_swap_init);
7852
7853 #endif /* CONFIG_SWAP */