1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /* memcontrol.c - Memory Controller
4 * Copyright IBM Corporation, 2007
5 * Author Balbir Singh <balbir@linux.vnet.ibm.com>
7 * Copyright 2007 OpenVZ SWsoft Inc
8 * Author: Pavel Emelianov <xemul@openvz.org>
11 * Copyright (C) 2009 Nokia Corporation
12 * Author: Kirill A. Shutemov
14 * Kernel Memory Controller
15 * Copyright (C) 2012 Parallels Inc. and Google Inc.
16 * Authors: Glauber Costa and Suleiman Souhlal
19 * Charge lifetime sanitation
20 * Lockless page tracking & accounting
21 * Unified hierarchy configuration model
22 * Copyright (C) 2015 Red Hat, Inc., Johannes Weiner
24 * Per memcg lru locking
25 * Copyright (C) 2020 Alibaba, Inc, Alex Shi
28 #include <linux/cgroup-defs.h>
29 #include <linux/page_counter.h>
30 #include <linux/memcontrol.h>
31 #include <linux/cgroup.h>
32 #include <linux/cpuset.h>
33 #include <linux/sched/mm.h>
34 #include <linux/shmem_fs.h>
35 #include <linux/hugetlb.h>
36 #include <linux/pagemap.h>
37 #include <linux/pagevec.h>
38 #include <linux/vm_event_item.h>
39 #include <linux/smp.h>
40 #include <linux/page-flags.h>
41 #include <linux/backing-dev.h>
42 #include <linux/bit_spinlock.h>
43 #include <linux/rcupdate.h>
44 #include <linux/limits.h>
45 #include <linux/export.h>
46 #include <linux/list.h>
47 #include <linux/mutex.h>
48 #include <linux/rbtree.h>
49 #include <linux/slab.h>
50 #include <linux/swapops.h>
51 #include <linux/spinlock.h>
53 #include <linux/seq_file.h>
54 #include <linux/vmpressure.h>
55 #include <linux/memremap.h>
56 #include <linux/mm_inline.h>
57 #include <linux/swap_cgroup.h>
58 #include <linux/cpu.h>
59 #include <linux/oom.h>
60 #include <linux/lockdep.h>
61 #include <linux/resume_user_mode.h>
62 #include <linux/psi.h>
63 #include <linux/seq_buf.h>
64 #include <linux/sched/isolation.h>
65 #include <linux/kmemleak.h>
70 #include "memcontrol-v1.h"
72 #include <linux/uaccess.h>
74 #define CREATE_TRACE_POINTS
75 #include <trace/events/memcg.h>
76 #undef CREATE_TRACE_POINTS
78 #include <trace/events/vmscan.h>
80 struct cgroup_subsys memory_cgrp_subsys __read_mostly;
81 EXPORT_SYMBOL(memory_cgrp_subsys);
83 struct mem_cgroup *root_mem_cgroup __read_mostly;
85 /* Active memory cgroup to use from an interrupt context */
86 DEFINE_PER_CPU(struct mem_cgroup *, int_active_memcg);
87 EXPORT_PER_CPU_SYMBOL_GPL(int_active_memcg);
89 /* Socket memory accounting disabled? */
90 static bool cgroup_memory_nosocket __ro_after_init;
92 /* Kernel memory accounting disabled? */
93 static bool cgroup_memory_nokmem __ro_after_init;
95 /* BPF memory accounting disabled? */
96 static bool cgroup_memory_nobpf __ro_after_init;
98 static struct kmem_cache *memcg_cachep;
99 static struct kmem_cache *memcg_pn_cachep;
101 #ifdef CONFIG_CGROUP_WRITEBACK
102 static DECLARE_WAIT_QUEUE_HEAD(memcg_cgwb_frn_waitq);
105 static inline bool task_is_dying(void)
107 return tsk_is_oom_victim(current) || fatal_signal_pending(current) ||
108 (current->flags & PF_EXITING);
111 /* Some nice accessors for the vmpressure. */
112 struct vmpressure *memcg_to_vmpressure(struct mem_cgroup *memcg)
115 memcg = root_mem_cgroup;
116 return &memcg->vmpressure;
119 struct mem_cgroup *vmpressure_to_memcg(struct vmpressure *vmpr)
121 return container_of(vmpr, struct mem_cgroup, vmpressure);
124 #define SEQ_BUF_SIZE SZ_4K
125 #define CURRENT_OBJCG_UPDATE_BIT 0
126 #define CURRENT_OBJCG_UPDATE_FLAG (1UL << CURRENT_OBJCG_UPDATE_BIT)
128 static DEFINE_SPINLOCK(objcg_lock);
130 bool mem_cgroup_kmem_disabled(void)
132 return cgroup_memory_nokmem;
135 static void memcg_uncharge(struct mem_cgroup *memcg, unsigned int nr_pages);
137 static void obj_cgroup_release(struct percpu_ref *ref)
139 struct obj_cgroup *objcg = container_of(ref, struct obj_cgroup, refcnt);
140 unsigned int nr_bytes;
141 unsigned int nr_pages;
145 * At this point all allocated objects are freed, and
146 * objcg->nr_charged_bytes can't have an arbitrary byte value.
147 * However, it can be PAGE_SIZE or (x * PAGE_SIZE).
149 * The following sequence can lead to it:
150 * 1) CPU0: objcg == stock->cached_objcg
151 * 2) CPU1: we do a small allocation (e.g. 92 bytes),
152 * PAGE_SIZE bytes are charged
153 * 3) CPU1: a process from another memcg is allocating something,
154 * the stock is flushed,
155 * objcg->nr_charged_bytes = PAGE_SIZE - 92
156 * 4) CPU0: we release this object,
157 * 92 bytes are added to stock->nr_bytes
158 * 5) CPU0: the stock is flushed,
159 * 92 bytes are added to objcg->nr_charged_bytes
161 * As a result, nr_charged_bytes == PAGE_SIZE.
162 * This page will be uncharged in obj_cgroup_release().
164 nr_bytes = atomic_read(&objcg->nr_charged_bytes);
165 WARN_ON_ONCE(nr_bytes & (PAGE_SIZE - 1));
166 nr_pages = nr_bytes >> PAGE_SHIFT;
169 struct mem_cgroup *memcg;
171 memcg = get_mem_cgroup_from_objcg(objcg);
172 mod_memcg_state(memcg, MEMCG_KMEM, -nr_pages);
173 memcg1_account_kmem(memcg, -nr_pages);
174 if (!mem_cgroup_is_root(memcg))
175 memcg_uncharge(memcg, nr_pages);
176 mem_cgroup_put(memcg);
179 spin_lock_irqsave(&objcg_lock, flags);
180 list_del(&objcg->list);
181 spin_unlock_irqrestore(&objcg_lock, flags);
183 percpu_ref_exit(ref);
184 kfree_rcu(objcg, rcu);
187 static struct obj_cgroup *obj_cgroup_alloc(void)
189 struct obj_cgroup *objcg;
192 objcg = kzalloc(sizeof(struct obj_cgroup), GFP_KERNEL);
196 ret = percpu_ref_init(&objcg->refcnt, obj_cgroup_release, 0,
202 INIT_LIST_HEAD(&objcg->list);
206 static void memcg_reparent_objcgs(struct mem_cgroup *memcg,
207 struct mem_cgroup *parent)
209 struct obj_cgroup *objcg, *iter;
211 objcg = rcu_replace_pointer(memcg->objcg, NULL, true);
213 spin_lock_irq(&objcg_lock);
215 /* 1) Ready to reparent active objcg. */
216 list_add(&objcg->list, &memcg->objcg_list);
217 /* 2) Reparent active objcg and already reparented objcgs to parent. */
218 list_for_each_entry(iter, &memcg->objcg_list, list)
219 WRITE_ONCE(iter->memcg, parent);
220 /* 3) Move already reparented objcgs to the parent's list */
221 list_splice(&memcg->objcg_list, &parent->objcg_list);
223 spin_unlock_irq(&objcg_lock);
225 percpu_ref_kill(&objcg->refcnt);
229 * A lot of the calls to the cache allocation functions are expected to be
230 * inlined by the compiler. Since the calls to memcg_slab_post_alloc_hook() are
231 * conditional on this static branch, we'll have to allow modules that do
232 * kmem_cache_alloc and the like to see this symbol as well.
234 DEFINE_STATIC_KEY_FALSE(memcg_kmem_online_key);
235 EXPORT_SYMBOL(memcg_kmem_online_key);
237 DEFINE_STATIC_KEY_FALSE(memcg_bpf_enabled_key);
238 EXPORT_SYMBOL(memcg_bpf_enabled_key);
241 * mem_cgroup_css_from_folio - css of the memcg associated with a folio
242 * @folio: folio of interest
244 * If memcg is bound to the default hierarchy, css of the memcg associated
245 * with @folio is returned. The returned css remains associated with @folio
246 * until it is released.
248 * If memcg is bound to a traditional hierarchy, the css of root_mem_cgroup is returned.
251 struct cgroup_subsys_state *mem_cgroup_css_from_folio(struct folio *folio)
253 struct mem_cgroup *memcg = folio_memcg(folio);
255 if (!memcg || !cgroup_subsys_on_dfl(memory_cgrp_subsys))
256 memcg = root_mem_cgroup;
262 * page_cgroup_ino - return inode number of the memcg a page is charged to
265 * Look up the closest online ancestor of the memory cgroup @page is charged to
266 * and return its inode number or 0 if @page is not charged to any cgroup. It
267 * is safe to call this function without holding a reference to @page.
269 * Note, this function is inherently racy, because there is nothing to prevent
270 * the cgroup inode from getting torn down and potentially reallocated a moment
271 * after page_cgroup_ino() returns, so it should only be used by callers that
272 * do not care (such as procfs interfaces).
274 ino_t page_cgroup_ino(struct page *page)
276 struct mem_cgroup *memcg;
277 unsigned long ino = 0;
280 /* page_folio() is racy here, but the entire function is racy anyway */
281 memcg = folio_memcg_check(page_folio(page));
283 while (memcg && !(memcg->css.flags & CSS_ONLINE))
284 memcg = parent_mem_cgroup(memcg);
286 ino = cgroup_ino(memcg->css.cgroup);
291 /* Subset of node_stat_item for memcg stats */
292 static const unsigned int memcg_node_stat_items[] = {
298 NR_SLAB_RECLAIMABLE_B,
299 NR_SLAB_UNRECLAIMABLE_B,
300 WORKINGSET_REFAULT_ANON,
301 WORKINGSET_REFAULT_FILE,
302 WORKINGSET_ACTIVATE_ANON,
303 WORKINGSET_ACTIVATE_FILE,
304 WORKINGSET_RESTORE_ANON,
305 WORKINGSET_RESTORE_FILE,
306 WORKINGSET_NODERECLAIM,
318 NR_SECONDARY_PAGETABLE,
322 #ifdef CONFIG_NUMA_BALANCING
329 #ifdef CONFIG_HUGETLB_PAGE
334 static const unsigned int memcg_stat_items[] = {
344 #define NR_MEMCG_NODE_STAT_ITEMS ARRAY_SIZE(memcg_node_stat_items)
345 #define MEMCG_VMSTAT_SIZE (NR_MEMCG_NODE_STAT_ITEMS + \
346 ARRAY_SIZE(memcg_stat_items))
347 #define BAD_STAT_IDX(index) ((u32)(index) >= U8_MAX)
348 static u8 mem_cgroup_stats_index[MEMCG_NR_STAT] __read_mostly;
350 static void init_memcg_stats(void)
354 BUILD_BUG_ON(MEMCG_NR_STAT >= U8_MAX);
356 memset(mem_cgroup_stats_index, U8_MAX, sizeof(mem_cgroup_stats_index));
358 for (i = 0; i < NR_MEMCG_NODE_STAT_ITEMS; ++i, ++j)
359 mem_cgroup_stats_index[memcg_node_stat_items[i]] = j;
361 for (i = 0; i < ARRAY_SIZE(memcg_stat_items); ++i, ++j)
362 mem_cgroup_stats_index[memcg_stat_items[i]] = j;
365 static inline int memcg_stats_index(int idx)
367 return mem_cgroup_stats_index[idx];
370 struct lruvec_stats_percpu {
371 /* Local (CPU and cgroup) state */
372 long state[NR_MEMCG_NODE_STAT_ITEMS];
374 /* Delta calculation for lockless upward propagation */
375 long state_prev[NR_MEMCG_NODE_STAT_ITEMS];
378 struct lruvec_stats {
379 /* Aggregated (CPU and subtree) state */
380 long state[NR_MEMCG_NODE_STAT_ITEMS];
382 /* Non-hierarchical (CPU aggregated) state */
383 long state_local[NR_MEMCG_NODE_STAT_ITEMS];
385 /* Pending child counts during tree propagation */
386 long state_pending[NR_MEMCG_NODE_STAT_ITEMS];
389 unsigned long lruvec_page_state(struct lruvec *lruvec, enum node_stat_item idx)
391 struct mem_cgroup_per_node *pn;
395 if (mem_cgroup_disabled())
396 return node_page_state(lruvec_pgdat(lruvec), idx);
398 i = memcg_stats_index(idx);
399 if (WARN_ONCE(BAD_STAT_IDX(i), "%s: missing stat item %d\n", __func__, idx))
402 pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
403 x = READ_ONCE(pn->lruvec_stats->state[i]);
411 unsigned long lruvec_page_state_local(struct lruvec *lruvec,
412 enum node_stat_item idx)
414 struct mem_cgroup_per_node *pn;
418 if (mem_cgroup_disabled())
419 return node_page_state(lruvec_pgdat(lruvec), idx);
421 i = memcg_stats_index(idx);
422 if (WARN_ONCE(BAD_STAT_IDX(i), "%s: missing stat item %d\n", __func__, idx))
425 pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
426 x = READ_ONCE(pn->lruvec_stats->state_local[i]);
434 /* Subset of vm_event_item to report for memcg event stats */
435 static const unsigned int memcg_vm_event_stat[] = {
436 #ifdef CONFIG_MEMCG_V1
466 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
472 #ifdef CONFIG_NUMA_BALANCING
479 #define NR_MEMCG_EVENTS ARRAY_SIZE(memcg_vm_event_stat)
480 static u8 mem_cgroup_events_index[NR_VM_EVENT_ITEMS] __read_mostly;
482 static void init_memcg_events(void)
486 BUILD_BUG_ON(NR_VM_EVENT_ITEMS >= U8_MAX);
488 memset(mem_cgroup_events_index, U8_MAX,
489 sizeof(mem_cgroup_events_index));
491 for (i = 0; i < NR_MEMCG_EVENTS; ++i)
492 mem_cgroup_events_index[memcg_vm_event_stat[i]] = i;
495 static inline int memcg_events_index(enum vm_event_item idx)
497 return mem_cgroup_events_index[idx];
500 struct memcg_vmstats_percpu {
501 /* Stats updates since the last flush */
502 unsigned int stats_updates;
504 /* Cached pointers for fast iteration in memcg_rstat_updated() */
505 struct memcg_vmstats_percpu __percpu *parent_pcpu;
506 struct memcg_vmstats *vmstats;
508 /* The above should fit a single cacheline for memcg_rstat_updated() */
510 /* Local (CPU and cgroup) page state & events */
511 long state[MEMCG_VMSTAT_SIZE];
512 unsigned long events[NR_MEMCG_EVENTS];
514 /* Delta calculation for lockless upward propagation */
515 long state_prev[MEMCG_VMSTAT_SIZE];
516 unsigned long events_prev[NR_MEMCG_EVENTS];
517 } ____cacheline_aligned;
519 struct memcg_vmstats {
520 /* Aggregated (CPU and subtree) page state & events */
521 long state[MEMCG_VMSTAT_SIZE];
522 unsigned long events[NR_MEMCG_EVENTS];
524 /* Non-hierarchical (CPU aggregated) page state & events */
525 long state_local[MEMCG_VMSTAT_SIZE];
526 unsigned long events_local[NR_MEMCG_EVENTS];
528 /* Pending child counts during tree propagation */
529 long state_pending[MEMCG_VMSTAT_SIZE];
530 unsigned long events_pending[NR_MEMCG_EVENTS];
532 /* Stats updates since the last flush */
533 atomic_t stats_updates;
537 * memcg and lruvec stats flushing
539 * Many codepaths leading to stats update or read are performance sensitive and
540 * adding stats flushing in such codepaths is not desirable. So, to optimize
541 * flushing, the kernel does the following:
543 * 1) Periodically and asynchronously flush the stats every 2 seconds so that the
544 * rstat update tree does not grow unbounded.
546 * 2) Flush the stats synchronously on the reader side only when there are more
547 * than (MEMCG_CHARGE_BATCH * nr_cpus) update events. This optimization can leave
548 * the stats out of sync by at most (MEMCG_CHARGE_BATCH * nr_cpus) update events,
549 * but only for 2 seconds due to (1).
551 static void flush_memcg_stats_dwork(struct work_struct *w);
552 static DECLARE_DEFERRABLE_WORK(stats_flush_dwork, flush_memcg_stats_dwork);
553 static u64 flush_last_time;
555 #define FLUSH_TIME (2UL*HZ)
557 static bool memcg_vmstats_needs_flush(struct memcg_vmstats *vmstats)
559 return atomic_read(&vmstats->stats_updates) >
560 MEMCG_CHARGE_BATCH * num_online_cpus();
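/*
 * Illustrative note (not from the original source, and assuming
 * MEMCG_CHARGE_BATCH has its usual value of 64): with, say, 8 online CPUs
 * the threshold above is 64 * 8 = 512, so a reader only pays for a
 * synchronous flush once more than 512 page-sized update events have
 * accumulated in the subtree since the last flush; smaller deltas are left
 * to the periodic 2-second flusher.
 */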
563 static inline void memcg_rstat_updated(struct mem_cgroup *memcg, int val,
566 struct memcg_vmstats_percpu __percpu *statc_pcpu;
567 struct memcg_vmstats_percpu *statc;
568 unsigned int stats_updates;
573 css_rstat_updated(&memcg->css, cpu);
574 statc_pcpu = memcg->vmstats_percpu;
575 for (; statc_pcpu; statc_pcpu = statc->parent_pcpu) {
576 statc = this_cpu_ptr(statc_pcpu);
578 * If @memcg is already flushable then all its ancestors are
579 * flushable as well, and there is no need to increase
582 if (memcg_vmstats_needs_flush(statc->vmstats))
585 stats_updates = this_cpu_add_return(statc_pcpu->stats_updates,
587 if (stats_updates < MEMCG_CHARGE_BATCH)
590 stats_updates = this_cpu_xchg(statc_pcpu->stats_updates, 0);
591 atomic_add(stats_updates, &statc->vmstats->stats_updates);
595 static void __mem_cgroup_flush_stats(struct mem_cgroup *memcg, bool force)
597 bool needs_flush = memcg_vmstats_needs_flush(memcg->vmstats);
599 trace_memcg_flush_stats(memcg, atomic_read(&memcg->vmstats->stats_updates),
602 if (!force && !needs_flush)
605 if (mem_cgroup_is_root(memcg))
606 WRITE_ONCE(flush_last_time, jiffies_64);
608 css_rstat_flush(&memcg->css);
612 * mem_cgroup_flush_stats - flush the stats of a memory cgroup subtree
613 * @memcg: root of the subtree to flush
615 * Flushing is serialized by the underlying global rstat lock. There is also a
616 * minimum amount of work to be done even if there are no stat updates to flush.
617 * Hence, we only flush the stats if the updates delta exceeds a threshold. This
618 * avoids unnecessary work and contention on the underlying lock.
620 void mem_cgroup_flush_stats(struct mem_cgroup *memcg)
622 if (mem_cgroup_disabled())
626 memcg = root_mem_cgroup;
628 __mem_cgroup_flush_stats(memcg, false);
631 void mem_cgroup_flush_stats_ratelimited(struct mem_cgroup *memcg)
633 /* Only flush if the periodic flusher is one full cycle late */
634 if (time_after64(jiffies_64, READ_ONCE(flush_last_time) + 2*FLUSH_TIME))
635 mem_cgroup_flush_stats(memcg);
638 static void flush_memcg_stats_dwork(struct work_struct *w)
641 * Deliberately ignore memcg_vmstats_needs_flush() here so that flushing
642 * in latency-sensitive paths is as cheap as possible.
644 __mem_cgroup_flush_stats(root_mem_cgroup, true);
645 queue_delayed_work(system_unbound_wq, &stats_flush_dwork, FLUSH_TIME);
648 unsigned long memcg_page_state(struct mem_cgroup *memcg, int idx)
651 int i = memcg_stats_index(idx);
653 if (WARN_ONCE(BAD_STAT_IDX(i), "%s: missing stat item %d\n", __func__, idx))
656 x = READ_ONCE(memcg->vmstats->state[i]);
664 static int memcg_page_state_unit(int item);
667 * Normalize the value passed into memcg_rstat_updated() to be in pages. Round
668 * up non-zero sub-page updates to 1 page as zero page updates are ignored.
670 static int memcg_state_val_in_pages(int idx, int val)
672 int unit = memcg_page_state_unit(idx);
674 if (!val || unit == PAGE_SIZE)
677 return max(val * unit / PAGE_SIZE, 1UL);
681 * mod_memcg_state - update cgroup memory statistics
682 * @memcg: the memory cgroup
683 * @idx: the stat item - can be enum memcg_stat_item or enum node_stat_item
684 * @val: delta to add to the counter, can be negative
686 void mod_memcg_state(struct mem_cgroup *memcg, enum memcg_stat_item idx,
689 int i = memcg_stats_index(idx);
692 if (mem_cgroup_disabled())
695 if (WARN_ONCE(BAD_STAT_IDX(i), "%s: missing stat item %d\n", __func__, idx))
700 this_cpu_add(memcg->vmstats_percpu->state[i], val);
701 val = memcg_state_val_in_pages(idx, val);
702 memcg_rstat_updated(memcg, val, cpu);
703 trace_mod_memcg_state(memcg, idx, val);
708 #ifdef CONFIG_MEMCG_V1
709 /* idx can be of type enum memcg_stat_item or node_stat_item. */
710 unsigned long memcg_page_state_local(struct mem_cgroup *memcg, int idx)
713 int i = memcg_stats_index(idx);
715 if (WARN_ONCE(BAD_STAT_IDX(i), "%s: missing stat item %d\n", __func__, idx))
718 x = READ_ONCE(memcg->vmstats->state_local[i]);
727 static void mod_memcg_lruvec_state(struct lruvec *lruvec,
728 enum node_stat_item idx,
731 struct mem_cgroup_per_node *pn;
732 struct mem_cgroup *memcg;
733 int i = memcg_stats_index(idx);
736 if (WARN_ONCE(BAD_STAT_IDX(i), "%s: missing stat item %d\n", __func__, idx))
739 pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
745 this_cpu_add(memcg->vmstats_percpu->state[i], val);
748 this_cpu_add(pn->lruvec_stats_percpu->state[i], val);
750 val = memcg_state_val_in_pages(idx, val);
751 memcg_rstat_updated(memcg, val, cpu);
752 trace_mod_memcg_lruvec_state(memcg, idx, val);
758 * __mod_lruvec_state - update lruvec memory statistics
759 * @lruvec: the lruvec
760 * @idx: the stat item
761 * @val: delta to add to the counter, can be negative
763 * The lruvec is the intersection of the NUMA node and a cgroup. This
764 * function updates all three counters that are affected by a
765 * change of state at this level: per-node, per-cgroup, per-lruvec.
767 void __mod_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
771 __mod_node_page_state(lruvec_pgdat(lruvec), idx, val);
773 /* Update memcg and lruvec */
774 if (!mem_cgroup_disabled())
775 mod_memcg_lruvec_state(lruvec, idx, val);
778 void __lruvec_stat_mod_folio(struct folio *folio, enum node_stat_item idx,
781 struct mem_cgroup *memcg;
782 pg_data_t *pgdat = folio_pgdat(folio);
783 struct lruvec *lruvec;
786 memcg = folio_memcg(folio);
787 /* Untracked pages have no memcg, no lruvec. Update only the node */
790 __mod_node_page_state(pgdat, idx, val);
794 lruvec = mem_cgroup_lruvec(memcg, pgdat);
795 __mod_lruvec_state(lruvec, idx, val);
798 EXPORT_SYMBOL(__lruvec_stat_mod_folio);
800 void __mod_lruvec_kmem_state(void *p, enum node_stat_item idx, int val)
802 pg_data_t *pgdat = page_pgdat(virt_to_page(p));
803 struct mem_cgroup *memcg;
804 struct lruvec *lruvec;
807 memcg = mem_cgroup_from_slab_obj(p);
810 * Untracked pages have no memcg, no lruvec. Update only the
811 * node. If we reparent the slab objects to the root memcg,
812 * when we free the slab object, we need to update the per-memcg
813 * vmstats to keep it correct for the root memcg.
816 __mod_node_page_state(pgdat, idx, val);
818 lruvec = mem_cgroup_lruvec(memcg, pgdat);
819 __mod_lruvec_state(lruvec, idx, val);
825 * count_memcg_events - account VM events in a cgroup
826 * @memcg: the memory cgroup
827 * @idx: the event item
828 * @count: the number of events that occurred
830 void count_memcg_events(struct mem_cgroup *memcg, enum vm_event_item idx,
833 int i = memcg_events_index(idx);
836 if (mem_cgroup_disabled())
839 if (WARN_ONCE(BAD_STAT_IDX(i), "%s: missing stat item %d\n", __func__, idx))
844 this_cpu_add(memcg->vmstats_percpu->events[i], count);
845 memcg_rstat_updated(memcg, count, cpu);
846 trace_count_memcg_events(memcg, idx, count);
851 unsigned long memcg_events(struct mem_cgroup *memcg, int event)
853 int i = memcg_events_index(event);
855 if (WARN_ONCE(BAD_STAT_IDX(i), "%s: missing stat item %d\n", __func__, event))
858 return READ_ONCE(memcg->vmstats->events[i]);
861 #ifdef CONFIG_MEMCG_V1
862 unsigned long memcg_events_local(struct mem_cgroup *memcg, int event)
864 int i = memcg_events_index(event);
866 if (WARN_ONCE(BAD_STAT_IDX(i), "%s: missing stat item %d\n", __func__, event))
869 return READ_ONCE(memcg->vmstats->events_local[i]);
873 struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p)
876 * mm_update_next_owner() may clear mm->owner to NULL
877 * if it races with swapoff, page migration, etc.
878 * So this can be called with p == NULL.
883 return mem_cgroup_from_css(task_css(p, memory_cgrp_id));
885 EXPORT_SYMBOL(mem_cgroup_from_task);
887 static __always_inline struct mem_cgroup *active_memcg(void)
890 return this_cpu_read(int_active_memcg);
892 return current->active_memcg;
896 * get_mem_cgroup_from_mm: Obtain a reference on given mm_struct's memcg.
897 * @mm: mm from which memcg should be extracted. It can be NULL.
899 * Obtain a reference on mm->memcg and return it if successful. If mm
900 * is NULL, then the memcg is chosen as follows:
901 * 1) The active memcg, if set.
902 * 2) current->mm->memcg, if available
904 * If mem_cgroup is disabled, NULL is returned.
906 struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm)
908 struct mem_cgroup *memcg;
910 if (mem_cgroup_disabled())
914 * Page cache insertions can happen without an
915 * actual mm context, e.g. during disk probing
916 * on boot, loopback IO, acct() writes etc.
918 * No need to css_get on root memcg as the reference
919 * counting is disabled on the root level in the
920 * cgroup core. See CSS_NO_REF.
923 memcg = active_memcg();
924 if (unlikely(memcg)) {
925 /* remote memcg must hold a ref */
926 css_get(&memcg->css);
931 return root_mem_cgroup;
936 memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
937 if (unlikely(!memcg))
938 memcg = root_mem_cgroup;
939 } while (!css_tryget(&memcg->css));
943 EXPORT_SYMBOL(get_mem_cgroup_from_mm);
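/*
 * Illustrative usage sketch (not part of the original file): a typical
 * caller pairs the returned reference with mem_cgroup_put(), which is
 * NULL-safe for the mem_cgroup_disabled() case. The stat lookup is just a
 * placeholder for whatever the caller does with the memcg.
 *
 *	struct mem_cgroup *memcg = get_mem_cgroup_from_mm(current->mm);
 *	unsigned long nr_file = 0;
 *
 *	if (memcg)
 *		nr_file = memcg_page_state(memcg, NR_FILE_PAGES);
 *	mem_cgroup_put(memcg);
 */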
946 * get_mem_cgroup_from_current - Obtain a reference on current task's memcg.
948 struct mem_cgroup *get_mem_cgroup_from_current(void)
950 struct mem_cgroup *memcg;
952 if (mem_cgroup_disabled())
957 memcg = mem_cgroup_from_task(current);
958 if (!css_tryget(&memcg->css)) {
967 * get_mem_cgroup_from_folio - Obtain a reference on a given folio's memcg.
968 * @folio: folio from which memcg should be extracted.
970 struct mem_cgroup *get_mem_cgroup_from_folio(struct folio *folio)
972 struct mem_cgroup *memcg = folio_memcg(folio);
974 if (mem_cgroup_disabled())
978 if (!memcg || WARN_ON_ONCE(!css_tryget(&memcg->css)))
979 memcg = root_mem_cgroup;
985 * mem_cgroup_iter - iterate over memory cgroup hierarchy
986 * @root: hierarchy root
987 * @prev: previously returned memcg, NULL on first invocation
988 * @reclaim: cookie for shared reclaim walks, NULL for full walks
990 * Returns references to children of the hierarchy below @root, or
991 * @root itself, or %NULL after a full round-trip.
993 * Caller must pass the return value in @prev on subsequent
994 * invocations for reference counting, or use mem_cgroup_iter_break()
995 * to cancel a hierarchy walk before the round-trip is complete.
997 * Reclaimers can specify a node in @reclaim to divide up the memcgs
998 * in the hierarchy among all concurrent reclaimers operating on the same node.
1001 struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *root,
1002 struct mem_cgroup *prev,
1003 struct mem_cgroup_reclaim_cookie *reclaim)
1005 struct mem_cgroup_reclaim_iter *iter;
1006 struct cgroup_subsys_state *css;
1007 struct mem_cgroup *pos;
1008 struct mem_cgroup *next;
1010 if (mem_cgroup_disabled())
1014 root = root_mem_cgroup;
1022 int nid = reclaim->pgdat->node_id;
1024 iter = &root->nodeinfo[nid]->iter;
1025 gen = atomic_read(&iter->generation);
1028 * On start, join the current reclaim iteration cycle.
1029 * Exit when a concurrent walker completes it.
1032 reclaim->generation = gen;
1033 else if (reclaim->generation != gen)
1036 pos = READ_ONCE(iter->position);
1040 css = pos ? &pos->css : NULL;
1042 while ((css = css_next_descendant_pre(css, &root->css))) {
1044 * Verify the css and acquire a reference. The root
1045 * is provided by the caller, so we know it's alive
1046 * and kicking, and don't take an extra reference.
1048 if (css == &root->css || css_tryget(css))
1052 next = mem_cgroup_from_css(css);
1056 * The position could have already been updated by a competing
1057 * thread, so check that the value hasn't changed since we read
1058 * it to avoid reclaiming from the same cgroup twice.
1060 if (cmpxchg(&iter->position, pos, next) != pos) {
1061 if (css && css != &root->css)
1067 atomic_inc(&iter->generation);
1070 * Reclaimers share the hierarchy walk, and a
1071 * new one might jump in right at the end of
1072 * the hierarchy - make sure they see at least
1073 * one group and restart from the beginning.
1082 if (prev && prev != root)
1083 css_put(&prev->css);
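/*
 * Illustrative usage sketch (not part of the original file): a full
 * pre-order walk of the subtree below @root, with visit() standing in for
 * a hypothetical per-memcg callback. This is essentially what the
 * for_each_mem_cgroup_tree() helper used elsewhere in this file expands to,
 * plus an early break via mem_cgroup_iter_break().
 *
 *	struct mem_cgroup *iter = mem_cgroup_iter(root, NULL, NULL);
 *
 *	while (iter) {
 *		if (visit(iter)) {
 *			mem_cgroup_iter_break(root, iter);
 *			break;
 *		}
 *		iter = mem_cgroup_iter(root, iter, NULL);
 *	}
 */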
1089 * mem_cgroup_iter_break - abort a hierarchy walk prematurely
1090 * @root: hierarchy root
1091 * @prev: last visited hierarchy member as returned by mem_cgroup_iter()
1093 void mem_cgroup_iter_break(struct mem_cgroup *root,
1094 struct mem_cgroup *prev)
1097 root = root_mem_cgroup;
1098 if (prev && prev != root)
1099 css_put(&prev->css);
1102 static void __invalidate_reclaim_iterators(struct mem_cgroup *from,
1103 struct mem_cgroup *dead_memcg)
1105 struct mem_cgroup_reclaim_iter *iter;
1106 struct mem_cgroup_per_node *mz;
1109 for_each_node(nid) {
1110 mz = from->nodeinfo[nid];
1112 cmpxchg(&iter->position, dead_memcg, NULL);
1116 static void invalidate_reclaim_iterators(struct mem_cgroup *dead_memcg)
1118 struct mem_cgroup *memcg = dead_memcg;
1119 struct mem_cgroup *last;
1122 __invalidate_reclaim_iterators(memcg, dead_memcg);
1124 } while ((memcg = parent_mem_cgroup(memcg)));
1127 * When cgroup1 non-hierarchy mode is used,
1128 * parent_mem_cgroup() does not walk all the way up to the
1129 * cgroup root (root_mem_cgroup). So we have to handle
1130 * dead_memcg from cgroup root separately.
1132 if (!mem_cgroup_is_root(last))
1133 __invalidate_reclaim_iterators(root_mem_cgroup,
1138 * mem_cgroup_scan_tasks - iterate over tasks of a memory cgroup hierarchy
1139 * @memcg: hierarchy root
1140 * @fn: function to call for each task
1141 * @arg: argument passed to @fn
1143 * This function iterates over tasks attached to @memcg or to any of its
1144 * descendants and calls @fn for each task. If @fn returns a non-zero
1145 * value, the function breaks the iteration loop. Otherwise, it will iterate
1146 * over all tasks and return 0.
1148 * This function must not be called for the root memory cgroup.
1150 void mem_cgroup_scan_tasks(struct mem_cgroup *memcg,
1151 int (*fn)(struct task_struct *, void *), void *arg)
1153 struct mem_cgroup *iter;
1156 BUG_ON(mem_cgroup_is_root(memcg));
1158 for_each_mem_cgroup_tree(iter, memcg) {
1159 struct css_task_iter it;
1160 struct task_struct *task;
1162 css_task_iter_start(&iter->css, CSS_TASK_ITER_PROCS, &it);
1163 while (!ret && (task = css_task_iter_next(&it))) {
1164 ret = fn(task, arg);
1165 /* Avoid potential softlockup warning */
1168 css_task_iter_end(&it);
1170 mem_cgroup_iter_break(memcg, iter);
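/*
 * Illustrative usage sketch (not part of the original file): counting the
 * tasks in a non-root memcg subtree with a hypothetical callback. A
 * non-zero return from the callback stops the iteration early.
 *
 *	static int count_task(struct task_struct *task, void *arg)
 *	{
 *		(*(unsigned int *)arg)++;
 *		return 0;
 *	}
 *
 *	unsigned int nr_tasks = 0;
 *
 *	mem_cgroup_scan_tasks(memcg, count_task, &nr_tasks);
 */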
1176 #ifdef CONFIG_DEBUG_VM
1177 void lruvec_memcg_debug(struct lruvec *lruvec, struct folio *folio)
1179 struct mem_cgroup *memcg;
1181 if (mem_cgroup_disabled())
1184 memcg = folio_memcg(folio);
1187 VM_BUG_ON_FOLIO(!mem_cgroup_is_root(lruvec_memcg(lruvec)), folio);
1189 VM_BUG_ON_FOLIO(lruvec_memcg(lruvec) != memcg, folio);
1194 * folio_lruvec_lock - Lock the lruvec for a folio.
1195 * @folio: Pointer to the folio.
1197 * These functions are safe to use under any of the following conditions:
1199 * - folio_test_lru false
1200 * - folio frozen (refcount of 0)
1202 * Return: The lruvec this folio is on with its lock held.
1204 struct lruvec *folio_lruvec_lock(struct folio *folio)
1206 struct lruvec *lruvec = folio_lruvec(folio);
1208 spin_lock(&lruvec->lru_lock);
1209 lruvec_memcg_debug(lruvec, folio);
1215 * folio_lruvec_lock_irq - Lock the lruvec for a folio.
1216 * @folio: Pointer to the folio.
1218 * These functions are safe to use under any of the following conditions:
1220 * - folio_test_lru false
1221 * - folio frozen (refcount of 0)
1223 * Return: The lruvec this folio is on with its lock held and interrupts
1226 struct lruvec *folio_lruvec_lock_irq(struct folio *folio)
1228 struct lruvec *lruvec = folio_lruvec(folio);
1230 spin_lock_irq(&lruvec->lru_lock);
1231 lruvec_memcg_debug(lruvec, folio);
1237 * folio_lruvec_lock_irqsave - Lock the lruvec for a folio.
1238 * @folio: Pointer to the folio.
1239 * @flags: Pointer to irqsave flags.
1241 * These functions are safe to use under any of the following conditions:
1243 * - folio_test_lru false
1244 * - folio frozen (refcount of 0)
1246 * Return: The lruvec this folio is on with its lock held and interrupts
1249 struct lruvec *folio_lruvec_lock_irqsave(struct folio *folio,
1250 unsigned long *flags)
1252 struct lruvec *lruvec = folio_lruvec(folio);
1254 spin_lock_irqsave(&lruvec->lru_lock, *flags);
1255 lruvec_memcg_debug(lruvec, folio);
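/*
 * Illustrative usage sketch (not part of the original file): the irqsave
 * variant is paired with unlock_page_lruvec_irqrestore() from
 * <linux/memcontrol.h>.
 *
 *	unsigned long flags;
 *	struct lruvec *lruvec = folio_lruvec_lock_irqsave(folio, &flags);
 *
 *	... manipulate the folio's LRU state under lru_lock ...
 *
 *	unlock_page_lruvec_irqrestore(lruvec, flags);
 */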
1261 * mem_cgroup_update_lru_size - account for adding or removing an lru page
1262 * @lruvec: mem_cgroup per zone lru vector
1263 * @lru: index of lru list the page is sitting on
1264 * @zid: zone id of the accounted pages
1265 * @nr_pages: positive when adding or negative when removing
1267 * This function must be called under lru_lock, just before a page is added
1268 * to or just after a page is removed from an lru list.
1270 void mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru,
1271 int zid, int nr_pages)
1273 struct mem_cgroup_per_node *mz;
1274 unsigned long *lru_size;
1277 if (mem_cgroup_disabled())
1280 mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
1281 lru_size = &mz->lru_zone_size[zid][lru];
1284 *lru_size += nr_pages;
1287 if (WARN_ONCE(size < 0,
1288 "%s(%p, %d, %d): lru_size %ld\n",
1289 __func__, lruvec, lru, nr_pages, size)) {
1295 *lru_size += nr_pages;
1299 * mem_cgroup_margin - calculate chargeable space of a memory cgroup
1300 * @memcg: the memory cgroup
1302 * Returns the maximum amount of memory @memcg can be charged with, in pages.
1305 static unsigned long mem_cgroup_margin(struct mem_cgroup *memcg)
1307 unsigned long margin = 0;
1308 unsigned long count;
1309 unsigned long limit;
1311 count = page_counter_read(&memcg->memory);
1312 limit = READ_ONCE(memcg->memory.max);
1314 margin = limit - count;
1316 if (do_memsw_account()) {
1317 count = page_counter_read(&memcg->memsw);
1318 limit = READ_ONCE(memcg->memsw.max);
1320 margin = min(margin, limit - count);
1328 struct memory_stat {
1333 static const struct memory_stat memory_stats[] = {
1334 { "anon", NR_ANON_MAPPED },
1335 { "file", NR_FILE_PAGES },
1336 { "kernel", MEMCG_KMEM },
1337 { "kernel_stack", NR_KERNEL_STACK_KB },
1338 { "pagetables", NR_PAGETABLE },
1339 { "sec_pagetables", NR_SECONDARY_PAGETABLE },
1340 { "percpu", MEMCG_PERCPU_B },
1341 { "sock", MEMCG_SOCK },
1342 { "vmalloc", MEMCG_VMALLOC },
1343 { "shmem", NR_SHMEM },
1345 { "zswap", MEMCG_ZSWAP_B },
1346 { "zswapped", MEMCG_ZSWAPPED },
1348 { "file_mapped", NR_FILE_MAPPED },
1349 { "file_dirty", NR_FILE_DIRTY },
1350 { "file_writeback", NR_WRITEBACK },
1352 { "swapcached", NR_SWAPCACHE },
1354 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
1355 { "anon_thp", NR_ANON_THPS },
1356 { "file_thp", NR_FILE_THPS },
1357 { "shmem_thp", NR_SHMEM_THPS },
1359 { "inactive_anon", NR_INACTIVE_ANON },
1360 { "active_anon", NR_ACTIVE_ANON },
1361 { "inactive_file", NR_INACTIVE_FILE },
1362 { "active_file", NR_ACTIVE_FILE },
1363 { "unevictable", NR_UNEVICTABLE },
1364 { "slab_reclaimable", NR_SLAB_RECLAIMABLE_B },
1365 { "slab_unreclaimable", NR_SLAB_UNRECLAIMABLE_B },
1366 #ifdef CONFIG_HUGETLB_PAGE
1367 { "hugetlb", NR_HUGETLB },
1370 /* The memory events */
1371 { "workingset_refault_anon", WORKINGSET_REFAULT_ANON },
1372 { "workingset_refault_file", WORKINGSET_REFAULT_FILE },
1373 { "workingset_activate_anon", WORKINGSET_ACTIVATE_ANON },
1374 { "workingset_activate_file", WORKINGSET_ACTIVATE_FILE },
1375 { "workingset_restore_anon", WORKINGSET_RESTORE_ANON },
1376 { "workingset_restore_file", WORKINGSET_RESTORE_FILE },
1377 { "workingset_nodereclaim", WORKINGSET_NODERECLAIM },
1379 { "pgdemote_kswapd", PGDEMOTE_KSWAPD },
1380 { "pgdemote_direct", PGDEMOTE_DIRECT },
1381 { "pgdemote_khugepaged", PGDEMOTE_KHUGEPAGED },
1382 { "pgdemote_proactive", PGDEMOTE_PROACTIVE },
1383 #ifdef CONFIG_NUMA_BALANCING
1384 { "pgpromote_success", PGPROMOTE_SUCCESS },
1388 /* The actual unit of the state item, not the same as the output unit */
1389 static int memcg_page_state_unit(int item)
1392 case MEMCG_PERCPU_B:
1394 case NR_SLAB_RECLAIMABLE_B:
1395 case NR_SLAB_UNRECLAIMABLE_B:
1397 case NR_KERNEL_STACK_KB:
1404 /* Translate stat items to the correct unit for memory.stat output */
1405 static int memcg_page_state_output_unit(int item)
1408 * Workingset state is actually in pages, but we export it to userspace
1409 * as a scalar count of events, so special case it here.
1411 * Demotion and promotion activities are exported in pages, consistent
1412 * with their global counterparts.
1415 case WORKINGSET_REFAULT_ANON:
1416 case WORKINGSET_REFAULT_FILE:
1417 case WORKINGSET_ACTIVATE_ANON:
1418 case WORKINGSET_ACTIVATE_FILE:
1419 case WORKINGSET_RESTORE_ANON:
1420 case WORKINGSET_RESTORE_FILE:
1421 case WORKINGSET_NODERECLAIM:
1422 case PGDEMOTE_KSWAPD:
1423 case PGDEMOTE_DIRECT:
1424 case PGDEMOTE_KHUGEPAGED:
1425 case PGDEMOTE_PROACTIVE:
1426 #ifdef CONFIG_NUMA_BALANCING
1427 case PGPROMOTE_SUCCESS:
1431 return memcg_page_state_unit(item);
1435 unsigned long memcg_page_state_output(struct mem_cgroup *memcg, int item)
1437 return memcg_page_state(memcg, item) *
1438 memcg_page_state_output_unit(item);
1441 #ifdef CONFIG_MEMCG_V1
1442 unsigned long memcg_page_state_local_output(struct mem_cgroup *memcg, int item)
1444 return memcg_page_state_local(memcg, item) *
1445 memcg_page_state_output_unit(item);
1449 #ifdef CONFIG_HUGETLB_PAGE
1450 static bool memcg_accounts_hugetlb(void)
1452 return cgrp_dfl_root.flags & CGRP_ROOT_MEMORY_HUGETLB_ACCOUNTING;
1454 #else /* CONFIG_HUGETLB_PAGE */
1455 static bool memcg_accounts_hugetlb(void)
1459 #endif /* CONFIG_HUGETLB_PAGE */
1461 static void memcg_stat_format(struct mem_cgroup *memcg, struct seq_buf *s)
1466 * Provide statistics on the state of the memory subsystem as
1467 * well as cumulative event counters that show past behavior.
1469 * This list is ordered following a combination of these gradients:
1470 * 1) generic big picture -> specifics and details
1471 * 2) reflecting userspace activity -> reflecting kernel heuristics
1473 * Current memory state:
1475 mem_cgroup_flush_stats(memcg);
1477 for (i = 0; i < ARRAY_SIZE(memory_stats); i++) {
1480 #ifdef CONFIG_HUGETLB_PAGE
1481 if (unlikely(memory_stats[i].idx == NR_HUGETLB) &&
1482 !memcg_accounts_hugetlb())
1485 size = memcg_page_state_output(memcg, memory_stats[i].idx);
1486 seq_buf_printf(s, "%s %llu\n", memory_stats[i].name, size);
1488 if (unlikely(memory_stats[i].idx == NR_SLAB_UNRECLAIMABLE_B)) {
1489 size += memcg_page_state_output(memcg,
1490 NR_SLAB_RECLAIMABLE_B);
1491 seq_buf_printf(s, "slab %llu\n", size);
1495 /* Accumulated memory events */
1496 seq_buf_printf(s, "pgscan %lu\n",
1497 memcg_events(memcg, PGSCAN_KSWAPD) +
1498 memcg_events(memcg, PGSCAN_DIRECT) +
1499 memcg_events(memcg, PGSCAN_PROACTIVE) +
1500 memcg_events(memcg, PGSCAN_KHUGEPAGED));
1501 seq_buf_printf(s, "pgsteal %lu\n",
1502 memcg_events(memcg, PGSTEAL_KSWAPD) +
1503 memcg_events(memcg, PGSTEAL_DIRECT) +
1504 memcg_events(memcg, PGSTEAL_PROACTIVE) +
1505 memcg_events(memcg, PGSTEAL_KHUGEPAGED));
1507 for (i = 0; i < ARRAY_SIZE(memcg_vm_event_stat); i++) {
1508 #ifdef CONFIG_MEMCG_V1
1509 if (memcg_vm_event_stat[i] == PGPGIN ||
1510 memcg_vm_event_stat[i] == PGPGOUT)
1513 seq_buf_printf(s, "%s %lu\n",
1514 vm_event_name(memcg_vm_event_stat[i]),
1515 memcg_events(memcg, memcg_vm_event_stat[i]));
1519 static void memory_stat_format(struct mem_cgroup *memcg, struct seq_buf *s)
1521 if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
1522 memcg_stat_format(memcg, s);
1524 memcg1_stat_format(memcg, s);
1525 if (seq_buf_has_overflowed(s))
1526 pr_warn("%s: Warning, stat buffer overflow, please report\n", __func__);
1530 * mem_cgroup_print_oom_context: Print OOM information relevant to
1531 * memory controller.
1532 * @memcg: The memory cgroup that went over limit
1533 * @p: Task that is going to be killed
1535 * NOTE: @memcg and @p's mem_cgroup can be different when hierarchy is
1538 void mem_cgroup_print_oom_context(struct mem_cgroup *memcg, struct task_struct *p)
1543 pr_cont(",oom_memcg=");
1544 pr_cont_cgroup_path(memcg->css.cgroup);
1546 pr_cont(",global_oom");
1548 pr_cont(",task_memcg=");
1549 pr_cont_cgroup_path(task_cgroup(p, memory_cgrp_id));
1555 * mem_cgroup_print_oom_meminfo: Print OOM memory information relevant to
1556 * memory controller.
1557 * @memcg: The memory cgroup that went over limit
1559 void mem_cgroup_print_oom_meminfo(struct mem_cgroup *memcg)
1561 /* Use a static buffer, as the caller is holding oom_lock. */
1562 static char buf[SEQ_BUF_SIZE];
1564 unsigned long memory_failcnt;
1566 lockdep_assert_held(&oom_lock);
1568 if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
1569 memory_failcnt = atomic_long_read(&memcg->memory_events[MEMCG_MAX]);
1571 memory_failcnt = memcg->memory.failcnt;
1573 pr_info("memory: usage %llukB, limit %llukB, failcnt %lu\n",
1574 K((u64)page_counter_read(&memcg->memory)),
1575 K((u64)READ_ONCE(memcg->memory.max)), memory_failcnt);
1576 if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
1577 pr_info("swap: usage %llukB, limit %llukB, failcnt %lu\n",
1578 K((u64)page_counter_read(&memcg->swap)),
1579 K((u64)READ_ONCE(memcg->swap.max)),
1580 atomic_long_read(&memcg->memory_events[MEMCG_SWAP_MAX]));
1581 #ifdef CONFIG_MEMCG_V1
1583 pr_info("memory+swap: usage %llukB, limit %llukB, failcnt %lu\n",
1584 K((u64)page_counter_read(&memcg->memsw)),
1585 K((u64)memcg->memsw.max), memcg->memsw.failcnt);
1586 pr_info("kmem: usage %llukB, limit %llukB, failcnt %lu\n",
1587 K((u64)page_counter_read(&memcg->kmem)),
1588 K((u64)memcg->kmem.max), memcg->kmem.failcnt);
1592 pr_info("Memory cgroup stats for ");
1593 pr_cont_cgroup_path(memcg->css.cgroup);
1595 seq_buf_init(&s, buf, SEQ_BUF_SIZE);
1596 memory_stat_format(memcg, &s);
1597 seq_buf_do_printk(&s, KERN_INFO);
1601 * Return the memory (and swap, if configured) limit for a memcg.
1603 unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg)
1605 unsigned long max = READ_ONCE(memcg->memory.max);
1607 if (do_memsw_account()) {
1608 if (mem_cgroup_swappiness(memcg)) {
1609 /* Calculate swap excess capacity from memsw limit */
1610 unsigned long swap = READ_ONCE(memcg->memsw.max) - max;
1612 max += min(swap, (unsigned long)total_swap_pages);
1615 if (mem_cgroup_swappiness(memcg))
1616 max += min(READ_ONCE(memcg->swap.max),
1617 (unsigned long)total_swap_pages);
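/*
 * Worked example (illustrative, not from the original source), for the
 * cgroup1 memsw case with swappiness enabled: with memory.max = 1 GiB
 * (262144 pages), memsw.max = 1.5 GiB (393216 pages) and 2 GiB of total
 * swap, the swap excess is 393216 - 262144 = 131072 pages, so the returned
 * maximum is 262144 + min(131072, 524288) = 393216 pages.
 */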
1622 unsigned long mem_cgroup_size(struct mem_cgroup *memcg)
1624 return page_counter_read(&memcg->memory);
1627 static bool mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask,
1630 struct oom_control oc = {
1634 .gfp_mask = gfp_mask,
1639 if (mutex_lock_killable(&oom_lock))
1642 if (mem_cgroup_margin(memcg) >= (1 << order))
1646 * A few threads which were not waiting at mutex_lock_killable() can
1647 * fail to bail out. Therefore, check again after holding oom_lock.
1649 ret = out_of_memory(&oc);
1652 mutex_unlock(&oom_lock);
1657 * Returns true if one or more processes were successfully killed, though in
1658 * some corner cases it can return true even without killing any process.
1660 static bool mem_cgroup_oom(struct mem_cgroup *memcg, gfp_t mask, int order)
1664 if (order > PAGE_ALLOC_COSTLY_ORDER)
1667 memcg_memory_event(memcg, MEMCG_OOM);
1669 if (!memcg1_oom_prepare(memcg, &locked))
1672 ret = mem_cgroup_out_of_memory(memcg, mask, order);
1674 memcg1_oom_finish(memcg, locked);
1680 * mem_cgroup_get_oom_group - get a memory cgroup to clean up after OOM
1681 * @victim: task to be killed by the OOM killer
1682 * @oom_domain: memcg in case of memcg OOM, NULL in case of system-wide OOM
1684 * Returns a pointer to a memory cgroup, which has to be cleaned up
1685 * by killing all of its OOM-killable tasks.
1687 * Caller has to call mem_cgroup_put() on the returned non-NULL memcg.
1689 struct mem_cgroup *mem_cgroup_get_oom_group(struct task_struct *victim,
1690 struct mem_cgroup *oom_domain)
1692 struct mem_cgroup *oom_group = NULL;
1693 struct mem_cgroup *memcg;
1695 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
1699 oom_domain = root_mem_cgroup;
1703 memcg = mem_cgroup_from_task(victim);
1704 if (mem_cgroup_is_root(memcg))
1708 * If the victim task has been asynchronously moved to a different
1709 * memory cgroup, we might end up killing tasks outside oom_domain.
1710 * In this case it's better to ignore memory.group.oom.
1712 if (unlikely(!mem_cgroup_is_descendant(memcg, oom_domain)))
1716 * Traverse the memory cgroup hierarchy from the victim task's
1717 * cgroup up to the OOMing cgroup (or root) to find the
1718 * highest-level memory cgroup with oom.group set.
1720 for (; memcg; memcg = parent_mem_cgroup(memcg)) {
1721 if (READ_ONCE(memcg->oom_group))
1724 if (memcg == oom_domain)
1729 css_get(&oom_group->css);
1736 void mem_cgroup_print_oom_group(struct mem_cgroup *memcg)
1738 pr_info("Tasks in ");
1739 pr_cont_cgroup_path(memcg->css.cgroup);
1740 pr_cont(" are going to be killed due to memory.oom.group set\n");
1744 * The value of NR_MEMCG_STOCK is selected to keep the cached memcgs and their
1745 * nr_pages in a single cacheline. This may change in the future.
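 *
 * For illustration (an assumption about typical configurations, not text
 * from the original source): with 8-byte pointers and 64-byte cachelines,
 * NR_MEMCG_STOCK = 7 gives 7 * 8 = 56 bytes of cached[] plus 7 bytes of
 * nr_pages[], i.e. 63 bytes, which fits in a single cacheline.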
1747 #define NR_MEMCG_STOCK 7
1748 #define FLUSHING_CACHED_CHARGE 0
1749 struct memcg_stock_pcp {
1750 local_trylock_t lock;
1751 uint8_t nr_pages[NR_MEMCG_STOCK];
1752 struct mem_cgroup *cached[NR_MEMCG_STOCK];
1754 struct work_struct work;
1755 unsigned long flags;
1758 static DEFINE_PER_CPU_ALIGNED(struct memcg_stock_pcp, memcg_stock) = {
1759 .lock = INIT_LOCAL_TRYLOCK(lock),
1762 struct obj_stock_pcp {
1763 local_trylock_t lock;
1764 unsigned int nr_bytes;
1765 struct obj_cgroup *cached_objcg;
1766 struct pglist_data *cached_pgdat;
1767 int nr_slab_reclaimable_b;
1768 int nr_slab_unreclaimable_b;
1770 struct work_struct work;
1771 unsigned long flags;
1774 static DEFINE_PER_CPU_ALIGNED(struct obj_stock_pcp, obj_stock) = {
1775 .lock = INIT_LOCAL_TRYLOCK(lock),
1778 static DEFINE_MUTEX(percpu_charge_mutex);
1780 static void drain_obj_stock(struct obj_stock_pcp *stock);
1781 static bool obj_stock_flush_required(struct obj_stock_pcp *stock,
1782 struct mem_cgroup *root_memcg);
1785 * consume_stock: Try to consume stocked charge on this cpu.
1786 * @memcg: memcg to consume from.
1787 * @nr_pages: how many pages to charge.
1789 * Consume the cached charge if enough nr_pages are present; otherwise return
1790 * failure. Also return failure for a charge request larger than
1791 * MEMCG_CHARGE_BATCH or if the local lock is already taken.
1793 * Returns true if successful, false otherwise.
1795 static bool consume_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
1797 struct memcg_stock_pcp *stock;
1798 uint8_t stock_pages;
1802 if (nr_pages > MEMCG_CHARGE_BATCH ||
1803 !local_trylock(&memcg_stock.lock))
1806 stock = this_cpu_ptr(&memcg_stock);
1808 for (i = 0; i < NR_MEMCG_STOCK; ++i) {
1809 if (memcg != READ_ONCE(stock->cached[i]))
1812 stock_pages = READ_ONCE(stock->nr_pages[i]);
1813 if (stock_pages >= nr_pages) {
1814 WRITE_ONCE(stock->nr_pages[i], stock_pages - nr_pages);
1820 local_unlock(&memcg_stock.lock);
1825 static void memcg_uncharge(struct mem_cgroup *memcg, unsigned int nr_pages)
1827 page_counter_uncharge(&memcg->memory, nr_pages);
1828 if (do_memsw_account())
1829 page_counter_uncharge(&memcg->memsw, nr_pages);
1833 * Return the cached per-cpu stock to the memcg's counters and reset the cached information.
1835 static void drain_stock(struct memcg_stock_pcp *stock, int i)
1837 struct mem_cgroup *old = READ_ONCE(stock->cached[i]);
1838 uint8_t stock_pages;
1843 stock_pages = READ_ONCE(stock->nr_pages[i]);
1845 memcg_uncharge(old, stock_pages);
1846 WRITE_ONCE(stock->nr_pages[i], 0);
1850 WRITE_ONCE(stock->cached[i], NULL);
1853 static void drain_stock_fully(struct memcg_stock_pcp *stock)
1857 for (i = 0; i < NR_MEMCG_STOCK; ++i)
1858 drain_stock(stock, i);
1861 static void drain_local_memcg_stock(struct work_struct *dummy)
1863 struct memcg_stock_pcp *stock;
1865 if (WARN_ONCE(!in_task(), "drain in non-task context"))
1868 local_lock(&memcg_stock.lock);
1870 stock = this_cpu_ptr(&memcg_stock);
1871 drain_stock_fully(stock);
1872 clear_bit(FLUSHING_CACHED_CHARGE, &stock->flags);
1874 local_unlock(&memcg_stock.lock);
1877 static void drain_local_obj_stock(struct work_struct *dummy)
1879 struct obj_stock_pcp *stock;
1881 if (WARN_ONCE(!in_task(), "drain in non-task context"))
1884 local_lock(&obj_stock.lock);
1886 stock = this_cpu_ptr(&obj_stock);
1887 drain_obj_stock(stock);
1888 clear_bit(FLUSHING_CACHED_CHARGE, &stock->flags);
1890 local_unlock(&obj_stock.lock);
1893 static void refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
1895 struct memcg_stock_pcp *stock;
1896 struct mem_cgroup *cached;
1897 uint8_t stock_pages;
1898 bool success = false;
1899 int empty_slot = -1;
1903 * For now, limit MEMCG_CHARGE_BATCH to 127 or less. In the future, if we
1904 * decide to increase it beyond 127, we will need more careful
1905 * handling of nr_pages[] in struct memcg_stock_pcp.
1907 BUILD_BUG_ON(MEMCG_CHARGE_BATCH > S8_MAX);
1909 VM_WARN_ON_ONCE(mem_cgroup_is_root(memcg));
1911 if (nr_pages > MEMCG_CHARGE_BATCH ||
1912 !local_trylock(&memcg_stock.lock)) {
1914 * In case of larger than batch refill or unlikely failure to
1915 * lock the percpu memcg_stock.lock, uncharge memcg directly.
1917 memcg_uncharge(memcg, nr_pages);
1921 stock = this_cpu_ptr(&memcg_stock);
1922 for (i = 0; i < NR_MEMCG_STOCK; ++i) {
1923 cached = READ_ONCE(stock->cached[i]);
1924 if (!cached && empty_slot == -1)
1926 if (memcg == READ_ONCE(stock->cached[i])) {
1927 stock_pages = READ_ONCE(stock->nr_pages[i]) + nr_pages;
1928 WRITE_ONCE(stock->nr_pages[i], stock_pages);
1929 if (stock_pages > MEMCG_CHARGE_BATCH)
1930 drain_stock(stock, i);
1939 i = get_random_u32_below(NR_MEMCG_STOCK);
1940 drain_stock(stock, i);
1942 css_get(&memcg->css);
1943 WRITE_ONCE(stock->cached[i], memcg);
1944 WRITE_ONCE(stock->nr_pages[i], nr_pages);
1947 local_unlock(&memcg_stock.lock);
1950 static bool is_memcg_drain_needed(struct memcg_stock_pcp *stock,
1951 struct mem_cgroup *root_memcg)
1953 struct mem_cgroup *memcg;
1958 for (i = 0; i < NR_MEMCG_STOCK; ++i) {
1959 memcg = READ_ONCE(stock->cached[i]);
1963 if (READ_ONCE(stock->nr_pages[i]) &&
1964 mem_cgroup_is_descendant(memcg, root_memcg)) {
1974 * Drain all per-CPU charge caches for the given root_memcg and the
1975 * subtree of the hierarchy under it.
1977 void drain_all_stock(struct mem_cgroup *root_memcg)
1981 /* If someone's already draining, avoid running more workers. */
1982 if (!mutex_trylock(&percpu_charge_mutex))
1985 * Notify other cpus that a system-wide "drain" is running.
1986 * We do not care about races with cpu hotplug because cpu down,
1987 * as well as workers from this path, always operate on the local
1988 * per-cpu data. CPU up doesn't touch memcg_stock at all.
1991 curcpu = smp_processor_id();
1992 for_each_online_cpu(cpu) {
1993 struct memcg_stock_pcp *memcg_st = &per_cpu(memcg_stock, cpu);
1994 struct obj_stock_pcp *obj_st = &per_cpu(obj_stock, cpu);
1996 if (!test_bit(FLUSHING_CACHED_CHARGE, &memcg_st->flags) &&
1997 is_memcg_drain_needed(memcg_st, root_memcg) &&
1998 !test_and_set_bit(FLUSHING_CACHED_CHARGE,
1999 &memcg_st->flags)) {
2001 drain_local_memcg_stock(&memcg_st->work);
2002 else if (!cpu_is_isolated(cpu))
2003 schedule_work_on(cpu, &memcg_st->work);
2006 if (!test_bit(FLUSHING_CACHED_CHARGE, &obj_st->flags) &&
2007 obj_stock_flush_required(obj_st, root_memcg) &&
2008 !test_and_set_bit(FLUSHING_CACHED_CHARGE,
2011 drain_local_obj_stock(&obj_st->work);
2012 else if (!cpu_is_isolated(cpu))
2013 schedule_work_on(cpu, &obj_st->work);
2017 mutex_unlock(&percpu_charge_mutex);
2020 static int memcg_hotplug_cpu_dead(unsigned int cpu)
2022 /* no need for the local lock */
2023 drain_obj_stock(&per_cpu(obj_stock, cpu));
2024 drain_stock_fully(&per_cpu(memcg_stock, cpu));
2029 static unsigned long reclaim_high(struct mem_cgroup *memcg,
2030 unsigned int nr_pages,
2033 unsigned long nr_reclaimed = 0;
2036 unsigned long pflags;
2038 if (page_counter_read(&memcg->memory) <=
2039 READ_ONCE(memcg->memory.high))
2042 memcg_memory_event(memcg, MEMCG_HIGH);
2044 psi_memstall_enter(&pflags);
2045 nr_reclaimed += try_to_free_mem_cgroup_pages(memcg, nr_pages,
2047 MEMCG_RECLAIM_MAY_SWAP,
2049 psi_memstall_leave(&pflags);
2050 } while ((memcg = parent_mem_cgroup(memcg)) &&
2051 !mem_cgroup_is_root(memcg));
2053 return nr_reclaimed;
2056 static void high_work_func(struct work_struct *work)
2058 struct mem_cgroup *memcg;
2060 memcg = container_of(work, struct mem_cgroup, high_work);
2061 reclaim_high(memcg, MEMCG_CHARGE_BATCH, GFP_KERNEL);
2065 * Clamp the maximum sleep time per allocation batch to 2 seconds. This is
2066 * enough to cause a significant slowdown in most cases, while still
2067 * allowing diagnostics and tracing to proceed without becoming stuck.
2069 #define MEMCG_MAX_HIGH_DELAY_JIFFIES (2UL*HZ)
2072 * When calculating the delay, we use these on either side of the exponentiation to
2073 * maintain precision and scale to a reasonable number of jiffies (see the table
2076 * - MEMCG_DELAY_PRECISION_SHIFT: Extra precision bits while translating the
2077 * overage ratio to a delay.
2078 * - MEMCG_DELAY_SCALING_SHIFT: The number of bits to scale down the
2079 * proposed penalty in order to reduce to a reasonable number of jiffies, and
2080 * to produce a reasonable delay curve.
2082 * MEMCG_DELAY_SCALING_SHIFT just happens to be a number that produces a
2083 * reasonable delay curve compared to precision-adjusted overage, not
2084 * penalising heavily at first, but still making sure that growth beyond the
2085 * limit penalises misbehaviour cgroups by slowing them down exponentially. For
2086 * example, with a high of 100 megabytes:
2088 * +-------+------------------------+
2089 * | usage | time to allocate in ms |
2090 * +-------+------------------------+
2112 * +-------+------------------------+
2114 #define MEMCG_DELAY_PRECISION_SHIFT 20
2115 #define MEMCG_DELAY_SCALING_SHIFT 14
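/*
 * Worked example (illustrative, not from the original source): with
 * memory.high = 100 MiB (25600 pages) and usage = 110 MiB (28160 pages),
 * calculate_overage() below yields (2560 << 20) / 25600 = 104857. The base
 * penalty is then 104857 * 104857 * HZ >> (20 + 14), i.e. roughly
 * 0.64 * HZ jiffies (~640 ms at HZ=1000) per MEMCG_CHARGE_BATCH-sized
 * allocation batch, before the nr_pages scaling applied in
 * calculate_high_delay().
 */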
2117 static u64 calculate_overage(unsigned long usage, unsigned long high)
2125 * Prevent division by 0 in overage calculation by acting as if
2126 * it were a threshold of 1 page.
2128 high = max(high, 1UL);
2130 overage = usage - high;
2131 overage <<= MEMCG_DELAY_PRECISION_SHIFT;
2132 return div64_u64(overage, high);
2135 static u64 mem_find_max_overage(struct mem_cgroup *memcg)
2137 u64 overage, max_overage = 0;
2140 overage = calculate_overage(page_counter_read(&memcg->memory),
2141 READ_ONCE(memcg->memory.high));
2142 max_overage = max(overage, max_overage);
2143 } while ((memcg = parent_mem_cgroup(memcg)) &&
2144 !mem_cgroup_is_root(memcg));
2149 static u64 swap_find_max_overage(struct mem_cgroup *memcg)
2151 u64 overage, max_overage = 0;
2154 overage = calculate_overage(page_counter_read(&memcg->swap),
2155 READ_ONCE(memcg->swap.high));
2157 memcg_memory_event(memcg, MEMCG_SWAP_HIGH);
2158 max_overage = max(overage, max_overage);
2159 } while ((memcg = parent_mem_cgroup(memcg)) &&
2160 !mem_cgroup_is_root(memcg));
2166 * Get the number of jiffies that we should penalise a mischievous cgroup which
2167 * is exceeding its memory.high by checking both it and its ancestors.
2169 static unsigned long calculate_high_delay(struct mem_cgroup *memcg,
2170 unsigned int nr_pages,
2173 unsigned long penalty_jiffies;
2179 * We use overage compared to memory.high to calculate the number of
2180 * jiffies to sleep (penalty_jiffies). Ideally this value should be
2181 * fairly lenient on small overages, and increasingly harsh when the
2182 * memcg in question makes it clear that it has no intention of stopping
2183 * its crazy behaviour, so we exponentially increase the delay based on
2186 penalty_jiffies = max_overage * max_overage * HZ;
2187 penalty_jiffies >>= MEMCG_DELAY_PRECISION_SHIFT;
2188 penalty_jiffies >>= MEMCG_DELAY_SCALING_SHIFT;
2191 * Factor in the task's own contribution to the overage, such that four
2192 * N-sized allocations are throttled approximately the same as one
2193 * 4N-sized allocation.
2195 * MEMCG_CHARGE_BATCH pages is nominal, so work out how much smaller or
2196 * larger the current charge batch is than that.
2198 return penalty_jiffies * nr_pages / MEMCG_CHARGE_BATCH;
2202 * Reclaims memory over the high limit. Called directly from
2203 * try_charge() (context permitting), as well as from the userland
2204 * return path where reclaim is always able to block.
2206 void mem_cgroup_handle_over_high(gfp_t gfp_mask)
2208 unsigned long penalty_jiffies;
2209 unsigned long pflags;
2210 unsigned long nr_reclaimed;
2211 unsigned int nr_pages = current->memcg_nr_pages_over_high;
2212 int nr_retries = MAX_RECLAIM_RETRIES;
2213 struct mem_cgroup *memcg;
2214 bool in_retry = false;
2216 if (likely(!nr_pages))
2219 memcg = get_mem_cgroup_from_mm(current->mm);
2220 current->memcg_nr_pages_over_high = 0;
2224 * Bail if the task is already exiting. Unlike memory.max,
2225 * memory.high enforcement isn't as strict, and there is no
2226 * OOM killer involved, which means the excess could already
2227 * be much bigger (and still growing) than it could for
2228 * memory.max; the dying task could get stuck in fruitless
2229 * reclaim for a long time, which isn't desirable.
2231 if (task_is_dying())
2235 * The allocating task should reclaim at least the batch size, but for
2236 * subsequent retries we only want to do what's necessary to prevent oom
2237 * or breaching resource isolation.
2239 * This is distinct from memory.max or page allocator behaviour because
2240 * memory.high is currently batched, whereas memory.max and the page
2241 * allocator run every time an allocation is made.
2243 nr_reclaimed = reclaim_high(memcg,
2244 in_retry ? SWAP_CLUSTER_MAX : nr_pages,
2248 * memory.high is breached and reclaim is unable to keep up. Throttle
2249 * allocators proactively to slow down excessive growth.
2251 penalty_jiffies = calculate_high_delay(memcg, nr_pages,
2252 mem_find_max_overage(memcg));
2254 penalty_jiffies += calculate_high_delay(memcg, nr_pages,
2255 swap_find_max_overage(memcg));
2258 * Clamp the max delay per usermode return so as to still keep the
2259 * application moving forwards and also permit diagnostics, albeit extremely slowly.
2262 penalty_jiffies = min(penalty_jiffies, MEMCG_MAX_HIGH_DELAY_JIFFIES);
2265 * Don't sleep if the amount of jiffies this memcg owes us is so low
2266 * that it's not even worth doing, in an attempt to be nice to those who
2267 * go only a small amount over their memory.high value and maybe haven't
2268 * been aggressively reclaimed enough yet.
2270 if (penalty_jiffies <= HZ / 100)
2274 * If reclaim is making forward progress but we're still over
2275 * memory.high, we want to encourage that rather than doing allocator throttling.
2278 if (nr_reclaimed || nr_retries--) {
2284 * Reclaim didn't manage to push usage below the limit, slow
2285 * this allocating task down.
2287 * If we exit early, we're guaranteed to die (since
2288 * schedule_timeout_killable sets TASK_KILLABLE). This means we don't
2289 * need to account for any ill-begotten jiffies to pay them off later.
2291 psi_memstall_enter(&pflags);
2292 schedule_timeout_killable(penalty_jiffies);
2293 psi_memstall_leave(&pflags);
2296 css_put(&memcg->css);
2299 static int try_charge_memcg(struct mem_cgroup *memcg, gfp_t gfp_mask,
2300 unsigned int nr_pages)
2302 unsigned int batch = max(MEMCG_CHARGE_BATCH, nr_pages);
2303 int nr_retries = MAX_RECLAIM_RETRIES;
2304 struct mem_cgroup *mem_over_limit;
2305 struct page_counter *counter;
2306 unsigned long nr_reclaimed;
2307 bool passed_oom = false;
2308 unsigned int reclaim_options = MEMCG_RECLAIM_MAY_SWAP;
2309 bool drained = false;
2310 bool raised_max_event = false;
2311 unsigned long pflags;
2314 if (consume_stock(memcg, nr_pages))
2317 if (!gfpflags_allow_spinning(gfp_mask))
2318 /* Avoid the refill and flush of the older stock */
2321 if (!do_memsw_account() ||
2322 page_counter_try_charge(&memcg->memsw, batch, &counter)) {
2323 if (page_counter_try_charge(&memcg->memory, batch, &counter))
2325 if (do_memsw_account())
2326 page_counter_uncharge(&memcg->memsw, batch);
2327 mem_over_limit = mem_cgroup_from_counter(counter, memory);
2329 mem_over_limit = mem_cgroup_from_counter(counter, memsw);
2330 reclaim_options &= ~MEMCG_RECLAIM_MAY_SWAP;
2333 if (batch > nr_pages) {
2339 * Prevent unbounded recursion when reclaim operations need to
2340 * allocate memory. This might exceed the limits temporarily,
2341 * but we prefer facilitating memory reclaim and getting back
2342 * under the limit over triggering OOM kills in these cases.
2344 if (unlikely(current->flags & PF_MEMALLOC))
2347 if (unlikely(task_in_memcg_oom(current)))
2350 if (!gfpflags_allow_blocking(gfp_mask))
2353 memcg_memory_event(mem_over_limit, MEMCG_MAX);
2354 raised_max_event = true;
2356 psi_memstall_enter(&pflags);
2357 nr_reclaimed = try_to_free_mem_cgroup_pages(mem_over_limit, nr_pages,
2358 gfp_mask, reclaim_options, NULL);
2359 psi_memstall_leave(&pflags);
2361 if (mem_cgroup_margin(mem_over_limit) >= nr_pages)
2365 drain_all_stock(mem_over_limit);
2370 if (gfp_mask & __GFP_NORETRY)
2373 * Even though the limit is exceeded at this point, reclaim
2374 * may have been able to free some pages. Retry the charge
2375 * before killing the task.
2377 * Only for regular pages, though: huge pages are rather
2378 * unlikely to succeed so close to the limit, and we fall back
2379 * to regular pages anyway in case of failure.
2381 if (nr_reclaimed && nr_pages <= (1 << PAGE_ALLOC_COSTLY_ORDER))
2387 if (gfp_mask & __GFP_RETRY_MAYFAIL)
2390 /* Avoid endless loop for tasks bypassed by the oom killer */
2391 if (passed_oom && task_is_dying())
2395 * keep retrying as long as the memcg oom killer is able to make
2396 * a forward progress or bypass the charge if the oom killer
2397 * couldn't make any progress.
2399 if (mem_cgroup_oom(mem_over_limit, gfp_mask,
2400 get_order(nr_pages * PAGE_SIZE))) {
2402 nr_retries = MAX_RECLAIM_RETRIES;
2407 * Memcg doesn't have a dedicated reserve for atomic
2408 * allocations. But like the global atomic pool, we need to
2409 * put the burden of reclaim on regular allocation requests
2410 * and let these go through as privileged allocations.
2412 if (!(gfp_mask & (__GFP_NOFAIL | __GFP_HIGH)))
2416 * If the allocation has to be enforced, don't forget to raise
2417 * a MEMCG_MAX event.
2419 if (!raised_max_event)
2420 memcg_memory_event(mem_over_limit, MEMCG_MAX);
2423 * The allocation either can't fail or will lead to more memory
2424 * being freed very soon. Allow memory usage to go over the limit
2425 * temporarily by force charging it.
2427 page_counter_charge(&memcg->memory, nr_pages);
2428 if (do_memsw_account())
2429 page_counter_charge(&memcg->memsw, nr_pages);
2434 if (batch > nr_pages)
2435 refill_stock(memcg, batch - nr_pages);
2438 * If the hierarchy is above the normal consumption range, schedule
2439 * reclaim on returning to userland. We can perform reclaim here
2440 * if __GFP_RECLAIM but let's always punt for simplicity and so that
2441 * GFP_KERNEL can consistently be used during reclaim. @memcg is
2442 * not recorded as it most likely matches current's and won't
2443 * change in the meantime. As high limit is checked again before
2444 * reclaim, the cost of mismatch is negligible.
2447 bool mem_high, swap_high;
2449 mem_high = page_counter_read(&memcg->memory) >
2450 READ_ONCE(memcg->memory.high);
2451 swap_high = page_counter_read(&memcg->swap) >
2452 READ_ONCE(memcg->swap.high);
2454 /* Don't bother a random interrupted task */
2457 schedule_work(&memcg->high_work);
2463 if (mem_high || swap_high) {
2465 * The allocating tasks in this cgroup will need to do
2466 * reclaim or be throttled to prevent further growth
2467 * of the memory or swap footprints.
2469 * Target some best-effort fairness between the tasks,
2470 * and distribute reclaim work and delay penalties
2471 * based on how much each task is actually allocating.
2473 current->memcg_nr_pages_over_high += batch;
2474 set_notify_resume(current);
2477 } while ((memcg = parent_mem_cgroup(memcg)));
2480 * Reclaim is set up above to be called from the userland
2481 * return path. But also attempt synchronous reclaim to avoid
2482 * excessive overrun while the task is still inside the
2483 * kernel. If this is successful, the return path will see it
2484 * when it rechecks the overage and simply bail out.
2486 if (current->memcg_nr_pages_over_high > MEMCG_CHARGE_BATCH &&
2487 !(current->flags & PF_MEMALLOC) &&
2488 gfpflags_allow_blocking(gfp_mask))
2489 mem_cgroup_handle_over_high(gfp_mask);
2493 static inline int try_charge(struct mem_cgroup *memcg, gfp_t gfp_mask,
2494 unsigned int nr_pages)
2496 if (mem_cgroup_is_root(memcg))
2499 return try_charge_memcg(memcg, gfp_mask, nr_pages);
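/*
 * A minimal sketch of the expected caller pattern (the caller below is
 * hypothetical): charge first, and only bind the folio to the memcg via
 * commit_charge() once the charge has succeeded.
 *
 *	if (try_charge(memcg, gfp, folio_nr_pages(folio)))
 *		return -ENOMEM;
 *	commit_charge(folio, memcg);
 */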
2502 static void commit_charge(struct folio *folio, struct mem_cgroup *memcg)
2504 VM_BUG_ON_FOLIO(folio_memcg_charged(folio), folio);
2506 * Any of the following ensures page's memcg stability:
2510 * - exclusive reference
2512 folio->memcg_data = (unsigned long)memcg;
2515 #ifdef CONFIG_MEMCG_NMI_SAFETY_REQUIRES_ATOMIC
2516 static inline void account_slab_nmi_safe(struct mem_cgroup *memcg,
2517 struct pglist_data *pgdat,
2518 enum node_stat_item idx, int nr)
2520 struct lruvec *lruvec;
2522 if (likely(!in_nmi())) {
2523 lruvec = mem_cgroup_lruvec(memcg, pgdat);
2524 mod_memcg_lruvec_state(lruvec, idx, nr);
2526 struct mem_cgroup_per_node *pn = memcg->nodeinfo[pgdat->node_id];
2528 /* preemption is disabled in_nmi(). */
2529 css_rstat_updated(&memcg->css, smp_processor_id());
2530 if (idx == NR_SLAB_RECLAIMABLE_B)
2531 atomic_add(nr, &pn->slab_reclaimable);
2533 atomic_add(nr, &pn->slab_unreclaimable);
2537 static inline void account_slab_nmi_safe(struct mem_cgroup *memcg,
2538 struct pglist_data *pgdat,
2539 enum node_stat_item idx, int nr)
2541 struct lruvec *lruvec;
2543 lruvec = mem_cgroup_lruvec(memcg, pgdat);
2544 mod_memcg_lruvec_state(lruvec, idx, nr);
2548 static inline void mod_objcg_mlstate(struct obj_cgroup *objcg,
2549 struct pglist_data *pgdat,
2550 enum node_stat_item idx, int nr)
2552 struct mem_cgroup *memcg;
2555 memcg = obj_cgroup_memcg(objcg);
2556 account_slab_nmi_safe(memcg, pgdat, idx, nr);
2560 static __always_inline
2561 struct mem_cgroup *mem_cgroup_from_obj_folio(struct folio *folio, void *p)
2564 * Slab objects are accounted individually, not per-page.
2565 * Memcg membership data for each individual object is saved in
2568 if (folio_test_slab(folio)) {
2569 struct slabobj_ext *obj_exts;
2573 slab = folio_slab(folio);
2574 obj_exts = slab_obj_exts(slab);
2578 off = obj_to_index(slab->slab_cache, slab, p);
2579 if (obj_exts[off].objcg)
2580 return obj_cgroup_memcg(obj_exts[off].objcg);
2586 * folio_memcg_check() is used here, because in theory we can encounter
2587 * a folio where the slab flag has been cleared already, but
2588 * slab->obj_exts has not been freed yet.
2589 * folio_memcg_check() will guarantee that a proper memory
2590 * cgroup pointer or NULL will be returned.
2592 return folio_memcg_check(folio);
2596 * Returns a pointer to the memory cgroup to which the kernel object is charged.
2597 * It is not suitable for objects allocated using vmalloc().
2599 * A passed kernel object must be a slab object or a generic kernel page.
2601 * The caller must ensure the memcg lifetime, e.g. by taking rcu_read_lock(),
2602 * cgroup_mutex, etc.
2604 struct mem_cgroup *mem_cgroup_from_slab_obj(void *p)
2606 if (mem_cgroup_disabled())
2609 return mem_cgroup_from_obj_folio(virt_to_folio(p), p);
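/*
 * Usage sketch (hypothetical caller): the returned memcg is only stable
 * while the caller blocks cgroup destruction, e.g. under RCU:
 *
 *	rcu_read_lock();
 *	memcg = mem_cgroup_from_slab_obj(p);
 *	if (memcg)
 *		... use or account against memcg ...
 *	rcu_read_unlock();
 */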
2612 static struct obj_cgroup *__get_obj_cgroup_from_memcg(struct mem_cgroup *memcg)
2614 struct obj_cgroup *objcg = NULL;
2616 for (; !mem_cgroup_is_root(memcg); memcg = parent_mem_cgroup(memcg)) {
2617 objcg = rcu_dereference(memcg->objcg);
2618 if (likely(objcg && obj_cgroup_tryget(objcg)))
2625 static struct obj_cgroup *current_objcg_update(void)
2627 struct mem_cgroup *memcg;
2628 struct obj_cgroup *old, *objcg = NULL;
2631 /* Atomically drop the update bit. */
2632 old = xchg(&current->objcg, NULL);
2634 old = (struct obj_cgroup *)
2635 ((unsigned long)old & ~CURRENT_OBJCG_UPDATE_FLAG);
2636 obj_cgroup_put(old);
2641 /* If new objcg is NULL, no reason for the second atomic update. */
2642 if (!current->mm || (current->flags & PF_KTHREAD))
2646 * Release the objcg pointer from the previous iteration,
2647 * if try_cmpxchg() below fails.
2649 if (unlikely(objcg)) {
2650 obj_cgroup_put(objcg);
2655 * Obtain the new objcg pointer. The current task can be
2656 * asynchronously moved to another memcg and the previous
2657 * memcg can be offlined. So let's get the memcg pointer
2658 * and try to get a reference to objcg under an rcu read lock.
2662 memcg = mem_cgroup_from_task(current);
2663 objcg = __get_obj_cgroup_from_memcg(memcg);
2667 * Try to set up a new objcg pointer atomically. If it
2668 * fails, it means the update flag was set concurrently, so
2669 * the whole procedure should be repeated.
2671 } while (!try_cmpxchg(&current->objcg, &old, objcg));
2676 __always_inline struct obj_cgroup *current_obj_cgroup(void)
2678 struct mem_cgroup *memcg;
2679 struct obj_cgroup *objcg;
2681 if (IS_ENABLED(CONFIG_MEMCG_NMI_UNSAFE) && in_nmi())
2685 memcg = current->active_memcg;
2686 if (unlikely(memcg))
2689 objcg = READ_ONCE(current->objcg);
2690 if (unlikely((unsigned long)objcg & CURRENT_OBJCG_UPDATE_FLAG))
2691 objcg = current_objcg_update();
2693 * Objcg reference is kept by the task, so it's safe
2694 * to use the objcg by the current task.
2699 memcg = this_cpu_read(int_active_memcg);
2700 if (unlikely(memcg))
2707 for (; !mem_cgroup_is_root(memcg); memcg = parent_mem_cgroup(memcg)) {
2709 * Memcg pointer is protected by scope (see set_active_memcg())
2710 * and is pinning the corresponding objcg, so objcg can't go
2711 * away and can be used within the scope without any additional
2714 objcg = rcu_dereference_check(memcg->objcg, 1);
2722 struct obj_cgroup *get_obj_cgroup_from_folio(struct folio *folio)
2724 struct obj_cgroup *objcg;
2726 if (!memcg_kmem_online())
2729 if (folio_memcg_kmem(folio)) {
2730 objcg = __folio_objcg(folio);
2731 obj_cgroup_get(objcg);
2733 struct mem_cgroup *memcg;
2736 memcg = __folio_memcg(folio);
2738 objcg = __get_obj_cgroup_from_memcg(memcg);
2746 #ifdef CONFIG_MEMCG_NMI_SAFETY_REQUIRES_ATOMIC
2747 static inline void account_kmem_nmi_safe(struct mem_cgroup *memcg, int val)
2749 if (likely(!in_nmi())) {
2750 mod_memcg_state(memcg, MEMCG_KMEM, val);
2752 /* preemption is disabled in_nmi(). */
2753 css_rstat_updated(&memcg->css, smp_processor_id());
2754 atomic_add(val, &memcg->kmem_stat);
2758 static inline void account_kmem_nmi_safe(struct mem_cgroup *memcg, int val)
2760 mod_memcg_state(memcg, MEMCG_KMEM, val);
2765 * obj_cgroup_uncharge_pages: uncharge a number of kernel pages from a objcg
2766 * @objcg: object cgroup to uncharge
2767 * @nr_pages: number of pages to uncharge
2769 static void obj_cgroup_uncharge_pages(struct obj_cgroup *objcg,
2770 unsigned int nr_pages)
2772 struct mem_cgroup *memcg;
2774 memcg = get_mem_cgroup_from_objcg(objcg);
2776 account_kmem_nmi_safe(memcg, -nr_pages);
2777 memcg1_account_kmem(memcg, -nr_pages);
2778 if (!mem_cgroup_is_root(memcg))
2779 refill_stock(memcg, nr_pages);
2781 css_put(&memcg->css);
2785 * obj_cgroup_charge_pages: charge a number of kernel pages to a objcg
2786 * @objcg: object cgroup to charge
2787 * @gfp: reclaim mode
2788 * @nr_pages: number of pages to charge
2790 * Returns 0 on success, an error code on failure.
2792 static int obj_cgroup_charge_pages(struct obj_cgroup *objcg, gfp_t gfp,
2793 unsigned int nr_pages)
2795 struct mem_cgroup *memcg;
2798 memcg = get_mem_cgroup_from_objcg(objcg);
2800 ret = try_charge_memcg(memcg, gfp, nr_pages);
2804 account_kmem_nmi_safe(memcg, nr_pages);
2805 memcg1_account_kmem(memcg, nr_pages);
2807 css_put(&memcg->css);
2812 static struct obj_cgroup *page_objcg(const struct page *page)
2814 unsigned long memcg_data = page->memcg_data;
2816 if (mem_cgroup_disabled() || !memcg_data)
2819 VM_BUG_ON_PAGE((memcg_data & OBJEXTS_FLAGS_MASK) != MEMCG_DATA_KMEM,
2821 return (struct obj_cgroup *)(memcg_data - MEMCG_DATA_KMEM);
2824 static void page_set_objcg(struct page *page, const struct obj_cgroup *objcg)
2826 page->memcg_data = (unsigned long)objcg | MEMCG_DATA_KMEM;
2830 * __memcg_kmem_charge_page: charge a kmem page to the current memory cgroup
2831 * @page: page to charge
2832 * @gfp: reclaim mode
2833 * @order: allocation order
2835 * Returns 0 on success, an error code on failure.
2837 int __memcg_kmem_charge_page(struct page *page, gfp_t gfp, int order)
2839 struct obj_cgroup *objcg;
2842 objcg = current_obj_cgroup();
2844 ret = obj_cgroup_charge_pages(objcg, gfp, 1 << order);
2846 obj_cgroup_get(objcg);
2847 page_set_objcg(page, objcg);
2855 * __memcg_kmem_uncharge_page: uncharge a kmem page
2856 * @page: page to uncharge
2857 * @order: allocation order
2859 void __memcg_kmem_uncharge_page(struct page *page, int order)
2861 struct obj_cgroup *objcg = page_objcg(page);
2862 unsigned int nr_pages = 1 << order;
2867 obj_cgroup_uncharge_pages(objcg, nr_pages);
2868 page->memcg_data = 0;
2869 obj_cgroup_put(objcg);
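/*
 * Sketch of how the charge and uncharge pair up around a kernel page
 * allocation. This is simplified: in practice the page allocator makes
 * these calls itself for __GFP_ACCOUNT requests rather than callers
 * doing it by hand.
 *
 *	page = alloc_pages(gfp, order);
 *	if (page && __memcg_kmem_charge_page(page, gfp, order)) {
 *		__free_pages(page, order);
 *		page = NULL;
 *	}
 *	...
 *	__memcg_kmem_uncharge_page(page, order);
 *	__free_pages(page, order);
 */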
2872 static void __account_obj_stock(struct obj_cgroup *objcg,
2873 struct obj_stock_pcp *stock, int nr,
2874 struct pglist_data *pgdat, enum node_stat_item idx)
2879 * Save vmstat data in stock and skip vmstat array update unless
2880 * accumulating over a page of vmstat data or when pgdat changes.
2882 if (stock->cached_pgdat != pgdat) {
2883 /* Flush the existing cached vmstat data */
2884 struct pglist_data *oldpg = stock->cached_pgdat;
2886 if (stock->nr_slab_reclaimable_b) {
2887 mod_objcg_mlstate(objcg, oldpg, NR_SLAB_RECLAIMABLE_B,
2888 stock->nr_slab_reclaimable_b);
2889 stock->nr_slab_reclaimable_b = 0;
2891 if (stock->nr_slab_unreclaimable_b) {
2892 mod_objcg_mlstate(objcg, oldpg, NR_SLAB_UNRECLAIMABLE_B,
2893 stock->nr_slab_unreclaimable_b);
2894 stock->nr_slab_unreclaimable_b = 0;
2896 stock->cached_pgdat = pgdat;
2899 bytes = (idx == NR_SLAB_RECLAIMABLE_B) ? &stock->nr_slab_reclaimable_b
2900 : &stock->nr_slab_unreclaimable_b;
2902 * Even for a large object >= PAGE_SIZE, the vmstat data will still be
2903 * cached locally at least once before pushing it out.
2910 if (abs(*bytes) > PAGE_SIZE) {
2918 mod_objcg_mlstate(objcg, pgdat, idx, nr);
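/*
 * Worked example of the batching above, assuming 4096-byte pages: a
 * stream of +512 byte slab updates against the same pgdat only reaches
 * the memcg vmstat arrays once the cached sum exceeds PAGE_SIZE (on the
 * ninth update), or earlier if the pgdat changes; the whole accumulated
 * value is then flushed in a single mod_objcg_mlstate() call.
 */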
2921 static bool consume_obj_stock(struct obj_cgroup *objcg, unsigned int nr_bytes,
2922 struct pglist_data *pgdat, enum node_stat_item idx)
2924 struct obj_stock_pcp *stock;
2927 if (!local_trylock(&obj_stock.lock))
2930 stock = this_cpu_ptr(&obj_stock);
2931 if (objcg == READ_ONCE(stock->cached_objcg) && stock->nr_bytes >= nr_bytes) {
2932 stock->nr_bytes -= nr_bytes;
2936 __account_obj_stock(objcg, stock, nr_bytes, pgdat, idx);
2939 local_unlock(&obj_stock.lock);
2944 static void drain_obj_stock(struct obj_stock_pcp *stock)
2946 struct obj_cgroup *old = READ_ONCE(stock->cached_objcg);
2951 if (stock->nr_bytes) {
2952 unsigned int nr_pages = stock->nr_bytes >> PAGE_SHIFT;
2953 unsigned int nr_bytes = stock->nr_bytes & (PAGE_SIZE - 1);
2956 struct mem_cgroup *memcg;
2958 memcg = get_mem_cgroup_from_objcg(old);
2960 mod_memcg_state(memcg, MEMCG_KMEM, -nr_pages);
2961 memcg1_account_kmem(memcg, -nr_pages);
2962 if (!mem_cgroup_is_root(memcg))
2963 memcg_uncharge(memcg, nr_pages);
2965 css_put(&memcg->css);
2969 * The leftover is flushed to the centralized per-memcg value.
2970 * On the next attempt to refill obj stock it will be moved
2971 * to a per-cpu stock (probably on another CPU), see
2972 * refill_obj_stock().
2974 * How often it's flushed is a trade-off between the memory
2975 * limit enforcement accuracy and potential CPU contention,
2976 * so it might be changed in the future.
2978 atomic_add(nr_bytes, &old->nr_charged_bytes);
2979 stock->nr_bytes = 0;
2983 * Flush the vmstat data in current stock
2985 if (stock->nr_slab_reclaimable_b || stock->nr_slab_unreclaimable_b) {
2986 if (stock->nr_slab_reclaimable_b) {
2987 mod_objcg_mlstate(old, stock->cached_pgdat,
2988 NR_SLAB_RECLAIMABLE_B,
2989 stock->nr_slab_reclaimable_b);
2990 stock->nr_slab_reclaimable_b = 0;
2992 if (stock->nr_slab_unreclaimable_b) {
2993 mod_objcg_mlstate(old, stock->cached_pgdat,
2994 NR_SLAB_UNRECLAIMABLE_B,
2995 stock->nr_slab_unreclaimable_b);
2996 stock->nr_slab_unreclaimable_b = 0;
2998 stock->cached_pgdat = NULL;
3001 WRITE_ONCE(stock->cached_objcg, NULL);
3002 obj_cgroup_put(old);
3005 static bool obj_stock_flush_required(struct obj_stock_pcp *stock,
3006 struct mem_cgroup *root_memcg)
3008 struct obj_cgroup *objcg = READ_ONCE(stock->cached_objcg);
3009 struct mem_cgroup *memcg;
3014 memcg = obj_cgroup_memcg(objcg);
3015 if (memcg && mem_cgroup_is_descendant(memcg, root_memcg))
3023 static void refill_obj_stock(struct obj_cgroup *objcg, unsigned int nr_bytes,
3024 bool allow_uncharge, int nr_acct, struct pglist_data *pgdat,
3025 enum node_stat_item idx)
3027 struct obj_stock_pcp *stock;
3028 unsigned int nr_pages = 0;
3030 if (!local_trylock(&obj_stock.lock)) {
3032 mod_objcg_mlstate(objcg, pgdat, idx, nr_bytes);
3033 nr_pages = nr_bytes >> PAGE_SHIFT;
3034 nr_bytes = nr_bytes & (PAGE_SIZE - 1);
3035 atomic_add(nr_bytes, &objcg->nr_charged_bytes);
3039 stock = this_cpu_ptr(&obj_stock);
3040 if (READ_ONCE(stock->cached_objcg) != objcg) { /* reset if necessary */
3041 drain_obj_stock(stock);
3042 obj_cgroup_get(objcg);
3043 stock->nr_bytes = atomic_read(&objcg->nr_charged_bytes)
3044 ? atomic_xchg(&objcg->nr_charged_bytes, 0) : 0;
3045 WRITE_ONCE(stock->cached_objcg, objcg);
3047 allow_uncharge = true; /* Allow uncharge when objcg changes */
3049 stock->nr_bytes += nr_bytes;
3052 __account_obj_stock(objcg, stock, nr_acct, pgdat, idx);
3054 if (allow_uncharge && (stock->nr_bytes > PAGE_SIZE)) {
3055 nr_pages = stock->nr_bytes >> PAGE_SHIFT;
3056 stock->nr_bytes &= (PAGE_SIZE - 1);
3059 local_unlock(&obj_stock.lock);
3062 obj_cgroup_uncharge_pages(objcg, nr_pages);
3065 static int obj_cgroup_charge_account(struct obj_cgroup *objcg, gfp_t gfp, size_t size,
3066 struct pglist_data *pgdat, enum node_stat_item idx)
3068 unsigned int nr_pages, nr_bytes;
3071 if (likely(consume_obj_stock(objcg, size, pgdat, idx)))
3075 * In theory, objcg->nr_charged_bytes can have enough
3076 * pre-charged bytes to satisfy the allocation. However,
3077 * flushing objcg->nr_charged_bytes requires two atomic
3078 * operations, and objcg->nr_charged_bytes can't be big.
3079 * The shared objcg->nr_charged_bytes can also become a
3080 * performance bottleneck if all tasks of the same memcg are
3081 * trying to update it. So it's better to ignore it and try to
3082 * grab some new pages. The stock's nr_bytes will be flushed to
3083 * objcg->nr_charged_bytes later on when objcg changes.
3085 * The stock's nr_bytes may contain enough pre-charged bytes
3086 * to allow one less page to be charged, but we can't rely
3087 * on the pre-charged bytes not being changed outside of
3088 * consume_obj_stock() or refill_obj_stock(). So ignore those
3089 * pre-charged bytes as well when charging pages. To avoid a
3090 * page uncharge right after a page charge, we set the
3091 * allow_uncharge flag to false when calling refill_obj_stock()
3092 * to temporarily allow the pre-charged bytes to exceed the page
3093 * size limit. The maximum reachable value of the pre-charged
3094 * bytes is (sizeof(object) + PAGE_SIZE - 2) if there is no data
3097 nr_pages = size >> PAGE_SHIFT;
3098 nr_bytes = size & (PAGE_SIZE - 1);
3103 ret = obj_cgroup_charge_pages(objcg, gfp, nr_pages);
3104 if (!ret && (nr_bytes || pgdat))
3105 refill_obj_stock(objcg, nr_bytes ? PAGE_SIZE - nr_bytes : 0,
3106 false, size, pgdat, idx);
3111 int obj_cgroup_charge(struct obj_cgroup *objcg, gfp_t gfp, size_t size)
3113 return obj_cgroup_charge_account(objcg, gfp, size, NULL, 0);
3116 void obj_cgroup_uncharge(struct obj_cgroup *objcg, size_t size)
3118 refill_obj_stock(objcg, size, true, 0, NULL, 0);
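/*
 * Minimal usage sketch (hypothetical caller; the slab allocator is the
 * real user): small byte-sized charges are satisfied from the per-CPU
 * object stock when possible, so matching charge/uncharge pairs rarely
 * touch the page counters directly.
 *
 *	if (obj_cgroup_charge(objcg, GFP_KERNEL, size))
 *		return -ENOMEM;
 *	...
 *	obj_cgroup_uncharge(objcg, size);
 */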
3121 static inline size_t obj_full_size(struct kmem_cache *s)
3124 * For each accounted object there is an extra space which is used
3125 * to store obj_cgroup membership. Charge it too.
3127 return s->size + sizeof(struct obj_cgroup *);
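/*
 * For example, on a 64-bit kernel where sizeof(struct obj_cgroup *) is
 * 8, a cache with s->size == 192 is charged 200 bytes per accounted
 * object.
 */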
3130 bool __memcg_slab_post_alloc_hook(struct kmem_cache *s, struct list_lru *lru,
3131 gfp_t flags, size_t size, void **p)
3133 struct obj_cgroup *objcg;
3139 * The obtained objcg pointer is safe to use within the current scope,
3140 * defined by current task or set_active_memcg() pair.
3141 * obj_cgroup_get() is used to get a permanent reference.
3143 objcg = current_obj_cgroup();
3148 * slab_alloc_node() avoids the NULL check, so we might be called with a
3149 * single NULL object. kmem_cache_alloc_bulk() aborts if it can't fill
3150 * the whole requested size.
3151 * Return success, as there's nothing to free back.
3153 if (unlikely(*p == NULL))
3156 flags &= gfp_allowed_mask;
3160 struct mem_cgroup *memcg;
3162 memcg = get_mem_cgroup_from_objcg(objcg);
3163 ret = memcg_list_lru_alloc(memcg, lru, flags);
3164 css_put(&memcg->css);
3170 for (i = 0; i < size; i++) {
3171 slab = virt_to_slab(p[i]);
3173 if (!slab_obj_exts(slab) &&
3174 alloc_slab_obj_exts(slab, s, flags, false)) {
3179 * if we fail and size is 1, memcg_alloc_abort_single() will
3180 * just free the object, which is ok as we have not assigned
3181 * objcg to its obj_ext yet
3183 * for larger sizes, kmem_cache_free_bulk() will uncharge
3184 * any objects that were already charged and obj_ext assigned
3186 * TODO: we could batch this until slab_pgdat(slab) changes
3187 * between iterations, with a more complicated undo
3189 if (obj_cgroup_charge_account(objcg, flags, obj_full_size(s),
3190 slab_pgdat(slab), cache_vmstat_idx(s)))
3193 off = obj_to_index(s, slab, p[i]);
3194 obj_cgroup_get(objcg);
3195 slab_obj_exts(slab)[off].objcg = objcg;
3201 void __memcg_slab_free_hook(struct kmem_cache *s, struct slab *slab,
3202 void **p, int objects, struct slabobj_ext *obj_exts)
3204 size_t obj_size = obj_full_size(s);
3206 for (int i = 0; i < objects; i++) {
3207 struct obj_cgroup *objcg;
3210 off = obj_to_index(s, slab, p[i]);
3211 objcg = obj_exts[off].objcg;
3215 obj_exts[off].objcg = NULL;
3216 refill_obj_stock(objcg, obj_size, true, -obj_size,
3217 slab_pgdat(slab), cache_vmstat_idx(s));
3218 obj_cgroup_put(objcg);
3223 * The objcg is only set on the first page, so transfer it to all the
3226 void split_page_memcg(struct page *page, unsigned order)
3228 struct obj_cgroup *objcg = page_objcg(page);
3229 unsigned int i, nr = 1 << order;
3234 for (i = 1; i < nr; i++)
3235 page_set_objcg(&page[i], objcg);
3237 obj_cgroup_get_many(objcg, nr - 1);
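/*
 * For instance, splitting an order-3 kmem page yields eight order-0
 * pages that all point at the same objcg, so seven additional objcg
 * references are taken above.
 */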
3240 void folio_split_memcg_refs(struct folio *folio, unsigned old_order,
3245 if (mem_cgroup_disabled() || !folio_memcg_charged(folio))
3248 new_refs = (1 << (old_order - new_order)) - 1;
3249 css_get_many(&__folio_memcg(folio)->css, new_refs);
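/*
 * Example: splitting an order-9 folio into order-0 pieces creates 511
 * new folios, so 511 extra css references are taken; splitting it into
 * order-3 pieces takes (1 << 6) - 1 = 63.
 */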
3252 unsigned long mem_cgroup_usage(struct mem_cgroup *memcg, bool swap)
3256 if (mem_cgroup_is_root(memcg)) {
3258 * Approximate root's usage from global state. This isn't
3259 * perfect, but the root usage was always an approximation.
3261 val = global_node_page_state(NR_FILE_PAGES) +
3262 global_node_page_state(NR_ANON_MAPPED);
3264 val += total_swap_pages - get_nr_swap_pages();
3267 val = page_counter_read(&memcg->memory);
3269 val = page_counter_read(&memcg->memsw);
3274 static int memcg_online_kmem(struct mem_cgroup *memcg)
3276 struct obj_cgroup *objcg;
3278 if (mem_cgroup_kmem_disabled())
3281 if (unlikely(mem_cgroup_is_root(memcg)))
3284 objcg = obj_cgroup_alloc();
3288 objcg->memcg = memcg;
3289 rcu_assign_pointer(memcg->objcg, objcg);
3290 obj_cgroup_get(objcg);
3291 memcg->orig_objcg = objcg;
3293 static_branch_enable(&memcg_kmem_online_key);
3295 memcg->kmemcg_id = memcg->id.id;
3300 static void memcg_offline_kmem(struct mem_cgroup *memcg)
3302 struct mem_cgroup *parent;
3304 if (mem_cgroup_kmem_disabled())
3307 if (unlikely(mem_cgroup_is_root(memcg)))
3310 parent = parent_mem_cgroup(memcg);
3312 parent = root_mem_cgroup;
3314 memcg_reparent_list_lrus(memcg, parent);
3317 * Objcg's reparenting must be after list_lru's, make sure list_lru
3318 * helpers won't use parent's list_lru until child is drained.
3320 memcg_reparent_objcgs(memcg, parent);
3323 #ifdef CONFIG_CGROUP_WRITEBACK
3325 #include <trace/events/writeback.h>
3327 static int memcg_wb_domain_init(struct mem_cgroup *memcg, gfp_t gfp)
3329 return wb_domain_init(&memcg->cgwb_domain, gfp);
3332 static void memcg_wb_domain_exit(struct mem_cgroup *memcg)
3334 wb_domain_exit(&memcg->cgwb_domain);
3337 static void memcg_wb_domain_size_changed(struct mem_cgroup *memcg)
3339 wb_domain_size_changed(&memcg->cgwb_domain);
3342 struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb)
3344 struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css);
3346 if (!memcg->css.parent)
3349 return &memcg->cgwb_domain;
3353 * mem_cgroup_wb_stats - retrieve writeback related stats from its memcg
3354 * @wb: bdi_writeback in question
3355 * @pfilepages: out parameter for number of file pages
3356 * @pheadroom: out parameter for number of allocatable pages according to memcg
3357 * @pdirty: out parameter for number of dirty pages
3358 * @pwriteback: out parameter for number of pages under writeback
3360 * Determine the numbers of file, headroom, dirty, and writeback pages in
3361 * @wb's memcg. File, dirty and writeback are self-explanatory. Headroom
3362 * is a bit more involved.
3364 * A memcg's headroom is "min(max, high) - used". In the hierarchy, the
3365 * headroom is calculated as the lowest headroom of itself and the
3366 * ancestors. Note that this doesn't consider the actual amount of
3367 * available memory in the system. The caller should further cap
3368 * *@pheadroom accordingly.
3370 void mem_cgroup_wb_stats(struct bdi_writeback *wb, unsigned long *pfilepages,
3371 unsigned long *pheadroom, unsigned long *pdirty,
3372 unsigned long *pwriteback)
3374 struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css);
3375 struct mem_cgroup *parent;
3377 mem_cgroup_flush_stats_ratelimited(memcg);
3379 *pdirty = memcg_page_state(memcg, NR_FILE_DIRTY);
3380 *pwriteback = memcg_page_state(memcg, NR_WRITEBACK);
3381 *pfilepages = memcg_page_state(memcg, NR_INACTIVE_FILE) +
3382 memcg_page_state(memcg, NR_ACTIVE_FILE);
3384 *pheadroom = PAGE_COUNTER_MAX;
3385 while ((parent = parent_mem_cgroup(memcg))) {
3386 unsigned long ceiling = min(READ_ONCE(memcg->memory.max),
3387 READ_ONCE(memcg->memory.high));
3388 unsigned long used = page_counter_read(&memcg->memory);
3390 *pheadroom = min(*pheadroom, ceiling - min(ceiling, used));
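/*
 * Worked example of the walk above (illustrative numbers): a child with
 * high = 200000 pages and 150000 pages in use contributes 50000 pages
 * of headroom; if an ancestor has max = 1000000 with 990000 in use, it
 * only leaves 10000, so *pheadroom ends up as 10000. The most
 * constrained level in the hierarchy wins.
 */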
3396 * Foreign dirty flushing
3398 * There's an inherent mismatch between memcg and writeback. The former
3399 * tracks ownership per-page while the latter per-inode. This was a
3400 * deliberate design decision because honoring per-page ownership in the
3401 * writeback path is complicated, may lead to higher CPU and IO overheads
3402 * and deemed unnecessary given that write-sharing an inode across
3403 * different cgroups isn't a common use-case.
3405 * Combined with inode majority-writer ownership switching, this works well
3406 * enough in most cases but there are some pathological cases. For
3407 * example, let's say there are two cgroups A and B which keep writing to
3408 * different but confined parts of the same inode. B owns the inode and
3409 * A's memory is limited far below B's. A's dirty ratio can rise enough to
3410 * trigger balance_dirty_pages() sleeps but B's can be low enough to avoid
3411 * triggering background writeback. A will be slowed down without a way to
3412 * make writeback of the dirty pages happen.
3414 * Conditions like the above can lead to a cgroup getting repeatedly and
3415 * severely throttled after making some progress after each
3416 * dirty_expire_interval while the underlying IO device is almost
3419 * Solving this problem completely requires matching the ownership tracking
3420 * granularities between memcg and writeback in either direction. However,
3421 * the more egregious behaviors can be avoided by simply remembering the
3422 * most recent foreign dirtying events and initiating remote flushes on
3423 * them when local writeback isn't enough to keep the memory clean enough.
3425 * The following two functions implement such mechanism. When a foreign
3426 * page - a page whose memcg and writeback ownerships don't match - is
3427 * dirtied, mem_cgroup_track_foreign_dirty() records the inode owning
3428 * bdi_writeback on the page owning memcg. When balance_dirty_pages()
3429 * decides that the memcg needs to sleep due to high dirty ratio, it calls
3430 * mem_cgroup_flush_foreign() which queues writeback on the recorded
3431 * foreign bdi_writebacks which haven't expired. Both the numbers of
3432 * recorded bdi_writebacks and concurrent in-flight foreign writebacks are
3433 * limited to MEMCG_CGWB_FRN_CNT.
3435 * The mechanism only remembers IDs and doesn't hold any object references.
3436 * As being wrong occasionally doesn't matter, updates and accesses to the
3437 * records are lockless and racy.
3439 void mem_cgroup_track_foreign_dirty_slowpath(struct folio *folio,
3440 struct bdi_writeback *wb)
3442 struct mem_cgroup *memcg = folio_memcg(folio);
3443 struct memcg_cgwb_frn *frn;
3444 u64 now = get_jiffies_64();
3445 u64 oldest_at = now;
3449 trace_track_foreign_dirty(folio, wb);
3452 * Pick the slot to use. If there is already a slot for @wb, keep
3453 * using it. If not replace the oldest one which isn't being
3456 for (i = 0; i < MEMCG_CGWB_FRN_CNT; i++) {
3457 frn = &memcg->cgwb_frn[i];
3458 if (frn->bdi_id == wb->bdi->id &&
3459 frn->memcg_id == wb->memcg_css->id)
3461 if (time_before64(frn->at, oldest_at) &&
3462 atomic_read(&frn->done.cnt) == 1) {
3464 oldest_at = frn->at;
3468 if (i < MEMCG_CGWB_FRN_CNT) {
3470 * Re-using an existing one. Update timestamp lazily to
3471 * avoid making the cacheline hot. We want them to be
3472 * reasonably up-to-date and significantly shorter than
3473 * dirty_expire_interval as that's what expires the record.
3474 * Use the shorter of 1s and dirty_expire_interval / 8.
3476 unsigned long update_intv =
3477 min_t(unsigned long, HZ,
3478 msecs_to_jiffies(dirty_expire_interval * 10) / 8);
3480 if (time_before64(frn->at, now - update_intv))
3482 } else if (oldest >= 0) {
3483 /* replace the oldest free one */
3484 frn = &memcg->cgwb_frn[oldest];
3485 frn->bdi_id = wb->bdi->id;
3486 frn->memcg_id = wb->memcg_css->id;
3491 /* issue foreign writeback flushes for recorded foreign dirtying events */
3492 void mem_cgroup_flush_foreign(struct bdi_writeback *wb)
3494 struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css);
3495 unsigned long intv = msecs_to_jiffies(dirty_expire_interval * 10);
3496 u64 now = jiffies_64;
3499 for (i = 0; i < MEMCG_CGWB_FRN_CNT; i++) {
3500 struct memcg_cgwb_frn *frn = &memcg->cgwb_frn[i];
3503 * If the record is older than dirty_expire_interval,
3504 * writeback on it has already started. No need to kick it
3505 * off again. Also, don't start a new one if there's
3506 * already one in flight.
3508 if (time_after64(frn->at, now - intv) &&
3509 atomic_read(&frn->done.cnt) == 1) {
3511 trace_flush_foreign(wb, frn->bdi_id, frn->memcg_id);
3512 cgroup_writeback_by_id(frn->bdi_id, frn->memcg_id,
3513 WB_REASON_FOREIGN_FLUSH,
3519 #else /* CONFIG_CGROUP_WRITEBACK */
3521 static int memcg_wb_domain_init(struct mem_cgroup *memcg, gfp_t gfp)
3526 static void memcg_wb_domain_exit(struct mem_cgroup *memcg)
3530 static void memcg_wb_domain_size_changed(struct mem_cgroup *memcg)
3534 #endif /* CONFIG_CGROUP_WRITEBACK */
3537 * Private memory cgroup IDR
3539 * Swap-out records and page cache shadow entries need to store memcg
3540 * references in constrained space, so we maintain an ID space that is
3541 * limited to 16 bit (MEM_CGROUP_ID_MAX), limiting the total number of
3542 * memory-controlled cgroups to 64k.
3544 * However, there usually are many references to the offline CSS after
3545 * the cgroup has been destroyed, such as page cache or reclaimable
3546 * slab objects, that don't need to hang on to the ID. We want to keep
3547 * those dead CSS from occupying IDs, or we might quickly exhaust the
3548 * relatively small ID space and prevent the creation of new cgroups
3549 * even when there are much fewer than 64k cgroups - possibly none.
3551 * Maintain a private 16-bit ID space for memcg, and allow the ID to
3552 * be freed and recycled when it's no longer needed, which is usually
3553 * when the CSS is offlined.
3555 * The only exception to that are records of swapped out tmpfs/shmem
3556 * pages that need to be attributed to live ancestors on swapin. But
3557 * those references are manageable from userspace.
3560 #define MEM_CGROUP_ID_MAX ((1UL << MEM_CGROUP_ID_SHIFT) - 1)
3561 static DEFINE_XARRAY_ALLOC1(mem_cgroup_ids);
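/*
 * Concretely, with MEM_CGROUP_ID_SHIFT == 16 the limit above evaluates
 * to 65535, and the xarray hands out IDs from [1, MEM_CGROUP_ID_MAX].
 * ID 0 is never allocated, leaving it free to mean "no owner" in the
 * records that embed these IDs.
 */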
3563 static void mem_cgroup_id_remove(struct mem_cgroup *memcg)
3565 if (memcg->id.id > 0) {
3566 xa_erase(&mem_cgroup_ids, memcg->id.id);
3571 void __maybe_unused mem_cgroup_id_get_many(struct mem_cgroup *memcg,
3574 refcount_add(n, &memcg->id.ref);
3577 static void mem_cgroup_id_put_many(struct mem_cgroup *memcg, unsigned int n)
3579 if (refcount_sub_and_test(n, &memcg->id.ref)) {
3580 mem_cgroup_id_remove(memcg);
3582 /* Memcg ID pins CSS */
3583 css_put(&memcg->css);
3587 static inline void mem_cgroup_id_put(struct mem_cgroup *memcg)
3589 mem_cgroup_id_put_many(memcg, 1);
3592 struct mem_cgroup *mem_cgroup_id_get_online(struct mem_cgroup *memcg)
3594 while (!refcount_inc_not_zero(&memcg->id.ref)) {
3596 * The root cgroup cannot be destroyed, so its refcount must
3599 if (WARN_ON_ONCE(mem_cgroup_is_root(memcg))) {
3603 memcg = parent_mem_cgroup(memcg);
3605 memcg = root_mem_cgroup;
3611 * mem_cgroup_from_id - look up a memcg from a memcg id
3612 * @id: the memcg id to look up
3614 * Caller must hold rcu_read_lock().
3616 struct mem_cgroup *mem_cgroup_from_id(unsigned short id)
3618 WARN_ON_ONCE(!rcu_read_lock_held());
3619 return xa_load(&mem_cgroup_ids, id);
3622 #ifdef CONFIG_SHRINKER_DEBUG
3623 struct mem_cgroup *mem_cgroup_get_from_ino(unsigned long ino)
3625 struct cgroup *cgrp;
3626 struct cgroup_subsys_state *css;
3627 struct mem_cgroup *memcg;
3629 cgrp = cgroup_get_from_id(ino);
3631 return ERR_CAST(cgrp);
3633 css = cgroup_get_e_css(cgrp, &memory_cgrp_subsys);
3635 memcg = container_of(css, struct mem_cgroup, css);
3637 memcg = ERR_PTR(-ENOENT);
3645 static void free_mem_cgroup_per_node_info(struct mem_cgroup_per_node *pn)
3650 free_percpu(pn->lruvec_stats_percpu);
3651 kfree(pn->lruvec_stats);
3655 static bool alloc_mem_cgroup_per_node_info(struct mem_cgroup *memcg, int node)
3657 struct mem_cgroup_per_node *pn;
3659 pn = kmem_cache_alloc_node(memcg_pn_cachep, GFP_KERNEL | __GFP_ZERO,
3664 pn->lruvec_stats = kzalloc_node(sizeof(struct lruvec_stats),
3665 GFP_KERNEL_ACCOUNT, node);
3666 if (!pn->lruvec_stats)
3669 pn->lruvec_stats_percpu = alloc_percpu_gfp(struct lruvec_stats_percpu,
3670 GFP_KERNEL_ACCOUNT);
3671 if (!pn->lruvec_stats_percpu)
3674 lruvec_init(&pn->lruvec);
3677 memcg->nodeinfo[node] = pn;
3680 free_mem_cgroup_per_node_info(pn);
3684 static void __mem_cgroup_free(struct mem_cgroup *memcg)
3688 obj_cgroup_put(memcg->orig_objcg);
3691 free_mem_cgroup_per_node_info(memcg->nodeinfo[node]);
3692 memcg1_free_events(memcg);
3693 kfree(memcg->vmstats);
3694 free_percpu(memcg->vmstats_percpu);
3698 static void mem_cgroup_free(struct mem_cgroup *memcg)
3700 lru_gen_exit_memcg(memcg);
3701 memcg_wb_domain_exit(memcg);
3702 __mem_cgroup_free(memcg);
3705 static struct mem_cgroup *mem_cgroup_alloc(struct mem_cgroup *parent)
3707 struct memcg_vmstats_percpu *statc;
3708 struct memcg_vmstats_percpu __percpu *pstatc_pcpu;
3709 struct mem_cgroup *memcg;
3711 int __maybe_unused i;
3714 memcg = kmem_cache_zalloc(memcg_cachep, GFP_KERNEL);
3716 return ERR_PTR(-ENOMEM);
3718 error = xa_alloc(&mem_cgroup_ids, &memcg->id.id, NULL,
3719 XA_LIMIT(1, MEM_CGROUP_ID_MAX), GFP_KERNEL);
3724 memcg->vmstats = kzalloc(sizeof(struct memcg_vmstats),
3725 GFP_KERNEL_ACCOUNT);
3726 if (!memcg->vmstats)
3729 memcg->vmstats_percpu = alloc_percpu_gfp(struct memcg_vmstats_percpu,
3730 GFP_KERNEL_ACCOUNT);
3731 if (!memcg->vmstats_percpu)
3734 if (!memcg1_alloc_events(memcg))
3737 for_each_possible_cpu(cpu) {
3739 pstatc_pcpu = parent->vmstats_percpu;
3740 statc = per_cpu_ptr(memcg->vmstats_percpu, cpu);
3741 statc->parent_pcpu = parent ? pstatc_pcpu : NULL;
3742 statc->vmstats = memcg->vmstats;
3746 if (!alloc_mem_cgroup_per_node_info(memcg, node))
3749 if (memcg_wb_domain_init(memcg, GFP_KERNEL))
3752 INIT_WORK(&memcg->high_work, high_work_func);
3753 vmpressure_init(&memcg->vmpressure);
3754 INIT_LIST_HEAD(&memcg->memory_peaks);
3755 INIT_LIST_HEAD(&memcg->swap_peaks);
3756 spin_lock_init(&memcg->peaks_lock);
3757 memcg->socket_pressure = get_jiffies_64();
3758 #if BITS_PER_LONG < 64
3759 seqlock_init(&memcg->socket_pressure_seqlock);
3761 memcg1_memcg_init(memcg);
3762 memcg->kmemcg_id = -1;
3763 INIT_LIST_HEAD(&memcg->objcg_list);
3764 #ifdef CONFIG_CGROUP_WRITEBACK
3765 INIT_LIST_HEAD(&memcg->cgwb_list);
3766 for (i = 0; i < MEMCG_CGWB_FRN_CNT; i++)
3767 memcg->cgwb_frn[i].done =
3768 __WB_COMPLETION_INIT(&memcg_cgwb_frn_waitq);
3770 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
3771 spin_lock_init(&memcg->deferred_split_queue.split_queue_lock);
3772 INIT_LIST_HEAD(&memcg->deferred_split_queue.split_queue);
3773 memcg->deferred_split_queue.split_queue_len = 0;
3775 lru_gen_init_memcg(memcg);
3778 mem_cgroup_id_remove(memcg);
3779 __mem_cgroup_free(memcg);
3780 return ERR_PTR(error);
3783 static struct cgroup_subsys_state * __ref
3784 mem_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
3786 struct mem_cgroup *parent = mem_cgroup_from_css(parent_css);
3787 struct mem_cgroup *memcg, *old_memcg;
3788 bool memcg_on_dfl = cgroup_subsys_on_dfl(memory_cgrp_subsys);
3790 old_memcg = set_active_memcg(parent);
3791 memcg = mem_cgroup_alloc(parent);
3792 set_active_memcg(old_memcg);
3794 return ERR_CAST(memcg);
3796 page_counter_set_high(&memcg->memory, PAGE_COUNTER_MAX);
3797 memcg1_soft_limit_reset(memcg);
3799 memcg->zswap_max = PAGE_COUNTER_MAX;
3800 WRITE_ONCE(memcg->zswap_writeback, true);
3802 page_counter_set_high(&memcg->swap, PAGE_COUNTER_MAX);
3804 WRITE_ONCE(memcg->swappiness, mem_cgroup_swappiness(parent));
3806 page_counter_init(&memcg->memory, &parent->memory, memcg_on_dfl);
3807 page_counter_init(&memcg->swap, &parent->swap, false);
3808 #ifdef CONFIG_MEMCG_V1
3809 memcg->memory.track_failcnt = !memcg_on_dfl;
3810 WRITE_ONCE(memcg->oom_kill_disable, READ_ONCE(parent->oom_kill_disable));
3811 page_counter_init(&memcg->kmem, &parent->kmem, false);
3812 page_counter_init(&memcg->tcpmem, &parent->tcpmem, false);
3816 init_memcg_events();
3817 page_counter_init(&memcg->memory, NULL, true);
3818 page_counter_init(&memcg->swap, NULL, false);
3819 #ifdef CONFIG_MEMCG_V1
3820 page_counter_init(&memcg->kmem, NULL, false);
3821 page_counter_init(&memcg->tcpmem, NULL, false);
3823 root_mem_cgroup = memcg;
3827 if (memcg_on_dfl && !cgroup_memory_nosocket)
3828 static_branch_inc(&memcg_sockets_enabled_key);
3830 if (!cgroup_memory_nobpf)
3831 static_branch_inc(&memcg_bpf_enabled_key);
3836 static int mem_cgroup_css_online(struct cgroup_subsys_state *css)
3838 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
3840 if (memcg_online_kmem(memcg))
3844 * A memcg must be visible for expand_shrinker_info()
3845 * by the time the maps are allocated. So, we allocate maps
3846 * here, when for_each_mem_cgroup() can't skip it.
3848 if (alloc_shrinker_info(memcg))
3851 if (unlikely(mem_cgroup_is_root(memcg)) && !mem_cgroup_disabled())
3852 queue_delayed_work(system_unbound_wq, &stats_flush_dwork,
3854 lru_gen_online_memcg(memcg);
3856 /* Online state pins memcg ID, memcg ID pins CSS */
3857 refcount_set(&memcg->id.ref, 1);
3861 * Ensure mem_cgroup_from_id() works once we're fully online.
3863 * We could do this earlier and require callers to filter with
3864 * css_tryget_online(). But right now there are no users that
3865 * need earlier access, and the workingset code relies on the
3866 * cgroup tree linkage (mem_cgroup_get_nr_swap_pages()). So
3867 * publish it here at the end of onlining. This matches the
3868 * regular ID destruction during offlining.
3870 xa_store(&mem_cgroup_ids, memcg->id.id, memcg, GFP_KERNEL);
3874 memcg_offline_kmem(memcg);
3876 mem_cgroup_id_remove(memcg);
3880 static void mem_cgroup_css_offline(struct cgroup_subsys_state *css)
3882 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
3884 memcg1_css_offline(memcg);
3886 page_counter_set_min(&memcg->memory, 0);
3887 page_counter_set_low(&memcg->memory, 0);
3889 zswap_memcg_offline_cleanup(memcg);
3891 memcg_offline_kmem(memcg);
3892 reparent_shrinker_deferred(memcg);
3893 wb_memcg_offline(memcg);
3894 lru_gen_offline_memcg(memcg);
3896 drain_all_stock(memcg);
3898 mem_cgroup_id_put(memcg);
3901 static void mem_cgroup_css_released(struct cgroup_subsys_state *css)
3903 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
3905 invalidate_reclaim_iterators(memcg);
3906 lru_gen_release_memcg(memcg);
3909 static void mem_cgroup_css_free(struct cgroup_subsys_state *css)
3911 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
3912 int __maybe_unused i;
3914 #ifdef CONFIG_CGROUP_WRITEBACK
3915 for (i = 0; i < MEMCG_CGWB_FRN_CNT; i++)
3916 wb_wait_for_completion(&memcg->cgwb_frn[i].done);
3918 if (cgroup_subsys_on_dfl(memory_cgrp_subsys) && !cgroup_memory_nosocket)
3919 static_branch_dec(&memcg_sockets_enabled_key);
3921 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && memcg1_tcpmem_active(memcg))
3922 static_branch_dec(&memcg_sockets_enabled_key);
3924 if (!cgroup_memory_nobpf)
3925 static_branch_dec(&memcg_bpf_enabled_key);
3927 vmpressure_cleanup(&memcg->vmpressure);
3928 cancel_work_sync(&memcg->high_work);
3929 memcg1_remove_from_trees(memcg);
3930 free_shrinker_info(memcg);
3931 mem_cgroup_free(memcg);
3935 * mem_cgroup_css_reset - reset the states of a mem_cgroup
3936 * @css: the target css
3938 * Reset the states of the mem_cgroup associated with @css. This is
3939 * invoked when the userland requests disabling on the default hierarchy
3940 * but the memcg is pinned through dependency. The memcg should stop
3941 * applying policies and should revert to the vanilla state as it may be
3942 * made visible again.
3944 * The current implementation only resets the essential configurations.
3945 * This needs to be expanded to cover all the visible parts.
3947 static void mem_cgroup_css_reset(struct cgroup_subsys_state *css)
3949 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
3951 page_counter_set_max(&memcg->memory, PAGE_COUNTER_MAX);
3952 page_counter_set_max(&memcg->swap, PAGE_COUNTER_MAX);
3953 #ifdef CONFIG_MEMCG_V1
3954 page_counter_set_max(&memcg->kmem, PAGE_COUNTER_MAX);
3955 page_counter_set_max(&memcg->tcpmem, PAGE_COUNTER_MAX);
3957 page_counter_set_min(&memcg->memory, 0);
3958 page_counter_set_low(&memcg->memory, 0);
3959 page_counter_set_high(&memcg->memory, PAGE_COUNTER_MAX);
3960 memcg1_soft_limit_reset(memcg);
3961 page_counter_set_high(&memcg->swap, PAGE_COUNTER_MAX);
3962 memcg_wb_domain_size_changed(memcg);
3965 struct aggregate_control {
3966 /* pointer to the aggregated (CPU and subtree aggregated) counters */
3968 /* pointer to the non-hierarchical (CPU aggregated) counters */
3970 /* pointer to the pending child counters during tree propagation */
3972 /* pointer to the parent's pending counters, could be NULL */
3974 /* pointer to the percpu counters to be aggregated */
3976 /* pointer to the percpu counters of the last aggregation */
3978 /* size of the above counters */
3982 static void mem_cgroup_stat_aggregate(struct aggregate_control *ac)
3985 long delta, delta_cpu, v;
3987 for (i = 0; i < ac->size; i++) {
3989 * Collect the aggregated propagation counts of groups
3990 * below us. We're in a per-cpu loop here and this is
3991 * a global counter, so the first cycle will get them.
3993 delta = ac->pending[i];
3997 /* Add CPU changes on this level since the last flush */
3999 v = READ_ONCE(ac->cstat[i]);
4000 if (v != ac->cstat_prev[i]) {
4001 delta_cpu = v - ac->cstat_prev[i];
4003 ac->cstat_prev[i] = v;
4006 /* Aggregate counts on this level and propagate upwards */
4008 ac->local[i] += delta_cpu;
4011 ac->aggregate[i] += delta;
4013 ac->ppending[i] += delta;
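/*
 * Numeric sketch of a single flush step: if the children previously
 * left pending[i] = 30 and this CPU's counter moved from 100 to 140,
 * then delta = 30 + 40 = 70. local[i] grows by the CPU-only part (40),
 * aggregate[i] grows by the full 70, and 70 is queued in the parent's
 * ppending[i] for the next level of the walk.
 */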
4018 #ifdef CONFIG_MEMCG_NMI_SAFETY_REQUIRES_ATOMIC
4019 static void flush_nmi_stats(struct mem_cgroup *memcg, struct mem_cgroup *parent,
4024 if (atomic_read(&memcg->kmem_stat)) {
4025 int kmem = atomic_xchg(&memcg->kmem_stat, 0);
4026 int index = memcg_stats_index(MEMCG_KMEM);
4028 memcg->vmstats->state[index] += kmem;
4030 parent->vmstats->state_pending[index] += kmem;
4033 for_each_node_state(nid, N_MEMORY) {
4034 struct mem_cgroup_per_node *pn = memcg->nodeinfo[nid];
4035 struct lruvec_stats *lstats = pn->lruvec_stats;
4036 struct lruvec_stats *plstats = NULL;
4039 plstats = parent->nodeinfo[nid]->lruvec_stats;
4041 if (atomic_read(&pn->slab_reclaimable)) {
4042 int slab = atomic_xchg(&pn->slab_reclaimable, 0);
4043 int index = memcg_stats_index(NR_SLAB_RECLAIMABLE_B);
4045 lstats->state[index] += slab;
4047 plstats->state_pending[index] += slab;
4049 if (atomic_read(&pn->slab_unreclaimable)) {
4050 int slab = atomic_xchg(&pn->slab_unreclaimable, 0);
4051 int index = memcg_stats_index(NR_SLAB_UNRECLAIMABLE_B);
4053 lstats->state[index] += slab;
4055 plstats->state_pending[index] += slab;
4060 static void flush_nmi_stats(struct mem_cgroup *memcg, struct mem_cgroup *parent,
4065 static void mem_cgroup_css_rstat_flush(struct cgroup_subsys_state *css, int cpu)
4067 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
4068 struct mem_cgroup *parent = parent_mem_cgroup(memcg);
4069 struct memcg_vmstats_percpu *statc;
4070 struct aggregate_control ac;
4073 flush_nmi_stats(memcg, parent, cpu);
4075 statc = per_cpu_ptr(memcg->vmstats_percpu, cpu);
4077 ac = (struct aggregate_control) {
4078 .aggregate = memcg->vmstats->state,
4079 .local = memcg->vmstats->state_local,
4080 .pending = memcg->vmstats->state_pending,
4081 .ppending = parent ? parent->vmstats->state_pending : NULL,
4082 .cstat = statc->state,
4083 .cstat_prev = statc->state_prev,
4084 .size = MEMCG_VMSTAT_SIZE,
4086 mem_cgroup_stat_aggregate(&ac);
4088 ac = (struct aggregate_control) {
4089 .aggregate = memcg->vmstats->events,
4090 .local = memcg->vmstats->events_local,
4091 .pending = memcg->vmstats->events_pending,
4092 .ppending = parent ? parent->vmstats->events_pending : NULL,
4093 .cstat = statc->events,
4094 .cstat_prev = statc->events_prev,
4095 .size = NR_MEMCG_EVENTS,
4097 mem_cgroup_stat_aggregate(&ac);
4099 for_each_node_state(nid, N_MEMORY) {
4100 struct mem_cgroup_per_node *pn = memcg->nodeinfo[nid];
4101 struct lruvec_stats *lstats = pn->lruvec_stats;
4102 struct lruvec_stats *plstats = NULL;
4103 struct lruvec_stats_percpu *lstatc;
4106 plstats = parent->nodeinfo[nid]->lruvec_stats;
4108 lstatc = per_cpu_ptr(pn->lruvec_stats_percpu, cpu);
4110 ac = (struct aggregate_control) {
4111 .aggregate = lstats->state,
4112 .local = lstats->state_local,
4113 .pending = lstats->state_pending,
4114 .ppending = plstats ? plstats->state_pending : NULL,
4115 .cstat = lstatc->state,
4116 .cstat_prev = lstatc->state_prev,
4117 .size = NR_MEMCG_NODE_STAT_ITEMS,
4119 mem_cgroup_stat_aggregate(&ac);
4122 WRITE_ONCE(statc->stats_updates, 0);
4123 /* We are in a per-cpu loop here, only do the atomic write once */
4124 if (atomic_read(&memcg->vmstats->stats_updates))
4125 atomic_set(&memcg->vmstats->stats_updates, 0);
4128 static void mem_cgroup_fork(struct task_struct *task)
4131 * Set the update flag to cause task->objcg to be initialized lazily
4132 * on the first allocation. It can be done without any synchronization
4133 * because it's always performed on the current task, as does
4134 * current_objcg_update().
4136 task->objcg = (struct obj_cgroup *)CURRENT_OBJCG_UPDATE_FLAG;
4139 static void mem_cgroup_exit(struct task_struct *task)
4141 struct obj_cgroup *objcg = task->objcg;
4143 objcg = (struct obj_cgroup *)
4144 ((unsigned long)objcg & ~CURRENT_OBJCG_UPDATE_FLAG);
4145 obj_cgroup_put(objcg);
4148 * Some kernel allocations can happen after this point,
4149 * but let's ignore them. It can be done without any synchronization
4150 * because it's always performed on the current task, as does
4151 * current_objcg_update().
4156 #ifdef CONFIG_LRU_GEN
4157 static void mem_cgroup_lru_gen_attach(struct cgroup_taskset *tset)
4159 struct task_struct *task;
4160 struct cgroup_subsys_state *css;
4162 /* find the first leader if there is any */
4163 cgroup_taskset_for_each_leader(task, css, tset)
4170 if (task->mm && READ_ONCE(task->mm->owner) == task)
4171 lru_gen_migrate_mm(task->mm);
4175 static void mem_cgroup_lru_gen_attach(struct cgroup_taskset *tset) {}
4176 #endif /* CONFIG_LRU_GEN */
4178 static void mem_cgroup_kmem_attach(struct cgroup_taskset *tset)
4180 struct task_struct *task;
4181 struct cgroup_subsys_state *css;
4183 cgroup_taskset_for_each(task, css, tset) {
4184 /* atomically set the update bit */
4185 set_bit(CURRENT_OBJCG_UPDATE_BIT, (unsigned long *)&task->objcg);
4189 static void mem_cgroup_attach(struct cgroup_taskset *tset)
4191 mem_cgroup_lru_gen_attach(tset);
4192 mem_cgroup_kmem_attach(tset);
4195 static int seq_puts_memcg_tunable(struct seq_file *m, unsigned long value)
4197 if (value == PAGE_COUNTER_MAX)
4198 seq_puts(m, "max\n");
4200 seq_printf(m, "%llu\n", (u64)value * PAGE_SIZE);
4205 static u64 memory_current_read(struct cgroup_subsys_state *css,
4208 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
4210 return (u64)page_counter_read(&memcg->memory) * PAGE_SIZE;
4213 #define OFP_PEAK_UNSET (((-1UL)))
4215 static int peak_show(struct seq_file *sf, void *v, struct page_counter *pc)
4217 struct cgroup_of_peak *ofp = of_peak(sf->private);
4218 u64 fd_peak = READ_ONCE(ofp->value), peak;
4220 /* User wants global or local peak? */
4221 if (fd_peak == OFP_PEAK_UNSET)
4222 peak = pc->watermark;
4224 peak = max(fd_peak, READ_ONCE(pc->local_watermark));
4226 seq_printf(sf, "%llu\n", peak * PAGE_SIZE);
4230 static int memory_peak_show(struct seq_file *sf, void *v)
4232 struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(sf));
4234 return peak_show(sf, v, &memcg->memory);
4237 static int peak_open(struct kernfs_open_file *of)
4239 struct cgroup_of_peak *ofp = of_peak(of);
4241 ofp->value = OFP_PEAK_UNSET;
4245 static void peak_release(struct kernfs_open_file *of)
4247 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
4248 struct cgroup_of_peak *ofp = of_peak(of);
4250 if (ofp->value == OFP_PEAK_UNSET) {
4251 /* fast path (no writes on this fd) */
4254 spin_lock(&memcg->peaks_lock);
4255 list_del(&ofp->list);
4256 spin_unlock(&memcg->peaks_lock);
4259 static ssize_t peak_write(struct kernfs_open_file *of, char *buf, size_t nbytes,
4260 loff_t off, struct page_counter *pc,
4261 struct list_head *watchers)
4263 unsigned long usage;
4264 struct cgroup_of_peak *peer_ctx;
4265 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
4266 struct cgroup_of_peak *ofp = of_peak(of);
4268 spin_lock(&memcg->peaks_lock);
4270 usage = page_counter_read(pc);
4271 WRITE_ONCE(pc->local_watermark, usage);
4273 list_for_each_entry(peer_ctx, watchers, list)
4274 if (usage > peer_ctx->value)
4275 WRITE_ONCE(peer_ctx->value, usage);
4277 /* initial write, register watcher */
4278 if (ofp->value == OFP_PEAK_UNSET)
4279 list_add(&ofp->list, watchers);
4281 WRITE_ONCE(ofp->value, usage);
4282 spin_unlock(&memcg->peaks_lock);
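/*
 * Behaviour sketch: a task that writes to its memory.peak fd resets the
 * value seen through that fd to the current usage, so later reads on
 * the same fd report the maximum usage since the write; other open fds
 * keep their own view, and fds that never wrote continue to show the
 * lifetime watermark.
 */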
4287 static ssize_t memory_peak_write(struct kernfs_open_file *of, char *buf,
4288 size_t nbytes, loff_t off)
4290 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
4292 return peak_write(of, buf, nbytes, off, &memcg->memory,
4293 &memcg->memory_peaks);
4296 #undef OFP_PEAK_UNSET
4298 static int memory_min_show(struct seq_file *m, void *v)
4300 return seq_puts_memcg_tunable(m,
4301 READ_ONCE(mem_cgroup_from_seq(m)->memory.min));
4304 static ssize_t memory_min_write(struct kernfs_open_file *of,
4305 char *buf, size_t nbytes, loff_t off)
4307 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
4311 buf = strstrip(buf);
4312 err = page_counter_memparse(buf, "max", &min);
4316 page_counter_set_min(&memcg->memory, min);
4321 static int memory_low_show(struct seq_file *m, void *v)
4323 return seq_puts_memcg_tunable(m,
4324 READ_ONCE(mem_cgroup_from_seq(m)->memory.low));
4327 static ssize_t memory_low_write(struct kernfs_open_file *of,
4328 char *buf, size_t nbytes, loff_t off)
4330 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
4334 buf = strstrip(buf);
4335 err = page_counter_memparse(buf, "max", &low);
4339 page_counter_set_low(&memcg->memory, low);
4344 static int memory_high_show(struct seq_file *m, void *v)
4346 return seq_puts_memcg_tunable(m,
4347 READ_ONCE(mem_cgroup_from_seq(m)->memory.high));
4350 static ssize_t memory_high_write(struct kernfs_open_file *of,
4351 char *buf, size_t nbytes, loff_t off)
4353 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
4354 unsigned int nr_retries = MAX_RECLAIM_RETRIES;
4355 bool drained = false;
4359 buf = strstrip(buf);
4360 err = page_counter_memparse(buf, "max", &high);
4364 page_counter_set_high(&memcg->memory, high);
4366 if (of->file->f_flags & O_NONBLOCK)
4370 unsigned long nr_pages = page_counter_read(&memcg->memory);
4371 unsigned long reclaimed;
4373 if (nr_pages <= high)
4376 if (signal_pending(current))
4380 drain_all_stock(memcg);
4385 reclaimed = try_to_free_mem_cgroup_pages(memcg, nr_pages - high,
4386 GFP_KERNEL, MEMCG_RECLAIM_MAY_SWAP, NULL);
4388 if (!reclaimed && !nr_retries--)
4392 memcg_wb_domain_size_changed(memcg);
4396 static int memory_max_show(struct seq_file *m, void *v)
4398 return seq_puts_memcg_tunable(m,
4399 READ_ONCE(mem_cgroup_from_seq(m)->memory.max));
4402 static ssize_t memory_max_write(struct kernfs_open_file *of,
4403 char *buf, size_t nbytes, loff_t off)
4405 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
4406 unsigned int nr_reclaims = MAX_RECLAIM_RETRIES;
4407 bool drained = false;
4411 buf = strstrip(buf);
4412 err = page_counter_memparse(buf, "max", &max);
4416 xchg(&memcg->memory.max, max);
4418 if (of->file->f_flags & O_NONBLOCK)
4422 unsigned long nr_pages = page_counter_read(&memcg->memory);
4424 if (nr_pages <= max)
4427 if (signal_pending(current))
4431 drain_all_stock(memcg);
4437 if (!try_to_free_mem_cgroup_pages(memcg, nr_pages - max,
4438 GFP_KERNEL, MEMCG_RECLAIM_MAY_SWAP, NULL))
4443 memcg_memory_event(memcg, MEMCG_OOM);
4444 if (!mem_cgroup_out_of_memory(memcg, GFP_KERNEL, 0))
4449 memcg_wb_domain_size_changed(memcg);
4454 * Note: don't forget to update the 'samples/cgroup/memcg_event_listener'
4455 * if any new events become available.
4457 static void __memory_events_show(struct seq_file *m, atomic_long_t *events)
4459 seq_printf(m, "low %lu\n", atomic_long_read(&events[MEMCG_LOW]));
4460 seq_printf(m, "high %lu\n", atomic_long_read(&events[MEMCG_HIGH]));
4461 seq_printf(m, "max %lu\n", atomic_long_read(&events[MEMCG_MAX]));
4462 seq_printf(m, "oom %lu\n", atomic_long_read(&events[MEMCG_OOM]));
4463 seq_printf(m, "oom_kill %lu\n",
4464 atomic_long_read(&events[MEMCG_OOM_KILL]));
4465 seq_printf(m, "oom_group_kill %lu\n",
4466 atomic_long_read(&events[MEMCG_OOM_GROUP_KILL]));
4469 static int memory_events_show(struct seq_file *m, void *v)
4471 struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
4473 __memory_events_show(m, memcg->memory_events);
4477 static int memory_events_local_show(struct seq_file *m, void *v)
4479 struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
4481 __memory_events_show(m, memcg->memory_events_local);
4485 int memory_stat_show(struct seq_file *m, void *v)
4487 struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
4488 char *buf = kmalloc(SEQ_BUF_SIZE, GFP_KERNEL);
4493 seq_buf_init(&s, buf, SEQ_BUF_SIZE);
4494 memory_stat_format(memcg, &s);
4501 static inline unsigned long lruvec_page_state_output(struct lruvec *lruvec,
4504 return lruvec_page_state(lruvec, item) *
4505 memcg_page_state_output_unit(item);
4508 static int memory_numa_stat_show(struct seq_file *m, void *v)
4511 struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
4513 mem_cgroup_flush_stats(memcg);
4515 for (i = 0; i < ARRAY_SIZE(memory_stats); i++) {
4518 if (memory_stats[i].idx >= NR_VM_NODE_STAT_ITEMS)
4521 seq_printf(m, "%s", memory_stats[i].name);
4522 for_each_node_state(nid, N_MEMORY) {
4524 struct lruvec *lruvec;
4526 lruvec = mem_cgroup_lruvec(memcg, NODE_DATA(nid));
4527 size = lruvec_page_state_output(lruvec,
4528 memory_stats[i].idx);
4529 seq_printf(m, " N%d=%llu", nid, size);
4538 static int memory_oom_group_show(struct seq_file *m, void *v)
4540 struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
4542 seq_printf(m, "%d\n", READ_ONCE(memcg->oom_group));
4547 static ssize_t memory_oom_group_write(struct kernfs_open_file *of,
4548 char *buf, size_t nbytes, loff_t off)
4550 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
4553 buf = strstrip(buf);
4557 ret = kstrtoint(buf, 0, &oom_group);
4561 if (oom_group != 0 && oom_group != 1)
4564 WRITE_ONCE(memcg->oom_group, oom_group);
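/*
 * memory.reclaim: proactive reclaim trigger. Request parsing and the
 * reclaim loop itself live in user_proactive_reclaim(). From userspace
 * this is driven along the lines of (illustrative only):
 *
 *	echo "512M" > /sys/fs/cgroup/<group>/memory.reclaim
 */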
4569 static ssize_t memory_reclaim(struct kernfs_open_file *of, char *buf,
4570 size_t nbytes, loff_t off)
4572 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
4575 ret = user_proactive_reclaim(buf, memcg, NULL);
4582 static struct cftype memory_files[] = {
4585 .flags = CFTYPE_NOT_ON_ROOT,
4586 .read_u64 = memory_current_read,
4590 .flags = CFTYPE_NOT_ON_ROOT,
4592 .release = peak_release,
4593 .seq_show = memory_peak_show,
4594 .write = memory_peak_write,
4598 .flags = CFTYPE_NOT_ON_ROOT,
4599 .seq_show = memory_min_show,
4600 .write = memory_min_write,
4604 .flags = CFTYPE_NOT_ON_ROOT,
4605 .seq_show = memory_low_show,
4606 .write = memory_low_write,
4610 .flags = CFTYPE_NOT_ON_ROOT,
4611 .seq_show = memory_high_show,
4612 .write = memory_high_write,
4616 .flags = CFTYPE_NOT_ON_ROOT,
4617 .seq_show = memory_max_show,
4618 .write = memory_max_write,
4622 .flags = CFTYPE_NOT_ON_ROOT,
4623 .file_offset = offsetof(struct mem_cgroup, events_file),
4624 .seq_show = memory_events_show,
4627 .name = "events.local",
4628 .flags = CFTYPE_NOT_ON_ROOT,
4629 .file_offset = offsetof(struct mem_cgroup, events_local_file),
4630 .seq_show = memory_events_local_show,
4634 .seq_show = memory_stat_show,
4638 .name = "numa_stat",
4639 .seq_show = memory_numa_stat_show,
4643 .name = "oom.group",
4644 .flags = CFTYPE_NOT_ON_ROOT | CFTYPE_NS_DELEGATABLE,
4645 .seq_show = memory_oom_group_show,
4646 .write = memory_oom_group_write,
4650 .flags = CFTYPE_NS_DELEGATABLE,
4651 .write = memory_reclaim,
4656 struct cgroup_subsys memory_cgrp_subsys = {
4657 .css_alloc = mem_cgroup_css_alloc,
4658 .css_online = mem_cgroup_css_online,
4659 .css_offline = mem_cgroup_css_offline,
4660 .css_released = mem_cgroup_css_released,
4661 .css_free = mem_cgroup_css_free,
4662 .css_reset = mem_cgroup_css_reset,
4663 .css_rstat_flush = mem_cgroup_css_rstat_flush,
4664 .attach = mem_cgroup_attach,
4665 .fork = mem_cgroup_fork,
4666 .exit = mem_cgroup_exit,
4667 .dfl_cftypes = memory_files,
4668 #ifdef CONFIG_MEMCG_V1
4669 .legacy_cftypes = mem_cgroup_legacy_files,
4675 * mem_cgroup_calculate_protection - check if memory consumption is in the normal range
4676 * @root: the top ancestor of the sub-tree being checked
4677 * @memcg: the memory cgroup to check
4679 * WARNING: This function is not stateless! It can only be used as part
4680 * of a top-down tree iteration, not for isolated queries.
4682 void mem_cgroup_calculate_protection(struct mem_cgroup *root,
4683 struct mem_cgroup *memcg)
4685 bool recursive_protection =
4686 cgrp_dfl_root.flags & CGRP_ROOT_MEMORY_RECURSIVE_PROT;
4688 if (mem_cgroup_disabled())
4692 root = root_mem_cgroup;
4694 page_counter_calculate_protection(&root->memory, &memcg->memory, recursive_protection);
4697 static int charge_memcg(struct folio *folio, struct mem_cgroup *memcg,
4702 ret = try_charge(memcg, gfp, folio_nr_pages(folio));
4706 css_get(&memcg->css);
4707 commit_charge(folio, memcg);
4708 memcg1_commit_charge(folio, memcg);
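/*
 * Out-of-line counterpart of mem_cgroup_charge(): charge @folio to the
 * memcg of @mm and drop the temporary reference taken by
 * get_mem_cgroup_from_mm().
 */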
4713 int __mem_cgroup_charge(struct folio *folio, struct mm_struct *mm, gfp_t gfp)
4715 struct mem_cgroup *memcg;
4718 memcg = get_mem_cgroup_from_mm(mm);
4719 ret = charge_memcg(folio, memcg, gfp);
4720 css_put(&memcg->css);
4726 * mem_cgroup_charge_hugetlb - charge the memcg for a hugetlb folio
4727 * @folio: folio being charged
4728 * @gfp: reclaim mode
4730 * This function is called when allocating a huge page folio, after the page has
4731 * already been obtained and charged to the appropriate hugetlb cgroup
4732 * controller (if it is enabled).
4734 * Returns -ENOMEM if the memcg is already full.
4735 * Returns 0 if the charge was successful or if charging was skipped.
4737 int mem_cgroup_charge_hugetlb(struct folio *folio, gfp_t gfp)
4739 struct mem_cgroup *memcg = get_mem_cgroup_from_current();
4743 * Even if the memcg does not account for hugetlb, we still want to update
4744 * system-level stats via lruvec_stat_mod_folio. Return 0, and skip
4745 * charging the memcg.
4747 if (mem_cgroup_disabled() || !memcg_accounts_hugetlb() ||
4748 !memcg || !cgroup_subsys_on_dfl(memory_cgrp_subsys))
4751 if (charge_memcg(folio, memcg, gfp))
4755 mem_cgroup_put(memcg);
4760 * mem_cgroup_swapin_charge_folio - Charge a newly allocated folio for swapin.
4761 * @folio: folio to charge.
4762 * @mm: mm context of the victim
4763 * @gfp: reclaim mode
4764 * @entry: swap entry for which the folio is allocated
4766 * This function charges a folio allocated for swapin. Please call this before
4767 * adding the folio to the swapcache.
4769 * Returns 0 on success. Otherwise, an error code is returned.
4771 int mem_cgroup_swapin_charge_folio(struct folio *folio, struct mm_struct *mm,
4772 gfp_t gfp, swp_entry_t entry)
4774 struct mem_cgroup *memcg;
4778 if (mem_cgroup_disabled())
4781 id = lookup_swap_cgroup_id(entry);
4783 memcg = mem_cgroup_from_id(id);
4784 if (!memcg || !css_tryget_online(&memcg->css))
4785 memcg = get_mem_cgroup_from_mm(mm);
4788 ret = charge_memcg(folio, memcg, gfp);
4790 css_put(&memcg->css);
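/*
 * Folio uncharging is batched: uncharge_folio() accumulates the page
 * counts of consecutive folios belonging to the same memcg in a
 * struct uncharge_gather, and uncharge_batch() then flushes the totals
 * to the page counters, the kmem statistics and the cgroup1 bookkeeping
 * in one go.
 */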
4794 struct uncharge_gather {
4795 struct mem_cgroup *memcg;
4796 unsigned long nr_memory;
4797 unsigned long pgpgout;
4798 unsigned long nr_kmem;
4802 static inline void uncharge_gather_clear(struct uncharge_gather *ug)
4804 memset(ug, 0, sizeof(*ug));
4807 static void uncharge_batch(const struct uncharge_gather *ug)
4809 if (ug->nr_memory) {
4810 memcg_uncharge(ug->memcg, ug->nr_memory);
4812 mod_memcg_state(ug->memcg, MEMCG_KMEM, -ug->nr_kmem);
4813 memcg1_account_kmem(ug->memcg, -ug->nr_kmem);
4815 memcg1_oom_recover(ug->memcg);
4818 memcg1_uncharge_batch(ug->memcg, ug->pgpgout, ug->nr_memory, ug->nid);
4820 /* drop reference from uncharge_folio */
4821 css_put(&ug->memcg->css);
4824 static void uncharge_folio(struct folio *folio, struct uncharge_gather *ug)
4827 struct mem_cgroup *memcg;
4828 struct obj_cgroup *objcg;
4830 VM_BUG_ON_FOLIO(folio_test_lru(folio), folio);
4833 * Nobody should be changing or seriously looking at
4834 * folio memcg or objcg at this point; we have fully
4835 * exclusive access to the folio.
4837 if (folio_memcg_kmem(folio)) {
4838 objcg = __folio_objcg(folio);
4840 * This get matches the put at the end of the function and
4841 * kmem pages do not hold memcg references anymore.
4843 memcg = get_mem_cgroup_from_objcg(objcg);
4845 memcg = __folio_memcg(folio);
4851 if (ug->memcg != memcg) {
4854 uncharge_gather_clear(ug);
4857 ug->nid = folio_nid(folio);
4859 /* pairs with css_put in uncharge_batch */
4860 css_get(&memcg->css);
4863 nr_pages = folio_nr_pages(folio);
4865 if (folio_memcg_kmem(folio)) {
4866 ug->nr_memory += nr_pages;
4867 ug->nr_kmem += nr_pages;
4869 folio->memcg_data = 0;
4870 obj_cgroup_put(objcg);
4872 /* LRU pages aren't accounted at the root level */
4873 if (!mem_cgroup_is_root(memcg))
4874 ug->nr_memory += nr_pages;
4877 WARN_ON_ONCE(folio_unqueue_deferred_split(folio));
4878 folio->memcg_data = 0;
4881 css_put(&memcg->css);
4884 void __mem_cgroup_uncharge(struct folio *folio)
4886 struct uncharge_gather ug;
4888 /* Don't touch folio->lru of any random page, pre-check: */
4889 if (!folio_memcg_charged(folio))
4892 uncharge_gather_clear(&ug);
4893 uncharge_folio(folio, &ug);
4894 uncharge_batch(&ug);
4897 void __mem_cgroup_uncharge_folios(struct folio_batch *folios)
4899 struct uncharge_gather ug;
4902 uncharge_gather_clear(&ug);
4903 for (i = 0; i < folios->nr; i++)
4904 uncharge_folio(folios->folios[i], &ug);
4906 uncharge_batch(&ug);
4910 * mem_cgroup_replace_folio - Charge a folio's replacement.
4911 * @old: Currently circulating folio.
4912 * @new: Replacement folio.
4914 * Charge @new as a replacement folio for @old. @old will
4915 * be uncharged upon free.
4917 * Both folios must be locked, @new->mapping must be set up.
4919 void mem_cgroup_replace_folio(struct folio *old, struct folio *new)
4921 struct mem_cgroup *memcg;
4922 long nr_pages = folio_nr_pages(new);
4924 VM_BUG_ON_FOLIO(!folio_test_locked(old), old);
4925 VM_BUG_ON_FOLIO(!folio_test_locked(new), new);
4926 VM_BUG_ON_FOLIO(folio_test_anon(old) != folio_test_anon(new), new);
4927 VM_BUG_ON_FOLIO(folio_nr_pages(old) != nr_pages, new);
4929 if (mem_cgroup_disabled())
4932 /* Page cache replacement: new folio already charged? */
4933 if (folio_memcg_charged(new))
4936 memcg = folio_memcg(old);
4937 VM_WARN_ON_ONCE_FOLIO(!memcg, old);
4941 /* Force-charge the new page. The old one will be freed soon */
4942 if (!mem_cgroup_is_root(memcg)) {
4943 page_counter_charge(&memcg->memory, nr_pages);
4944 if (do_memsw_account())
4945 page_counter_charge(&memcg->memsw, nr_pages);
4948 css_get(&memcg->css);
4949 commit_charge(new, memcg);
4950 memcg1_commit_charge(new, memcg);
4954 * mem_cgroup_migrate - Transfer the memcg data from the old to the new folio.
4955 * @old: Currently circulating folio.
4956 * @new: Replacement folio.
4958 * Transfer the memcg data from the old folio to the new folio for migration.
4959 * The old folio's memcg data will be cleared. Note that the memory counters
4960 * will remain unchanged throughout the process.
4962 * Both folios must be locked, @new->mapping must be set up.
4964 void mem_cgroup_migrate(struct folio *old, struct folio *new)
4966 struct mem_cgroup *memcg;
4968 VM_BUG_ON_FOLIO(!folio_test_locked(old), old);
4969 VM_BUG_ON_FOLIO(!folio_test_locked(new), new);
4970 VM_BUG_ON_FOLIO(folio_test_anon(old) != folio_test_anon(new), new);
4971 VM_BUG_ON_FOLIO(folio_nr_pages(old) != folio_nr_pages(new), new);
4972 VM_BUG_ON_FOLIO(folio_test_lru(old), old);
4974 if (mem_cgroup_disabled())
4977 memcg = folio_memcg(old);
4979 * Note that it is normal to see !memcg for a hugetlb folio.
4980 * E.g., it could have been allocated when memory_hugetlb_accounting was not selected.
4983 VM_WARN_ON_ONCE_FOLIO(!folio_test_hugetlb(old) && !memcg, old);
4987 /* Transfer the charge and the css ref */
4988 commit_charge(new, memcg);
4990 /* The warning should never trigger, so don't worry about a non-zero refcount */
4991 WARN_ON_ONCE(folio_unqueue_deferred_split(old));
4992 old->memcg_data = 0;
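/*
 * Socket memory accounting. mem_cgroup_sk_alloc() pins the memcg of the
 * current task on the socket (the root memcg and, on cgroup1, memcgs
 * without active tcpmem accounting are skipped); mem_cgroup_sk_free()
 * drops that reference. A protocol pairs the charge/uncharge helpers
 * around its buffer accounting, roughly (sketch only, not lifted from
 * net/ code; labels illustrative):
 *
 *	if (mem_cgroup_sockets_enabled && sk->sk_memcg &&
 *	    !mem_cgroup_charge_skmem(sk->sk_memcg, nr_pages, gfp))
 *		goto suppress_allocation;
 *	...
 *	if (mem_cgroup_sockets_enabled && sk->sk_memcg)
 *		mem_cgroup_uncharge_skmem(sk->sk_memcg, nr_pages);
 */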
4995 DEFINE_STATIC_KEY_FALSE(memcg_sockets_enabled_key);
4996 EXPORT_SYMBOL(memcg_sockets_enabled_key);
4998 void mem_cgroup_sk_alloc(struct sock *sk)
5000 struct mem_cgroup *memcg;
5002 if (!mem_cgroup_sockets_enabled)
5005 /* Do not associate the sock with an unrelated interrupted task's memcg. */
5010 memcg = mem_cgroup_from_task(current);
5011 if (mem_cgroup_is_root(memcg))
5013 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && !memcg1_tcpmem_active(memcg))
5015 if (css_tryget(&memcg->css))
5016 sk->sk_memcg = memcg;
5021 void mem_cgroup_sk_free(struct sock *sk)
5024 css_put(&sk->sk_memcg->css);
5028 * mem_cgroup_charge_skmem - charge socket memory
5029 * @memcg: memcg to charge
5030 * @nr_pages: number of pages to charge
5031 * @gfp_mask: reclaim mode
5033 * Charges @nr_pages to @memcg. Returns %true if the charge fits within
5034 * @memcg's configured limit, %false if it does not.
5036 bool mem_cgroup_charge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages,
5039 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
5040 return memcg1_charge_skmem(memcg, nr_pages, gfp_mask);
5042 if (try_charge_memcg(memcg, gfp_mask, nr_pages) == 0) {
5043 mod_memcg_state(memcg, MEMCG_SOCK, nr_pages);
5051 * mem_cgroup_uncharge_skmem - uncharge socket memory
5052 * @memcg: memcg to uncharge
5053 * @nr_pages: number of pages to uncharge
5055 void mem_cgroup_uncharge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages)
5057 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) {
5058 memcg1_uncharge_skmem(memcg, nr_pages);
5062 mod_memcg_state(memcg, MEMCG_SOCK, -nr_pages);
5064 refill_stock(memcg, nr_pages);
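/*
 * Parse the cgroup.memory= boot option, e.g. cgroup.memory=nokmem,nosocket.
 * Recognized tokens are "nosocket", "nokmem" and "nobpf", each disabling
 * the corresponding accounting.
 */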
5067 static int __init cgroup_memory(char *s)
5071 while ((token = strsep(&s, ",")) != NULL) {
5074 if (!strcmp(token, "nosocket"))
5075 cgroup_memory_nosocket = true;
5076 if (!strcmp(token, "nokmem"))
5077 cgroup_memory_nokmem = true;
5078 if (!strcmp(token, "nobpf"))
5079 cgroup_memory_nobpf = true;
5083 __setup("cgroup.memory=", cgroup_memory);
5086 * Memory controller init, run before cgroup_init() initializes root_mem_cgroup.
5088 * Some parts like memcg_hotplug_cpu_dead() have to be initialized from this
5089 * context because of lock dependencies (cgroup_lock -> cpu hotplug) but
5090 * basically everything that doesn't depend on a specific mem_cgroup structure
5091 * should be initialized from here.
5093 int __init mem_cgroup_init(void)
5095 unsigned int memcg_size;
5099 * Currently the s32 type (see struct batched_lruvec_stat) is
5100 * used for per-memcg-per-cpu caching of per-node statistics. In order
5101 * to work fine, we should make sure that the overfill threshold can't
5102 * exceed S32_MAX / PAGE_SIZE.
5104 BUILD_BUG_ON(MEMCG_CHARGE_BATCH > S32_MAX / PAGE_SIZE);
5106 cpuhp_setup_state_nocalls(CPUHP_MM_MEMCQ_DEAD, "mm/memctrl:dead", NULL,
5107 memcg_hotplug_cpu_dead);
5109 for_each_possible_cpu(cpu) {
5110 INIT_WORK(&per_cpu_ptr(&memcg_stock, cpu)->work,
5111 drain_local_memcg_stock);
5112 INIT_WORK(&per_cpu_ptr(&obj_stock, cpu)->work,
5113 drain_local_obj_stock);
5116 memcg_size = struct_size_t(struct mem_cgroup, nodeinfo, nr_node_ids);
5117 memcg_cachep = kmem_cache_create("mem_cgroup", memcg_size, 0,
5118 SLAB_PANIC | SLAB_HWCACHE_ALIGN, NULL);
5120 memcg_pn_cachep = KMEM_CACHE(mem_cgroup_per_node,
5121 SLAB_PANIC | SLAB_HWCACHE_ALIGN);
5128 * __mem_cgroup_try_charge_swap - try charging swap space for a folio
5129 * @folio: folio being added to swap
5130 * @entry: swap entry to charge
5132 * Try to charge @folio's memcg for the swap space at @entry.
5134 * Returns 0 on success, -ENOMEM on failure.
5136 int __mem_cgroup_try_charge_swap(struct folio *folio, swp_entry_t entry)
5138 unsigned int nr_pages = folio_nr_pages(folio);
5139 struct page_counter *counter;
5140 struct mem_cgroup *memcg;
5142 if (do_memsw_account())
5145 memcg = folio_memcg(folio);
5147 VM_WARN_ON_ONCE_FOLIO(!memcg, folio);
5152 memcg_memory_event(memcg, MEMCG_SWAP_FAIL);
5156 memcg = mem_cgroup_id_get_online(memcg);
5158 if (!mem_cgroup_is_root(memcg) &&
5159 !page_counter_try_charge(&memcg->swap, nr_pages, &counter)) {
5160 memcg_memory_event(memcg, MEMCG_SWAP_MAX);
5161 memcg_memory_event(memcg, MEMCG_SWAP_FAIL);
5162 mem_cgroup_id_put(memcg);
5166 /* Get references for the tail pages, too */
5168 mem_cgroup_id_get_many(memcg, nr_pages - 1);
5169 mod_memcg_state(memcg, MEMCG_SWAP, nr_pages);
5171 swap_cgroup_record(folio, mem_cgroup_id(memcg), entry);
5177 * __mem_cgroup_uncharge_swap - uncharge swap space
5178 * @entry: swap entry to uncharge
5179 * @nr_pages: the amount of swap space to uncharge
5181 void __mem_cgroup_uncharge_swap(swp_entry_t entry, unsigned int nr_pages)
5183 struct mem_cgroup *memcg;
5186 id = swap_cgroup_clear(entry, nr_pages);
5188 memcg = mem_cgroup_from_id(id);
5190 if (!mem_cgroup_is_root(memcg)) {
5191 if (do_memsw_account())
5192 page_counter_uncharge(&memcg->memsw, nr_pages);
5194 page_counter_uncharge(&memcg->swap, nr_pages);
5196 mod_memcg_state(memcg, MEMCG_SWAP, -nr_pages);
5197 mem_cgroup_id_put_many(memcg, nr_pages);
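/*
 * How many swap pages @memcg may still consume: the globally available
 * swap, clamped by the remaining swap.max budget of every ancestor.
 * With memcg disabled or cgroup1 memsw accounting, the global value is
 * returned unclamped.
 */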
5202 long mem_cgroup_get_nr_swap_pages(struct mem_cgroup *memcg)
5204 long nr_swap_pages = get_nr_swap_pages();
5206 if (mem_cgroup_disabled() || do_memsw_account())
5207 return nr_swap_pages;
5208 for (; !mem_cgroup_is_root(memcg); memcg = parent_mem_cgroup(memcg))
5209 nr_swap_pages = min_t(long, nr_swap_pages,
5210 READ_ONCE(memcg->swap.max) -
5211 page_counter_read(&memcg->swap));
5212 return nr_swap_pages;
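/*
 * Treat a folio's swap as getting full once any memcg in its hierarchy
 * has consumed at least half of its swap.high or swap.max budget; under
 * cgroup1 memsw accounting the per-memcg check is skipped.
 */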
5215 bool mem_cgroup_swap_full(struct folio *folio)
5217 struct mem_cgroup *memcg;
5219 VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
5223 if (do_memsw_account())
5226 memcg = folio_memcg(folio);
5230 for (; !mem_cgroup_is_root(memcg); memcg = parent_mem_cgroup(memcg)) {
5231 unsigned long usage = page_counter_read(&memcg->swap);
5233 if (usage * 2 >= READ_ONCE(memcg->swap.high) ||
5234 usage * 2 >= READ_ONCE(memcg->swap.max))
5241 static int __init setup_swap_account(char *s)
5245 if (!kstrtobool(s, &res) && !res)
5246 pr_warn_once("The swapaccount=0 commandline option is deprecated "
5247 "in favor of configuring swap control via cgroupfs. "
5248 "Please report your usecase to linux-mm@kvack.org if you "
5249 "depend on this functionality.\n");
5252 __setup("swapaccount=", setup_swap_account);
5254 static u64 swap_current_read(struct cgroup_subsys_state *css,
5257 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
5259 return (u64)page_counter_read(&memcg->swap) * PAGE_SIZE;
5262 static int swap_peak_show(struct seq_file *sf, void *v)
5264 struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(sf));
5266 return peak_show(sf, v, &memcg->swap);
5269 static ssize_t swap_peak_write(struct kernfs_open_file *of, char *buf,
5270 size_t nbytes, loff_t off)
5272 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
5274 return peak_write(of, buf, nbytes, off, &memcg->swap,
5275 &memcg->swap_peaks);
5278 static int swap_high_show(struct seq_file *m, void *v)
5280 return seq_puts_memcg_tunable(m,
5281 READ_ONCE(mem_cgroup_from_seq(m)->swap.high));
5284 static ssize_t swap_high_write(struct kernfs_open_file *of,
5285 char *buf, size_t nbytes, loff_t off)
5287 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
5291 buf = strstrip(buf);
5292 err = page_counter_memparse(buf, "max", &high);
5296 page_counter_set_high(&memcg->swap, high);
5301 static int swap_max_show(struct seq_file *m, void *v)
5303 return seq_puts_memcg_tunable(m,
5304 READ_ONCE(mem_cgroup_from_seq(m)->swap.max));
5307 static ssize_t swap_max_write(struct kernfs_open_file *of,
5308 char *buf, size_t nbytes, loff_t off)
5310 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
5314 buf = strstrip(buf);
5315 err = page_counter_memparse(buf, "max", &max);
5319 xchg(&memcg->swap.max, max);
5324 static int swap_events_show(struct seq_file *m, void *v)
5326 struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
5328 seq_printf(m, "high %lu\n",
5329 atomic_long_read(&memcg->memory_events[MEMCG_SWAP_HIGH]));
5330 seq_printf(m, "max %lu\n",
5331 atomic_long_read(&memcg->memory_events[MEMCG_SWAP_MAX]));
5332 seq_printf(m, "fail %lu\n",
5333 atomic_long_read(&memcg->memory_events[MEMCG_SWAP_FAIL]));
5338 static struct cftype swap_files[] = {
5340 .name = "swap.current",
5341 .flags = CFTYPE_NOT_ON_ROOT,
5342 .read_u64 = swap_current_read,
5345 .name = "swap.high",
5346 .flags = CFTYPE_NOT_ON_ROOT,
5347 .seq_show = swap_high_show,
5348 .write = swap_high_write,
5352 .flags = CFTYPE_NOT_ON_ROOT,
5353 .seq_show = swap_max_show,
5354 .write = swap_max_write,
5357 .name = "swap.peak",
5358 .flags = CFTYPE_NOT_ON_ROOT,
5360 .release = peak_release,
5361 .seq_show = swap_peak_show,
5362 .write = swap_peak_write,
5365 .name = "swap.events",
5366 .flags = CFTYPE_NOT_ON_ROOT,
5367 .file_offset = offsetof(struct mem_cgroup, swap_events_file),
5368 .seq_show = swap_events_show,
5375 * obj_cgroup_may_zswap - check if this cgroup can zswap
5376 * @objcg: the object cgroup
5378 * Check if the hierarchical zswap limit has been reached.
5380 * This doesn't check for specific headroom, and it is not atomic
5381 * either. But with zswap, the size of the allocation is only known
5382 * once compression has occurred, and this optimistic pre-check avoids
5383 * spending cycles on compression when there is already no room left
5384 * or zswap is disabled altogether somewhere in the hierarchy.
5386 bool obj_cgroup_may_zswap(struct obj_cgroup *objcg)
5388 struct mem_cgroup *memcg, *original_memcg;
5391 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
5394 original_memcg = get_mem_cgroup_from_objcg(objcg);
5395 for (memcg = original_memcg; !mem_cgroup_is_root(memcg);
5396 memcg = parent_mem_cgroup(memcg)) {
5397 unsigned long max = READ_ONCE(memcg->zswap_max);
5398 unsigned long pages;
5400 if (max == PAGE_COUNTER_MAX)
5407 /* Force flush to get accurate stats for charging */
5408 __mem_cgroup_flush_stats(memcg, true);
5409 pages = memcg_page_state(memcg, MEMCG_ZSWAP_B) / PAGE_SIZE;
5415 mem_cgroup_put(original_memcg);
5420 * obj_cgroup_charge_zswap - charge compression backend memory
5421 * @objcg: the object cgroup
5422 * @size: size of compressed object
5424 * This forces the charge after obj_cgroup_may_zswap() allowed
5425 * compression and storage in zswap for this cgroup to go ahead.
5427 void obj_cgroup_charge_zswap(struct obj_cgroup *objcg, size_t size)
5429 struct mem_cgroup *memcg;
5431 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
5434 VM_WARN_ON_ONCE(!(current->flags & PF_MEMALLOC));
5436 /* PF_MEMALLOC context, charging must succeed */
5437 if (obj_cgroup_charge(objcg, GFP_KERNEL, size))
5441 memcg = obj_cgroup_memcg(objcg);
5442 mod_memcg_state(memcg, MEMCG_ZSWAP_B, size);
5443 mod_memcg_state(memcg, MEMCG_ZSWAPPED, 1);
5448 * obj_cgroup_uncharge_zswap - uncharge compression backend memory
5449 * @objcg: the object cgroup
5450 * @size: size of compressed object
5452 * Uncharges zswap memory on page in.
5454 void obj_cgroup_uncharge_zswap(struct obj_cgroup *objcg, size_t size)
5456 struct mem_cgroup *memcg;
5458 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
5461 obj_cgroup_uncharge(objcg, size);
5464 memcg = obj_cgroup_memcg(objcg);
5465 mod_memcg_state(memcg, MEMCG_ZSWAP_B, -size);
5466 mod_memcg_state(memcg, MEMCG_ZSWAPPED, -1);
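/*
 * Zswap writeback to the backing swap device is permitted only if zswap
 * itself is enabled and no memcg along the hierarchy has
 * memory.zswap.writeback set to 0.
 */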
5470 bool mem_cgroup_zswap_writeback_enabled(struct mem_cgroup *memcg)
5472 /* if zswap is disabled, do not block pages going to the swapping device */
5473 if (!zswap_is_enabled())
5476 for (; memcg; memcg = parent_mem_cgroup(memcg))
5477 if (!READ_ONCE(memcg->zswap_writeback))
5483 static u64 zswap_current_read(struct cgroup_subsys_state *css,
5486 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
5488 mem_cgroup_flush_stats(memcg);
5489 return memcg_page_state(memcg, MEMCG_ZSWAP_B);
5492 static int zswap_max_show(struct seq_file *m, void *v)
5494 return seq_puts_memcg_tunable(m,
5495 READ_ONCE(mem_cgroup_from_seq(m)->zswap_max));
5498 static ssize_t zswap_max_write(struct kernfs_open_file *of,
5499 char *buf, size_t nbytes, loff_t off)
5501 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
5505 buf = strstrip(buf);
5506 err = page_counter_memparse(buf, "max", &max);
5510 xchg(&memcg->zswap_max, max);
5515 static int zswap_writeback_show(struct seq_file *m, void *v)
5517 struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
5519 seq_printf(m, "%d\n", READ_ONCE(memcg->zswap_writeback));
5523 static ssize_t zswap_writeback_write(struct kernfs_open_file *of,
5524 char *buf, size_t nbytes, loff_t off)
5526 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
5527 int zswap_writeback;
5528 ssize_t parse_ret = kstrtoint(strstrip(buf), 0, &zswap_writeback);
5533 if (zswap_writeback != 0 && zswap_writeback != 1)
5536 WRITE_ONCE(memcg->zswap_writeback, zswap_writeback);
5540 static struct cftype zswap_files[] = {
5542 .name = "zswap.current",
5543 .flags = CFTYPE_NOT_ON_ROOT,
5544 .read_u64 = zswap_current_read,
5547 .name = "zswap.max",
5548 .flags = CFTYPE_NOT_ON_ROOT,
5549 .seq_show = zswap_max_show,
5550 .write = zswap_max_write,
5553 .name = "zswap.writeback",
5554 .seq_show = zswap_writeback_show,
5555 .write = zswap_writeback_write,
5559 #endif /* CONFIG_ZSWAP */
5561 static int __init mem_cgroup_swap_init(void)
5563 if (mem_cgroup_disabled())
5566 WARN_ON(cgroup_add_dfl_cftypes(&memory_cgrp_subsys, swap_files));
5567 #ifdef CONFIG_MEMCG_V1
5568 WARN_ON(cgroup_add_legacy_cftypes(&memory_cgrp_subsys, memsw_files));
5571 WARN_ON(cgroup_add_dfl_cftypes(&memory_cgrp_subsys, zswap_files));
5575 subsys_initcall(mem_cgroup_swap_init);
5577 #endif /* CONFIG_SWAP */
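/*
 * A node is allowed for @memcg if the cpuset attached to its cgroup
 * permits it; a NULL @memcg allows every node.
 */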
5579 bool mem_cgroup_node_allowed(struct mem_cgroup *memcg, int nid)
5581 return memcg ? cpuset_node_allowed(memcg->css.cgroup, nid) : true;