/* SPDX-License-Identifier: GPL-2.0-or-later */
/* memcontrol.h - Memory Controller
 *
 * Copyright IBM Corporation, 2007
 * Author Balbir Singh <balbir@linux.vnet.ibm.com>
 *
 * Copyright 2007 OpenVZ SWsoft Inc
 * Author: Pavel Emelianov <xemul@openvz.org>
 */

#ifndef _LINUX_MEMCONTROL_H
#define _LINUX_MEMCONTROL_H
#include <linux/cgroup.h>
#include <linux/vm_event_item.h>
#include <linux/hardirq.h>
#include <linux/jump_label.h>
#include <linux/page_counter.h>
#include <linux/vmpressure.h>
#include <linux/eventfd.h>
#include <linux/mm.h>
#include <linux/vmstat.h>
#include <linux/writeback.h>
#include <linux/page-flags.h>

struct mem_cgroup;
struct obj_cgroup;
struct page;
struct mm_struct;
struct kmem_cache;

/* Cgroup-specific page state, on top of universal node page state */
enum memcg_stat_item {
	MEMCG_SWAP = NR_VM_NODE_STAT_ITEMS,
	MEMCG_SOCK,
	MEMCG_PERCPU_B,
	MEMCG_VMALLOC,
	MEMCG_KMEM,
	MEMCG_ZSWAP_B,
	MEMCG_ZSWAPPED,
	MEMCG_NR_STAT,
};

enum memcg_memory_event {
	MEMCG_LOW,
	MEMCG_HIGH,
	MEMCG_MAX,
	MEMCG_OOM,
	MEMCG_OOM_KILL,
	MEMCG_OOM_GROUP_KILL,
	MEMCG_SWAP_HIGH,
	MEMCG_SWAP_MAX,
	MEMCG_SWAP_FAIL,
	MEMCG_NR_MEMORY_EVENTS,
};

struct mem_cgroup_reclaim_cookie {
	pg_data_t *pgdat;
	unsigned int generation;
};

#ifdef CONFIG_MEMCG

#define MEM_CGROUP_ID_SHIFT	16

struct mem_cgroup_id {
	int id;
	refcount_t ref;
};

/*
 * The per-memcg event counter is incremented on every pagein/pageout. With
 * THP, it is incremented by the number of pages. The counter is used to
 * trigger periodic events; this is straightforward and better than using
 * jiffies etc. to handle periodic memcg events.
 */
enum mem_cgroup_events_target {
	MEM_CGROUP_TARGET_THRESH,
	MEM_CGROUP_TARGET_SOFTLIMIT,
	MEM_CGROUP_NTARGETS,
};

struct memcg_vmstats_percpu;
struct memcg_vmstats;

struct mem_cgroup_reclaim_iter {
	struct mem_cgroup *position;
	/* scan generation, increased every round-trip */
	unsigned int generation;
};

/*
 * Bitmap and deferred work of shrinker::id corresponding to memcg-aware
 * shrinkers, which have elements charged to this memcg.
 */
struct shrinker_info {
	struct rcu_head rcu;
	atomic_long_t *nr_deferred;
	unsigned long *map;
	int map_nr_max;
};

struct lruvec_stats_percpu {
	/* Local (CPU and cgroup) state */
	long state[NR_VM_NODE_STAT_ITEMS];

	/* Delta calculation for lockless upward propagation */
	long state_prev[NR_VM_NODE_STAT_ITEMS];
};

struct lruvec_stats {
	/* Aggregated (CPU and subtree) state */
	long state[NR_VM_NODE_STAT_ITEMS];

	/* Non-hierarchical (CPU aggregated) state */
	long state_local[NR_VM_NODE_STAT_ITEMS];

	/* Pending child counts during tree propagation */
	long state_pending[NR_VM_NODE_STAT_ITEMS];
};

/*
 * per-node information in memory controller.
 */
struct mem_cgroup_per_node {
	struct lruvec lruvec;

	struct lruvec_stats_percpu __percpu *lruvec_stats_percpu;
	struct lruvec_stats lruvec_stats;

	unsigned long lru_zone_size[MAX_NR_ZONES][NR_LRU_LISTS];

	struct mem_cgroup_reclaim_iter iter;

	struct shrinker_info __rcu *shrinker_info;

	struct rb_node tree_node;	/* RB tree node */
	unsigned long usage_in_excess;	/* Set to the value by which
					 * the soft limit is exceeded */
	bool on_tree;
	struct mem_cgroup *memcg;	/* Back pointer, we cannot
					 * use container_of */
};

struct mem_cgroup_threshold {
	struct eventfd_ctx *eventfd;
	unsigned long threshold;
};

/* For threshold */
struct mem_cgroup_threshold_ary {
	/* An array index points to threshold just below or equal to usage. */
	int current_threshold;
	/* Size of entries[] */
	unsigned int size;
	/* Array of thresholds */
	struct mem_cgroup_threshold entries[];
};

struct mem_cgroup_thresholds {
	/* Primary thresholds array */
	struct mem_cgroup_threshold_ary *primary;
	/*
	 * Spare threshold array.
	 * This is needed to make mem_cgroup_unregister_event() "never fail".
	 * It must be able to store at least primary->size - 1 entries.
	 */
	struct mem_cgroup_threshold_ary *spare;
};

/*
 * Remember four most recent foreign writebacks with dirty pages in this
 * cgroup. Inode sharing is expected to be uncommon and, even if we miss
 * one in a given round, we're likely to catch it later if it keeps
 * foreign-dirtying, so a fairly low count should be enough.
 *
 * See mem_cgroup_track_foreign_dirty_slowpath() for details.
 */
#define MEMCG_CGWB_FRN_CNT	4

struct memcg_cgwb_frn {
	u64 bdi_id;			/* bdi->id of the foreign inode */
	int memcg_id;			/* memcg->css.id of foreign inode */
	u64 at;				/* jiffies_64 at the time of dirtying */
	struct wb_completion done;	/* tracks in-flight foreign writebacks */
};

/*
 * Bucket for arbitrarily byte-sized objects charged to a memory
 * cgroup. The bucket can be reparented in one piece when the cgroup
 * is destroyed, without having to round up the individual references
 * of all live memory objects in the wild.
 */
struct obj_cgroup {
	struct percpu_ref refcnt;
	struct mem_cgroup *memcg;
	atomic_t nr_charged_bytes;
	union {
		struct list_head list; /* protected by objcg_lock */
		struct rcu_head rcu;
	};
};

/*
 * The memory controller data structure. The memory controller controls both
 * page cache and RSS per cgroup. We would eventually like to provide
 * statistics based on the statistics developed by Rik Van Riel for clock-pro,
 * to help the administrator determine what knobs to tune.
 */
struct mem_cgroup {
	struct cgroup_subsys_state css;

	/* Private memcg ID. Used to ID objects that outlive the cgroup */
	struct mem_cgroup_id id;

	/* Accounted resources */
	struct page_counter memory;		/* Both v1 & v2 */

	union {
		struct page_counter swap;	/* v2 only */
		struct page_counter memsw;	/* v1 only */
	};

	/* Legacy consumer-oriented counters */
	struct page_counter kmem;		/* v1 only */
	struct page_counter tcpmem;		/* v1 only */

	/* Range enforcement for interrupt charges */
	struct work_struct high_work;

#if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_ZSWAP)
	unsigned long zswap_max;
#endif

	unsigned long soft_limit;

	/* vmpressure notifications */
	struct vmpressure vmpressure;

	/*
	 * Should the OOM killer kill all tasks belonging to this cgroup,
	 * if it kills any one of them?
	 */
	bool oom_group;

	/* protected by memcg_oom_lock */
	bool oom_lock;
	int under_oom;

	int swappiness;
	/* OOM-Killer disable */
	int oom_kill_disable;

	/* memory.events and memory.events.local */
	struct cgroup_file events_file;
	struct cgroup_file events_local_file;

	/* handle for "memory.swap.events" */
	struct cgroup_file swap_events_file;

	/* protect arrays of thresholds */
	struct mutex thresholds_lock;

	/* thresholds for memory usage. RCU-protected */
	struct mem_cgroup_thresholds thresholds;

	/* thresholds for mem+swap usage. RCU-protected */
	struct mem_cgroup_thresholds memsw_thresholds;

	/* For oom notifier event fd */
	struct list_head oom_notify;

	/*
	 * Should we move charges of a task when a task is moved into this
	 * mem_cgroup ? And what type of charges should we move ?
	 */
	unsigned long move_charge_at_immigrate;
	/* taken only while moving_account > 0 */
	spinlock_t move_lock;
	unsigned long move_lock_flags;

	CACHELINE_PADDING(_pad1_);

	/* memory.stat */
	struct memcg_vmstats *vmstats;

	/* memory.events */
	atomic_long_t memory_events[MEMCG_NR_MEMORY_EVENTS];
	atomic_long_t memory_events_local[MEMCG_NR_MEMORY_EVENTS];

	/*
	 * Hint of reclaim pressure for socket memory management. Note
	 * that this indicator should NOT be used in legacy cgroup mode
	 * where socket memory is accounted/charged separately.
	 */
	unsigned long socket_pressure;

	/* Legacy tcp memory accounting */
	bool tcpmem_active;
	int tcpmem_pressure;

#ifdef CONFIG_MEMCG_KMEM
	int kmemcg_id;
	struct obj_cgroup __rcu *objcg;
	/* list of inherited objcgs, protected by objcg_lock */
	struct list_head objcg_list;
#endif

	CACHELINE_PADDING(_pad2_);

	/*
	 * set > 0 if pages under this cgroup are moving to other cgroup.
	 */
	atomic_t moving_account;
	struct task_struct *move_lock_task;

	struct memcg_vmstats_percpu __percpu *vmstats_percpu;

#ifdef CONFIG_CGROUP_WRITEBACK
	struct list_head cgwb_list;
	struct wb_domain cgwb_domain;
	struct memcg_cgwb_frn cgwb_frn[MEMCG_CGWB_FRN_CNT];
#endif

	/* List of events which userspace wants to receive */
	struct list_head event_list;
	spinlock_t event_list_lock;

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	struct deferred_split deferred_split_queue;
#endif

#ifdef CONFIG_LRU_GEN
	/* per-memcg mm_struct list */
	struct lru_gen_mm_list mm_list;
#endif

	struct mem_cgroup_per_node *nodeinfo[];
};

/*
 * Size of the first charge trial.
 * TODO: it may be necessary to use larger numbers on big irons, or to size
 * this dynamically based on the workload.
 */
#define MEMCG_CHARGE_BATCH	64U

extern struct mem_cgroup *root_mem_cgroup;

enum page_memcg_data_flags {
	/* page->memcg_data is a pointer to an objcgs vector */
	MEMCG_DATA_OBJCGS = (1UL << 0),
	/* page has been accounted as a non-slab kernel page */
	MEMCG_DATA_KMEM = (1UL << 1),
	/* the next bit after the last actual flag */
	__NR_MEMCG_DATA_FLAGS = (1UL << 2),
};

#define MEMCG_DATA_FLAGS_MASK (__NR_MEMCG_DATA_FLAGS - 1)

static inline bool folio_memcg_kmem(struct folio *folio);

/*
 * After initialization, objcg->memcg always points at a valid memcg,
 * but it can be atomically swapped to the parent memcg.
 *
 * The caller must ensure that the returned memcg won't be released:
 * e.g. acquire the rcu_read_lock or css_set_lock.
 */
static inline struct mem_cgroup *obj_cgroup_memcg(struct obj_cgroup *objcg)
{
	return READ_ONCE(objcg->memcg);
}

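/*
 * Example (illustrative, not new API): a caller that needs to pin the
 * memcg behind an objcg across a sleep must take a css reference under
 * RCU, retrying if the objcg is reparented concurrently; this is what
 * get_mem_cgroup_from_objcg() below implements:
 *
 *	rcu_read_lock();
 *	do {
 *		memcg = obj_cgroup_memcg(objcg);
 *	} while (!css_tryget(&memcg->css));
 *	rcu_read_unlock();
 */
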
/*
 * __folio_memcg - Get the memory cgroup associated with a non-kmem folio
 * @folio: Pointer to the folio.
 *
 * Returns a pointer to the memory cgroup associated with the folio,
 * or NULL. This function assumes that the folio is known to have a
 * proper memory cgroup pointer. It's not safe to call this function
 * against some types of folios, e.g. slab folios or ex-slab folios or
 * kmem folios.
 */
static inline struct mem_cgroup *__folio_memcg(struct folio *folio)
{
	unsigned long memcg_data = folio->memcg_data;

	VM_BUG_ON_FOLIO(folio_test_slab(folio), folio);
	VM_BUG_ON_FOLIO(memcg_data & MEMCG_DATA_OBJCGS, folio);
	VM_BUG_ON_FOLIO(memcg_data & MEMCG_DATA_KMEM, folio);

	return (struct mem_cgroup *)(memcg_data & ~MEMCG_DATA_FLAGS_MASK);
}

/*
 * __folio_objcg - get the object cgroup associated with a kmem folio.
 * @folio: Pointer to the folio.
 *
 * Returns a pointer to the object cgroup associated with the folio,
 * or NULL. This function assumes that the folio is known to have a
 * proper object cgroup pointer. It's not safe to call this function
 * against some types of folios, e.g. slab folios or ex-slab folios or
 * LRU folios.
 */
static inline struct obj_cgroup *__folio_objcg(struct folio *folio)
{
	unsigned long memcg_data = folio->memcg_data;

	VM_BUG_ON_FOLIO(folio_test_slab(folio), folio);
	VM_BUG_ON_FOLIO(memcg_data & MEMCG_DATA_OBJCGS, folio);
	VM_BUG_ON_FOLIO(!(memcg_data & MEMCG_DATA_KMEM), folio);

	return (struct obj_cgroup *)(memcg_data & ~MEMCG_DATA_FLAGS_MASK);
}

/*
 * folio_memcg - Get the memory cgroup associated with a folio.
 * @folio: Pointer to the folio.
 *
 * Returns a pointer to the memory cgroup associated with the folio,
 * or NULL. This function assumes that the folio is known to have a
 * proper memory cgroup pointer. It's not safe to call this function
 * against some types of folios, e.g. slab folios or ex-slab folios.
 *
 * For a non-kmem folio any of the following ensures folio and memcg binding
 * stability:
 *
 * - the folio lock
 * - LRU isolation
 * - folio_memcg_lock()
 * - exclusive reference
 * - mem_cgroup_trylock_pages()
 *
 * For a kmem folio a caller should hold an rcu read lock to protect memcg
 * associated with a kmem folio from being released.
 */
static inline struct mem_cgroup *folio_memcg(struct folio *folio)
{
	if (folio_memcg_kmem(folio))
		return obj_cgroup_memcg(__folio_objcg(folio));
	return __folio_memcg(folio);
}

static inline struct mem_cgroup *page_memcg(struct page *page)
{
	return folio_memcg(page_folio(page));
}

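/*
 * Example (illustrative, hypothetical caller): any one of the guarantees
 * listed above makes folio_memcg() stable; e.g. under the folio lock:
 *
 *	folio_lock(folio);
 *	memcg = folio_memcg(folio);
 *	...use memcg; the binding is stable until folio_unlock()...
 *	folio_unlock(folio);
 */
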
/**
 * folio_memcg_rcu - Locklessly get the memory cgroup associated with a folio.
 * @folio: Pointer to the folio.
 *
 * This function assumes that the folio is known to have a
 * proper memory cgroup pointer. It's not safe to call this function
 * against some types of folios, e.g. slab folios or ex-slab folios.
 *
 * Return: A pointer to the memory cgroup associated with the folio,
 * or NULL.
 */
static inline struct mem_cgroup *folio_memcg_rcu(struct folio *folio)
{
	unsigned long memcg_data = READ_ONCE(folio->memcg_data);

	VM_BUG_ON_FOLIO(folio_test_slab(folio), folio);
	WARN_ON_ONCE(!rcu_read_lock_held());

	if (memcg_data & MEMCG_DATA_KMEM) {
		struct obj_cgroup *objcg;

		objcg = (void *)(memcg_data & ~MEMCG_DATA_FLAGS_MASK);
		return obj_cgroup_memcg(objcg);
	}

	return (struct mem_cgroup *)(memcg_data & ~MEMCG_DATA_FLAGS_MASK);
}

/*
 * folio_memcg_check - Get the memory cgroup associated with a folio.
 * @folio: Pointer to the folio.
 *
 * Returns a pointer to the memory cgroup associated with the folio,
 * or NULL. Unlike folio_memcg(), this function can take any folio
 * as an argument. It has to be used in cases when it's not known if a folio
 * has an associated memory cgroup pointer or an object cgroups vector or
 * an object cgroup.
 *
 * For a non-kmem folio any of the following ensures folio and memcg binding
 * stability:
 *
 * - the folio lock
 * - LRU isolation
 * - folio_memcg_lock()
 * - exclusive reference
 * - mem_cgroup_trylock_pages()
 *
 * For a kmem folio a caller should hold an rcu read lock to protect memcg
 * associated with a kmem folio from being released.
 */
static inline struct mem_cgroup *folio_memcg_check(struct folio *folio)
{
	/*
	 * Because folio->memcg_data might be changed asynchronously
	 * for slabs, READ_ONCE() should be used here.
	 */
	unsigned long memcg_data = READ_ONCE(folio->memcg_data);

	if (memcg_data & MEMCG_DATA_OBJCGS)
		return NULL;

	if (memcg_data & MEMCG_DATA_KMEM) {
		struct obj_cgroup *objcg;

		objcg = (void *)(memcg_data & ~MEMCG_DATA_FLAGS_MASK);
		return obj_cgroup_memcg(objcg);
	}

	return (struct mem_cgroup *)(memcg_data & ~MEMCG_DATA_FLAGS_MASK);
}

static inline struct mem_cgroup *page_memcg_check(struct page *page)
{
	if (PageTail(page))
		return NULL;
	return folio_memcg_check((struct folio *)page);
}

static inline struct mem_cgroup *get_mem_cgroup_from_objcg(struct obj_cgroup *objcg)
{
	struct mem_cgroup *memcg;

	rcu_read_lock();
retry:
	memcg = obj_cgroup_memcg(objcg);
	if (unlikely(!css_tryget(&memcg->css)))
		goto retry;
	rcu_read_unlock();

	return memcg;
}

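/*
 * Note (editorial): the retry loop above can only spin while the objcg is
 * being reparented and the old memcg's css refcount has already dropped to
 * zero; re-reading objcg->memcg then observes the parent memcg, so the
 * loop terminates.
 */
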
#ifdef CONFIG_MEMCG_KMEM
/*
 * folio_memcg_kmem - Check if the folio has the memcg_kmem flag set.
 * @folio: Pointer to the folio.
 *
 * Checks if the folio has MemcgKmem flag set. The caller must ensure
 * that the folio has an associated memory cgroup. It's not safe to call
 * this function against some types of folios, e.g. slab folios.
 */
static inline bool folio_memcg_kmem(struct folio *folio)
{
	VM_BUG_ON_PGFLAGS(PageTail(&folio->page), &folio->page);
	VM_BUG_ON_FOLIO(folio->memcg_data & MEMCG_DATA_OBJCGS, folio);
	return folio->memcg_data & MEMCG_DATA_KMEM;
}

#else
static inline bool folio_memcg_kmem(struct folio *folio)
{
	return false;
}

#endif

static inline bool PageMemcgKmem(struct page *page)
{
	return folio_memcg_kmem(page_folio(page));
}

static inline bool mem_cgroup_is_root(struct mem_cgroup *memcg)
{
	return (memcg == root_mem_cgroup);
}

static inline bool mem_cgroup_disabled(void)
{
	return !cgroup_subsys_enabled(memory_cgrp_subsys);
}

static inline void mem_cgroup_protection(struct mem_cgroup *root,
					 struct mem_cgroup *memcg,
					 unsigned long *min,
					 unsigned long *low)
{
	*min = *low = 0;

	if (mem_cgroup_disabled())
		return;

	/*
	 * There is no reclaim protection applied to a targeted reclaim.
	 * We are special casing this specific case here because
	 * mem_cgroup_calculate_protection is not robust enough to keep
	 * the protection invariant for calculated effective values for
	 * parallel reclaimers with different reclaim targets. This is
	 * especially a problem for tail memcgs (as they have pages on LRU)
	 * which would want to have effective values 0 for targeted reclaim
	 * but a different value for external reclaim.
	 *
	 * Example
	 * Let's have global and A's reclaim in parallel:
	 *  |
	 *  A (low=2G, usage = 3G, max = 3G, children_low_usage = 1.5G)
	 *  |\
	 *  | C (low = 1G, usage = 2.5G)
	 *  B (low = 1G, usage = 0.5G)
	 *
	 * For the global reclaim
	 * A.elow = A.low
	 * B.elow = min(B.usage, B.low) because children_low_usage <= A.elow
	 * C.elow = min(C.usage, C.low)
	 *
	 * With the effective values resetting we have A reclaim
	 * A.elow = 0
	 * B.elow = B.low
	 * C.elow = C.low
	 *
	 * If the global reclaim races with A's reclaim then
	 * B.elow = C.elow = 0 because children_low_usage > A.elow
	 * is possible and reclaiming B would be violating the protection.
	 */
	if (root == memcg)
		return;

	*min = READ_ONCE(memcg->memory.emin);
	*low = READ_ONCE(memcg->memory.elow);
}

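/*
 * Example (illustrative sketch; the real logic lives in get_scan_count()
 * in mm/vmscan.c): a reclaimer reads the effective bounds and scales its
 * scan pressure against them:
 *
 *	unsigned long min, low;
 *
 *	mem_cgroup_protection(target_memcg, memcg, &min, &low);
 *	if (min || low) {
 *		...scan proportionally less of this memcg's LRUs...
 *	}
 */
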
void mem_cgroup_calculate_protection(struct mem_cgroup *root,
				     struct mem_cgroup *memcg);

static inline bool mem_cgroup_unprotected(struct mem_cgroup *target,
					  struct mem_cgroup *memcg)
{
	/*
	 * The root memcg doesn't account charges, and doesn't support
	 * protection. The target memcg's protection is ignored, see
	 * mem_cgroup_calculate_protection() and mem_cgroup_protection()
	 */
	return mem_cgroup_disabled() || mem_cgroup_is_root(memcg) ||
		memcg == target;
}

static inline bool mem_cgroup_below_low(struct mem_cgroup *target,
					struct mem_cgroup *memcg)
{
	if (mem_cgroup_unprotected(target, memcg))
		return false;

	return READ_ONCE(memcg->memory.elow) >=
		page_counter_read(&memcg->memory);
}

static inline bool mem_cgroup_below_min(struct mem_cgroup *target,
					struct mem_cgroup *memcg)
{
	if (mem_cgroup_unprotected(target, memcg))
		return false;

	return READ_ONCE(memcg->memory.emin) >=
		page_counter_read(&memcg->memory);
}

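/*
 * Example (illustrative, a sketch of how shrink_node_memcgs() in
 * mm/vmscan.c consumes these predicates):
 *
 *	if (mem_cgroup_below_min(target_memcg, memcg)) {
 *		...hard protection, skip this memcg entirely...
 *	} else if (mem_cgroup_below_low(target_memcg, memcg)) {
 *		...soft protection, skip unless reclaim is desperate,
 *		   and record MEMCG_LOW via memcg_memory_event()...
 *	}
 */
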
int __mem_cgroup_charge(struct folio *folio, struct mm_struct *mm, gfp_t gfp);

/**
 * mem_cgroup_charge - Charge a newly allocated folio to a cgroup.
 * @folio: Folio to charge.
 * @mm: mm context of the allocating task.
 * @gfp: Reclaim mode.
 *
 * Try to charge @folio to the memcg that @mm belongs to, reclaiming
 * pages according to @gfp if necessary. If @mm is NULL, try to
 * charge to the active memcg.
 *
 * Do not use this for folios allocated for swapin.
 *
 * Return: 0 on success. Otherwise, an error code is returned.
 */
static inline int mem_cgroup_charge(struct folio *folio, struct mm_struct *mm,
				    gfp_t gfp)
{
	if (mem_cgroup_disabled())
		return 0;
	return __mem_cgroup_charge(folio, mm, gfp);
}

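/*
 * Example (illustrative, hypothetical caller): charging a freshly
 * allocated folio and releasing it if the charge fails; a successfully
 * charged folio is uncharged automatically on its final put:
 *
 *	folio = folio_alloc(gfp, 0);
 *	if (folio && mem_cgroup_charge(folio, vma->vm_mm, gfp)) {
 *		folio_put(folio);
 *		folio = NULL;
 *	}
 */
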
int mem_cgroup_swapin_charge_folio(struct folio *folio, struct mm_struct *mm,
				   gfp_t gfp, swp_entry_t entry);
void mem_cgroup_swapin_uncharge_swap(swp_entry_t entry);

void __mem_cgroup_uncharge(struct folio *folio);

/**
 * mem_cgroup_uncharge - Uncharge a folio.
 * @folio: Folio to uncharge.
 *
 * Uncharge a folio previously charged with mem_cgroup_charge().
 */
static inline void mem_cgroup_uncharge(struct folio *folio)
{
	if (mem_cgroup_disabled())
		return;
	__mem_cgroup_uncharge(folio);
}

void __mem_cgroup_uncharge_list(struct list_head *page_list);
static inline void mem_cgroup_uncharge_list(struct list_head *page_list)
{
	if (mem_cgroup_disabled())
		return;
	__mem_cgroup_uncharge_list(page_list);
}

void mem_cgroup_migrate(struct folio *old, struct folio *new);

/**
 * mem_cgroup_lruvec - get the lru list vector for a memcg & node
 * @memcg: memcg of the wanted lruvec
 * @pgdat: pglist_data
 *
 * Returns the lru list vector holding pages for a given @memcg &
 * @pgdat combination. This can be the node lruvec, if the memory
 * controller is disabled.
 */
static inline struct lruvec *mem_cgroup_lruvec(struct mem_cgroup *memcg,
					       struct pglist_data *pgdat)
{
	struct mem_cgroup_per_node *mz;
	struct lruvec *lruvec;

	if (mem_cgroup_disabled()) {
		lruvec = &pgdat->__lruvec;
		goto out;
	}

	if (!memcg)
		memcg = root_mem_cgroup;

	mz = memcg->nodeinfo[pgdat->node_id];
	lruvec = &mz->lruvec;
out:
	/*
	 * Since a node can be onlined after the mem_cgroup was created,
	 * we have to be prepared to initialize lruvec->pgdat here;
	 * and if offlined then reonlined, we need to reinitialize it.
	 */
	if (unlikely(lruvec->pgdat != pgdat))
		lruvec->pgdat = pgdat;
	return lruvec;
}

/**
 * folio_lruvec - return lruvec for isolating/putting an LRU folio
 * @folio: Pointer to the folio.
 *
 * This function relies on folio->mem_cgroup being stable.
 */
static inline struct lruvec *folio_lruvec(struct folio *folio)
{
	struct mem_cgroup *memcg = folio_memcg(folio);

	VM_WARN_ON_ONCE_FOLIO(!memcg && !mem_cgroup_disabled(), folio);
	return mem_cgroup_lruvec(memcg, folio_pgdat(folio));
}

struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p);

struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm);

struct lruvec *folio_lruvec_lock(struct folio *folio);
struct lruvec *folio_lruvec_lock_irq(struct folio *folio);
struct lruvec *folio_lruvec_lock_irqsave(struct folio *folio,
					 unsigned long *flags);

#ifdef CONFIG_DEBUG_VM
void lruvec_memcg_debug(struct lruvec *lruvec, struct folio *folio);
#else
static inline
void lruvec_memcg_debug(struct lruvec *lruvec, struct folio *folio)
{
}
#endif

static inline
struct mem_cgroup *mem_cgroup_from_css(struct cgroup_subsys_state *css)
{
	return css ? container_of(css, struct mem_cgroup, css) : NULL;
}

static inline bool obj_cgroup_tryget(struct obj_cgroup *objcg)
{
	return percpu_ref_tryget(&objcg->refcnt);
}

static inline void obj_cgroup_get(struct obj_cgroup *objcg)
{
	percpu_ref_get(&objcg->refcnt);
}

static inline void obj_cgroup_get_many(struct obj_cgroup *objcg,
				       unsigned long nr)
{
	percpu_ref_get_many(&objcg->refcnt, nr);
}

static inline void obj_cgroup_put(struct obj_cgroup *objcg)
{
	percpu_ref_put(&objcg->refcnt);
}

static inline bool mem_cgroup_tryget(struct mem_cgroup *memcg)
{
	return !memcg || css_tryget(&memcg->css);
}

static inline void mem_cgroup_put(struct mem_cgroup *memcg)
{
	if (memcg)
		css_put(&memcg->css);
}

#define mem_cgroup_from_counter(counter, member)	\
	container_of(counter, struct mem_cgroup, member)

struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *,
				   struct mem_cgroup *,
				   struct mem_cgroup_reclaim_cookie *);
void mem_cgroup_iter_break(struct mem_cgroup *, struct mem_cgroup *);
void mem_cgroup_scan_tasks(struct mem_cgroup *memcg,
			   int (*)(struct task_struct *, void *), void *arg);

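/*
 * Example (illustrative): a full pre-order walk of a subtree, the pattern
 * documented at mem_cgroup_iter()'s definition in mm/memcontrol.c. Break
 * out early only via mem_cgroup_iter_break() so the reference held on the
 * current position is dropped (should_stop() is a stand-in for the
 * caller's own condition):
 *
 *	struct mem_cgroup *iter;
 *
 *	for (iter = mem_cgroup_iter(root, NULL, NULL); iter;
 *	     iter = mem_cgroup_iter(root, iter, NULL)) {
 *		if (should_stop(iter)) {
 *			mem_cgroup_iter_break(root, iter);
 *			break;
 *		}
 *	}
 */
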
static inline unsigned short mem_cgroup_id(struct mem_cgroup *memcg)
{
	if (mem_cgroup_disabled())
		return 0;

	return memcg->id.id;
}
struct mem_cgroup *mem_cgroup_from_id(unsigned short id);

#ifdef CONFIG_SHRINKER_DEBUG
static inline unsigned long mem_cgroup_ino(struct mem_cgroup *memcg)
{
	return memcg ? cgroup_ino(memcg->css.cgroup) : 0;
}

struct mem_cgroup *mem_cgroup_get_from_ino(unsigned long ino);
#endif

static inline struct mem_cgroup *mem_cgroup_from_seq(struct seq_file *m)
{
	return mem_cgroup_from_css(seq_css(m));
}

static inline struct mem_cgroup *lruvec_memcg(struct lruvec *lruvec)
{
	struct mem_cgroup_per_node *mz;

	if (mem_cgroup_disabled())
		return NULL;

	mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
	return mz->memcg;
}

/**
 * parent_mem_cgroup - find the accounting parent of a memcg
 * @memcg: memcg whose parent to find
 *
 * Returns the parent memcg, or NULL if this is the root.
 */
static inline struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg)
{
	return mem_cgroup_from_css(memcg->css.parent);
}

static inline bool mem_cgroup_is_descendant(struct mem_cgroup *memcg,
					    struct mem_cgroup *root)
{
	if (root == memcg)
		return true;
	return cgroup_is_descendant(memcg->css.cgroup, root->css.cgroup);
}

static inline bool mm_match_cgroup(struct mm_struct *mm,
				   struct mem_cgroup *memcg)
{
	struct mem_cgroup *task_memcg;
	bool match = false;

	rcu_read_lock();
	task_memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
	if (task_memcg)
		match = mem_cgroup_is_descendant(task_memcg, memcg);
	rcu_read_unlock();
	return match;
}

struct cgroup_subsys_state *mem_cgroup_css_from_folio(struct folio *folio);
ino_t page_cgroup_ino(struct page *page);

static inline bool mem_cgroup_online(struct mem_cgroup *memcg)
{
	if (mem_cgroup_disabled())
		return true;
	return !!(memcg->css.flags & CSS_ONLINE);
}

void mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru,
				int zid, int nr_pages);

static inline
unsigned long mem_cgroup_get_zone_lru_size(struct lruvec *lruvec,
					   enum lru_list lru, int zone_idx)
{
	struct mem_cgroup_per_node *mz;

	mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
	return READ_ONCE(mz->lru_zone_size[zone_idx][lru]);
}

void mem_cgroup_handle_over_high(gfp_t gfp_mask);

unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg);

unsigned long mem_cgroup_size(struct mem_cgroup *memcg);

void mem_cgroup_print_oom_context(struct mem_cgroup *memcg,
				  struct task_struct *p);

void mem_cgroup_print_oom_meminfo(struct mem_cgroup *memcg);

static inline void mem_cgroup_enter_user_fault(void)
{
	WARN_ON(current->in_user_fault);
	current->in_user_fault = 1;
}

static inline void mem_cgroup_exit_user_fault(void)
{
	WARN_ON(!current->in_user_fault);
	current->in_user_fault = 0;
}

static inline bool task_in_memcg_oom(struct task_struct *p)
{
	return p->memcg_in_oom;
}

bool mem_cgroup_oom_synchronize(bool wait);
struct mem_cgroup *mem_cgroup_get_oom_group(struct task_struct *victim,
					    struct mem_cgroup *oom_domain);
void mem_cgroup_print_oom_group(struct mem_cgroup *memcg);

void folio_memcg_lock(struct folio *folio);
void folio_memcg_unlock(struct folio *folio);

void __mod_memcg_state(struct mem_cgroup *memcg, int idx, int val);

/* try to stabilize folio_memcg() for all the pages in a memcg */
static inline bool mem_cgroup_trylock_pages(struct mem_cgroup *memcg)
{
	rcu_read_lock();

	if (mem_cgroup_disabled() || !atomic_read(&memcg->moving_account))
		return true;

	rcu_read_unlock();
	return false;
}

static inline void mem_cgroup_unlock_pages(void)
{
	rcu_read_unlock();
}

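/*
 * Example (illustrative): the expected bracketing around a page-table or
 * LRU walk that reads folio_memcg() without the folio lock; on success
 * the walk runs under rcu_read_lock() until mem_cgroup_unlock_pages():
 *
 *	if (!mem_cgroup_trylock_pages(memcg))
 *		return;
 *	...walk pages; folio_memcg() is stable here...
 *	mem_cgroup_unlock_pages();
 */
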
/* idx can be of type enum memcg_stat_item or node_stat_item */
static inline void mod_memcg_state(struct mem_cgroup *memcg,
				   int idx, int val)
{
	unsigned long flags;

	local_irq_save(flags);
	__mod_memcg_state(memcg, idx, val);
	local_irq_restore(flags);
}

static inline void mod_memcg_page_state(struct page *page,
					int idx, int val)
{
	struct mem_cgroup *memcg;

	if (mem_cgroup_disabled())
		return;

	rcu_read_lock();
	memcg = page_memcg(page);
	if (memcg)
		mod_memcg_state(memcg, idx, val);
	rcu_read_unlock();
}

unsigned long memcg_page_state(struct mem_cgroup *memcg, int idx);

static inline unsigned long lruvec_page_state(struct lruvec *lruvec,
					      enum node_stat_item idx)
{
	struct mem_cgroup_per_node *pn;
	long x;

	if (mem_cgroup_disabled())
		return node_page_state(lruvec_pgdat(lruvec), idx);

	pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
	x = READ_ONCE(pn->lruvec_stats.state[idx]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}

static inline unsigned long lruvec_page_state_local(struct lruvec *lruvec,
						    enum node_stat_item idx)
{
	struct mem_cgroup_per_node *pn;
	long x = 0;

	if (mem_cgroup_disabled())
		return node_page_state(lruvec_pgdat(lruvec), idx);

	pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
	x = READ_ONCE(pn->lruvec_stats.state_local[idx]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}

void mem_cgroup_flush_stats(void);
void mem_cgroup_flush_stats_ratelimited(void);

void __mod_memcg_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
			      int val);
void __mod_lruvec_kmem_state(void *p, enum node_stat_item idx, int val);

static inline void mod_lruvec_kmem_state(void *p, enum node_stat_item idx,
					 int val)
{
	unsigned long flags;

	local_irq_save(flags);
	__mod_lruvec_kmem_state(p, idx, val);
	local_irq_restore(flags);
}

static inline void mod_memcg_lruvec_state(struct lruvec *lruvec,
					  enum node_stat_item idx, int val)
{
	unsigned long flags;

	local_irq_save(flags);
	__mod_memcg_lruvec_state(lruvec, idx, val);
	local_irq_restore(flags);
}

void __count_memcg_events(struct mem_cgroup *memcg, enum vm_event_item idx,
			  unsigned long count);

static inline void count_memcg_events(struct mem_cgroup *memcg,
				      enum vm_event_item idx,
				      unsigned long count)
{
	unsigned long flags;

	local_irq_save(flags);
	__count_memcg_events(memcg, idx, count);
	local_irq_restore(flags);
}

static inline void count_memcg_page_event(struct page *page,
					  enum vm_event_item idx)
{
	struct mem_cgroup *memcg = page_memcg(page);

	if (memcg)
		count_memcg_events(memcg, idx, 1);
}

static inline void count_memcg_folio_events(struct folio *folio,
		enum vm_event_item idx, unsigned long nr)
{
	struct mem_cgroup *memcg = folio_memcg(folio);

	if (memcg)
		count_memcg_events(memcg, idx, nr);
}

static inline void count_memcg_event_mm(struct mm_struct *mm,
					enum vm_event_item idx)
{
	struct mem_cgroup *memcg;

	if (mem_cgroup_disabled())
		return;

	rcu_read_lock();
	memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
	if (likely(memcg))
		count_memcg_events(memcg, idx, 1);
	rcu_read_unlock();
}

static inline void memcg_memory_event(struct mem_cgroup *memcg,
				      enum memcg_memory_event event)
{
	bool swap_event = event == MEMCG_SWAP_HIGH || event == MEMCG_SWAP_MAX ||
			  event == MEMCG_SWAP_FAIL;

	atomic_long_inc(&memcg->memory_events_local[event]);
	if (!swap_event)
		cgroup_file_notify(&memcg->events_local_file);

	do {
		atomic_long_inc(&memcg->memory_events[event]);
		if (swap_event)
			cgroup_file_notify(&memcg->swap_events_file);
		else
			cgroup_file_notify(&memcg->events_file);

		if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
			break;
		if (cgrp_dfl_root.flags & CGRP_ROOT_MEMORY_LOCAL_EVENTS)
			break;
	} while ((memcg = parent_mem_cgroup(memcg)) &&
		 !mem_cgroup_is_root(memcg));
}

static inline void memcg_memory_event_mm(struct mm_struct *mm,
					 enum memcg_memory_event event)
{
	struct mem_cgroup *memcg;

	if (mem_cgroup_disabled())
		return;

	rcu_read_lock();
	memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
	if (likely(memcg))
		memcg_memory_event(memcg, event);
	rcu_read_unlock();
}

void split_page_memcg(struct page *head, unsigned int nr);

unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order,
					    gfp_t gfp_mask,
					    unsigned long *total_scanned);

#else /* CONFIG_MEMCG */

#define MEM_CGROUP_ID_SHIFT	0

static inline struct mem_cgroup *folio_memcg(struct folio *folio)
{
	return NULL;
}

static inline struct mem_cgroup *page_memcg(struct page *page)
{
	return NULL;
}

static inline struct mem_cgroup *folio_memcg_rcu(struct folio *folio)
{
	WARN_ON_ONCE(!rcu_read_lock_held());
	return NULL;
}

static inline struct mem_cgroup *folio_memcg_check(struct folio *folio)
{
	return NULL;
}

static inline struct mem_cgroup *page_memcg_check(struct page *page)
{
	return NULL;
}

static inline bool folio_memcg_kmem(struct folio *folio)
{
	return false;
}

static inline bool PageMemcgKmem(struct page *page)
{
	return false;
}

static inline bool mem_cgroup_is_root(struct mem_cgroup *memcg)
{
	return true;
}

static inline bool mem_cgroup_disabled(void)
{
	return true;
}

static inline void memcg_memory_event(struct mem_cgroup *memcg,
				      enum memcg_memory_event event)
{
}

static inline void memcg_memory_event_mm(struct mm_struct *mm,
					 enum memcg_memory_event event)
{
}

static inline void mem_cgroup_protection(struct mem_cgroup *root,
					 struct mem_cgroup *memcg,
					 unsigned long *min,
					 unsigned long *low)
{
	*min = *low = 0;
}

static inline void mem_cgroup_calculate_protection(struct mem_cgroup *root,
						   struct mem_cgroup *memcg)
{
}

static inline bool mem_cgroup_unprotected(struct mem_cgroup *target,
					  struct mem_cgroup *memcg)
{
	return true;
}

static inline bool mem_cgroup_below_low(struct mem_cgroup *target,
					struct mem_cgroup *memcg)
{
	return false;
}

static inline bool mem_cgroup_below_min(struct mem_cgroup *target,
					struct mem_cgroup *memcg)
{
	return false;
}

static inline int mem_cgroup_charge(struct folio *folio,
				    struct mm_struct *mm, gfp_t gfp)
{
	return 0;
}

static inline int mem_cgroup_swapin_charge_folio(struct folio *folio,
			struct mm_struct *mm, gfp_t gfp, swp_entry_t entry)
{
	return 0;
}

static inline void mem_cgroup_swapin_uncharge_swap(swp_entry_t entry)
{
}

static inline void mem_cgroup_uncharge(struct folio *folio)
{
}

static inline void mem_cgroup_uncharge_list(struct list_head *page_list)
{
}

static inline void mem_cgroup_migrate(struct folio *old, struct folio *new)
{
}

static inline struct lruvec *mem_cgroup_lruvec(struct mem_cgroup *memcg,
					       struct pglist_data *pgdat)
{
	return &pgdat->__lruvec;
}

static inline struct lruvec *folio_lruvec(struct folio *folio)
{
	struct pglist_data *pgdat = folio_pgdat(folio);
	return &pgdat->__lruvec;
}

static inline
void lruvec_memcg_debug(struct lruvec *lruvec, struct folio *folio)
{
}

static inline struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg)
{
	return NULL;
}

static inline bool mm_match_cgroup(struct mm_struct *mm,
				   struct mem_cgroup *memcg)
{
	return true;
}

static inline struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm)
{
	return NULL;
}

static inline
struct mem_cgroup *mem_cgroup_from_css(struct cgroup_subsys_state *css)
{
	return NULL;
}

static inline void obj_cgroup_put(struct obj_cgroup *objcg)
{
}

static inline bool mem_cgroup_tryget(struct mem_cgroup *memcg)
{
	return true;
}

static inline void mem_cgroup_put(struct mem_cgroup *memcg)
{
}

static inline struct lruvec *folio_lruvec_lock(struct folio *folio)
{
	struct pglist_data *pgdat = folio_pgdat(folio);

	spin_lock(&pgdat->__lruvec.lru_lock);
	return &pgdat->__lruvec;
}

static inline struct lruvec *folio_lruvec_lock_irq(struct folio *folio)
{
	struct pglist_data *pgdat = folio_pgdat(folio);

	spin_lock_irq(&pgdat->__lruvec.lru_lock);
	return &pgdat->__lruvec;
}

static inline struct lruvec *folio_lruvec_lock_irqsave(struct folio *folio,
		unsigned long *flagsp)
{
	struct pglist_data *pgdat = folio_pgdat(folio);

	spin_lock_irqsave(&pgdat->__lruvec.lru_lock, *flagsp);
	return &pgdat->__lruvec;
}

static inline struct mem_cgroup *
mem_cgroup_iter(struct mem_cgroup *root,
		struct mem_cgroup *prev,
		struct mem_cgroup_reclaim_cookie *reclaim)
{
	return NULL;
}

static inline void mem_cgroup_iter_break(struct mem_cgroup *root,
					 struct mem_cgroup *prev)
{
}

static inline void mem_cgroup_scan_tasks(struct mem_cgroup *memcg,
		int (*fn)(struct task_struct *, void *), void *arg)
{
}

static inline unsigned short mem_cgroup_id(struct mem_cgroup *memcg)
{
	return 0;
}

static inline struct mem_cgroup *mem_cgroup_from_id(unsigned short id)
{
	WARN_ON_ONCE(id);
	/* XXX: This should always return root_mem_cgroup */
	return NULL;
}

#ifdef CONFIG_SHRINKER_DEBUG
static inline unsigned long mem_cgroup_ino(struct mem_cgroup *memcg)
{
	return 0;
}

static inline struct mem_cgroup *mem_cgroup_get_from_ino(unsigned long ino)
{
	return NULL;
}
#endif

static inline struct mem_cgroup *mem_cgroup_from_seq(struct seq_file *m)
{
	return NULL;
}

static inline struct mem_cgroup *lruvec_memcg(struct lruvec *lruvec)
{
	return NULL;
}

static inline bool mem_cgroup_online(struct mem_cgroup *memcg)
{
	return true;
}

static inline
unsigned long mem_cgroup_get_zone_lru_size(struct lruvec *lruvec,
					   enum lru_list lru, int zone_idx)
{
	return 0;
}

static inline unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg)
{
	return 0;
}

static inline unsigned long mem_cgroup_size(struct mem_cgroup *memcg)
{
	return 0;
}

static inline void
mem_cgroup_print_oom_context(struct mem_cgroup *memcg, struct task_struct *p)
{
}

static inline void
mem_cgroup_print_oom_meminfo(struct mem_cgroup *memcg)
{
}

static inline void folio_memcg_lock(struct folio *folio)
{
}

static inline void folio_memcg_unlock(struct folio *folio)
{
}

static inline bool mem_cgroup_trylock_pages(struct mem_cgroup *memcg)
{
	/* to match folio_memcg_rcu() */
	rcu_read_lock();
	return true;
}

static inline void mem_cgroup_unlock_pages(void)
{
	rcu_read_unlock();
}

static inline void mem_cgroup_handle_over_high(gfp_t gfp_mask)
{
}

static inline void mem_cgroup_enter_user_fault(void)
{
}

static inline void mem_cgroup_exit_user_fault(void)
{
}

static inline bool task_in_memcg_oom(struct task_struct *p)
{
	return false;
}

static inline bool mem_cgroup_oom_synchronize(bool wait)
{
	return false;
}

static inline struct mem_cgroup *mem_cgroup_get_oom_group(
	struct task_struct *victim, struct mem_cgroup *oom_domain)
{
	return NULL;
}

static inline void mem_cgroup_print_oom_group(struct mem_cgroup *memcg)
{
}

static inline void __mod_memcg_state(struct mem_cgroup *memcg,
				     int idx,
				     int nr)
{
}

static inline void mod_memcg_state(struct mem_cgroup *memcg,
				   int idx,
				   int nr)
{
}

static inline void mod_memcg_page_state(struct page *page,
					int idx, int val)
{
}

static inline unsigned long memcg_page_state(struct mem_cgroup *memcg, int idx)
{
	return 0;
}

static inline unsigned long lruvec_page_state(struct lruvec *lruvec,
					      enum node_stat_item idx)
{
	return node_page_state(lruvec_pgdat(lruvec), idx);
}

static inline unsigned long lruvec_page_state_local(struct lruvec *lruvec,
						    enum node_stat_item idx)
{
	return node_page_state(lruvec_pgdat(lruvec), idx);
}

static inline void mem_cgroup_flush_stats(void)
{
}

static inline void mem_cgroup_flush_stats_ratelimited(void)
{
}

static inline void __mod_memcg_lruvec_state(struct lruvec *lruvec,
					    enum node_stat_item idx, int val)
{
}

static inline void __mod_lruvec_kmem_state(void *p, enum node_stat_item idx,
					   int val)
{
	struct page *page = virt_to_head_page(p);

	__mod_node_page_state(page_pgdat(page), idx, val);
}

static inline void mod_lruvec_kmem_state(void *p, enum node_stat_item idx,
					 int val)
{
	struct page *page = virt_to_head_page(p);

	mod_node_page_state(page_pgdat(page), idx, val);
}

static inline void count_memcg_events(struct mem_cgroup *memcg,
				      enum vm_event_item idx,
				      unsigned long count)
{
}

static inline void __count_memcg_events(struct mem_cgroup *memcg,
					enum vm_event_item idx,
					unsigned long count)
{
}

static inline void count_memcg_page_event(struct page *page,
					  int idx)
{
}

static inline void count_memcg_folio_events(struct folio *folio,
		enum vm_event_item idx, unsigned long nr)
{
}

static inline
void count_memcg_event_mm(struct mm_struct *mm, enum vm_event_item idx)
{
}

static inline void split_page_memcg(struct page *head, unsigned int nr)
{
}

static inline
unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order,
					    gfp_t gfp_mask,
					    unsigned long *total_scanned)
{
	return 0;
}
#endif /* CONFIG_MEMCG */

static inline void __inc_lruvec_kmem_state(void *p, enum node_stat_item idx)
{
	__mod_lruvec_kmem_state(p, idx, 1);
}

static inline void __dec_lruvec_kmem_state(void *p, enum node_stat_item idx)
{
	__mod_lruvec_kmem_state(p, idx, -1);
}

static inline struct lruvec *parent_lruvec(struct lruvec *lruvec)
{
	struct mem_cgroup *memcg;

	memcg = lruvec_memcg(lruvec);
	if (!memcg)
		return NULL;
	memcg = parent_mem_cgroup(memcg);
	if (!memcg)
		return NULL;
	return mem_cgroup_lruvec(memcg, lruvec_pgdat(lruvec));
}

static inline void unlock_page_lruvec(struct lruvec *lruvec)
{
	spin_unlock(&lruvec->lru_lock);
}

static inline void unlock_page_lruvec_irq(struct lruvec *lruvec)
{
	spin_unlock_irq(&lruvec->lru_lock);
}

static inline void unlock_page_lruvec_irqrestore(struct lruvec *lruvec,
		unsigned long flags)
{
	spin_unlock_irqrestore(&lruvec->lru_lock, flags);
}

/* Test requires a stable folio->memcg binding, see folio_memcg() */
static inline bool folio_matches_lruvec(struct folio *folio,
					struct lruvec *lruvec)
{
	return lruvec_pgdat(lruvec) == folio_pgdat(folio) &&
	       lruvec_memcg(lruvec) == folio_memcg(folio);
}

/* Don't lock again iff the folio's lruvec is already locked */
static inline struct lruvec *folio_lruvec_relock_irq(struct folio *folio,
		struct lruvec *locked_lruvec)
{
	if (locked_lruvec) {
		if (folio_matches_lruvec(folio, locked_lruvec))
			return locked_lruvec;

		unlock_page_lruvec_irq(locked_lruvec);
	}

	return folio_lruvec_lock_irq(folio);
}

/* Don't lock again iff the folio's lruvec is already locked */
static inline struct lruvec *folio_lruvec_relock_irqsave(struct folio *folio,
		struct lruvec *locked_lruvec, unsigned long *flags)
{
	if (locked_lruvec) {
		if (folio_matches_lruvec(folio, locked_lruvec))
			return locked_lruvec;

		unlock_page_lruvec_irqrestore(locked_lruvec, *flags);
	}

	return folio_lruvec_lock_irqsave(folio, flags);
}

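/*
 * Example (illustrative, the batched-walk pattern used by e.g.
 * move_folios_to_lru() in mm/vmscan.c): keep one lruvec lock held across
 * a list of folios, only cycling the lock when the next folio belongs to
 * a different lruvec:
 *
 *	struct lruvec *lruvec = NULL;
 *	unsigned long flags;
 *
 *	list_for_each_entry(folio, &list, lru) {
 *		lruvec = folio_lruvec_relock_irqsave(folio, lruvec, &flags);
 *		...operate on folio under lruvec->lru_lock...
 *	}
 *	if (lruvec)
 *		unlock_page_lruvec_irqrestore(lruvec, flags);
 */
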
#ifdef CONFIG_CGROUP_WRITEBACK

struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb);
void mem_cgroup_wb_stats(struct bdi_writeback *wb, unsigned long *pfilepages,
			 unsigned long *pheadroom, unsigned long *pdirty,
			 unsigned long *pwriteback);

void mem_cgroup_track_foreign_dirty_slowpath(struct folio *folio,
					     struct bdi_writeback *wb);

static inline void mem_cgroup_track_foreign_dirty(struct folio *folio,
						  struct bdi_writeback *wb)
{
	struct mem_cgroup *memcg;

	if (mem_cgroup_disabled())
		return;

	memcg = folio_memcg(folio);
	if (unlikely(memcg && &memcg->css != wb->memcg_css))
		mem_cgroup_track_foreign_dirty_slowpath(folio, wb);
}

void mem_cgroup_flush_foreign(struct bdi_writeback *wb);

#else	/* CONFIG_CGROUP_WRITEBACK */

static inline struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb)
{
	return NULL;
}

static inline void mem_cgroup_wb_stats(struct bdi_writeback *wb,
				       unsigned long *pfilepages,
				       unsigned long *pheadroom,
				       unsigned long *pdirty,
				       unsigned long *pwriteback)
{
}

static inline void mem_cgroup_track_foreign_dirty(struct folio *folio,
						  struct bdi_writeback *wb)
{
}

static inline void mem_cgroup_flush_foreign(struct bdi_writeback *wb)
{
}

#endif	/* CONFIG_CGROUP_WRITEBACK */

struct sock;
bool mem_cgroup_charge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages,
			     gfp_t gfp_mask);
void mem_cgroup_uncharge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages);
#ifdef CONFIG_MEMCG
extern struct static_key_false memcg_sockets_enabled_key;
#define mem_cgroup_sockets_enabled static_branch_unlikely(&memcg_sockets_enabled_key)
void mem_cgroup_sk_alloc(struct sock *sk);
void mem_cgroup_sk_free(struct sock *sk);
static inline bool mem_cgroup_under_socket_pressure(struct mem_cgroup *memcg)
{
	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
		return !!memcg->tcpmem_pressure;
	do {
		if (time_before(jiffies, READ_ONCE(memcg->socket_pressure)))
			return true;
	} while ((memcg = parent_mem_cgroup(memcg)));
	return false;
}

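/*
 * Example (illustrative, mirroring the networking-side pattern in
 * include/net/sock.h): callers gate the check on
 * mem_cgroup_sockets_enabled and the socket's memcg:
 *
 *	if (mem_cgroup_sockets_enabled && sk->sk_memcg &&
 *	    mem_cgroup_under_socket_pressure(sk->sk_memcg))
 *		...back off, treat socket memory as under pressure...
 */
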
int alloc_shrinker_info(struct mem_cgroup *memcg);
void free_shrinker_info(struct mem_cgroup *memcg);
void set_shrinker_bit(struct mem_cgroup *memcg, int nid, int shrinker_id);
void reparent_shrinker_deferred(struct mem_cgroup *memcg);
#else
#define mem_cgroup_sockets_enabled 0
static inline void mem_cgroup_sk_alloc(struct sock *sk) { };
static inline void mem_cgroup_sk_free(struct sock *sk) { };
static inline bool mem_cgroup_under_socket_pressure(struct mem_cgroup *memcg)
{
	return false;
}

static inline void set_shrinker_bit(struct mem_cgroup *memcg,
				    int nid, int shrinker_id)
{
}
#endif

#ifdef CONFIG_MEMCG_KMEM
bool mem_cgroup_kmem_disabled(void);
int __memcg_kmem_charge_page(struct page *page, gfp_t gfp, int order);
void __memcg_kmem_uncharge_page(struct page *page, int order);

struct obj_cgroup *get_obj_cgroup_from_current(void);
struct obj_cgroup *get_obj_cgroup_from_folio(struct folio *folio);

int obj_cgroup_charge(struct obj_cgroup *objcg, gfp_t gfp, size_t size);
void obj_cgroup_uncharge(struct obj_cgroup *objcg, size_t size);

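/*
 * Example (illustrative, hypothetical caller): byte-granular charging for
 * a kernel object; obj_cgroup_charge() accounts @size bytes against the
 * objcg's memcg, and the later uncharge must pass the same size:
 *
 *	objcg = get_obj_cgroup_from_current();
 *	if (objcg && obj_cgroup_charge(objcg, GFP_KERNEL, size)) {
 *		obj_cgroup_put(objcg);
 *		return -ENOMEM;
 *	}
 *	...
 *	obj_cgroup_uncharge(objcg, size);
 *	obj_cgroup_put(objcg);
 */
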
extern struct static_key_false memcg_bpf_enabled_key;
static inline bool memcg_bpf_enabled(void)
{
	return static_branch_likely(&memcg_bpf_enabled_key);
}

extern struct static_key_false memcg_kmem_online_key;

static inline bool memcg_kmem_online(void)
{
	return static_branch_likely(&memcg_kmem_online_key);
}

static inline int memcg_kmem_charge_page(struct page *page, gfp_t gfp,
					 int order)
{
	if (memcg_kmem_online())
		return __memcg_kmem_charge_page(page, gfp, order);
	return 0;
}

static inline void memcg_kmem_uncharge_page(struct page *page, int order)
{
	if (memcg_kmem_online())
		__memcg_kmem_uncharge_page(page, order);
}

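/*
 * Note (editorial): these hooks are driven by __GFP_ACCOUNT; a plain
 * allocation such as:
 *
 *	page = alloc_pages(GFP_KERNEL_ACCOUNT, order);
 *
 * reaches memcg_kmem_charge_page() from inside the page allocator, so
 * most callers never invoke it directly.
 */
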
/*
 * A helper for accessing memcg's kmem_id, used for getting
 * corresponding LRU lists.
 */
static inline int memcg_kmem_id(struct mem_cgroup *memcg)
{
	return memcg ? memcg->kmemcg_id : -1;
}

struct mem_cgroup *mem_cgroup_from_obj(void *p);
struct mem_cgroup *mem_cgroup_from_slab_obj(void *p);

static inline void count_objcg_event(struct obj_cgroup *objcg,
				     enum vm_event_item idx)
{
	struct mem_cgroup *memcg;

	if (!memcg_kmem_online())
		return;

	rcu_read_lock();
	memcg = obj_cgroup_memcg(objcg);
	count_memcg_events(memcg, idx, 1);
	rcu_read_unlock();
}

#else
static inline bool mem_cgroup_kmem_disabled(void)
{
	return true;
}

static inline int memcg_kmem_charge_page(struct page *page, gfp_t gfp,
					 int order)
{
	return 0;
}

static inline void memcg_kmem_uncharge_page(struct page *page, int order)
{
}

static inline int __memcg_kmem_charge_page(struct page *page, gfp_t gfp,
					   int order)
{
	return 0;
}

static inline void __memcg_kmem_uncharge_page(struct page *page, int order)
{
}

static inline struct obj_cgroup *get_obj_cgroup_from_folio(struct folio *folio)
{
	return NULL;
}

static inline bool memcg_bpf_enabled(void)
{
	return false;
}

static inline bool memcg_kmem_online(void)
{
	return false;
}

static inline int memcg_kmem_id(struct mem_cgroup *memcg)
{
	return -1;
}

static inline struct mem_cgroup *mem_cgroup_from_obj(void *p)
{
	return NULL;
}

static inline struct mem_cgroup *mem_cgroup_from_slab_obj(void *p)
{
	return NULL;
}

static inline void count_objcg_event(struct obj_cgroup *objcg,
				     enum vm_event_item idx)
{
}

#endif /* CONFIG_MEMCG_KMEM */

#if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_ZSWAP)
bool obj_cgroup_may_zswap(struct obj_cgroup *objcg);
void obj_cgroup_charge_zswap(struct obj_cgroup *objcg, size_t size);
void obj_cgroup_uncharge_zswap(struct obj_cgroup *objcg, size_t size);
#else
static inline bool obj_cgroup_may_zswap(struct obj_cgroup *objcg)
{
	return true;
}
static inline void obj_cgroup_charge_zswap(struct obj_cgroup *objcg,
					   size_t size)
{
}
static inline void obj_cgroup_uncharge_zswap(struct obj_cgroup *objcg,
					     size_t size)
{
}
#endif

1906
8cdea7c0 1907#endif /* _LINUX_MEMCONTROL_H */