/* SPDX-License-Identifier: GPL-2.0-or-later */
/* memcontrol.h - Memory Controller
 *
 * Copyright IBM Corporation, 2007
 * Author Balbir Singh <balbir@linux.vnet.ibm.com>
 *
 * Copyright 2007 OpenVZ SWsoft Inc
 * Author: Pavel Emelianov <xemul@openvz.org>
 */

#ifndef _LINUX_MEMCONTROL_H
#define _LINUX_MEMCONTROL_H
#include <linux/cgroup.h>
#include <linux/vm_event_item.h>
#include <linux/hardirq.h>
#include <linux/jump_label.h>
#include <linux/page_counter.h>
#include <linux/vmpressure.h>
#include <linux/eventfd.h>
#include <linux/mm.h>
#include <linux/vmstat.h>
#include <linux/writeback.h>
#include <linux/page-flags.h>

struct mem_cgroup;
struct obj_cgroup;
struct page;
struct mm_struct;
struct kmem_cache;

/* Cgroup-specific page state, on top of universal node page state */
enum memcg_stat_item {
	MEMCG_SWAP = NR_VM_NODE_STAT_ITEMS,
	MEMCG_SOCK,
	MEMCG_PERCPU_B,
	MEMCG_VMALLOC,
	MEMCG_KMEM,
	MEMCG_ZSWAP_B,
	MEMCG_ZSWAPPED,
	MEMCG_NR_STAT,
};

enum memcg_memory_event {
	MEMCG_LOW,
	MEMCG_HIGH,
	MEMCG_MAX,
	MEMCG_OOM,
	MEMCG_OOM_KILL,
	MEMCG_OOM_GROUP_KILL,
	MEMCG_SWAP_HIGH,
	MEMCG_SWAP_MAX,
	MEMCG_SWAP_FAIL,
	MEMCG_NR_MEMORY_EVENTS,
};

struct mem_cgroup_reclaim_cookie {
	pg_data_t *pgdat;
	unsigned int generation;
};

#ifdef CONFIG_MEMCG

#define MEM_CGROUP_ID_SHIFT	16
#define MEM_CGROUP_ID_MAX	USHRT_MAX

struct mem_cgroup_id {
	int id;
	refcount_t ref;
};

/*
 * Per memcg event counter is incremented at every pagein/pageout. With THP,
 * it will be incremented by the number of pages. This counter is used
 * to trigger some periodic events. This is straightforward and better
 * than using jiffies etc. to handle periodic memcg event.
 */
enum mem_cgroup_events_target {
	MEM_CGROUP_TARGET_THRESH,
	MEM_CGROUP_TARGET_SOFTLIMIT,
	MEM_CGROUP_NTARGETS,
};

struct memcg_vmstats_percpu;
struct memcg_vmstats;

struct mem_cgroup_reclaim_iter {
	struct mem_cgroup *position;
	/* scan generation, increased every round-trip */
	unsigned int generation;
};

/*
 * Bitmap and deferred work of shrinker::id corresponding to memcg-aware
 * shrinkers, which have elements charged to this memcg.
 */
struct shrinker_info {
	struct rcu_head rcu;
	atomic_long_t *nr_deferred;
	unsigned long *map;
};

struct lruvec_stats_percpu {
	/* Local (CPU and cgroup) state */
	long state[NR_VM_NODE_STAT_ITEMS];

	/* Delta calculation for lockless upward propagation */
	long state_prev[NR_VM_NODE_STAT_ITEMS];
};

struct lruvec_stats {
	/* Aggregated (CPU and subtree) state */
	long state[NR_VM_NODE_STAT_ITEMS];

	/* Pending child counts during tree propagation */
	long state_pending[NR_VM_NODE_STAT_ITEMS];
};

/*
 * per-node information in memory controller.
 */
struct mem_cgroup_per_node {
	struct lruvec		lruvec;

	struct lruvec_stats_percpu __percpu	*lruvec_stats_percpu;
	struct lruvec_stats			lruvec_stats;

	unsigned long		lru_zone_size[MAX_NR_ZONES][NR_LRU_LISTS];

	struct mem_cgroup_reclaim_iter	iter;

	struct shrinker_info __rcu	*shrinker_info;

	struct rb_node		tree_node;	/* RB tree node */
	unsigned long		usage_in_excess;/* Set to the value by which */
						/* the soft limit is exceeded*/
	bool			on_tree;
	struct mem_cgroup	*memcg;		/* Back pointer, we cannot */
						/* use container_of	   */
};

struct mem_cgroup_threshold {
	struct eventfd_ctx *eventfd;
	unsigned long threshold;
};

/* For threshold */
struct mem_cgroup_threshold_ary {
	/* An array index points to threshold just below or equal to usage. */
	int current_threshold;
	/* Size of entries[] */
	unsigned int size;
	/* Array of thresholds */
	struct mem_cgroup_threshold entries[];
};

struct mem_cgroup_thresholds {
	/* Primary thresholds array */
	struct mem_cgroup_threshold_ary *primary;
	/*
	 * Spare threshold array.
	 * This is needed to make mem_cgroup_unregister_event() "never fail".
	 * It must be able to store at least primary->size - 1 entries.
	 */
	struct mem_cgroup_threshold_ary *spare;
};

/*
 * Remember four most recent foreign writebacks with dirty pages in this
 * cgroup.  Inode sharing is expected to be uncommon and, even if we miss
 * one in a given round, we're likely to catch it later if it keeps
 * foreign-dirtying, so a fairly low count should be enough.
 *
 * See mem_cgroup_track_foreign_dirty_slowpath() for details.
 */
#define MEMCG_CGWB_FRN_CNT	4

struct memcg_cgwb_frn {
	u64 bdi_id;			/* bdi->id of the foreign inode */
	int memcg_id;			/* memcg->css.id of foreign inode */
	u64 at;				/* jiffies_64 at the time of dirtying */
	struct wb_completion done;	/* tracks in-flight foreign writebacks */
};

/*
 * Bucket for arbitrarily byte-sized objects charged to a memory
 * cgroup. The bucket can be reparented in one piece when the cgroup
 * is destroyed, without having to round up the individual references
 * of all live memory objects in the wild.
 */
struct obj_cgroup {
	struct percpu_ref refcnt;
	struct mem_cgroup *memcg;
	atomic_t nr_charged_bytes;
	union {
		struct list_head list; /* protected by objcg_lock */
		struct rcu_head rcu;
	};
};

/*
 * The memory controller data structure. The memory controller controls both
 * page cache and RSS per cgroup. We would eventually like to provide
 * statistics based on the statistics developed by Rik van Riel for clock-pro,
 * to help the administrator determine what knobs to tune.
 */
struct mem_cgroup {
	struct cgroup_subsys_state css;

	/* Private memcg ID. Used to ID objects that outlive the cgroup */
	struct mem_cgroup_id id;

	/* Accounted resources */
	struct page_counter memory;		/* Both v1 & v2 */

	union {
		struct page_counter swap;	/* v2 only */
		struct page_counter memsw;	/* v1 only */
	};

	/* Legacy consumer-oriented counters */
	struct page_counter kmem;		/* v1 only */
	struct page_counter tcpmem;		/* v1 only */

	/* Range enforcement for interrupt charges */
	struct work_struct high_work;

#if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_ZSWAP)
	unsigned long zswap_max;
#endif

	unsigned long soft_limit;

	/* vmpressure notifications */
	struct vmpressure vmpressure;

	/*
	 * Should the OOM killer kill all tasks belonging to this cgroup
	 * when it would otherwise kill just one of them?
	 */
	bool oom_group;

	/* protected by memcg_oom_lock */
	bool		oom_lock;
	int		under_oom;

	int	swappiness;
	/* OOM-Killer disable */
	int		oom_kill_disable;

	/* memory.events and memory.events.local */
	struct cgroup_file events_file;
	struct cgroup_file events_local_file;

	/* handle for "memory.swap.events" */
	struct cgroup_file swap_events_file;

	/* protect arrays of thresholds */
	struct mutex thresholds_lock;

	/* thresholds for memory usage. RCU-protected */
	struct mem_cgroup_thresholds thresholds;

	/* thresholds for mem+swap usage. RCU-protected */
	struct mem_cgroup_thresholds memsw_thresholds;

	/* For oom notifier event fd */
	struct list_head oom_notify;

	/*
	 * Should we move charges of a task when a task is moved into this
	 * mem_cgroup? And what type of charges should we move?
	 */
	unsigned long move_charge_at_immigrate;
	/* taken only while moving_account > 0 */
	spinlock_t		move_lock;
	unsigned long		move_lock_flags;

	CACHELINE_PADDING(_pad1_);

	/* memory.stat */
	struct memcg_vmstats	*vmstats;

	/* memory.events */
	atomic_long_t		memory_events[MEMCG_NR_MEMORY_EVENTS];
	atomic_long_t		memory_events_local[MEMCG_NR_MEMORY_EVENTS];

	unsigned long		socket_pressure;

	/* Legacy tcp memory accounting */
	bool			tcpmem_active;
	int			tcpmem_pressure;

#ifdef CONFIG_MEMCG_KMEM
	int kmemcg_id;
	struct obj_cgroup __rcu *objcg;
	/* list of inherited objcgs, protected by objcg_lock */
	struct list_head objcg_list;
#endif

	CACHELINE_PADDING(_pad2_);

	/*
	 * set > 0 if pages under this cgroup are moving to other cgroup.
	 */
	atomic_t		moving_account;
	struct task_struct	*move_lock_task;

	struct memcg_vmstats_percpu __percpu *vmstats_percpu;

#ifdef CONFIG_CGROUP_WRITEBACK
	struct list_head cgwb_list;
	struct wb_domain cgwb_domain;
	struct memcg_cgwb_frn cgwb_frn[MEMCG_CGWB_FRN_CNT];
#endif

	/* List of events which userspace wants to receive */
	struct list_head event_list;
	spinlock_t event_list_lock;

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	struct deferred_split deferred_split_queue;
#endif

#ifdef CONFIG_LRU_GEN
	/* per-memcg mm_struct list */
	struct lru_gen_mm_list mm_list;
#endif

	struct mem_cgroup_per_node *nodeinfo[];
};

/*
 * size of first charge trial.
 * TODO: maybe necessary to use big numbers in big irons or make it dynamic
 * based on the workload.
 */
#define MEMCG_CHARGE_BATCH	64U

extern struct mem_cgroup *root_mem_cgroup;

enum page_memcg_data_flags {
	/* page->memcg_data is a pointer to an objcgs vector */
	MEMCG_DATA_OBJCGS = (1UL << 0),
	/* page has been accounted as a non-slab kernel page */
	MEMCG_DATA_KMEM = (1UL << 1),
	/* the next bit after the last actual flag */
	__NR_MEMCG_DATA_FLAGS = (1UL << 2),
};

#define MEMCG_DATA_FLAGS_MASK (__NR_MEMCG_DATA_FLAGS - 1)

static inline bool folio_memcg_kmem(struct folio *folio);

/*
 * After the initialization objcg->memcg is always pointing at
 * a valid memcg, but can be atomically swapped to the parent memcg.
 *
 * The caller must ensure that the returned memcg won't be released:
 * e.g. acquire the rcu_read_lock or css_set_lock.
 */
static inline struct mem_cgroup *obj_cgroup_memcg(struct obj_cgroup *objcg)
{
	return READ_ONCE(objcg->memcg);
}

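/*
 * Illustrative usage sketch (editorial addition, not kernel documentation):
 * a caller that only needs the memcg inside an RCU read-side critical
 * section can use obj_cgroup_memcg() directly, as count_objcg_event()
 * later in this header does:
 *
 *	rcu_read_lock();
 *	memcg = obj_cgroup_memcg(objcg);
 *	count_memcg_events(memcg, PGACTIVATE, 1);
 *	rcu_read_unlock();
 *
 * A reference that must outlive the RCU section has to be pinned with
 * css_tryget() instead; see get_mem_cgroup_from_objcg() below.
 */
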
/*
 * __folio_memcg - Get the memory cgroup associated with a non-kmem folio
 * @folio: Pointer to the folio.
 *
 * Returns a pointer to the memory cgroup associated with the folio,
 * or NULL. This function assumes that the folio is known to have a
 * proper memory cgroup pointer. It's not safe to call this function
 * against some types of folios, e.g. slab folios or ex-slab folios or
 * kmem folios.
 */
static inline struct mem_cgroup *__folio_memcg(struct folio *folio)
{
	unsigned long memcg_data = folio->memcg_data;

	VM_BUG_ON_FOLIO(folio_test_slab(folio), folio);
	VM_BUG_ON_FOLIO(memcg_data & MEMCG_DATA_OBJCGS, folio);
	VM_BUG_ON_FOLIO(memcg_data & MEMCG_DATA_KMEM, folio);

	return (struct mem_cgroup *)(memcg_data & ~MEMCG_DATA_FLAGS_MASK);
}

/*
 * __folio_objcg - get the object cgroup associated with a kmem folio.
 * @folio: Pointer to the folio.
 *
 * Returns a pointer to the object cgroup associated with the folio,
 * or NULL. This function assumes that the folio is known to have a
 * proper object cgroup pointer. It's not safe to call this function
 * against some types of folios, e.g. slab folios or ex-slab folios or
 * LRU folios.
 */
static inline struct obj_cgroup *__folio_objcg(struct folio *folio)
{
	unsigned long memcg_data = folio->memcg_data;

	VM_BUG_ON_FOLIO(folio_test_slab(folio), folio);
	VM_BUG_ON_FOLIO(memcg_data & MEMCG_DATA_OBJCGS, folio);
	VM_BUG_ON_FOLIO(!(memcg_data & MEMCG_DATA_KMEM), folio);

	return (struct obj_cgroup *)(memcg_data & ~MEMCG_DATA_FLAGS_MASK);
}

/*
 * folio_memcg - Get the memory cgroup associated with a folio.
 * @folio: Pointer to the folio.
 *
 * Returns a pointer to the memory cgroup associated with the folio,
 * or NULL. This function assumes that the folio is known to have a
 * proper memory cgroup pointer. It's not safe to call this function
 * against some types of folios, e.g. slab folios or ex-slab folios.
 *
 * For a non-kmem folio any of the following ensures folio and memcg binding
 * stability:
 *
 * - the folio lock
 * - LRU isolation
 * - lock_page_memcg()
 * - exclusive reference
 * - mem_cgroup_trylock_pages()
 *
 * For a kmem folio a caller should hold an rcu read lock to protect memcg
 * associated with a kmem folio from being released.
 */
static inline struct mem_cgroup *folio_memcg(struct folio *folio)
{
	if (folio_memcg_kmem(folio))
		return obj_cgroup_memcg(__folio_objcg(folio));
	return __folio_memcg(folio);
}

static inline struct mem_cgroup *page_memcg(struct page *page)
{
	return folio_memcg(page_folio(page));
}

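/*
 * Illustrative usage sketch (editorial addition, not kernel documentation):
 * with one of the stability conditions above held, e.g. the folio lock,
 * the returned pointer can be used without further pinning:
 *
 *	folio_lock(folio);
 *	memcg = folio_memcg(folio);
 *	if (memcg)
 *		count_memcg_events(memcg, PGLAZYFREE, folio_nr_pages(folio));
 *	folio_unlock(folio);
 */
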
/**
 * folio_memcg_rcu - Locklessly get the memory cgroup associated with a folio.
 * @folio: Pointer to the folio.
 *
 * This function assumes that the folio is known to have a
 * proper memory cgroup pointer. It's not safe to call this function
 * against some types of folios, e.g. slab folios or ex-slab folios.
 *
 * Return: A pointer to the memory cgroup associated with the folio,
 * or NULL.
 */
static inline struct mem_cgroup *folio_memcg_rcu(struct folio *folio)
{
	unsigned long memcg_data = READ_ONCE(folio->memcg_data);

	VM_BUG_ON_FOLIO(folio_test_slab(folio), folio);
	WARN_ON_ONCE(!rcu_read_lock_held());

	if (memcg_data & MEMCG_DATA_KMEM) {
		struct obj_cgroup *objcg;

		objcg = (void *)(memcg_data & ~MEMCG_DATA_FLAGS_MASK);
		return obj_cgroup_memcg(objcg);
	}

	return (struct mem_cgroup *)(memcg_data & ~MEMCG_DATA_FLAGS_MASK);
}

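/*
 * Illustrative usage sketch (editorial addition, not kernel documentation):
 * the lockless variant must run under rcu_read_lock(), and the result is
 * only valid within that critical section:
 *
 *	rcu_read_lock();
 *	memcg = folio_memcg_rcu(folio);
 *	if (memcg && mem_cgroup_online(memcg))
 *		...use memcg without sleeping...
 *	rcu_read_unlock();
 */
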
/*
 * folio_memcg_check - Get the memory cgroup associated with a folio.
 * @folio: Pointer to the folio.
 *
 * Returns a pointer to the memory cgroup associated with the folio,
 * or NULL. This function, unlike folio_memcg(), can take any folio
 * as an argument. It has to be used in cases when it's not known if a folio
 * has an associated memory cgroup pointer or an object cgroups vector or
 * an object cgroup.
 *
 * For a non-kmem folio any of the following ensures folio and memcg binding
 * stability:
 *
 * - the folio lock
 * - LRU isolation
 * - lock_folio_memcg()
 * - exclusive reference
 * - mem_cgroup_trylock_pages()
 *
 * For a kmem folio a caller should hold an rcu read lock to protect memcg
 * associated with a kmem folio from being released.
 */
static inline struct mem_cgroup *folio_memcg_check(struct folio *folio)
{
	/*
	 * Because folio->memcg_data might be changed asynchronously
	 * for slabs, READ_ONCE() should be used here.
	 */
	unsigned long memcg_data = READ_ONCE(folio->memcg_data);

	if (memcg_data & MEMCG_DATA_OBJCGS)
		return NULL;

	if (memcg_data & MEMCG_DATA_KMEM) {
		struct obj_cgroup *objcg;

		objcg = (void *)(memcg_data & ~MEMCG_DATA_FLAGS_MASK);
		return obj_cgroup_memcg(objcg);
	}

	return (struct mem_cgroup *)(memcg_data & ~MEMCG_DATA_FLAGS_MASK);
}

static inline struct mem_cgroup *page_memcg_check(struct page *page)
{
	if (PageTail(page))
		return NULL;
	return folio_memcg_check((struct folio *)page);
}

static inline struct mem_cgroup *get_mem_cgroup_from_objcg(struct obj_cgroup *objcg)
{
	struct mem_cgroup *memcg;

	rcu_read_lock();
retry:
	memcg = obj_cgroup_memcg(objcg);
	if (unlikely(!css_tryget(&memcg->css)))
		goto retry;
	rcu_read_unlock();

	return memcg;
}

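/*
 * Illustrative usage sketch (editorial addition, not kernel documentation):
 * the css reference obtained above pins the memcg across sleeping
 * operations and must be dropped with mem_cgroup_put() when done:
 *
 *	memcg = get_mem_cgroup_from_objcg(objcg);
 *	...sleeping operations are safe here...
 *	mem_cgroup_put(memcg);
 */
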
#ifdef CONFIG_MEMCG_KMEM
/*
 * folio_memcg_kmem - Check if the folio has the memcg_kmem flag set.
 * @folio: Pointer to the folio.
 *
 * Checks if the folio has the MemcgKmem flag set. The caller must ensure
 * that the folio has an associated memory cgroup. It's not safe to call
 * this function against some types of folios, e.g. slab folios.
 */
static inline bool folio_memcg_kmem(struct folio *folio)
{
	VM_BUG_ON_PGFLAGS(PageTail(&folio->page), &folio->page);
	VM_BUG_ON_FOLIO(folio->memcg_data & MEMCG_DATA_OBJCGS, folio);
	return folio->memcg_data & MEMCG_DATA_KMEM;
}

#else
static inline bool folio_memcg_kmem(struct folio *folio)
{
	return false;
}

#endif

static inline bool PageMemcgKmem(struct page *page)
{
	return folio_memcg_kmem(page_folio(page));
}

static inline bool mem_cgroup_is_root(struct mem_cgroup *memcg)
{
	return (memcg == root_mem_cgroup);
}

static inline bool mem_cgroup_disabled(void)
{
	return !cgroup_subsys_enabled(memory_cgrp_subsys);
}

static inline void mem_cgroup_protection(struct mem_cgroup *root,
					 struct mem_cgroup *memcg,
					 unsigned long *min,
					 unsigned long *low)
{
	*min = *low = 0;

	if (mem_cgroup_disabled())
		return;

	/*
	 * There is no reclaim protection applied to a targeted reclaim.
	 * We are special casing this specific case here because
	 * mem_cgroup_protected calculation is not robust enough to keep
	 * the protection invariant for calculated effective values for
	 * parallel reclaimers with different reclaim targets. This is
	 * especially a problem for tail memcgs (as they have pages on LRU)
	 * which would want to have effective values 0 for targeted reclaim
	 * but a different value for external reclaim.
	 *
	 * Example
	 * Let's have global and A's reclaim in parallel:
	 * |
	 * A (low=2G, usage = 3G, max = 3G, children_low_usage = 1.5G)
	 * |\
	 * | C (low = 1G, usage = 2.5G)
	 * B (low = 1G, usage = 0.5G)
	 *
	 * For the global reclaim
	 * A.elow = A.low
	 * B.elow = min(B.usage, B.low) because children_low_usage <= A.elow
	 * C.elow = min(C.usage, C.low)
	 *
	 * With the effective values resetting we have A reclaim
	 * A.elow = 0
	 * B.elow = B.low
	 * C.elow = C.low
	 *
	 * If the global reclaim races with A's reclaim then
	 * B.elow = C.elow = 0 because children_low_usage > A.elow
	 * is possible and reclaiming B would be violating the protection.
	 */
	if (root == memcg)
		return;

	*min = READ_ONCE(memcg->memory.emin);
	*low = READ_ONCE(memcg->memory.elow);
}

void mem_cgroup_calculate_protection(struct mem_cgroup *root,
				     struct mem_cgroup *memcg);

static inline bool mem_cgroup_unprotected(struct mem_cgroup *target,
					  struct mem_cgroup *memcg)
{
	/*
	 * The root memcg doesn't account charges, and doesn't support
	 * protection. The target memcg's protection is ignored, see
	 * mem_cgroup_calculate_protection() and mem_cgroup_protection().
	 */
	return mem_cgroup_disabled() || mem_cgroup_is_root(memcg) ||
		memcg == target;
}

static inline bool mem_cgroup_below_low(struct mem_cgroup *target,
					struct mem_cgroup *memcg)
{
	if (mem_cgroup_unprotected(target, memcg))
		return false;

	return READ_ONCE(memcg->memory.elow) >=
		page_counter_read(&memcg->memory);
}

static inline bool mem_cgroup_below_min(struct mem_cgroup *target,
					struct mem_cgroup *memcg)
{
	if (mem_cgroup_unprotected(target, memcg))
		return false;

	return READ_ONCE(memcg->memory.emin) >=
		page_counter_read(&memcg->memory);
}

int __mem_cgroup_charge(struct folio *folio, struct mm_struct *mm, gfp_t gfp);

/**
 * mem_cgroup_charge - Charge a newly allocated folio to a cgroup.
 * @folio: Folio to charge.
 * @mm: mm context of the allocating task.
 * @gfp: Reclaim mode.
 *
 * Try to charge @folio to the memcg that @mm belongs to, reclaiming
 * pages according to @gfp if necessary. If @mm is NULL, try to
 * charge to the active memcg.
 *
 * Do not use this for folios allocated for swapin.
 *
 * Return: 0 on success. Otherwise, an error code is returned.
 */
static inline int mem_cgroup_charge(struct folio *folio, struct mm_struct *mm,
				    gfp_t gfp)
{
	if (mem_cgroup_disabled())
		return 0;
	return __mem_cgroup_charge(folio, mm, gfp);
}

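/*
 * Illustrative usage sketch (editorial addition, not kernel documentation):
 * the usual lifecycle for a fresh pagecache folio is to charge it before
 * insertion and drop the reference on failure:
 *
 *	folio = filemap_alloc_folio(gfp, order);
 *	if (!folio)
 *		return -ENOMEM;
 *	if (mem_cgroup_charge(folio, mm, gfp)) {
 *		folio_put(folio);
 *		return -ENOMEM;
 *	}
 *	...add the folio to the page cache...
 */
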
int mem_cgroup_swapin_charge_folio(struct folio *folio, struct mm_struct *mm,
				   gfp_t gfp, swp_entry_t entry);
void mem_cgroup_swapin_uncharge_swap(swp_entry_t entry);

void __mem_cgroup_uncharge(struct folio *folio);

/**
 * mem_cgroup_uncharge - Uncharge a folio.
 * @folio: Folio to uncharge.
 *
 * Uncharge a folio previously charged with mem_cgroup_charge().
 */
static inline void mem_cgroup_uncharge(struct folio *folio)
{
	if (mem_cgroup_disabled())
		return;
	__mem_cgroup_uncharge(folio);
}

void __mem_cgroup_uncharge_list(struct list_head *page_list);
static inline void mem_cgroup_uncharge_list(struct list_head *page_list)
{
	if (mem_cgroup_disabled())
		return;
	__mem_cgroup_uncharge_list(page_list);
}

void mem_cgroup_migrate(struct folio *old, struct folio *new);

/**
 * mem_cgroup_lruvec - get the lru list vector for a memcg & node
 * @memcg: memcg of the wanted lruvec
 * @pgdat: pglist_data
 *
 * Returns the lru list vector holding pages for a given @memcg &
 * @pgdat combination. This can be the node lruvec, if the memory
 * controller is disabled.
 */
static inline struct lruvec *mem_cgroup_lruvec(struct mem_cgroup *memcg,
					       struct pglist_data *pgdat)
{
	struct mem_cgroup_per_node *mz;
	struct lruvec *lruvec;

	if (mem_cgroup_disabled()) {
		lruvec = &pgdat->__lruvec;
		goto out;
	}

	if (!memcg)
		memcg = root_mem_cgroup;

	mz = memcg->nodeinfo[pgdat->node_id];
	lruvec = &mz->lruvec;
out:
	/*
	 * Since a node can be onlined after the mem_cgroup was created,
	 * we have to be prepared to initialize lruvec->pgdat here;
	 * and if offlined then reonlined, we need to reinitialize it.
	 */
	if (unlikely(lruvec->pgdat != pgdat))
		lruvec->pgdat = pgdat;
	return lruvec;
}

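/*
 * Illustrative usage sketch (editorial addition, not kernel documentation):
 * reclaim-style code resolves a (memcg, node) pair to its lruvec and can
 * then read per-lruvec statistics:
 *
 *	struct lruvec *lruvec = mem_cgroup_lruvec(memcg, NODE_DATA(nid));
 *	unsigned long nr = lruvec_page_state(lruvec, NR_ACTIVE_ANON);
 */
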
/**
 * folio_lruvec - return lruvec for isolating/putting an LRU folio
 * @folio: Pointer to the folio.
 *
 * This function relies on folio->mem_cgroup being stable.
 */
static inline struct lruvec *folio_lruvec(struct folio *folio)
{
	struct mem_cgroup *memcg = folio_memcg(folio);

	VM_WARN_ON_ONCE_FOLIO(!memcg && !mem_cgroup_disabled(), folio);
	return mem_cgroup_lruvec(memcg, folio_pgdat(folio));
}

struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p);

struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm);

struct lruvec *folio_lruvec_lock(struct folio *folio);
struct lruvec *folio_lruvec_lock_irq(struct folio *folio);
struct lruvec *folio_lruvec_lock_irqsave(struct folio *folio,
					 unsigned long *flags);

#ifdef CONFIG_DEBUG_VM
void lruvec_memcg_debug(struct lruvec *lruvec, struct folio *folio);
#else
static inline
void lruvec_memcg_debug(struct lruvec *lruvec, struct folio *folio)
{
}
#endif

static inline
struct mem_cgroup *mem_cgroup_from_css(struct cgroup_subsys_state *css)
{
	return css ? container_of(css, struct mem_cgroup, css) : NULL;
}

static inline bool obj_cgroup_tryget(struct obj_cgroup *objcg)
{
	return percpu_ref_tryget(&objcg->refcnt);
}

static inline void obj_cgroup_get(struct obj_cgroup *objcg)
{
	percpu_ref_get(&objcg->refcnt);
}

static inline void obj_cgroup_get_many(struct obj_cgroup *objcg,
				       unsigned long nr)
{
	percpu_ref_get_many(&objcg->refcnt, nr);
}

static inline void obj_cgroup_put(struct obj_cgroup *objcg)
{
	percpu_ref_put(&objcg->refcnt);
}

static inline bool mem_cgroup_tryget(struct mem_cgroup *memcg)
{
	return !memcg || css_tryget(&memcg->css);
}

static inline void mem_cgroup_put(struct mem_cgroup *memcg)
{
	if (memcg)
		css_put(&memcg->css);
}

#define mem_cgroup_from_counter(counter, member)	\
	container_of(counter, struct mem_cgroup, member)

struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *,
				   struct mem_cgroup *,
				   struct mem_cgroup_reclaim_cookie *);
void mem_cgroup_iter_break(struct mem_cgroup *, struct mem_cgroup *);
int mem_cgroup_scan_tasks(struct mem_cgroup *,
			  int (*)(struct task_struct *, void *), void *);

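/*
 * Illustrative usage sketch (editorial addition, not kernel documentation):
 * mem_cgroup_iter() is typically driven in a pre-order walk of @root's
 * subtree, with mem_cgroup_iter_break() used on early exit:
 *
 *	struct mem_cgroup *memcg;
 *
 *	for (memcg = mem_cgroup_iter(root, NULL, NULL); memcg;
 *	     memcg = mem_cgroup_iter(root, memcg, NULL)) {
 *		if (should_stop(memcg)) {	// hypothetical predicate
 *			mem_cgroup_iter_break(root, memcg);
 *			break;
 *		}
 *		...visit memcg...
 *	}
 */
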
static inline unsigned short mem_cgroup_id(struct mem_cgroup *memcg)
{
	if (mem_cgroup_disabled())
		return 0;

	return memcg->id.id;
}
struct mem_cgroup *mem_cgroup_from_id(unsigned short id);

#ifdef CONFIG_SHRINKER_DEBUG
static inline unsigned long mem_cgroup_ino(struct mem_cgroup *memcg)
{
	return memcg ? cgroup_ino(memcg->css.cgroup) : 0;
}

struct mem_cgroup *mem_cgroup_get_from_ino(unsigned long ino);
#endif

static inline struct mem_cgroup *mem_cgroup_from_seq(struct seq_file *m)
{
	return mem_cgroup_from_css(seq_css(m));
}

static inline struct mem_cgroup *lruvec_memcg(struct lruvec *lruvec)
{
	struct mem_cgroup_per_node *mz;

	if (mem_cgroup_disabled())
		return NULL;

	mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
	return mz->memcg;
}

/**
 * parent_mem_cgroup - find the accounting parent of a memcg
 * @memcg: memcg whose parent to find
 *
 * Returns the parent memcg, or NULL if this is the root or the memory
 * controller is in legacy no-hierarchy mode.
 */
static inline struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg)
{
	return mem_cgroup_from_css(memcg->css.parent);
}

static inline bool mem_cgroup_is_descendant(struct mem_cgroup *memcg,
					    struct mem_cgroup *root)
{
	if (root == memcg)
		return true;
	return cgroup_is_descendant(memcg->css.cgroup, root->css.cgroup);
}

static inline bool mm_match_cgroup(struct mm_struct *mm,
				   struct mem_cgroup *memcg)
{
	struct mem_cgroup *task_memcg;
	bool match = false;

	rcu_read_lock();
	task_memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
	if (task_memcg)
		match = mem_cgroup_is_descendant(task_memcg, memcg);
	rcu_read_unlock();
	return match;
}

struct cgroup_subsys_state *mem_cgroup_css_from_folio(struct folio *folio);
ino_t page_cgroup_ino(struct page *page);

static inline bool mem_cgroup_online(struct mem_cgroup *memcg)
{
	if (mem_cgroup_disabled())
		return true;
	return !!(memcg->css.flags & CSS_ONLINE);
}

void mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru,
				int zid, int nr_pages);

static inline
unsigned long mem_cgroup_get_zone_lru_size(struct lruvec *lruvec,
					   enum lru_list lru, int zone_idx)
{
	struct mem_cgroup_per_node *mz;

	mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
	return READ_ONCE(mz->lru_zone_size[zone_idx][lru]);
}

void mem_cgroup_handle_over_high(void);

unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg);

unsigned long mem_cgroup_size(struct mem_cgroup *memcg);

void mem_cgroup_print_oom_context(struct mem_cgroup *memcg,
				  struct task_struct *p);

void mem_cgroup_print_oom_meminfo(struct mem_cgroup *memcg);

static inline void mem_cgroup_enter_user_fault(void)
{
	WARN_ON(current->in_user_fault);
	current->in_user_fault = 1;
}

static inline void mem_cgroup_exit_user_fault(void)
{
	WARN_ON(!current->in_user_fault);
	current->in_user_fault = 0;
}

static inline bool task_in_memcg_oom(struct task_struct *p)
{
	return p->memcg_in_oom;
}

bool mem_cgroup_oom_synchronize(bool wait);
struct mem_cgroup *mem_cgroup_get_oom_group(struct task_struct *victim,
					    struct mem_cgroup *oom_domain);
void mem_cgroup_print_oom_group(struct mem_cgroup *memcg);

void folio_memcg_lock(struct folio *folio);
void folio_memcg_unlock(struct folio *folio);
void lock_page_memcg(struct page *page);
void unlock_page_memcg(struct page *page);

void __mod_memcg_state(struct mem_cgroup *memcg, int idx, int val);

/* try to stabilize folio_memcg() for all the pages in a memcg */
static inline bool mem_cgroup_trylock_pages(struct mem_cgroup *memcg)
{
	rcu_read_lock();

	if (mem_cgroup_disabled() || !atomic_read(&memcg->moving_account))
		return true;

	rcu_read_unlock();
	return false;
}

static inline void mem_cgroup_unlock_pages(void)
{
	rcu_read_unlock();
}

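/*
 * Illustrative usage sketch (editorial addition, not kernel documentation):
 * on success the pair keeps the RCU read lock held, so the two calls must
 * bracket a non-sleeping region:
 *
 *	if (!mem_cgroup_trylock_pages(memcg))
 *		return;		// pages are moving, try again later
 *	...walk pages; folio_memcg() results stay stable...
 *	mem_cgroup_unlock_pages();
 */
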
/* idx can be of type enum memcg_stat_item or node_stat_item */
static inline void mod_memcg_state(struct mem_cgroup *memcg,
				   int idx, int val)
{
	unsigned long flags;

	local_irq_save(flags);
	__mod_memcg_state(memcg, idx, val);
	local_irq_restore(flags);
}

static inline void mod_memcg_page_state(struct page *page,
					int idx, int val)
{
	struct mem_cgroup *memcg;

	if (mem_cgroup_disabled())
		return;

	rcu_read_lock();
	memcg = page_memcg(page);
	if (memcg)
		mod_memcg_state(memcg, idx, val);
	rcu_read_unlock();
}

unsigned long memcg_page_state(struct mem_cgroup *memcg, int idx);

static inline unsigned long lruvec_page_state(struct lruvec *lruvec,
					      enum node_stat_item idx)
{
	struct mem_cgroup_per_node *pn;
	long x;

	if (mem_cgroup_disabled())
		return node_page_state(lruvec_pgdat(lruvec), idx);

	pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
	x = READ_ONCE(pn->lruvec_stats.state[idx]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}

static inline unsigned long lruvec_page_state_local(struct lruvec *lruvec,
						    enum node_stat_item idx)
{
	struct mem_cgroup_per_node *pn;
	long x = 0;
	int cpu;

	if (mem_cgroup_disabled())
		return node_page_state(lruvec_pgdat(lruvec), idx);

	pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
	for_each_possible_cpu(cpu)
		x += per_cpu(pn->lruvec_stats_percpu->state[idx], cpu);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}

void mem_cgroup_flush_stats(void);
void mem_cgroup_flush_stats_delayed(void);

void __mod_memcg_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
			      int val);
void __mod_lruvec_kmem_state(void *p, enum node_stat_item idx, int val);

static inline void mod_lruvec_kmem_state(void *p, enum node_stat_item idx,
					 int val)
{
	unsigned long flags;

	local_irq_save(flags);
	__mod_lruvec_kmem_state(p, idx, val);
	local_irq_restore(flags);
}

static inline void mod_memcg_lruvec_state(struct lruvec *lruvec,
					  enum node_stat_item idx, int val)
{
	unsigned long flags;

	local_irq_save(flags);
	__mod_memcg_lruvec_state(lruvec, idx, val);
	local_irq_restore(flags);
}

void __count_memcg_events(struct mem_cgroup *memcg, enum vm_event_item idx,
			  unsigned long count);

static inline void count_memcg_events(struct mem_cgroup *memcg,
				      enum vm_event_item idx,
				      unsigned long count)
{
	unsigned long flags;

	local_irq_save(flags);
	__count_memcg_events(memcg, idx, count);
	local_irq_restore(flags);
}

static inline void count_memcg_page_event(struct page *page,
					  enum vm_event_item idx)
{
	struct mem_cgroup *memcg = page_memcg(page);

	if (memcg)
		count_memcg_events(memcg, idx, 1);
}

static inline void count_memcg_folio_events(struct folio *folio,
		enum vm_event_item idx, unsigned long nr)
{
	struct mem_cgroup *memcg = folio_memcg(folio);

	if (memcg)
		count_memcg_events(memcg, idx, nr);
}

static inline void count_memcg_event_mm(struct mm_struct *mm,
					enum vm_event_item idx)
{
	struct mem_cgroup *memcg;

	if (mem_cgroup_disabled())
		return;

	rcu_read_lock();
	memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
	if (likely(memcg))
		count_memcg_events(memcg, idx, 1);
	rcu_read_unlock();
}

static inline void memcg_memory_event(struct mem_cgroup *memcg,
				      enum memcg_memory_event event)
{
	bool swap_event = event == MEMCG_SWAP_HIGH || event == MEMCG_SWAP_MAX ||
			  event == MEMCG_SWAP_FAIL;

	atomic_long_inc(&memcg->memory_events_local[event]);
	if (!swap_event)
		cgroup_file_notify(&memcg->events_local_file);

	do {
		atomic_long_inc(&memcg->memory_events[event]);
		if (swap_event)
			cgroup_file_notify(&memcg->swap_events_file);
		else
			cgroup_file_notify(&memcg->events_file);

		if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
			break;
		if (cgrp_dfl_root.flags & CGRP_ROOT_MEMORY_LOCAL_EVENTS)
			break;
	} while ((memcg = parent_mem_cgroup(memcg)) &&
		 !mem_cgroup_is_root(memcg));
}

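/*
 * Illustrative usage sketch (editorial addition, not kernel documentation):
 * callers account a memory event against a memcg and the helper above
 * propagates it up the hierarchy and notifies memory.events watchers:
 *
 *	memcg_memory_event(memcg, MEMCG_OOM_KILL);
 */
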
static inline void memcg_memory_event_mm(struct mm_struct *mm,
					 enum memcg_memory_event event)
{
	struct mem_cgroup *memcg;

	if (mem_cgroup_disabled())
		return;

	rcu_read_lock();
	memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
	if (likely(memcg))
		memcg_memory_event(memcg, event);
	rcu_read_unlock();
}

void split_page_memcg(struct page *head, unsigned int nr);

unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order,
					    gfp_t gfp_mask,
					    unsigned long *total_scanned);

#else /* CONFIG_MEMCG */

#define MEM_CGROUP_ID_SHIFT	0
#define MEM_CGROUP_ID_MAX	0

static inline struct mem_cgroup *folio_memcg(struct folio *folio)
{
	return NULL;
}

static inline struct mem_cgroup *page_memcg(struct page *page)
{
	return NULL;
}

static inline struct mem_cgroup *folio_memcg_rcu(struct folio *folio)
{
	WARN_ON_ONCE(!rcu_read_lock_held());
	return NULL;
}

static inline struct mem_cgroup *folio_memcg_check(struct folio *folio)
{
	return NULL;
}

static inline struct mem_cgroup *page_memcg_check(struct page *page)
{
	return NULL;
}

static inline bool folio_memcg_kmem(struct folio *folio)
{
	return false;
}

static inline bool PageMemcgKmem(struct page *page)
{
	return false;
}

static inline bool mem_cgroup_is_root(struct mem_cgroup *memcg)
{
	return true;
}

static inline bool mem_cgroup_disabled(void)
{
	return true;
}

static inline void memcg_memory_event(struct mem_cgroup *memcg,
				      enum memcg_memory_event event)
{
}

static inline void memcg_memory_event_mm(struct mm_struct *mm,
					 enum memcg_memory_event event)
{
}

static inline void mem_cgroup_protection(struct mem_cgroup *root,
					 struct mem_cgroup *memcg,
					 unsigned long *min,
					 unsigned long *low)
{
	*min = *low = 0;
}

static inline void mem_cgroup_calculate_protection(struct mem_cgroup *root,
						   struct mem_cgroup *memcg)
{
}

static inline bool mem_cgroup_unprotected(struct mem_cgroup *target,
					  struct mem_cgroup *memcg)
{
	return true;
}
static inline bool mem_cgroup_below_low(struct mem_cgroup *target,
					struct mem_cgroup *memcg)
{
	return false;
}

static inline bool mem_cgroup_below_min(struct mem_cgroup *target,
					struct mem_cgroup *memcg)
{
	return false;
}

static inline int mem_cgroup_charge(struct folio *folio,
		struct mm_struct *mm, gfp_t gfp)
{
	return 0;
}

static inline int mem_cgroup_swapin_charge_folio(struct folio *folio,
			struct mm_struct *mm, gfp_t gfp, swp_entry_t entry)
{
	return 0;
}

static inline void mem_cgroup_swapin_uncharge_swap(swp_entry_t entry)
{
}

static inline void mem_cgroup_uncharge(struct folio *folio)
{
}

static inline void mem_cgroup_uncharge_list(struct list_head *page_list)
{
}

static inline void mem_cgroup_migrate(struct folio *old, struct folio *new)
{
}

static inline struct lruvec *mem_cgroup_lruvec(struct mem_cgroup *memcg,
					       struct pglist_data *pgdat)
{
	return &pgdat->__lruvec;
}

static inline struct lruvec *folio_lruvec(struct folio *folio)
{
	struct pglist_data *pgdat = folio_pgdat(folio);
	return &pgdat->__lruvec;
}

static inline
void lruvec_memcg_debug(struct lruvec *lruvec, struct folio *folio)
{
}

static inline struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg)
{
	return NULL;
}

static inline bool mm_match_cgroup(struct mm_struct *mm,
		struct mem_cgroup *memcg)
{
	return true;
}

static inline struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm)
{
	return NULL;
}

static inline
struct mem_cgroup *mem_cgroup_from_css(struct cgroup_subsys_state *css)
{
	return NULL;
}

static inline void obj_cgroup_put(struct obj_cgroup *objcg)
{
}

static inline bool mem_cgroup_tryget(struct mem_cgroup *memcg)
{
	return true;
}

static inline void mem_cgroup_put(struct mem_cgroup *memcg)
{
}

static inline struct lruvec *folio_lruvec_lock(struct folio *folio)
{
	struct pglist_data *pgdat = folio_pgdat(folio);

	spin_lock(&pgdat->__lruvec.lru_lock);
	return &pgdat->__lruvec;
}

static inline struct lruvec *folio_lruvec_lock_irq(struct folio *folio)
{
	struct pglist_data *pgdat = folio_pgdat(folio);

	spin_lock_irq(&pgdat->__lruvec.lru_lock);
	return &pgdat->__lruvec;
}

static inline struct lruvec *folio_lruvec_lock_irqsave(struct folio *folio,
		unsigned long *flagsp)
{
	struct pglist_data *pgdat = folio_pgdat(folio);

	spin_lock_irqsave(&pgdat->__lruvec.lru_lock, *flagsp);
	return &pgdat->__lruvec;
}

static inline struct mem_cgroup *
mem_cgroup_iter(struct mem_cgroup *root,
		struct mem_cgroup *prev,
		struct mem_cgroup_reclaim_cookie *reclaim)
{
	return NULL;
}

static inline void mem_cgroup_iter_break(struct mem_cgroup *root,
					 struct mem_cgroup *prev)
{
}

static inline int mem_cgroup_scan_tasks(struct mem_cgroup *memcg,
		int (*fn)(struct task_struct *, void *), void *arg)
{
	return 0;
}

static inline unsigned short mem_cgroup_id(struct mem_cgroup *memcg)
{
	return 0;
}

static inline struct mem_cgroup *mem_cgroup_from_id(unsigned short id)
{
	WARN_ON_ONCE(id);
	/* XXX: This should always return root_mem_cgroup */
	return NULL;
}

#ifdef CONFIG_SHRINKER_DEBUG
static inline unsigned long mem_cgroup_ino(struct mem_cgroup *memcg)
{
	return 0;
}

static inline struct mem_cgroup *mem_cgroup_get_from_ino(unsigned long ino)
{
	return NULL;
}
#endif

static inline struct mem_cgroup *mem_cgroup_from_seq(struct seq_file *m)
{
	return NULL;
}

static inline struct mem_cgroup *lruvec_memcg(struct lruvec *lruvec)
{
	return NULL;
}

static inline bool mem_cgroup_online(struct mem_cgroup *memcg)
{
	return true;
}

static inline
unsigned long mem_cgroup_get_zone_lru_size(struct lruvec *lruvec,
		enum lru_list lru, int zone_idx)
{
	return 0;
}

static inline unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg)
{
	return 0;
}

static inline unsigned long mem_cgroup_size(struct mem_cgroup *memcg)
{
	return 0;
}

static inline void
mem_cgroup_print_oom_context(struct mem_cgroup *memcg, struct task_struct *p)
{
}

static inline void
mem_cgroup_print_oom_meminfo(struct mem_cgroup *memcg)
{
}

static inline void lock_page_memcg(struct page *page)
{
}

static inline void unlock_page_memcg(struct page *page)
{
}

static inline void folio_memcg_lock(struct folio *folio)
{
}

static inline void folio_memcg_unlock(struct folio *folio)
{
}

static inline bool mem_cgroup_trylock_pages(struct mem_cgroup *memcg)
{
	/* to match folio_memcg_rcu() */
	rcu_read_lock();
	return true;
}

static inline void mem_cgroup_unlock_pages(void)
{
	rcu_read_unlock();
}

static inline void mem_cgroup_handle_over_high(void)
{
}

static inline void mem_cgroup_enter_user_fault(void)
{
}

static inline void mem_cgroup_exit_user_fault(void)
{
}

static inline bool task_in_memcg_oom(struct task_struct *p)
{
	return false;
}

static inline bool mem_cgroup_oom_synchronize(bool wait)
{
	return false;
}

static inline struct mem_cgroup *mem_cgroup_get_oom_group(
	struct task_struct *victim, struct mem_cgroup *oom_domain)
{
	return NULL;
}

static inline void mem_cgroup_print_oom_group(struct mem_cgroup *memcg)
{
}

static inline void __mod_memcg_state(struct mem_cgroup *memcg,
				     int idx,
				     int nr)
{
}

static inline void mod_memcg_state(struct mem_cgroup *memcg,
				   int idx,
				   int nr)
{
}

static inline void mod_memcg_page_state(struct page *page,
					int idx, int val)
{
}

static inline unsigned long memcg_page_state(struct mem_cgroup *memcg, int idx)
{
	return 0;
}

static inline unsigned long lruvec_page_state(struct lruvec *lruvec,
					      enum node_stat_item idx)
{
	return node_page_state(lruvec_pgdat(lruvec), idx);
}

static inline unsigned long lruvec_page_state_local(struct lruvec *lruvec,
						    enum node_stat_item idx)
{
	return node_page_state(lruvec_pgdat(lruvec), idx);
}

static inline void mem_cgroup_flush_stats(void)
{
}

static inline void mem_cgroup_flush_stats_delayed(void)
{
}

static inline void __mod_memcg_lruvec_state(struct lruvec *lruvec,
					    enum node_stat_item idx, int val)
{
}

static inline void __mod_lruvec_kmem_state(void *p, enum node_stat_item idx,
					   int val)
{
	struct page *page = virt_to_head_page(p);

	__mod_node_page_state(page_pgdat(page), idx, val);
}

static inline void mod_lruvec_kmem_state(void *p, enum node_stat_item idx,
					 int val)
{
	struct page *page = virt_to_head_page(p);

	mod_node_page_state(page_pgdat(page), idx, val);
}

static inline void count_memcg_events(struct mem_cgroup *memcg,
				      enum vm_event_item idx,
				      unsigned long count)
{
}

static inline void __count_memcg_events(struct mem_cgroup *memcg,
					enum vm_event_item idx,
					unsigned long count)
{
}

static inline void count_memcg_page_event(struct page *page,
					  int idx)
{
}

static inline void count_memcg_folio_events(struct folio *folio,
		enum vm_event_item idx, unsigned long nr)
{
}

static inline
void count_memcg_event_mm(struct mm_struct *mm, enum vm_event_item idx)
{
}

static inline void split_page_memcg(struct page *head, unsigned int nr)
{
}

static inline
unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order,
					    gfp_t gfp_mask,
					    unsigned long *total_scanned)
{
	return 0;
}
#endif /* CONFIG_MEMCG */

static inline void __inc_lruvec_kmem_state(void *p, enum node_stat_item idx)
{
	__mod_lruvec_kmem_state(p, idx, 1);
}

static inline void __dec_lruvec_kmem_state(void *p, enum node_stat_item idx)
{
	__mod_lruvec_kmem_state(p, idx, -1);
}

static inline struct lruvec *parent_lruvec(struct lruvec *lruvec)
{
	struct mem_cgroup *memcg;

	memcg = lruvec_memcg(lruvec);
	if (!memcg)
		return NULL;
	memcg = parent_mem_cgroup(memcg);
	if (!memcg)
		return NULL;
	return mem_cgroup_lruvec(memcg, lruvec_pgdat(lruvec));
}

static inline void unlock_page_lruvec(struct lruvec *lruvec)
{
	spin_unlock(&lruvec->lru_lock);
}

static inline void unlock_page_lruvec_irq(struct lruvec *lruvec)
{
	spin_unlock_irq(&lruvec->lru_lock);
}

static inline void unlock_page_lruvec_irqrestore(struct lruvec *lruvec,
		unsigned long flags)
{
	spin_unlock_irqrestore(&lruvec->lru_lock, flags);
}

/* Test requires a stable page->memcg binding, see page_memcg() */
static inline bool folio_matches_lruvec(struct folio *folio,
		struct lruvec *lruvec)
{
	return lruvec_pgdat(lruvec) == folio_pgdat(folio) &&
	       lruvec_memcg(lruvec) == folio_memcg(folio);
}

/* Don't lock again iff the folio's lruvec is already locked */
static inline struct lruvec *folio_lruvec_relock_irq(struct folio *folio,
		struct lruvec *locked_lruvec)
{
	if (locked_lruvec) {
		if (folio_matches_lruvec(folio, locked_lruvec))
			return locked_lruvec;

		unlock_page_lruvec_irq(locked_lruvec);
	}

	return folio_lruvec_lock_irq(folio);
}

/* Don't lock again iff the folio's lruvec is already locked */
static inline struct lruvec *folio_lruvec_relock_irqsave(struct folio *folio,
		struct lruvec *locked_lruvec, unsigned long *flags)
{
	if (locked_lruvec) {
		if (folio_matches_lruvec(folio, locked_lruvec))
			return locked_lruvec;

		unlock_page_lruvec_irqrestore(locked_lruvec, *flags);
	}

	return folio_lruvec_lock_irqsave(folio, flags);
}

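/*
 * Illustrative usage sketch (editorial addition, not kernel documentation):
 * batch loops over mixed folios keep the previous lruvec locked for as
 * long as consecutive folios share it, in the style of mm/swap.c:
 *
 *	struct lruvec *lruvec = NULL;
 *	unsigned long flags;
 *
 *	list_for_each_entry(folio, list, lru) {
 *		lruvec = folio_lruvec_relock_irqsave(folio, lruvec, &flags);
 *		...operate on folio under lruvec->lru_lock...
 *	}
 *	if (lruvec)
 *		unlock_page_lruvec_irqrestore(lruvec, flags);
 */
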
#ifdef CONFIG_CGROUP_WRITEBACK

struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb);
void mem_cgroup_wb_stats(struct bdi_writeback *wb, unsigned long *pfilepages,
			 unsigned long *pheadroom, unsigned long *pdirty,
			 unsigned long *pwriteback);

void mem_cgroup_track_foreign_dirty_slowpath(struct folio *folio,
					     struct bdi_writeback *wb);

static inline void mem_cgroup_track_foreign_dirty(struct folio *folio,
						  struct bdi_writeback *wb)
{
	struct mem_cgroup *memcg;

	if (mem_cgroup_disabled())
		return;

	memcg = folio_memcg(folio);
	if (unlikely(memcg && &memcg->css != wb->memcg_css))
		mem_cgroup_track_foreign_dirty_slowpath(folio, wb);
}

void mem_cgroup_flush_foreign(struct bdi_writeback *wb);

#else	/* CONFIG_CGROUP_WRITEBACK */

static inline struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb)
{
	return NULL;
}

static inline void mem_cgroup_wb_stats(struct bdi_writeback *wb,
				       unsigned long *pfilepages,
				       unsigned long *pheadroom,
				       unsigned long *pdirty,
				       unsigned long *pwriteback)
{
}

static inline void mem_cgroup_track_foreign_dirty(struct folio *folio,
						  struct bdi_writeback *wb)
{
}

static inline void mem_cgroup_flush_foreign(struct bdi_writeback *wb)
{
}

#endif	/* CONFIG_CGROUP_WRITEBACK */

struct sock;
bool mem_cgroup_charge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages,
			     gfp_t gfp_mask);
void mem_cgroup_uncharge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages);
#ifdef CONFIG_MEMCG
extern struct static_key_false memcg_sockets_enabled_key;
#define mem_cgroup_sockets_enabled static_branch_unlikely(&memcg_sockets_enabled_key)
void mem_cgroup_sk_alloc(struct sock *sk);
void mem_cgroup_sk_free(struct sock *sk);
static inline bool mem_cgroup_under_socket_pressure(struct mem_cgroup *memcg)
{
	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && memcg->tcpmem_pressure)
		return true;
	do {
		if (time_before(jiffies, READ_ONCE(memcg->socket_pressure)))
			return true;
	} while ((memcg = parent_mem_cgroup(memcg)));
	return false;
}

int alloc_shrinker_info(struct mem_cgroup *memcg);
void free_shrinker_info(struct mem_cgroup *memcg);
void set_shrinker_bit(struct mem_cgroup *memcg, int nid, int shrinker_id);
void reparent_shrinker_deferred(struct mem_cgroup *memcg);
#else
#define mem_cgroup_sockets_enabled 0
static inline void mem_cgroup_sk_alloc(struct sock *sk) { }
static inline void mem_cgroup_sk_free(struct sock *sk) { }
static inline bool mem_cgroup_under_socket_pressure(struct mem_cgroup *memcg)
{
	return false;
}

static inline void set_shrinker_bit(struct mem_cgroup *memcg,
				    int nid, int shrinker_id)
{
}
#endif

#ifdef CONFIG_MEMCG_KMEM
bool mem_cgroup_kmem_disabled(void);
int __memcg_kmem_charge_page(struct page *page, gfp_t gfp, int order);
void __memcg_kmem_uncharge_page(struct page *page, int order);

struct obj_cgroup *get_obj_cgroup_from_current(void);
struct obj_cgroup *get_obj_cgroup_from_page(struct page *page);

int obj_cgroup_charge(struct obj_cgroup *objcg, gfp_t gfp, size_t size);
void obj_cgroup_uncharge(struct obj_cgroup *objcg, size_t size);

extern struct static_key_false memcg_kmem_enabled_key;

static inline bool memcg_kmem_enabled(void)
{
	return static_branch_likely(&memcg_kmem_enabled_key);
}

static inline int memcg_kmem_charge_page(struct page *page, gfp_t gfp,
					 int order)
{
	if (memcg_kmem_enabled())
		return __memcg_kmem_charge_page(page, gfp, order);
	return 0;
}

static inline void memcg_kmem_uncharge_page(struct page *page, int order)
{
	if (memcg_kmem_enabled())
		__memcg_kmem_uncharge_page(page, order);
}

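/*
 * Illustrative usage sketch (editorial addition, not kernel documentation):
 * kernel allocations are charged to the current task's memcg by passing
 * __GFP_ACCOUNT, which routes the page allocator through the helpers
 * above; freeing the pages uncharges them again:
 *
 *	page = alloc_pages(GFP_KERNEL | __GFP_ACCOUNT, order);
 *	...
 *	__free_pages(page, order);
 */
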
/*
 * A helper for accessing memcg's kmem_id, used for getting
 * corresponding LRU lists.
 */
static inline int memcg_kmem_id(struct mem_cgroup *memcg)
{
	return memcg ? memcg->kmemcg_id : -1;
}

struct mem_cgroup *mem_cgroup_from_obj(void *p);
struct mem_cgroup *mem_cgroup_from_slab_obj(void *p);

static inline void count_objcg_event(struct obj_cgroup *objcg,
				     enum vm_event_item idx)
{
	struct mem_cgroup *memcg;

	if (!memcg_kmem_enabled())
		return;

	rcu_read_lock();
	memcg = obj_cgroup_memcg(objcg);
	count_memcg_events(memcg, idx, 1);
	rcu_read_unlock();
}

#else
static inline bool mem_cgroup_kmem_disabled(void)
{
	return true;
}

static inline int memcg_kmem_charge_page(struct page *page, gfp_t gfp,
					 int order)
{
	return 0;
}

static inline void memcg_kmem_uncharge_page(struct page *page, int order)
{
}

static inline int __memcg_kmem_charge_page(struct page *page, gfp_t gfp,
					   int order)
{
	return 0;
}

static inline void __memcg_kmem_uncharge_page(struct page *page, int order)
{
}

static inline struct obj_cgroup *get_obj_cgroup_from_page(struct page *page)
{
	return NULL;
}

static inline bool memcg_kmem_enabled(void)
{
	return false;
}

static inline int memcg_kmem_id(struct mem_cgroup *memcg)
{
	return -1;
}

static inline struct mem_cgroup *mem_cgroup_from_obj(void *p)
{
	return NULL;
}

static inline struct mem_cgroup *mem_cgroup_from_slab_obj(void *p)
{
	return NULL;
}

static inline void count_objcg_event(struct obj_cgroup *objcg,
				     enum vm_event_item idx)
{
}

#endif /* CONFIG_MEMCG_KMEM */

#if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_ZSWAP)
bool obj_cgroup_may_zswap(struct obj_cgroup *objcg);
void obj_cgroup_charge_zswap(struct obj_cgroup *objcg, size_t size);
void obj_cgroup_uncharge_zswap(struct obj_cgroup *objcg, size_t size);
#else
static inline bool obj_cgroup_may_zswap(struct obj_cgroup *objcg)
{
	return true;
}
static inline void obj_cgroup_charge_zswap(struct obj_cgroup *objcg,
					   size_t size)
{
}
static inline void obj_cgroup_uncharge_zswap(struct obj_cgroup *objcg,
					     size_t size)
{
}
#endif

#endif /* _LINUX_MEMCONTROL_H */