mm/hugetlb_cgroup: convert __set_hugetlb_cgroup() to folios
[linux-2.6-block.git] include/linux/hugetlb_cgroup.h

/*
 * Copyright IBM Corporation, 2012
 * Author Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2.1 of the GNU Lesser General Public License
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 *
 */

#ifndef _LINUX_HUGETLB_CGROUP_H
#define _LINUX_HUGETLB_CGROUP_H

#include <linux/mmdebug.h>

struct hugetlb_cgroup;
struct resv_map;
struct file_region;

#ifdef CONFIG_CGROUP_HUGETLB
/*
 * Minimum page order trackable by hugetlb cgroup.
 * At least 4 pages are necessary for all the tracking information.
 * The second tail page (hpage[SUBPAGE_INDEX_CGROUP]) is the fault
 * usage cgroup. The third tail page (hpage[SUBPAGE_INDEX_CGROUP_RSVD])
 * is the reservation usage cgroup.
 */
#define HUGETLB_CGROUP_MIN_ORDER order_base_2(__MAX_CGROUP_SUBPAGE_INDEX + 1)
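
/*
 * Worked example of the order computation above (illustrative; it assumes
 * SUBPAGE_INDEX_CGROUP_RSVD is the highest cgroup subpage index and equals 3,
 * matching the second/third tail page layout described in the comment):
 *
 *	__MAX_CGROUP_SUBPAGE_INDEX + 1 = 4
 *	order_base_2(4) = 2, i.e. a compound page of 2^2 = 4 base pages,
 *
 * which matches the "at least 4 pages" requirement. Hugetlb pages below this
 * order are simply not tracked: the accessors below return NULL and the
 * setters do nothing for them.
 */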

enum hugetlb_memory_event {
	HUGETLB_MAX,
	HUGETLB_NR_MEMORY_EVENTS,
};

struct hugetlb_cgroup_per_node {
	/* hugetlb usage in pages over all hstates. */
	unsigned long usage[HUGE_MAX_HSTATE];
};

struct hugetlb_cgroup {
	struct cgroup_subsys_state css;

	/*
	 * the counter to account for hugepages from hugetlb.
	 */
	struct page_counter hugepage[HUGE_MAX_HSTATE];

	/*
	 * the counter to account for hugepage reservations from hugetlb.
	 */
	struct page_counter rsvd_hugepage[HUGE_MAX_HSTATE];

	atomic_long_t events[HUGE_MAX_HSTATE][HUGETLB_NR_MEMORY_EVENTS];
	atomic_long_t events_local[HUGE_MAX_HSTATE][HUGETLB_NR_MEMORY_EVENTS];

	/* Handle for "hugetlb.events" */
	struct cgroup_file events_file[HUGE_MAX_HSTATE];

	/* Handle for "hugetlb.events.local" */
	struct cgroup_file events_local_file[HUGE_MAX_HSTATE];

	struct hugetlb_cgroup_per_node *nodeinfo[];
};
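
/*
 * Illustrative sketch (an assumption about callers, not part of this header):
 * code holding a cgroup_subsys_state can recover the enclosing hugetlb_cgroup
 * with container_of(), along the lines of the hugetlb_cgroup_from_css()
 * helper in mm/hugetlb_cgroup.c:
 *
 *	static struct hugetlb_cgroup *
 *	css_to_hugetlb_cgroup(struct cgroup_subsys_state *css)
 *	{
 *		return css ? container_of(css, struct hugetlb_cgroup, css) : NULL;
 *	}
 */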

static inline struct hugetlb_cgroup *
__hugetlb_cgroup_from_page(struct page *page, bool rsvd)
{
	VM_BUG_ON_PAGE(!PageHuge(page), page);

	if (compound_order(page) < HUGETLB_CGROUP_MIN_ORDER)
		return NULL;
	if (rsvd)
		return (void *)page_private(page + SUBPAGE_INDEX_CGROUP_RSVD);
	else
		return (void *)page_private(page + SUBPAGE_INDEX_CGROUP);
}

static inline struct hugetlb_cgroup *hugetlb_cgroup_from_page(struct page *page)
{
	return __hugetlb_cgroup_from_page(page, false);
}

static inline struct hugetlb_cgroup *
hugetlb_cgroup_from_page_rsvd(struct page *page)
{
	return __hugetlb_cgroup_from_page(page, true);
}

static inline void __set_hugetlb_cgroup(struct folio *folio,
					struct hugetlb_cgroup *h_cg, bool rsvd)
{
	VM_BUG_ON_FOLIO(!folio_test_hugetlb(folio), folio);

	if (folio_order(folio) < HUGETLB_CGROUP_MIN_ORDER)
		return;
	if (rsvd)
		set_page_private(folio_page(folio, SUBPAGE_INDEX_CGROUP_RSVD),
				 (unsigned long)h_cg);
	else
		set_page_private(folio_page(folio, SUBPAGE_INDEX_CGROUP),
				 (unsigned long)h_cg);
}

static inline void set_hugetlb_cgroup(struct page *page,
				      struct hugetlb_cgroup *h_cg)
{
	__set_hugetlb_cgroup(page_folio(page), h_cg, false);
}

static inline void set_hugetlb_cgroup_rsvd(struct page *page,
					   struct hugetlb_cgroup *h_cg)
{
	__set_hugetlb_cgroup(page_folio(page), h_cg, true);
}
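
/*
 * Usage note (illustrative): the page-based setters above only convert with
 * page_folio() before calling __set_hugetlb_cgroup(), so a caller that
 * already has the struct folio can call __set_hugetlb_cgroup(folio, h_cg,
 * false) for the fault usage cgroup, or __set_hugetlb_cgroup(folio, h_cg,
 * true) for the reservation usage cgroup, directly.
 */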

static inline bool hugetlb_cgroup_disabled(void)
{
	return !cgroup_subsys_enabled(hugetlb_cgrp_subsys);
}

static inline void hugetlb_cgroup_put_rsvd_cgroup(struct hugetlb_cgroup *h_cg)
{
	css_put(&h_cg->css);
}

static inline void resv_map_dup_hugetlb_cgroup_uncharge_info(
						struct resv_map *resv_map)
{
	if (resv_map->css)
		css_get(resv_map->css);
}

static inline void resv_map_put_hugetlb_cgroup_uncharge_info(
						struct resv_map *resv_map)
{
	if (resv_map->css)
		css_put(resv_map->css);
}

extern int hugetlb_cgroup_charge_cgroup(int idx, unsigned long nr_pages,
					struct hugetlb_cgroup **ptr);
extern int hugetlb_cgroup_charge_cgroup_rsvd(int idx, unsigned long nr_pages,
					     struct hugetlb_cgroup **ptr);
extern void hugetlb_cgroup_commit_charge(int idx, unsigned long nr_pages,
					 struct hugetlb_cgroup *h_cg,
					 struct page *page);
extern void hugetlb_cgroup_commit_charge_rsvd(int idx, unsigned long nr_pages,
					      struct hugetlb_cgroup *h_cg,
					      struct page *page);
extern void hugetlb_cgroup_uncharge_page(int idx, unsigned long nr_pages,
					 struct page *page);
extern void hugetlb_cgroup_uncharge_page_rsvd(int idx, unsigned long nr_pages,
					      struct page *page);

extern void hugetlb_cgroup_uncharge_cgroup(int idx, unsigned long nr_pages,
					   struct hugetlb_cgroup *h_cg);
extern void hugetlb_cgroup_uncharge_cgroup_rsvd(int idx, unsigned long nr_pages,
						struct hugetlb_cgroup *h_cg);
extern void hugetlb_cgroup_uncharge_counter(struct resv_map *resv,
					    unsigned long start,
					    unsigned long end);

extern void hugetlb_cgroup_uncharge_file_region(struct resv_map *resv,
						struct file_region *rg,
						unsigned long nr_pages,
						bool region_del);

extern void hugetlb_cgroup_file_init(void) __init;
extern void hugetlb_cgroup_migrate(struct page *oldhpage,
				   struct page *newhpage);
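
/*
 * Sketch of the typical charge lifecycle, assuming the usual call sites in
 * mm/hugetlb.c (illustrative only; error handling, hstate lookup and the
 * _rsvd variants are omitted):
 *
 *	struct hugetlb_cgroup *h_cg;
 *	int idx = hstate_index(h);
 *
 *	if (hugetlb_cgroup_charge_cgroup(idx, pages_per_huge_page(h), &h_cg))
 *		return ERR_PTR(-ENOSPC);
 *	... allocate the huge page ...
 *	hugetlb_cgroup_commit_charge(idx, pages_per_huge_page(h), h_cg, page);
 *	... later, when the page is finally freed ...
 *	hugetlb_cgroup_uncharge_page(idx, pages_per_huge_page(h), page);
 */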

#else
static inline void hugetlb_cgroup_uncharge_file_region(struct resv_map *resv,
						       struct file_region *rg,
						       unsigned long nr_pages,
						       bool region_del)
{
}

static inline struct hugetlb_cgroup *hugetlb_cgroup_from_page(struct page *page)
{
	return NULL;
}

static inline struct hugetlb_cgroup *
hugetlb_cgroup_from_page_resv(struct page *page)
{
	return NULL;
}

static inline struct hugetlb_cgroup *
hugetlb_cgroup_from_page_rsvd(struct page *page)
{
	return NULL;
}

static inline void set_hugetlb_cgroup(struct page *page,
				      struct hugetlb_cgroup *h_cg)
{
}

static inline void set_hugetlb_cgroup_rsvd(struct page *page,
					   struct hugetlb_cgroup *h_cg)
{
}

static inline bool hugetlb_cgroup_disabled(void)
{
	return true;
}

static inline void hugetlb_cgroup_put_rsvd_cgroup(struct hugetlb_cgroup *h_cg)
{
}

static inline void resv_map_dup_hugetlb_cgroup_uncharge_info(
						struct resv_map *resv_map)
{
}

static inline void resv_map_put_hugetlb_cgroup_uncharge_info(
						struct resv_map *resv_map)
{
}

static inline int hugetlb_cgroup_charge_cgroup(int idx, unsigned long nr_pages,
					       struct hugetlb_cgroup **ptr)
{
	return 0;
}

static inline int hugetlb_cgroup_charge_cgroup_rsvd(int idx,
						    unsigned long nr_pages,
						    struct hugetlb_cgroup **ptr)
{
	return 0;
}

static inline void hugetlb_cgroup_commit_charge(int idx, unsigned long nr_pages,
						struct hugetlb_cgroup *h_cg,
						struct page *page)
{
}

static inline void
hugetlb_cgroup_commit_charge_rsvd(int idx, unsigned long nr_pages,
				  struct hugetlb_cgroup *h_cg,
				  struct page *page)
{
}

static inline void hugetlb_cgroup_uncharge_page(int idx, unsigned long nr_pages,
						struct page *page)
{
}

static inline void hugetlb_cgroup_uncharge_page_rsvd(int idx,
						     unsigned long nr_pages,
						     struct page *page)
{
}

static inline void hugetlb_cgroup_uncharge_cgroup(int idx,
						  unsigned long nr_pages,
						  struct hugetlb_cgroup *h_cg)
{
}

static inline void
hugetlb_cgroup_uncharge_cgroup_rsvd(int idx, unsigned long nr_pages,
				    struct hugetlb_cgroup *h_cg)
{
}

static inline void hugetlb_cgroup_uncharge_counter(struct resv_map *resv,
						   unsigned long start,
						   unsigned long end)
{
}

static inline void hugetlb_cgroup_file_init(void)
{
}

static inline void hugetlb_cgroup_migrate(struct page *oldhpage,
					  struct page *newhpage)
{
}

#endif /* CONFIG_CGROUP_HUGETLB */
#endif /* _LINUX_HUGETLB_CGROUP_H */