/*
 * Copyright IBM Corporation, 2012
 * Author Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2.1 of the GNU Lesser General Public License
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 *
 */

#ifndef _LINUX_HUGETLB_CGROUP_H
#define _LINUX_HUGETLB_CGROUP_H

#include <linux/mmdebug.h>

struct hugetlb_cgroup;
struct resv_map;
struct file_region;

#ifdef CONFIG_CGROUP_HUGETLB
/*
 * Minimum page order trackable by hugetlb cgroup.
 * At least 4 pages are necessary for all the tracking information.
 * The second tail page (hpage[SUBPAGE_INDEX_CGROUP]) stores the fault
 * usage cgroup pointer. The third tail page (hpage[SUBPAGE_INDEX_CGROUP_RSVD])
 * stores the reservation usage cgroup pointer.
 */
#define HUGETLB_CGROUP_MIN_ORDER order_base_2(__MAX_CGROUP_SUBPAGE_INDEX + 1)
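/*
 * Worked example (assuming the subpage indices defined in <linux/hugetlb.h>,
 * i.e. SUBPAGE_INDEX_CGROUP == 2, SUBPAGE_INDEX_CGROUP_RSVD == 3 and thus
 * __MAX_CGROUP_SUBPAGE_INDEX == 3): this evaluates to order_base_2(4) == 2,
 * so a hugepage must cover at least four base pages before both cgroup
 * pointers can be stashed in tail page ->private.
 */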

enum hugetlb_memory_event {
	HUGETLB_MAX,
	HUGETLB_NR_MEMORY_EVENTS,
};

struct hugetlb_cgroup_per_node {
	/* hugetlb usage in pages over all hstates. */
	unsigned long usage[HUGE_MAX_HSTATE];
};

struct hugetlb_cgroup {
	struct cgroup_subsys_state css;

	/*
	 * the counter to account for hugepages from hugetlb.
	 */
	struct page_counter hugepage[HUGE_MAX_HSTATE];

	/*
	 * the counter to account for hugepage reservations from hugetlb.
	 */
	struct page_counter rsvd_hugepage[HUGE_MAX_HSTATE];

	atomic_long_t events[HUGE_MAX_HSTATE][HUGETLB_NR_MEMORY_EVENTS];
	atomic_long_t events_local[HUGE_MAX_HSTATE][HUGETLB_NR_MEMORY_EVENTS];

	/* Handle for "hugetlb.events" */
	struct cgroup_file events_file[HUGE_MAX_HSTATE];

	/* Handle for "hugetlb.events.local" */
	struct cgroup_file events_local_file[HUGE_MAX_HSTATE];

	struct hugetlb_cgroup_per_node *nodeinfo[];
};

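/*
 * Return the hugetlb cgroup recorded in a hugetlb page's tail pages: the
 * reservation usage cgroup when @rsvd is true, otherwise the fault usage
 * cgroup. Pages below HUGETLB_CGROUP_MIN_ORDER are not tracked and yield
 * NULL.
 */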
static inline struct hugetlb_cgroup *
__hugetlb_cgroup_from_page(struct page *page, bool rsvd)
{
	VM_BUG_ON_PAGE(!PageHuge(page), page);

	if (compound_order(page) < HUGETLB_CGROUP_MIN_ORDER)
		return NULL;
	if (rsvd)
		return (void *)page_private(page + SUBPAGE_INDEX_CGROUP_RSVD);
	else
		return (void *)page_private(page + SUBPAGE_INDEX_CGROUP);
}

static inline struct hugetlb_cgroup *hugetlb_cgroup_from_page(struct page *page)
{
	return __hugetlb_cgroup_from_page(page, false);
}

static inline struct hugetlb_cgroup *
hugetlb_cgroup_from_page_rsvd(struct page *page)
{
	return __hugetlb_cgroup_from_page(page, true);
}

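/*
 * Record @h_cg in the appropriate tail page's ->private field. Returns -1
 * when the compound page is below HUGETLB_CGROUP_MIN_ORDER and cannot be
 * tracked, 0 on success.
 */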
static inline int __set_hugetlb_cgroup(struct page *page,
				       struct hugetlb_cgroup *h_cg, bool rsvd)
{
	VM_BUG_ON_PAGE(!PageHuge(page), page);

	if (compound_order(page) < HUGETLB_CGROUP_MIN_ORDER)
		return -1;
	if (rsvd)
		set_page_private(page + SUBPAGE_INDEX_CGROUP_RSVD,
				 (unsigned long)h_cg);
	else
		set_page_private(page + SUBPAGE_INDEX_CGROUP,
				 (unsigned long)h_cg);
	return 0;
}

static inline int set_hugetlb_cgroup(struct page *page,
				     struct hugetlb_cgroup *h_cg)
{
	return __set_hugetlb_cgroup(page, h_cg, false);
}

static inline int set_hugetlb_cgroup_rsvd(struct page *page,
					  struct hugetlb_cgroup *h_cg)
{
	return __set_hugetlb_cgroup(page, h_cg, true);
}

static inline bool hugetlb_cgroup_disabled(void)
{
	return !cgroup_subsys_enabled(hugetlb_cgrp_subsys);
}

static inline void hugetlb_cgroup_put_rsvd_cgroup(struct hugetlb_cgroup *h_cg)
{
	css_put(&h_cg->css);
}

static inline void resv_map_dup_hugetlb_cgroup_uncharge_info(
						struct resv_map *resv_map)
{
	if (resv_map->css)
		css_get(resv_map->css);
}

static inline void resv_map_put_hugetlb_cgroup_uncharge_info(
						struct resv_map *resv_map)
{
	if (resv_map->css)
		css_put(resv_map->css);
}

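/*
 * Charging follows a two-step try/commit pattern (a sketch of the intended
 * flow; the exact call sites live in mm/hugetlb.c and mm/hugetlb_cgroup.c):
 * hugetlb_cgroup_charge_cgroup() charges @nr_pages of hstate @idx against
 * the current task's cgroup limit and returns the cgroup in *ptr; once a
 * page has been allocated, the charge is bound to it with
 * hugetlb_cgroup_commit_charge(), otherwise it is dropped with
 * hugetlb_cgroup_uncharge_cgroup(). The *_rsvd variants account against the
 * reservation counters rather than the fault counters.
 */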
extern int hugetlb_cgroup_charge_cgroup(int idx, unsigned long nr_pages,
					struct hugetlb_cgroup **ptr);
extern int hugetlb_cgroup_charge_cgroup_rsvd(int idx, unsigned long nr_pages,
					      struct hugetlb_cgroup **ptr);
extern void hugetlb_cgroup_commit_charge(int idx, unsigned long nr_pages,
					 struct hugetlb_cgroup *h_cg,
					 struct page *page);
extern void hugetlb_cgroup_commit_charge_rsvd(int idx, unsigned long nr_pages,
					       struct hugetlb_cgroup *h_cg,
					       struct page *page);
extern void hugetlb_cgroup_uncharge_page(int idx, unsigned long nr_pages,
					 struct page *page);
extern void hugetlb_cgroup_uncharge_page_rsvd(int idx, unsigned long nr_pages,
					      struct page *page);

extern void hugetlb_cgroup_uncharge_cgroup(int idx, unsigned long nr_pages,
					   struct hugetlb_cgroup *h_cg);
extern void hugetlb_cgroup_uncharge_cgroup_rsvd(int idx, unsigned long nr_pages,
						struct hugetlb_cgroup *h_cg);
extern void hugetlb_cgroup_uncharge_counter(struct resv_map *resv,
					    unsigned long start,
					    unsigned long end);

extern void hugetlb_cgroup_uncharge_file_region(struct resv_map *resv,
						struct file_region *rg,
						unsigned long nr_pages,
						bool region_del);

extern void hugetlb_cgroup_file_init(void) __init;
extern void hugetlb_cgroup_migrate(struct page *oldhpage,
				   struct page *newhpage);

#else
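/* CONFIG_CGROUP_HUGETLB is not set: the helpers below are empty stubs. */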
static inline void hugetlb_cgroup_uncharge_file_region(struct resv_map *resv,
						       struct file_region *rg,
						       unsigned long nr_pages,
						       bool region_del)
{
}

static inline struct hugetlb_cgroup *hugetlb_cgroup_from_page(struct page *page)
{
	return NULL;
}

static inline struct hugetlb_cgroup *
hugetlb_cgroup_from_page_resv(struct page *page)
{
	return NULL;
}

static inline struct hugetlb_cgroup *
hugetlb_cgroup_from_page_rsvd(struct page *page)
{
	return NULL;
}

static inline int set_hugetlb_cgroup(struct page *page,
				     struct hugetlb_cgroup *h_cg)
{
	return 0;
}

static inline int set_hugetlb_cgroup_rsvd(struct page *page,
					  struct hugetlb_cgroup *h_cg)
{
	return 0;
}

static inline bool hugetlb_cgroup_disabled(void)
{
	return true;
}

static inline void hugetlb_cgroup_put_rsvd_cgroup(struct hugetlb_cgroup *h_cg)
{
}

static inline void resv_map_dup_hugetlb_cgroup_uncharge_info(
						struct resv_map *resv_map)
{
}

static inline void resv_map_put_hugetlb_cgroup_uncharge_info(
						struct resv_map *resv_map)
{
}

static inline int hugetlb_cgroup_charge_cgroup(int idx, unsigned long nr_pages,
					       struct hugetlb_cgroup **ptr)
{
	return 0;
}

static inline int hugetlb_cgroup_charge_cgroup_rsvd(int idx,
						    unsigned long nr_pages,
						    struct hugetlb_cgroup **ptr)
{
	return 0;
}

static inline void hugetlb_cgroup_commit_charge(int idx, unsigned long nr_pages,
						struct hugetlb_cgroup *h_cg,
						struct page *page)
{
}

static inline void
hugetlb_cgroup_commit_charge_rsvd(int idx, unsigned long nr_pages,
				  struct hugetlb_cgroup *h_cg,
				  struct page *page)
{
}

static inline void hugetlb_cgroup_uncharge_page(int idx, unsigned long nr_pages,
						struct page *page)
{
}

static inline void hugetlb_cgroup_uncharge_page_rsvd(int idx,
						      unsigned long nr_pages,
						      struct page *page)
{
}

static inline void hugetlb_cgroup_uncharge_cgroup(int idx,
						   unsigned long nr_pages,
						   struct hugetlb_cgroup *h_cg)
{
}

static inline void
hugetlb_cgroup_uncharge_cgroup_rsvd(int idx, unsigned long nr_pages,
				    struct hugetlb_cgroup *h_cg)
{
}

static inline void hugetlb_cgroup_uncharge_counter(struct resv_map *resv,
						    unsigned long start,
						    unsigned long end)
{
}

static inline void hugetlb_cgroup_file_init(void)
{
}

static inline void hugetlb_cgroup_migrate(struct page *oldhpage,
					  struct page *newhpage)
{
}

#endif /* CONFIG_CGROUP_HUGETLB */
#endif /* _LINUX_HUGETLB_CGROUP_H */