/*
 * mm/hugetlb_cgroup: convert hugetlb_cgroup_migrate to folios
 * include/linux/hugetlb_cgroup.h (linux-block.git)
 */
1 /*
2  * Copyright IBM Corporation, 2012
3  * Author Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
4  *
5  * This program is free software; you can redistribute it and/or modify it
6  * under the terms of version 2.1 of the GNU Lesser General Public License
7  * as published by the Free Software Foundation.
8  *
9  * This program is distributed in the hope that it would be useful, but
10  * WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
12  *
13  */
14
15 #ifndef _LINUX_HUGETLB_CGROUP_H
16 #define _LINUX_HUGETLB_CGROUP_H
17
18 #include <linux/mmdebug.h>
19
20 struct hugetlb_cgroup;
21 struct resv_map;
22 struct file_region;
23
24 #ifdef CONFIG_CGROUP_HUGETLB
25 /*
26  * Minimum page order trackable by hugetlb cgroup.
27  * At least 4 pages are necessary for all the tracking information.
28  * The second tail page (hpage[SUBPAGE_INDEX_CGROUP]) is the fault
29  * usage cgroup. The third tail page (hpage[SUBPAGE_INDEX_CGROUP_RSVD])
30  * is the reservation usage cgroup.
31  */
32 #define HUGETLB_CGROUP_MIN_ORDER order_base_2(__MAX_CGROUP_SUBPAGE_INDEX + 1)
33
/*
 * Memory events exposed through the "hugetlb.events" cgroup files.
 * HUGETLB_NR_MEMORY_EVENTS must stay last: it sizes the events[] and
 * events_local[] arrays in struct hugetlb_cgroup below.
 */
enum hugetlb_memory_event {
	HUGETLB_MAX,	/* presumably counts hits of the max limit — confirm in mm/hugetlb_cgroup.c */
	HUGETLB_NR_MEMORY_EVENTS,	/* sentinel: number of event types */
};
38
/* Per-NUMA-node usage accounting, one counter per hstate. */
struct hugetlb_cgroup_per_node {
	/* hugetlb usage in pages over all hstates. */
	unsigned long usage[HUGE_MAX_HSTATE];
};
43
/*
 * Per-cgroup state for the hugetlb controller: the embedded
 * cgroup_subsys_state plus per-hstate counters and event bookkeeping.
 */
struct hugetlb_cgroup {
	/* Must come first so css <-> hugetlb_cgroup conversion works. */
	struct cgroup_subsys_state css;

	/*
	 * the counter to account for hugepages from hugetlb.
	 */
	struct page_counter hugepage[HUGE_MAX_HSTATE];

	/*
	 * the counter to account for hugepage reservations from hugetlb.
	 */
	struct page_counter rsvd_hugepage[HUGE_MAX_HSTATE];

	/*
	 * Event counters per hstate (see enum hugetlb_memory_event).
	 * events_local presumably excludes descendant cgroups — confirm
	 * against mm/hugetlb_cgroup.c.
	 */
	atomic_long_t events[HUGE_MAX_HSTATE][HUGETLB_NR_MEMORY_EVENTS];
	atomic_long_t events_local[HUGE_MAX_HSTATE][HUGETLB_NR_MEMORY_EVENTS];

	/* Handle for "hugetlb.events" */
	struct cgroup_file events_file[HUGE_MAX_HSTATE];

	/* Handle for "hugetlb.events.local" */
	struct cgroup_file events_local_file[HUGE_MAX_HSTATE];

	/* Per-node info; flexible array, sized at allocation (likely nr_node_ids — confirm). */
	struct hugetlb_cgroup_per_node *nodeinfo[];
};
68
69 static inline struct hugetlb_cgroup *
70 __hugetlb_cgroup_from_folio(struct folio *folio, bool rsvd)
71 {
72         struct page *tail;
73
74         VM_BUG_ON_FOLIO(!folio_test_hugetlb(folio), folio);
75         if (folio_order(folio) < HUGETLB_CGROUP_MIN_ORDER)
76                 return NULL;
77
78         if (rsvd) {
79                 tail = folio_page(folio, SUBPAGE_INDEX_CGROUP_RSVD);
80                 return (void *)page_private(tail);
81         }
82
83         else {
84                 tail = folio_page(folio, SUBPAGE_INDEX_CGROUP);
85                 return (void *)page_private(tail);
86         }
87 }
88
/* Fault-usage cgroup stored in @folio, or NULL. */
static inline struct hugetlb_cgroup *hugetlb_cgroup_from_folio(struct folio *folio)
{
	return __hugetlb_cgroup_from_folio(folio, false);
}

/* Reservation-usage cgroup stored in @folio, or NULL. */
static inline struct hugetlb_cgroup *
hugetlb_cgroup_from_folio_rsvd(struct folio *folio)
{
	return __hugetlb_cgroup_from_folio(folio, true);
}
99
100 static inline void __set_hugetlb_cgroup(struct folio *folio,
101                                        struct hugetlb_cgroup *h_cg, bool rsvd)
102 {
103         VM_BUG_ON_FOLIO(!folio_test_hugetlb(folio), folio);
104
105         if (folio_order(folio) < HUGETLB_CGROUP_MIN_ORDER)
106                 return;
107         if (rsvd)
108                 set_page_private(folio_page(folio, SUBPAGE_INDEX_CGROUP_RSVD),
109                                  (unsigned long)h_cg);
110         else
111                 set_page_private(folio_page(folio, SUBPAGE_INDEX_CGROUP),
112                                  (unsigned long)h_cg);
113 }
114
/* Record @h_cg as the folio's fault-usage cgroup. */
static inline void set_hugetlb_cgroup(struct folio *folio,
				     struct hugetlb_cgroup *h_cg)
{
	__set_hugetlb_cgroup(folio, h_cg, false);
}

/* Record @h_cg as the folio's reservation-usage cgroup. */
static inline void set_hugetlb_cgroup_rsvd(struct folio *folio,
					  struct hugetlb_cgroup *h_cg)
{
	__set_hugetlb_cgroup(folio, h_cg, true);
}
126
/* True when the hugetlb cgroup subsystem is not enabled. */
static inline bool hugetlb_cgroup_disabled(void)
{
	return !cgroup_subsys_enabled(hugetlb_cgrp_subsys);
}
131
/* Drop the css reference held on a reservation cgroup. */
static inline void hugetlb_cgroup_put_rsvd_cgroup(struct hugetlb_cgroup *h_cg)
{
	css_put(&h_cg->css);
}
136
/* Take an extra css reference when a reservation map is duplicated. */
static inline void resv_map_dup_hugetlb_cgroup_uncharge_info(
						struct resv_map *resv_map)
{
	if (resv_map->css)
		css_get(resv_map->css);
}

/* Release the css reference held by a reservation map, if any. */
static inline void resv_map_put_hugetlb_cgroup_uncharge_info(
						struct resv_map *resv_map)
{
	if (resv_map->css)
		css_put(resv_map->css);
}
150
/*
 * Implemented in mm/hugetlb_cgroup.c.  @idx is presumably the hstate
 * index (the struct arrays above are sized HUGE_MAX_HSTATE — confirm),
 * @nr_pages a count of base pages.  The _rsvd variants act on the
 * reservation counters, the plain ones on fault usage.
 */
extern int hugetlb_cgroup_charge_cgroup(int idx, unsigned long nr_pages,
					struct hugetlb_cgroup **ptr);
extern int hugetlb_cgroup_charge_cgroup_rsvd(int idx, unsigned long nr_pages,
					     struct hugetlb_cgroup **ptr);
extern void hugetlb_cgroup_commit_charge(int idx, unsigned long nr_pages,
					 struct hugetlb_cgroup *h_cg,
					 struct page *page);
extern void hugetlb_cgroup_commit_charge_rsvd(int idx, unsigned long nr_pages,
					      struct hugetlb_cgroup *h_cg,
					      struct page *page);
extern void hugetlb_cgroup_uncharge_page(int idx, unsigned long nr_pages,
					 struct page *page);
extern void hugetlb_cgroup_uncharge_page_rsvd(int idx, unsigned long nr_pages,
					      struct page *page);

extern void hugetlb_cgroup_uncharge_cgroup(int idx, unsigned long nr_pages,
					   struct hugetlb_cgroup *h_cg);
extern void hugetlb_cgroup_uncharge_cgroup_rsvd(int idx, unsigned long nr_pages,
						struct hugetlb_cgroup *h_cg);
extern void hugetlb_cgroup_uncharge_counter(struct resv_map *resv,
					    unsigned long start,
					    unsigned long end);

extern void hugetlb_cgroup_uncharge_file_region(struct resv_map *resv,
						struct file_region *rg,
						unsigned long nr_pages,
						bool region_del);

/* Boot-time (__init) setup of the hugetlb cgroup files. */
extern void hugetlb_cgroup_file_init(void) __init;
/* Move cgroup accounting from @old_folio to @new_folio during migration. */
extern void hugetlb_cgroup_migrate(struct folio *old_folio,
				   struct folio *new_folio);
182
183 #else
/* No-op stub for !CONFIG_CGROUP_HUGETLB. */
static inline void hugetlb_cgroup_uncharge_file_region(struct resv_map *resv,
						       struct file_region *rg,
						       unsigned long nr_pages,
						       bool region_del)
{
}
190
/* !CONFIG_CGROUP_HUGETLB: no cgroup is ever attached; always NULL. */
static inline struct hugetlb_cgroup *hugetlb_cgroup_from_folio(struct folio *folio)
{
	return NULL;
}

/* !CONFIG_CGROUP_HUGETLB: no cgroup is ever attached; always NULL. */
static inline struct hugetlb_cgroup *
hugetlb_cgroup_from_folio_rsvd(struct folio *folio)
{
	return NULL;
}
201
/* No-op stub for !CONFIG_CGROUP_HUGETLB. */
static inline void set_hugetlb_cgroup(struct folio *folio,
				     struct hugetlb_cgroup *h_cg)
{
}

/* No-op stub for !CONFIG_CGROUP_HUGETLB. */
static inline void set_hugetlb_cgroup_rsvd(struct folio *folio,
					  struct hugetlb_cgroup *h_cg)
{
}
211
/* Controller compiled out: always report disabled. */
static inline bool hugetlb_cgroup_disabled(void)
{
	return true;
}

/* No-op stub for !CONFIG_CGROUP_HUGETLB. */
static inline void hugetlb_cgroup_put_rsvd_cgroup(struct hugetlb_cgroup *h_cg)
{
}
220
/* No-op stub for !CONFIG_CGROUP_HUGETLB. */
static inline void resv_map_dup_hugetlb_cgroup_uncharge_info(
						struct resv_map *resv_map)
{
}

/* No-op stub for !CONFIG_CGROUP_HUGETLB. */
static inline void resv_map_put_hugetlb_cgroup_uncharge_info(
						struct resv_map *resv_map)
{
}
230
/* !CONFIG_CGROUP_HUGETLB: charging always succeeds trivially. */
static inline int hugetlb_cgroup_charge_cgroup(int idx, unsigned long nr_pages,
					       struct hugetlb_cgroup **ptr)
{
	return 0;
}

/* !CONFIG_CGROUP_HUGETLB: charging always succeeds trivially. */
static inline int hugetlb_cgroup_charge_cgroup_rsvd(int idx,
						    unsigned long nr_pages,
						    struct hugetlb_cgroup **ptr)
{
	return 0;
}
243
/* No-op stub for !CONFIG_CGROUP_HUGETLB. */
static inline void hugetlb_cgroup_commit_charge(int idx, unsigned long nr_pages,
						struct hugetlb_cgroup *h_cg,
						struct page *page)
{
}

/* No-op stub for !CONFIG_CGROUP_HUGETLB. */
static inline void
hugetlb_cgroup_commit_charge_rsvd(int idx, unsigned long nr_pages,
				  struct hugetlb_cgroup *h_cg,
				  struct page *page)
{
}
256
/* No-op stub for !CONFIG_CGROUP_HUGETLB. */
static inline void hugetlb_cgroup_uncharge_page(int idx, unsigned long nr_pages,
						struct page *page)
{
}

/* No-op stub for !CONFIG_CGROUP_HUGETLB. */
static inline void hugetlb_cgroup_uncharge_page_rsvd(int idx,
						     unsigned long nr_pages,
						     struct page *page)
{
}
/* No-op stub for !CONFIG_CGROUP_HUGETLB. */
static inline void hugetlb_cgroup_uncharge_cgroup(int idx,
						  unsigned long nr_pages,
						  struct hugetlb_cgroup *h_cg)
{
}
272
/* No-op stub for !CONFIG_CGROUP_HUGETLB. */
static inline void
hugetlb_cgroup_uncharge_cgroup_rsvd(int idx, unsigned long nr_pages,
				    struct hugetlb_cgroup *h_cg)
{
}

/* No-op stub for !CONFIG_CGROUP_HUGETLB. */
static inline void hugetlb_cgroup_uncharge_counter(struct resv_map *resv,
						   unsigned long start,
						   unsigned long end)
{
}
284
/* No-op stub for !CONFIG_CGROUP_HUGETLB. */
static inline void hugetlb_cgroup_file_init(void)
{
}

/* No-op stub for !CONFIG_CGROUP_HUGETLB. */
static inline void hugetlb_cgroup_migrate(struct folio *old_folio,
					  struct folio *new_folio)
{
}
293
#endif	/* CONFIG_CGROUP_HUGETLB */
295 #endif