/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_HUGETLB_H
#define _LINUX_HUGETLB_H

#include <linux/mm_types.h>
#include <linux/mmdebug.h>
#include <linux/fs.h>
#include <linux/hugetlb_inline.h>
#include <linux/cgroup.h>
#include <linux/list.h>
#include <linux/kref.h>
#include <linux/pgtable.h>
#include <linux/gfp.h>
#include <linux/userfaultfd_k.h>

struct ctl_table;
struct user_struct;
struct mmu_gather;

#ifndef is_hugepd
typedef struct { unsigned long pd; } hugepd_t;
#define is_hugepd(hugepd) (0)
#define __hugepd(x) ((hugepd_t) { (x) })
#endif

#ifdef CONFIG_HUGETLB_PAGE

#include <linux/mempolicy.h>
#include <linux/shm.h>
#include <asm/tlbflush.h>

/*
 * For a HugeTLB page, there is more metadata to save in the struct page than
 * the head struct page can hold, so we have to abuse other tail struct pages
 * to store the metadata. In order to avoid conflicts caused by subsequent use
 * of more tail struct pages, we gather these discrete indexes of tail struct
 * pages here.
 */
enum {
	SUBPAGE_INDEX_SUBPOOL = 1,	/* reuse page->private */
#ifdef CONFIG_CGROUP_HUGETLB
	SUBPAGE_INDEX_CGROUP,		/* reuse page->private */
	SUBPAGE_INDEX_CGROUP_RSVD,	/* reuse page->private */
	__MAX_CGROUP_SUBPAGE_INDEX = SUBPAGE_INDEX_CGROUP_RSVD,
#endif
	__NR_USED_SUBPAGE,
};
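
/*
 * Example: SUBPAGE_INDEX_SUBPOOL == 1, so the subpool pointer is stashed in
 * the first tail page's page->private; hugetlb_page_subpool() further below
 * simply reads page_private(hpage + SUBPAGE_INDEX_SUBPOOL).
 */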

struct hugepage_subpool {
	spinlock_t lock;
	long count;
	long max_hpages;	/* Maximum huge pages or -1 if no maximum. */
	long used_hpages;	/* Used count against maximum, includes */
				/* both allocated and reserved pages. */
	struct hstate *hstate;
	long min_hpages;	/* Minimum huge pages or -1 if no minimum. */
	long rsv_hpages;	/* Pages reserved against global pool to */
				/* satisfy minimum size. */
};

struct resv_map {
	struct kref refs;
	spinlock_t lock;
	struct list_head regions;
	long adds_in_progress;
	struct list_head region_cache;
	long region_cache_count;
#ifdef CONFIG_CGROUP_HUGETLB
	/*
	 * On private mappings, the counter to uncharge reservations is stored
	 * here. If these fields are 0, then either the mapping is shared, or
	 * cgroup accounting is disabled for this resv_map.
	 */
	struct page_counter *reservation_counter;
	unsigned long pages_per_hpage;
	struct cgroup_subsys_state *css;
#endif
};

/*
 * Region tracking -- allows tracking of reservations and instantiated pages
 * across the pages in a mapping.
 *
 * The region data structures are embedded into a resv_map and protected
 * by a resv_map's lock.  The set of regions within the resv_map represent
 * reservations for huge pages, or huge pages that have already been
 * instantiated within the map.  The from and to elements are huge page
 * indices into the associated mapping.  from indicates the starting index
 * of the region.  to represents the first index past the end of the region.
 *
 * For example, a file region structure with from == 0 and to == 4 represents
 * four huge pages in a mapping.  It is important to note that the to element
 * represents the first element past the end of the region. This is used in
 * arithmetic as 4(to) - 0(from) = 4 huge pages in the region.
 *
 * Interval notation of the form [from, to) will be used to indicate that
 * the endpoint from is inclusive and to is exclusive.
 */
struct file_region {
	struct list_head link;
	long from;
	long to;
#ifdef CONFIG_CGROUP_HUGETLB
	/*
	 * On shared mappings, each reserved region appears as a struct
	 * file_region in resv_map. These fields hold the info needed to
	 * uncharge each reservation.
	 */
	struct page_counter *reservation_counter;
	struct cgroup_subsys_state *css;
#endif
};

extern struct resv_map *resv_map_alloc(void);
void resv_map_release(struct kref *ref);

extern spinlock_t hugetlb_lock;
extern int hugetlb_max_hstate __read_mostly;
#define for_each_hstate(h) \
	for ((h) = hstates; (h) < &hstates[hugetlb_max_hstate]; (h)++)

struct hugepage_subpool *hugepage_new_subpool(struct hstate *h, long max_hpages,
						long min_hpages);
void hugepage_put_subpool(struct hugepage_subpool *spool);

void reset_vma_resv_huge_pages(struct vm_area_struct *vma);
void clear_vma_resv_huge_pages(struct vm_area_struct *vma);
int hugetlb_sysctl_handler(struct ctl_table *, int, void *, size_t *, loff_t *);
int hugetlb_overcommit_handler(struct ctl_table *, int, void *, size_t *,
		loff_t *);
int hugetlb_treat_movable_handler(struct ctl_table *, int, void *, size_t *,
		loff_t *);
int hugetlb_mempolicy_sysctl_handler(struct ctl_table *, int, void *, size_t *,
		loff_t *);

int move_hugetlb_page_tables(struct vm_area_struct *vma,
			     struct vm_area_struct *new_vma,
			     unsigned long old_addr, unsigned long new_addr,
			     unsigned long len);
int copy_hugetlb_page_range(struct mm_struct *, struct mm_struct *,
			    struct vm_area_struct *, struct vm_area_struct *);
long follow_hugetlb_page(struct mm_struct *, struct vm_area_struct *,
			 struct page **, struct vm_area_struct **,
			 unsigned long *, unsigned long *, long, unsigned int,
			 int *);
void unmap_hugepage_range(struct vm_area_struct *,
			  unsigned long, unsigned long, struct page *,
			  zap_flags_t);
void __unmap_hugepage_range_final(struct mmu_gather *tlb,
			  struct vm_area_struct *vma,
			  unsigned long start, unsigned long end,
			  struct page *ref_page, zap_flags_t zap_flags);
void hugetlb_report_meminfo(struct seq_file *);
int hugetlb_report_node_meminfo(char *buf, int len, int nid);
void hugetlb_show_meminfo(void);
unsigned long hugetlb_total_pages(void);
vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
			unsigned long address, unsigned int flags);
#ifdef CONFIG_USERFAULTFD
int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm, pte_t *dst_pte,
				struct vm_area_struct *dst_vma,
				unsigned long dst_addr,
				unsigned long src_addr,
				enum mcopy_atomic_mode mode,
				struct page **pagep,
				bool wp_copy);
#endif /* CONFIG_USERFAULTFD */
bool hugetlb_reserve_pages(struct inode *inode, long from, long to,
						struct vm_area_struct *vma,
						vm_flags_t vm_flags);
long hugetlb_unreserve_pages(struct inode *inode, long start, long end,
						long freed);
bool isolate_huge_page(struct page *page, struct list_head *list);
int get_hwpoison_huge_page(struct page *page, bool *hugetlb);
int get_huge_page_for_hwpoison(unsigned long pfn, int flags);
void putback_active_hugepage(struct page *page);
void move_hugetlb_state(struct page *oldpage, struct page *newpage, int reason);
void free_huge_page(struct page *page);
void hugetlb_fix_reserve_counts(struct inode *inode);
extern struct mutex *hugetlb_fault_mutex_table;
u32 hugetlb_fault_mutex_hash(struct address_space *mapping, pgoff_t idx);

pte_t *huge_pmd_share(struct mm_struct *mm, struct vm_area_struct *vma,
		      unsigned long addr, pud_t *pud);

struct address_space *hugetlb_page_mapping_lock_write(struct page *hpage);

extern int sysctl_hugetlb_shm_group;
extern struct list_head huge_boot_pages;

/* arch callbacks */

pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
			unsigned long addr, unsigned long sz);
pte_t *huge_pte_offset(struct mm_struct *mm,
		       unsigned long addr, unsigned long sz);
int huge_pmd_unshare(struct mm_struct *mm, struct vm_area_struct *vma,
				unsigned long *addr, pte_t *ptep);
void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma,
				unsigned long *start, unsigned long *end);
struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address,
			      int write);
struct page *follow_huge_pd(struct vm_area_struct *vma,
			    unsigned long address, hugepd_t hpd,
			    int flags, int pdshift);
struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
				pmd_t *pmd, int flags);
struct page *follow_huge_pud(struct mm_struct *mm, unsigned long address,
				pud_t *pud, int flags);
struct page *follow_huge_pgd(struct mm_struct *mm, unsigned long address,
			     pgd_t *pgd, int flags);

int pmd_huge(pmd_t pmd);
int pud_huge(pud_t pud);
unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
		unsigned long address, unsigned long end, pgprot_t newprot,
		unsigned long cp_flags);

bool is_hugetlb_entry_migration(pte_t pte);
void hugetlb_unshare_all_pmds(struct vm_area_struct *vma);

#else /* !CONFIG_HUGETLB_PAGE */

static inline void reset_vma_resv_huge_pages(struct vm_area_struct *vma)
{
}

static inline void clear_vma_resv_huge_pages(struct vm_area_struct *vma)
{
}

static inline unsigned long hugetlb_total_pages(void)
{
	return 0;
}

static inline struct address_space *hugetlb_page_mapping_lock_write(
							struct page *hpage)
{
	return NULL;
}

static inline int huge_pmd_unshare(struct mm_struct *mm,
					struct vm_area_struct *vma,
					unsigned long *addr, pte_t *ptep)
{
	return 0;
}

static inline void adjust_range_if_pmd_sharing_possible(
				struct vm_area_struct *vma,
				unsigned long *start, unsigned long *end)
{
}

static inline long follow_hugetlb_page(struct mm_struct *mm,
			struct vm_area_struct *vma, struct page **pages,
			struct vm_area_struct **vmas, unsigned long *position,
			unsigned long *nr_pages, long i, unsigned int flags,
			int *nonblocking)
{
	BUG();
	return 0;
}

static inline struct page *follow_huge_addr(struct mm_struct *mm,
					unsigned long address, int write)
{
	return ERR_PTR(-EINVAL);
}

static inline int copy_hugetlb_page_range(struct mm_struct *dst,
					  struct mm_struct *src,
					  struct vm_area_struct *dst_vma,
					  struct vm_area_struct *src_vma)
{
	BUG();
	return 0;
}

static inline int move_hugetlb_page_tables(struct vm_area_struct *vma,
					   struct vm_area_struct *new_vma,
					   unsigned long old_addr,
					   unsigned long new_addr,
					   unsigned long len)
{
	BUG();
	return 0;
}

static inline void hugetlb_report_meminfo(struct seq_file *m)
{
}

static inline int hugetlb_report_node_meminfo(char *buf, int len, int nid)
{
	return 0;
}

static inline void hugetlb_show_meminfo(void)
{
}

static inline struct page *follow_huge_pd(struct vm_area_struct *vma,
				unsigned long address, hugepd_t hpd, int flags,
				int pdshift)
{
	return NULL;
}

static inline struct page *follow_huge_pmd(struct mm_struct *mm,
				unsigned long address, pmd_t *pmd, int flags)
{
	return NULL;
}

static inline struct page *follow_huge_pud(struct mm_struct *mm,
				unsigned long address, pud_t *pud, int flags)
{
	return NULL;
}

static inline struct page *follow_huge_pgd(struct mm_struct *mm,
				unsigned long address, pgd_t *pgd, int flags)
{
	return NULL;
}

static inline int prepare_hugepage_range(struct file *file,
			unsigned long addr, unsigned long len)
{
	return -EINVAL;
}

static inline int pmd_huge(pmd_t pmd)
{
	return 0;
}

static inline int pud_huge(pud_t pud)
{
	return 0;
}

static inline int is_hugepage_only_range(struct mm_struct *mm,
					unsigned long addr, unsigned long len)
{
	return 0;
}

static inline void hugetlb_free_pgd_range(struct mmu_gather *tlb,
				unsigned long addr, unsigned long end,
				unsigned long floor, unsigned long ceiling)
{
	BUG();
}

#ifdef CONFIG_USERFAULTFD
static inline int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm,
						pte_t *dst_pte,
						struct vm_area_struct *dst_vma,
						unsigned long dst_addr,
						unsigned long src_addr,
						enum mcopy_atomic_mode mode,
						struct page **pagep,
						bool wp_copy)
{
	BUG();
	return 0;
}
#endif /* CONFIG_USERFAULTFD */

static inline pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr,
					unsigned long sz)
{
	return NULL;
}

static inline bool isolate_huge_page(struct page *page, struct list_head *list)
{
	return false;
}

static inline int get_hwpoison_huge_page(struct page *page, bool *hugetlb)
{
	return 0;
}

static inline int get_huge_page_for_hwpoison(unsigned long pfn, int flags)
{
	return 0;
}

static inline void putback_active_hugepage(struct page *page)
{
}

static inline void move_hugetlb_state(struct page *oldpage,
					struct page *newpage, int reason)
{
}

static inline unsigned long hugetlb_change_protection(
			struct vm_area_struct *vma, unsigned long address,
			unsigned long end, pgprot_t newprot,
			unsigned long cp_flags)
{
	return 0;
}

static inline void __unmap_hugepage_range_final(struct mmu_gather *tlb,
			struct vm_area_struct *vma, unsigned long start,
			unsigned long end, struct page *ref_page,
			zap_flags_t zap_flags)
{
	BUG();
}

static inline vm_fault_t hugetlb_fault(struct mm_struct *mm,
			struct vm_area_struct *vma, unsigned long address,
			unsigned int flags)
{
	BUG();
	return 0;
}

static inline void hugetlb_unshare_all_pmds(struct vm_area_struct *vma) { }

#endif /* !CONFIG_HUGETLB_PAGE */
/*
 * hugepages at the page global directory. If an arch supports
 * hugepages at the pgd level, it needs to define this.
 */
#ifndef pgd_huge
#define pgd_huge(x)	0
#endif
#ifndef p4d_huge
#define p4d_huge(x)	0
#endif

#ifndef pgd_write
static inline int pgd_write(pgd_t pgd)
{
	BUG();
	return 0;
}
#endif

#define HUGETLB_ANON_FILE "anon_hugepage"

enum {
	/*
	 * The file will be used as a shm file so shmfs accounting rules
	 * apply.
	 */
	HUGETLB_SHMFS_INODE     = 1,
	/*
	 * The file is being created on the internal vfs mount and shmfs
	 * accounting rules do not apply.
	 */
	HUGETLB_ANONHUGE_INODE  = 2,
};

#ifdef CONFIG_HUGETLBFS
struct hugetlbfs_sb_info {
	long	max_inodes;   /* inodes allowed */
	long	free_inodes;  /* inodes free */
	spinlock_t	stat_lock;
	struct hstate *hstate;
	struct hugepage_subpool *spool;
	kuid_t	uid;
	kgid_t	gid;
	umode_t mode;
};

static inline struct hugetlbfs_sb_info *HUGETLBFS_SB(struct super_block *sb)
{
	return sb->s_fs_info;
}

struct hugetlbfs_inode_info {
	struct shared_policy policy;
	struct inode vfs_inode;
	unsigned int seals;
};

static inline struct hugetlbfs_inode_info *HUGETLBFS_I(struct inode *inode)
{
	return container_of(inode, struct hugetlbfs_inode_info, vfs_inode);
}

extern const struct file_operations hugetlbfs_file_operations;
extern const struct vm_operations_struct hugetlb_vm_ops;
struct file *hugetlb_file_setup(const char *name, size_t size, vm_flags_t acct,
				int creat_flags, int page_size_log);

static inline bool is_file_hugepages(struct file *file)
{
	if (file->f_op == &hugetlbfs_file_operations)
		return true;

	return is_file_shm_hugepages(file);
}

static inline struct hstate *hstate_inode(struct inode *i)
{
	return HUGETLBFS_SB(i->i_sb)->hstate;
}
#else /* !CONFIG_HUGETLBFS */

#define is_file_hugepages(file)		false
static inline struct file *
hugetlb_file_setup(const char *name, size_t size, vm_flags_t acctflag,
		int creat_flags, int page_size_log)
{
	return ERR_PTR(-ENOSYS);
}

static inline struct hstate *hstate_inode(struct inode *i)
{
	return NULL;
}
#endif /* !CONFIG_HUGETLBFS */

#ifdef HAVE_ARCH_HUGETLB_UNMAPPED_AREA
unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
					unsigned long len, unsigned long pgoff,
					unsigned long flags);
#endif /* HAVE_ARCH_HUGETLB_UNMAPPED_AREA */

unsigned long
generic_hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
				  unsigned long len, unsigned long pgoff,
				  unsigned long flags);

/*
 * hugetlb page specific state flags.  These flags are located in page.private
 * of the hugetlb head page.  Functions created via the below macros should be
 * used to manipulate these flags.
 *
 * HPG_restore_reserve - Set when a hugetlb page consumes a reservation at
 *	allocation time.  Cleared when page is fully instantiated.  Free
 *	routine checks flag to restore a reservation on error paths.
 *	Synchronization:  Examined or modified by code that knows it has
 *	the only reference to page.  i.e. After allocation but before use
 *	or when the page is being freed.
 * HPG_migratable - Set after a newly allocated page is added to the page
 *	cache and/or page tables.  Indicates the page is a candidate for
 *	migration.
 *	Synchronization:  Initially set after new page allocation with no
 *	locking.  When examined and modified during migration processing
 *	(isolate, migrate, putback) the hugetlb_lock is held.
 * HPG_temporary - Set on a page that is temporarily allocated from the buddy
 *	allocator.  Typically used for migration target pages when no pages
 *	are available in the pool.  The hugetlb free page path will
 *	immediately free pages with this flag set to the buddy allocator.
 *	Synchronization: Can be set after huge page allocation from buddy when
 *	code knows it has the only reference.  All other examinations and
 *	modifications require hugetlb_lock.
 * HPG_freed - Set when page is on the free lists.
 *	Synchronization: hugetlb_lock held for examination and modification.
 * HPG_vmemmap_optimized - Set when the vmemmap pages of the page are freed.
 */
enum hugetlb_page_flags {
	HPG_restore_reserve = 0,
	HPG_migratable,
	HPG_temporary,
	HPG_freed,
	HPG_vmemmap_optimized,
	__NR_HPAGEFLAGS,
};

/*
 * Macros to create test, set and clear function definitions for
 * hugetlb specific page flags.
 */
#ifdef CONFIG_HUGETLB_PAGE
#define TESTHPAGEFLAG(uname, flname)				\
static inline int HPage##uname(struct page *page)		\
	{ return test_bit(HPG_##flname, &(page->private)); }

#define SETHPAGEFLAG(uname, flname)				\
static inline void SetHPage##uname(struct page *page)		\
	{ set_bit(HPG_##flname, &(page->private)); }

#define CLEARHPAGEFLAG(uname, flname)				\
static inline void ClearHPage##uname(struct page *page)	\
	{ clear_bit(HPG_##flname, &(page->private)); }
#else
#define TESTHPAGEFLAG(uname, flname)				\
static inline int HPage##uname(struct page *page)		\
	{ return 0; }

#define SETHPAGEFLAG(uname, flname)				\
static inline void SetHPage##uname(struct page *page)		\
	{ }

#define CLEARHPAGEFLAG(uname, flname)				\
static inline void ClearHPage##uname(struct page *page)	\
	{ }
#endif

#define HPAGEFLAG(uname, flname)				\
	TESTHPAGEFLAG(uname, flname)				\
	SETHPAGEFLAG(uname, flname)				\
	CLEARHPAGEFLAG(uname, flname)				\

/*
 * Create functions associated with hugetlb page flags
 */
HPAGEFLAG(RestoreReserve, restore_reserve)
HPAGEFLAG(Migratable, migratable)
HPAGEFLAG(Temporary, temporary)
HPAGEFLAG(Freed, freed)
HPAGEFLAG(VmemmapOptimized, vmemmap_optimized)
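
/*
 * For example, HPAGEFLAG(Freed, freed) above expands (with CONFIG_HUGETLB_PAGE
 * enabled) into HPageFreed(), SetHPageFreed() and ClearHPageFreed(), which
 * test, set and clear the HPG_freed bit in the head page's page->private.
 */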

#ifdef CONFIG_HUGETLB_PAGE

#define HSTATE_NAME_LEN 32
/* Defines one hugetlb page size */
struct hstate {
	struct mutex resize_lock;
	int next_nid_to_alloc;
	int next_nid_to_free;
	unsigned int order;
	unsigned int demote_order;
	unsigned long mask;
	unsigned long max_huge_pages;
	unsigned long nr_huge_pages;
	unsigned long free_huge_pages;
	unsigned long resv_huge_pages;
	unsigned long surplus_huge_pages;
	unsigned long nr_overcommit_huge_pages;
	struct list_head hugepage_activelist;
	struct list_head hugepage_freelists[MAX_NUMNODES];
	unsigned int max_huge_pages_node[MAX_NUMNODES];
	unsigned int nr_huge_pages_node[MAX_NUMNODES];
	unsigned int free_huge_pages_node[MAX_NUMNODES];
	unsigned int surplus_huge_pages_node[MAX_NUMNODES];
#ifdef CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP
	unsigned int optimize_vmemmap_pages;
#endif
#ifdef CONFIG_CGROUP_HUGETLB
	/* cgroup control files */
	struct cftype cgroup_files_dfl[8];
	struct cftype cgroup_files_legacy[10];
#endif
	char name[HSTATE_NAME_LEN];
};

struct huge_bootmem_page {
	struct list_head list;
	struct hstate *hstate;
};

int isolate_or_dissolve_huge_page(struct page *page, struct list_head *list);
struct page *alloc_huge_page(struct vm_area_struct *vma,
				unsigned long addr, int avoid_reserve);
struct page *alloc_huge_page_nodemask(struct hstate *h, int preferred_nid,
				nodemask_t *nmask, gfp_t gfp_mask);
struct page *alloc_huge_page_vma(struct hstate *h, struct vm_area_struct *vma,
				unsigned long address);
int huge_add_to_page_cache(struct page *page, struct address_space *mapping,
			pgoff_t idx);
void restore_reserve_on_error(struct hstate *h, struct vm_area_struct *vma,
				unsigned long address, struct page *page);

/* arch callback */
int __init __alloc_bootmem_huge_page(struct hstate *h, int nid);
int __init alloc_bootmem_huge_page(struct hstate *h, int nid);
bool __init hugetlb_node_alloc_supported(void);

void __init hugetlb_add_hstate(unsigned order);
bool __init arch_hugetlb_valid_size(unsigned long size);
struct hstate *size_to_hstate(unsigned long size);

#ifndef HUGE_MAX_HSTATE
#define HUGE_MAX_HSTATE 1
#endif

extern struct hstate hstates[HUGE_MAX_HSTATE];
extern unsigned int default_hstate_idx;

#define default_hstate (hstates[default_hstate_idx])

/*
 * hugetlb page subpool pointer located in hpage[1].private
 */
static inline struct hugepage_subpool *hugetlb_page_subpool(struct page *hpage)
{
	return (void *)page_private(hpage + SUBPAGE_INDEX_SUBPOOL);
}

static inline void hugetlb_set_page_subpool(struct page *hpage,
					struct hugepage_subpool *subpool)
{
	set_page_private(hpage + SUBPAGE_INDEX_SUBPOOL, (unsigned long)subpool);
}

static inline struct hstate *hstate_file(struct file *f)
{
	return hstate_inode(file_inode(f));
}

static inline struct hstate *hstate_sizelog(int page_size_log)
{
	if (!page_size_log)
		return &default_hstate;

	return size_to_hstate(1UL << page_size_log);
}
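
/*
 * Note: page_size_log above is the log2 of the requested huge page size as
 * encoded in the mmap()/shmget() huge page flags (for example, MAP_HUGE_2MB
 * encodes 21); 0 means "use the default hstate".
 */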

static inline struct hstate *hstate_vma(struct vm_area_struct *vma)
{
	return hstate_file(vma->vm_file);
}

static inline unsigned long huge_page_size(struct hstate *h)
{
	return (unsigned long)PAGE_SIZE << h->order;
}

extern unsigned long vma_kernel_pagesize(struct vm_area_struct *vma);

extern unsigned long vma_mmu_pagesize(struct vm_area_struct *vma);

static inline unsigned long huge_page_mask(struct hstate *h)
{
	return h->mask;
}

static inline unsigned int huge_page_order(struct hstate *h)
{
	return h->order;
}

static inline unsigned huge_page_shift(struct hstate *h)
{
	return h->order + PAGE_SHIFT;
}

static inline bool hstate_is_gigantic(struct hstate *h)
{
	return huge_page_order(h) >= MAX_ORDER;
}

static inline unsigned int pages_per_huge_page(struct hstate *h)
{
	return 1 << h->order;
}

static inline unsigned int blocks_per_huge_page(struct hstate *h)
{
	return huge_page_size(h) / 512;
}
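
/*
 * Worked example (illustrative): for a 2 MiB huge page on x86-64, h->order
 * is 9, so huge_page_size() == 2 MiB, huge_page_shift() == 21,
 * pages_per_huge_page() == 512 and blocks_per_huge_page() == 4096 (counting
 * 512-byte blocks).  A 1 GiB page (order 18) is well above MAX_ORDER there,
 * so hstate_is_gigantic() is true for it.
 */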

#include <asm/hugetlb.h>

#ifndef is_hugepage_only_range
static inline int is_hugepage_only_range(struct mm_struct *mm,
					unsigned long addr, unsigned long len)
{
	return 0;
}
#define is_hugepage_only_range is_hugepage_only_range
#endif

#ifndef arch_clear_hugepage_flags
static inline void arch_clear_hugepage_flags(struct page *page) { }
#define arch_clear_hugepage_flags arch_clear_hugepage_flags
#endif

#ifndef arch_make_huge_pte
static inline pte_t arch_make_huge_pte(pte_t entry, unsigned int shift,
				       vm_flags_t flags)
{
	return pte_mkhuge(entry);
}
#endif

static inline struct hstate *page_hstate(struct page *page)
{
	VM_BUG_ON_PAGE(!PageHuge(page), page);
	return size_to_hstate(page_size(page));
}

static inline unsigned hstate_index_to_shift(unsigned index)
{
	return hstates[index].order + PAGE_SHIFT;
}

static inline int hstate_index(struct hstate *h)
{
	return h - hstates;
}

extern int dissolve_free_huge_page(struct page *page);
extern int dissolve_free_huge_pages(unsigned long start_pfn,
				    unsigned long end_pfn);

#ifdef CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION
#ifndef arch_hugetlb_migration_supported
static inline bool arch_hugetlb_migration_supported(struct hstate *h)
{
	if ((huge_page_shift(h) == PMD_SHIFT) ||
		(huge_page_shift(h) == PUD_SHIFT) ||
			(huge_page_shift(h) == PGDIR_SHIFT))
		return true;
	else
		return false;
}
#endif
#else
static inline bool arch_hugetlb_migration_supported(struct hstate *h)
{
	return false;
}
#endif

static inline bool hugepage_migration_supported(struct hstate *h)
{
	return arch_hugetlb_migration_supported(h);
}

/*
 * The movability check is different from the migration check.
 * It determines whether or not a huge page should be placed in
 * a movable zone. Movability of any huge page should be
 * required only if the huge page size is supported for migration.
 * There won't be any reason for the huge page to be movable if
 * it is not migratable to start with. Also the size of the huge
 * page should be large enough to be placed under a movable zone
 * and still feasible enough to be migratable. Just the presence
 * in a movable zone does not make the migration feasible.
 *
 * So even though large huge page sizes like the gigantic ones
 * are migratable they should not be movable because it's not
 * feasible to migrate them from a movable zone.
 */
static inline bool hugepage_movable_supported(struct hstate *h)
{
	if (!hugepage_migration_supported(h))
		return false;

	if (hstate_is_gigantic(h))
		return false;
	return true;
}

/* Movability of hugepages depends on migration support. */
static inline gfp_t htlb_alloc_mask(struct hstate *h)
{
	if (hugepage_movable_supported(h))
		return GFP_HIGHUSER_MOVABLE;
	else
		return GFP_HIGHUSER;
}

static inline gfp_t htlb_modify_alloc_mask(struct hstate *h, gfp_t gfp_mask)
{
	gfp_t modified_mask = htlb_alloc_mask(h);

	/* Some callers might want to enforce node */
	modified_mask |= (gfp_mask & __GFP_THISNODE);

	modified_mask |= (gfp_mask & __GFP_NOWARN);

	return modified_mask;
}

static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
					   struct mm_struct *mm, pte_t *pte)
{
	if (huge_page_size(h) == PMD_SIZE)
		return pmd_lockptr(mm, (pmd_t *) pte);
	VM_BUG_ON(huge_page_size(h) == PAGE_SIZE);
	return &mm->page_table_lock;
}

#ifndef hugepages_supported
/*
 * Some platforms decide whether they support huge pages at boot
 * time. Some of them, such as powerpc, set HPAGE_SHIFT to 0
 * when there is no such support.
 */
#define hugepages_supported() (HPAGE_SHIFT != 0)
#endif

void hugetlb_report_usage(struct seq_file *m, struct mm_struct *mm);

static inline void hugetlb_count_init(struct mm_struct *mm)
{
	atomic_long_set(&mm->hugetlb_usage, 0);
}

static inline void hugetlb_count_add(long l, struct mm_struct *mm)
{
	atomic_long_add(l, &mm->hugetlb_usage);
}

static inline void hugetlb_count_sub(long l, struct mm_struct *mm)
{
	atomic_long_sub(l, &mm->hugetlb_usage);
}

#ifndef set_huge_swap_pte_at
static inline void set_huge_swap_pte_at(struct mm_struct *mm, unsigned long addr,
					pte_t *ptep, pte_t pte, unsigned long sz)
{
	set_huge_pte_at(mm, addr, ptep, pte);
}
#endif

#ifndef huge_ptep_modify_prot_start
#define huge_ptep_modify_prot_start huge_ptep_modify_prot_start
static inline pte_t huge_ptep_modify_prot_start(struct vm_area_struct *vma,
						unsigned long addr, pte_t *ptep)
{
	return huge_ptep_get_and_clear(vma->vm_mm, addr, ptep);
}
#endif

#ifndef huge_ptep_modify_prot_commit
#define huge_ptep_modify_prot_commit huge_ptep_modify_prot_commit
static inline void huge_ptep_modify_prot_commit(struct vm_area_struct *vma,
						unsigned long addr, pte_t *ptep,
						pte_t old_pte, pte_t pte)
{
	set_huge_pte_at(vma->vm_mm, addr, ptep, pte);
}
#endif

#else	/* CONFIG_HUGETLB_PAGE */
struct hstate {};

static inline struct hugepage_subpool *hugetlb_page_subpool(struct page *hpage)
{
	return NULL;
}

static inline int isolate_or_dissolve_huge_page(struct page *page,
						struct list_head *list)
{
	return -ENOMEM;
}

static inline struct page *alloc_huge_page(struct vm_area_struct *vma,
					   unsigned long addr,
					   int avoid_reserve)
{
	return NULL;
}

static inline struct page *
alloc_huge_page_nodemask(struct hstate *h, int preferred_nid,
			nodemask_t *nmask, gfp_t gfp_mask)
{
	return NULL;
}

static inline struct page *alloc_huge_page_vma(struct hstate *h,
					       struct vm_area_struct *vma,
					       unsigned long address)
{
	return NULL;
}

static inline int __alloc_bootmem_huge_page(struct hstate *h)
{
	return 0;
}

static inline struct hstate *hstate_file(struct file *f)
{
	return NULL;
}

static inline struct hstate *hstate_sizelog(int page_size_log)
{
	return NULL;
}

static inline struct hstate *hstate_vma(struct vm_area_struct *vma)
{
	return NULL;
}

static inline struct hstate *page_hstate(struct page *page)
{
	return NULL;
}

static inline struct hstate *size_to_hstate(unsigned long size)
{
	return NULL;
}

static inline unsigned long huge_page_size(struct hstate *h)
{
	return PAGE_SIZE;
}

static inline unsigned long huge_page_mask(struct hstate *h)
{
	return PAGE_MASK;
}

static inline unsigned long vma_kernel_pagesize(struct vm_area_struct *vma)
{
	return PAGE_SIZE;
}

static inline unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
{
	return PAGE_SIZE;
}

static inline unsigned int huge_page_order(struct hstate *h)
{
	return 0;
}

static inline unsigned int huge_page_shift(struct hstate *h)
{
	return PAGE_SHIFT;
}

static inline bool hstate_is_gigantic(struct hstate *h)
{
	return false;
}

static inline unsigned int pages_per_huge_page(struct hstate *h)
{
	return 1;
}

static inline unsigned hstate_index_to_shift(unsigned index)
{
	return 0;
}

static inline int hstate_index(struct hstate *h)
{
	return 0;
}

static inline int dissolve_free_huge_page(struct page *page)
{
	return 0;
}

static inline int dissolve_free_huge_pages(unsigned long start_pfn,
					   unsigned long end_pfn)
{
	return 0;
}

static inline bool hugepage_migration_supported(struct hstate *h)
{
	return false;
}

static inline bool hugepage_movable_supported(struct hstate *h)
{
	return false;
}

static inline gfp_t htlb_alloc_mask(struct hstate *h)
{
	return 0;
}

static inline gfp_t htlb_modify_alloc_mask(struct hstate *h, gfp_t gfp_mask)
{
	return 0;
}

static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
					   struct mm_struct *mm, pte_t *pte)
{
	return &mm->page_table_lock;
}

static inline void hugetlb_count_init(struct mm_struct *mm)
{
}

static inline void hugetlb_report_usage(struct seq_file *f, struct mm_struct *m)
{
}

static inline void hugetlb_count_sub(long l, struct mm_struct *mm)
{
}

static inline void set_huge_swap_pte_at(struct mm_struct *mm, unsigned long addr,
					pte_t *ptep, pte_t pte, unsigned long sz)
{
}

static inline pte_t huge_ptep_clear_flush(struct vm_area_struct *vma,
					  unsigned long addr, pte_t *ptep)
{
	return *ptep;
}

static inline void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
				   pte_t *ptep, pte_t pte)
{
}
#endif	/* CONFIG_HUGETLB_PAGE */

static inline spinlock_t *huge_pte_lock(struct hstate *h,
					struct mm_struct *mm, pte_t *pte)
{
	spinlock_t *ptl;

	ptl = huge_pte_lockptr(h, mm, pte);
	spin_lock(ptl);
	return ptl;
}
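
/*
 * Typical usage sketch: the caller takes the lock via huge_pte_lock() and is
 * responsible for dropping it, e.g.
 *
 *	ptl = huge_pte_lock(h, mm, ptep);
 *	... examine or update the huge PTE ...
 *	spin_unlock(ptl);
 */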

#if defined(CONFIG_HUGETLB_PAGE) && defined(CONFIG_CMA)
extern void __init hugetlb_cma_reserve(int order);
extern void __init hugetlb_cma_check(void);
#else
static inline __init void hugetlb_cma_reserve(int order)
{
}
static inline __init void hugetlb_cma_check(void)
{
}
#endif

bool want_pmd_share(struct vm_area_struct *vma, unsigned long addr);

#ifndef __HAVE_ARCH_FLUSH_HUGETLB_TLB_RANGE
/*
 * ARCHes with special requirements for evicting HUGETLB backing TLB entries can
 * implement this.
 */
#define flush_hugetlb_tlb_range(vma, addr, end)	flush_tlb_range(vma, addr, end)
#endif

#endif /* _LINUX_HUGETLB_H */