/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_HUGETLB_H
#define _LINUX_HUGETLB_H

#include <linux/mm_types.h>
#include <linux/mmdebug.h>
#include <linux/fs.h>
#include <linux/hugetlb_inline.h>
#include <linux/cgroup.h>
#include <linux/list.h>
#include <linux/kref.h>
#include <linux/pgtable.h>

struct ctl_table;
struct user_struct;
struct mmu_gather;

#ifndef is_hugepd
typedef struct { unsigned long pd; } hugepd_t;
#define is_hugepd(hugepd) (0)
#define __hugepd(x) ((hugepd_t) { (x) })
#endif

#ifdef CONFIG_HUGETLB_PAGE

#include <linux/mempolicy.h>
#include <linux/shm.h>
#include <asm/tlbflush.h>

struct hugepage_subpool {
	spinlock_t lock;
	long count;
	long max_hpages;	/* Maximum huge pages or -1 if no maximum. */
	long used_hpages;	/* Used count against maximum, includes */
				/* both allocated and reserved pages. */
	struct hstate *hstate;
	long min_hpages;	/* Minimum huge pages or -1 if no minimum. */
	long rsv_hpages;	/* Pages reserved against global pool to */
				/* satisfy minimum size. */
};

struct resv_map {
	struct kref refs;
	spinlock_t lock;
	struct list_head regions;
	long adds_in_progress;
	struct list_head region_cache;
	long region_cache_count;
#ifdef CONFIG_CGROUP_HUGETLB
	/*
	 * On private mappings, the counter to uncharge reservations is stored
	 * here. If these fields are 0, then either the mapping is shared, or
	 * cgroup accounting is disabled for this resv_map.
	 */
	struct page_counter *reservation_counter;
	unsigned long pages_per_hpage;
	struct cgroup_subsys_state *css;
#endif
};

/*
 * Region tracking -- allows tracking of reservations and instantiated pages
 * across the pages in a mapping.
 *
 * The region data structures are embedded into a resv_map and protected
 * by a resv_map's lock. The set of regions within the resv_map represent
 * reservations for huge pages, or huge pages that have already been
 * instantiated within the map. The from and to elements are huge page
 * indices into the associated mapping. from indicates the starting index
 * of the region. to represents the first index past the end of the region.
 *
 * For example, a file region structure with from == 0 and to == 4 represents
 * four huge pages in a mapping. It is important to note that the to element
 * represents the first element past the end of the region. This is used in
 * arithmetic as 4(to) - 0(from) = 4 huge pages in the region.
 *
 * Interval notation of the form [from, to) will be used to indicate that
 * the endpoint from is inclusive and to is exclusive.
 */
struct file_region {
	struct list_head link;
	long from;
	long to;
#ifdef CONFIG_CGROUP_HUGETLB
	/*
	 * On shared mappings, each reserved region appears as a struct
	 * file_region in resv_map. These fields hold the info needed to
	 * uncharge each reservation.
	 */
	struct page_counter *reservation_counter;
	struct cgroup_subsys_state *css;
#endif
};

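/*
 * Illustrative sketch, not part of this header: counting the huge pages
 * covered by a resv_map's regions using the [from, to) convention
 * described above. count_region_pages() is a hypothetical helper; it
 * assumes the caller already holds resv_map->lock.
 */
#if 0
static long count_region_pages(struct resv_map *resv)
{
	struct file_region *rg;
	long pages = 0;

	/* Each region covers (to - from) huge pages, e.g. [0, 4) -> 4. */
	list_for_each_entry(rg, &resv->regions, link)
		pages += rg->to - rg->from;

	return pages;
}
#endif
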
extern struct resv_map *resv_map_alloc(void);
void resv_map_release(struct kref *ref);

extern spinlock_t hugetlb_lock;
extern int hugetlb_max_hstate __read_mostly;
#define for_each_hstate(h) \
	for ((h) = hstates; (h) < &hstates[hugetlb_max_hstate]; (h)++)

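/*
 * Illustrative sketch, not part of this header: walking every registered
 * huge page size with for_each_hstate(). print_hstate_sizes() is a
 * hypothetical helper; hstates[] and huge_page_size() are declared
 * further down in this file.
 */
#if 0
static void print_hstate_sizes(void)
{
	struct hstate *h;

	/* Visits hstates[0] .. hstates[hugetlb_max_hstate - 1]. */
	for_each_hstate(h)
		pr_info("%s: %lu bytes\n", h->name, huge_page_size(h));
}
#endif
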
struct hugepage_subpool *hugepage_new_subpool(struct hstate *h, long max_hpages,
						long min_hpages);
void hugepage_put_subpool(struct hugepage_subpool *spool);

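/*
 * Illustrative sketch, not part of this header: creating a subpool that
 * caps usage at 16 huge pages of the default size and guarantees at
 * least 4 (the max_hpages/min_hpages arguments above), then dropping
 * the reference. subpool_example() is hypothetical; hugepage_new_subpool()
 * returns NULL on failure.
 */
#if 0
static void subpool_example(void)
{
	struct hugepage_subpool *spool;

	spool = hugepage_new_subpool(&default_hstate, 16, 4);
	if (spool)
		hugepage_put_subpool(spool);
}
#endif
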
void reset_vma_resv_huge_pages(struct vm_area_struct *vma);
int hugetlb_sysctl_handler(struct ctl_table *, int, void *, size_t *, loff_t *);
int hugetlb_overcommit_handler(struct ctl_table *, int, void *, size_t *,
		loff_t *);
int hugetlb_treat_movable_handler(struct ctl_table *, int, void *, size_t *,
		loff_t *);
int hugetlb_mempolicy_sysctl_handler(struct ctl_table *, int, void *, size_t *,
		loff_t *);

int copy_hugetlb_page_range(struct mm_struct *, struct mm_struct *, struct vm_area_struct *);
long follow_hugetlb_page(struct mm_struct *, struct vm_area_struct *,
			 struct page **, struct vm_area_struct **,
			 unsigned long *, unsigned long *, long, unsigned int,
			 int *);
void unmap_hugepage_range(struct vm_area_struct *,
			  unsigned long, unsigned long, struct page *);
void __unmap_hugepage_range_final(struct mmu_gather *tlb,
			  struct vm_area_struct *vma,
			  unsigned long start, unsigned long end,
			  struct page *ref_page);
void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
				unsigned long start, unsigned long end,
				struct page *ref_page);
void hugetlb_report_meminfo(struct seq_file *);
int hugetlb_report_node_meminfo(int, char *);
void hugetlb_show_meminfo(void);
unsigned long hugetlb_total_pages(void);
vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
			unsigned long address, unsigned int flags);
int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm, pte_t *dst_pte,
				struct vm_area_struct *dst_vma,
				unsigned long dst_addr,
				unsigned long src_addr,
				struct page **pagep);
int hugetlb_reserve_pages(struct inode *inode, long from, long to,
						struct vm_area_struct *vma,
						vm_flags_t vm_flags);
long hugetlb_unreserve_pages(struct inode *inode, long start, long end,
						long freed);
bool isolate_huge_page(struct page *page, struct list_head *list);
void putback_active_hugepage(struct page *page);
void move_hugetlb_state(struct page *oldpage, struct page *newpage, int reason);
void free_huge_page(struct page *page);
void hugetlb_fix_reserve_counts(struct inode *inode);
extern struct mutex *hugetlb_fault_mutex_table;
u32 hugetlb_fault_mutex_hash(struct address_space *mapping, pgoff_t idx);

pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud);

struct address_space *hugetlb_page_mapping_lock_write(struct page *hpage);

extern int sysctl_hugetlb_shm_group;
extern struct list_head huge_boot_pages;

/* arch callbacks */

pte_t *huge_pte_alloc(struct mm_struct *mm,
			unsigned long addr, unsigned long sz);
pte_t *huge_pte_offset(struct mm_struct *mm,
		       unsigned long addr, unsigned long sz);
int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep);
void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma,
				unsigned long *start, unsigned long *end);
struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address,
			      int write);
struct page *follow_huge_pd(struct vm_area_struct *vma,
			    unsigned long address, hugepd_t hpd,
			    int flags, int pdshift);
struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
				pmd_t *pmd, int flags);
struct page *follow_huge_pud(struct mm_struct *mm, unsigned long address,
				pud_t *pud, int flags);
struct page *follow_huge_pgd(struct mm_struct *mm, unsigned long address,
			     pgd_t *pgd, int flags);

int pmd_huge(pmd_t pmd);
int pud_huge(pud_t pud);
unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
		unsigned long address, unsigned long end, pgprot_t newprot);

bool is_hugetlb_entry_migration(pte_t pte);

#else /* !CONFIG_HUGETLB_PAGE */

static inline void reset_vma_resv_huge_pages(struct vm_area_struct *vma)
{
}

static inline unsigned long hugetlb_total_pages(void)
{
	return 0;
}

static inline struct address_space *hugetlb_page_mapping_lock_write(
							struct page *hpage)
{
	return NULL;
}

static inline int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr,
					pte_t *ptep)
{
	return 0;
}

static inline void adjust_range_if_pmd_sharing_possible(
				struct vm_area_struct *vma,
				unsigned long *start, unsigned long *end)
{
}

static inline long follow_hugetlb_page(struct mm_struct *mm,
			struct vm_area_struct *vma, struct page **pages,
			struct vm_area_struct **vmas, unsigned long *position,
			unsigned long *nr_pages, long i, unsigned int flags,
			int *nonblocking)
{
	BUG();
	return 0;
}

static inline struct page *follow_huge_addr(struct mm_struct *mm,
					unsigned long address, int write)
{
	return ERR_PTR(-EINVAL);
}

static inline int copy_hugetlb_page_range(struct mm_struct *dst,
			struct mm_struct *src, struct vm_area_struct *vma)
{
	BUG();
	return 0;
}

static inline void hugetlb_report_meminfo(struct seq_file *m)
{
}

static inline int hugetlb_report_node_meminfo(int nid, char *buf)
{
	return 0;
}

static inline void hugetlb_show_meminfo(void)
{
}

static inline struct page *follow_huge_pd(struct vm_area_struct *vma,
				unsigned long address, hugepd_t hpd, int flags,
				int pdshift)
{
	return NULL;
}

static inline struct page *follow_huge_pmd(struct mm_struct *mm,
				unsigned long address, pmd_t *pmd, int flags)
{
	return NULL;
}

static inline struct page *follow_huge_pud(struct mm_struct *mm,
				unsigned long address, pud_t *pud, int flags)
{
	return NULL;
}

static inline struct page *follow_huge_pgd(struct mm_struct *mm,
				unsigned long address, pgd_t *pgd, int flags)
{
	return NULL;
}

static inline int prepare_hugepage_range(struct file *file,
				unsigned long addr, unsigned long len)
{
	return -EINVAL;
}

static inline int pmd_huge(pmd_t pmd)
{
	return 0;
}

static inline int pud_huge(pud_t pud)
{
	return 0;
}

static inline int is_hugepage_only_range(struct mm_struct *mm,
					unsigned long addr, unsigned long len)
{
	return 0;
}

static inline void hugetlb_free_pgd_range(struct mmu_gather *tlb,
				unsigned long addr, unsigned long end,
				unsigned long floor, unsigned long ceiling)
{
	BUG();
}

static inline int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm,
						pte_t *dst_pte,
						struct vm_area_struct *dst_vma,
						unsigned long dst_addr,
						unsigned long src_addr,
						struct page **pagep)
{
	BUG();
	return 0;
}

static inline pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr,
					unsigned long sz)
{
	return NULL;
}

static inline bool isolate_huge_page(struct page *page, struct list_head *list)
{
	return false;
}

static inline void putback_active_hugepage(struct page *page)
{
}

static inline void move_hugetlb_state(struct page *oldpage,
					struct page *newpage, int reason)
{
}

static inline unsigned long hugetlb_change_protection(
			struct vm_area_struct *vma, unsigned long address,
			unsigned long end, pgprot_t newprot)
{
	return 0;
}

static inline void __unmap_hugepage_range_final(struct mmu_gather *tlb,
			struct vm_area_struct *vma, unsigned long start,
			unsigned long end, struct page *ref_page)
{
	BUG();
}

static inline void __unmap_hugepage_range(struct mmu_gather *tlb,
			struct vm_area_struct *vma, unsigned long start,
			unsigned long end, struct page *ref_page)
{
	BUG();
}

static inline vm_fault_t hugetlb_fault(struct mm_struct *mm,
			struct vm_area_struct *vma, unsigned long address,
			unsigned int flags)
{
	BUG();
	return 0;
}

#endif /* !CONFIG_HUGETLB_PAGE */
/*
 * hugepages at page global directory. If an arch supports
 * hugepages at the pgd level, it needs to define this.
 */
#ifndef pgd_huge
#define pgd_huge(x)	0
#endif
#ifndef p4d_huge
#define p4d_huge(x)	0
#endif

#ifndef pgd_write
static inline int pgd_write(pgd_t pgd)
{
	BUG();
	return 0;
}
#endif

#define HUGETLB_ANON_FILE "anon_hugepage"

enum {
	/*
	 * The file will be used as an shm file so shmfs accounting rules
	 * apply.
	 */
	HUGETLB_SHMFS_INODE = 1,
	/*
	 * The file is being created on the internal vfs mount and shmfs
	 * accounting rules do not apply.
	 */
	HUGETLB_ANONHUGE_INODE = 2,
};

#ifdef CONFIG_HUGETLBFS
struct hugetlbfs_sb_info {
	long	max_inodes;   /* inodes allowed */
	long	free_inodes;  /* inodes free */
	spinlock_t	stat_lock;
	struct hstate *hstate;
	struct hugepage_subpool *spool;
	kuid_t	uid;
	kgid_t	gid;
	umode_t mode;
};

static inline struct hugetlbfs_sb_info *HUGETLBFS_SB(struct super_block *sb)
{
	return sb->s_fs_info;
}

struct hugetlbfs_inode_info {
	struct shared_policy policy;
	struct inode vfs_inode;
	unsigned int seals;
};

static inline struct hugetlbfs_inode_info *HUGETLBFS_I(struct inode *inode)
{
	return container_of(inode, struct hugetlbfs_inode_info, vfs_inode);
}

extern const struct file_operations hugetlbfs_file_operations;
extern const struct vm_operations_struct hugetlb_vm_ops;
struct file *hugetlb_file_setup(const char *name, size_t size, vm_flags_t acct,
				struct user_struct **user, int creat_flags,
				int page_size_log);

static inline bool is_file_hugepages(struct file *file)
{
	if (file->f_op == &hugetlbfs_file_operations)
		return true;

	return is_file_shm_hugepages(file);
}

static inline struct hstate *hstate_inode(struct inode *i)
{
	return HUGETLBFS_SB(i->i_sb)->hstate;
}
#else /* !CONFIG_HUGETLBFS */

#define is_file_hugepages(file)			false
static inline struct file *
hugetlb_file_setup(const char *name, size_t size, vm_flags_t acctflag,
		struct user_struct **user, int creat_flags,
		int page_size_log)
{
	return ERR_PTR(-ENOSYS);
}

static inline struct hstate *hstate_inode(struct inode *i)
{
	return NULL;
}
#endif /* !CONFIG_HUGETLBFS */

#ifdef HAVE_ARCH_HUGETLB_UNMAPPED_AREA
unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
					unsigned long len, unsigned long pgoff,
					unsigned long flags);
#endif /* HAVE_ARCH_HUGETLB_UNMAPPED_AREA */

#ifdef CONFIG_HUGETLB_PAGE

#define HSTATE_NAME_LEN 32
/* Defines one hugetlb page size */
struct hstate {
	int next_nid_to_alloc;
	int next_nid_to_free;
	unsigned int order;
	unsigned long mask;
	unsigned long max_huge_pages;
	unsigned long nr_huge_pages;
	unsigned long free_huge_pages;
	unsigned long resv_huge_pages;
	unsigned long surplus_huge_pages;
	unsigned long nr_overcommit_huge_pages;
	struct list_head hugepage_activelist;
	struct list_head hugepage_freelists[MAX_NUMNODES];
	unsigned int nr_huge_pages_node[MAX_NUMNODES];
	unsigned int free_huge_pages_node[MAX_NUMNODES];
	unsigned int surplus_huge_pages_node[MAX_NUMNODES];
#ifdef CONFIG_CGROUP_HUGETLB
	/* cgroup control files */
	struct cftype cgroup_files_dfl[7];
	struct cftype cgroup_files_legacy[9];
#endif
	char name[HSTATE_NAME_LEN];
};

struct huge_bootmem_page {
	struct list_head list;
	struct hstate *hstate;
};

struct page *alloc_huge_page(struct vm_area_struct *vma,
				unsigned long addr, int avoid_reserve);
struct page *alloc_huge_page_node(struct hstate *h, int nid);
struct page *alloc_huge_page_nodemask(struct hstate *h, int preferred_nid,
				nodemask_t *nmask);
struct page *alloc_huge_page_vma(struct hstate *h, struct vm_area_struct *vma,
				unsigned long address);
struct page *alloc_migrate_huge_page(struct hstate *h, gfp_t gfp_mask,
				     int nid, nodemask_t *nmask);
int huge_add_to_page_cache(struct page *page, struct address_space *mapping,
			pgoff_t idx);

/* arch callback */
int __init __alloc_bootmem_huge_page(struct hstate *h);
int __init alloc_bootmem_huge_page(struct hstate *h);

void __init hugetlb_add_hstate(unsigned order);
bool __init arch_hugetlb_valid_size(unsigned long size);
struct hstate *size_to_hstate(unsigned long size);

#ifndef HUGE_MAX_HSTATE
#define HUGE_MAX_HSTATE 1
#endif

extern struct hstate hstates[HUGE_MAX_HSTATE];
extern unsigned int default_hstate_idx;

#define default_hstate (hstates[default_hstate_idx])

static inline struct hstate *hstate_file(struct file *f)
{
	return hstate_inode(file_inode(f));
}

static inline struct hstate *hstate_sizelog(int page_size_log)
{
	if (!page_size_log)
		return &default_hstate;

	return size_to_hstate(1UL << page_size_log);
}

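/*
 * Illustrative sketch, not part of this header: hstate_sizelog() maps a
 * log2 page size to its hstate, e.g. 21 selects the 2 MB hstate on
 * x86-64 (1UL << 21 == 2 MB), while 0 falls back to the default hstate.
 * two_mb_hstate() is a hypothetical helper; size_to_hstate() returns
 * NULL if no hstate of that size is registered.
 */
#if 0
static struct hstate *two_mb_hstate(void)
{
	return hstate_sizelog(21);
}
#endif
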
static inline struct hstate *hstate_vma(struct vm_area_struct *vma)
{
	return hstate_file(vma->vm_file);
}

static inline unsigned long huge_page_size(struct hstate *h)
{
	return (unsigned long)PAGE_SIZE << h->order;
}

extern unsigned long vma_kernel_pagesize(struct vm_area_struct *vma);

extern unsigned long vma_mmu_pagesize(struct vm_area_struct *vma);

static inline unsigned long huge_page_mask(struct hstate *h)
{
	return h->mask;
}

static inline unsigned int huge_page_order(struct hstate *h)
{
	return h->order;
}

static inline unsigned huge_page_shift(struct hstate *h)
{
	return h->order + PAGE_SHIFT;
}

static inline bool hstate_is_gigantic(struct hstate *h)
{
	return huge_page_order(h) >= MAX_ORDER;
}

static inline unsigned int pages_per_huge_page(struct hstate *h)
{
	return 1 << h->order;
}

static inline unsigned int blocks_per_huge_page(struct hstate *h)
{
	return huge_page_size(h) / 512;
}

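/*
 * Illustrative worked example, not part of this header: for a 2 MB huge
 * page on x86-64, h->order is 9, so huge_page_size() returns
 * 4096 << 9 == 2097152, huge_page_shift() returns 9 + 12 == 21,
 * pages_per_huge_page() returns 1 << 9 == 512 base pages, and
 * blocks_per_huge_page() returns 2097152 / 512 == 4096 sectors.
 */
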
#include <asm/hugetlb.h>

#ifndef is_hugepage_only_range
static inline int is_hugepage_only_range(struct mm_struct *mm,
					unsigned long addr, unsigned long len)
{
	return 0;
}
#define is_hugepage_only_range is_hugepage_only_range
#endif

#ifndef arch_clear_hugepage_flags
static inline void arch_clear_hugepage_flags(struct page *page) { }
#define arch_clear_hugepage_flags arch_clear_hugepage_flags
#endif

#ifndef arch_make_huge_pte
static inline pte_t arch_make_huge_pte(pte_t entry, struct vm_area_struct *vma,
				       struct page *page, int writable)
{
	return entry;
}
#endif

static inline struct hstate *page_hstate(struct page *page)
{
	VM_BUG_ON_PAGE(!PageHuge(page), page);
	return size_to_hstate(page_size(page));
}

static inline unsigned hstate_index_to_shift(unsigned index)
{
	return hstates[index].order + PAGE_SHIFT;
}

static inline int hstate_index(struct hstate *h)
{
	return h - hstates;
}

pgoff_t __basepage_index(struct page *page);

/* Return page->index in PAGE_SIZE units */
static inline pgoff_t basepage_index(struct page *page)
{
	if (!PageCompound(page))
		return page->index;

	return __basepage_index(page);
}

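/*
 * Illustrative worked example, not part of this header: a base page
 * reports page->index directly, while a page inside a 2 MB huge page at
 * huge page index 3 resolves through __basepage_index() (defined in
 * mm/hugetlb.c) to an index in PAGE_SIZE units, i.e. 3 * 512 plus the
 * page's offset within the compound page.
 */
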
extern int dissolve_free_huge_page(struct page *page);
extern int dissolve_free_huge_pages(unsigned long start_pfn,
				    unsigned long end_pfn);

#ifdef CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION
#ifndef arch_hugetlb_migration_supported
static inline bool arch_hugetlb_migration_supported(struct hstate *h)
{
	if ((huge_page_shift(h) == PMD_SHIFT) ||
		(huge_page_shift(h) == PUD_SHIFT) ||
			(huge_page_shift(h) == PGDIR_SHIFT))
		return true;
	else
		return false;
}
#endif
#else
static inline bool arch_hugetlb_migration_supported(struct hstate *h)
{
	return false;
}
#endif

static inline bool hugepage_migration_supported(struct hstate *h)
{
	return arch_hugetlb_migration_supported(h);
}

/*
 * The movability check is different from the migration check.
 * It determines whether or not a huge page should be placed in
 * the movable zone. Movability of any huge page is required
 * only if the huge page size is supported for migration.
 * There won't be any reason for the huge page to be movable if
 * it is not migratable to start with. Also the size of the huge
 * page should be large enough to be placed under a movable zone
 * and still feasible enough to be migratable. Just the presence
 * in the movable zone does not make the migration feasible.
 *
 * So even though large huge page sizes like the gigantic ones
 * are migratable, they should not be movable because it is not
 * feasible to migrate them from the movable zone.
 */
static inline bool hugepage_movable_supported(struct hstate *h)
{
	if (!hugepage_migration_supported(h))
		return false;

	if (hstate_is_gigantic(h))
		return false;
	return true;
}

static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
					struct mm_struct *mm, pte_t *pte)
{
	if (huge_page_size(h) == PMD_SIZE)
		return pmd_lockptr(mm, (pmd_t *) pte);
	VM_BUG_ON(huge_page_size(h) == PAGE_SIZE);
	return &mm->page_table_lock;
}

#ifndef hugepages_supported
/*
 * Some platforms decide whether they support huge pages at boot
 * time. Some of them, such as powerpc, set HPAGE_SHIFT to 0
 * when there is no such support.
 */
#define hugepages_supported() (HPAGE_SHIFT != 0)
#endif

void hugetlb_report_usage(struct seq_file *m, struct mm_struct *mm);

static inline void hugetlb_count_add(long l, struct mm_struct *mm)
{
	atomic_long_add(l, &mm->hugetlb_usage);
}

static inline void hugetlb_count_sub(long l, struct mm_struct *mm)
{
	atomic_long_sub(l, &mm->hugetlb_usage);
}

#ifndef set_huge_swap_pte_at
static inline void set_huge_swap_pte_at(struct mm_struct *mm, unsigned long addr,
					pte_t *ptep, pte_t pte, unsigned long sz)
{
	set_huge_pte_at(mm, addr, ptep, pte);
}
#endif

#ifndef huge_ptep_modify_prot_start
#define huge_ptep_modify_prot_start huge_ptep_modify_prot_start
static inline pte_t huge_ptep_modify_prot_start(struct vm_area_struct *vma,
						unsigned long addr, pte_t *ptep)
{
	return huge_ptep_get_and_clear(vma->vm_mm, addr, ptep);
}
#endif

#ifndef huge_ptep_modify_prot_commit
#define huge_ptep_modify_prot_commit huge_ptep_modify_prot_commit
static inline void huge_ptep_modify_prot_commit(struct vm_area_struct *vma,
						unsigned long addr, pte_t *ptep,
						pte_t old_pte, pte_t pte)
{
	set_huge_pte_at(vma->vm_mm, addr, ptep, pte);
}
#endif

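/*
 * Illustrative sketch, not part of this header: the start/commit pair
 * brackets a protection change on a single huge PTE, simplified from
 * how hugetlb_change_protection() uses it. change_one_huge_pte() is a
 * hypothetical helper and assumes the caller holds the page table lock
 * for ptep; huge_pte_modify() comes from <asm-generic/hugetlb.h>.
 */
#if 0
static void change_one_huge_pte(struct vm_area_struct *vma,
				unsigned long addr, pte_t *ptep,
				pgprot_t newprot)
{
	pte_t old_pte, pte;

	old_pte = huge_ptep_modify_prot_start(vma, addr, ptep);
	pte = huge_pte_modify(old_pte, newprot);	/* apply new protection */
	huge_ptep_modify_prot_commit(vma, addr, ptep, old_pte, pte);
}
#endif
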
#else	/* CONFIG_HUGETLB_PAGE */
struct hstate {};

static inline struct page *alloc_huge_page(struct vm_area_struct *vma,
					   unsigned long addr,
					   int avoid_reserve)
{
	return NULL;
}

static inline struct page *alloc_huge_page_node(struct hstate *h, int nid)
{
	return NULL;
}

static inline struct page *
alloc_huge_page_nodemask(struct hstate *h, int preferred_nid, nodemask_t *nmask)
{
	return NULL;
}

static inline struct page *alloc_huge_page_vma(struct hstate *h,
					       struct vm_area_struct *vma,
					       unsigned long address)
{
	return NULL;
}

static inline int __alloc_bootmem_huge_page(struct hstate *h)
{
	return 0;
}

static inline struct hstate *hstate_file(struct file *f)
{
	return NULL;
}

static inline struct hstate *hstate_sizelog(int page_size_log)
{
	return NULL;
}

static inline struct hstate *hstate_vma(struct vm_area_struct *vma)
{
	return NULL;
}

static inline struct hstate *page_hstate(struct page *page)
{
	return NULL;
}

static inline unsigned long huge_page_size(struct hstate *h)
{
	return PAGE_SIZE;
}

static inline unsigned long huge_page_mask(struct hstate *h)
{
	return PAGE_MASK;
}

static inline unsigned long vma_kernel_pagesize(struct vm_area_struct *vma)
{
	return PAGE_SIZE;
}

static inline unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
{
	return PAGE_SIZE;
}

static inline unsigned int huge_page_order(struct hstate *h)
{
	return 0;
}

static inline unsigned int huge_page_shift(struct hstate *h)
{
	return PAGE_SHIFT;
}

static inline bool hstate_is_gigantic(struct hstate *h)
{
	return false;
}

static inline unsigned int pages_per_huge_page(struct hstate *h)
{
	return 1;
}

static inline unsigned hstate_index_to_shift(unsigned index)
{
	return 0;
}

static inline int hstate_index(struct hstate *h)
{
	return 0;
}

static inline pgoff_t basepage_index(struct page *page)
{
	return page->index;
}

static inline int dissolve_free_huge_page(struct page *page)
{
	return 0;
}

static inline int dissolve_free_huge_pages(unsigned long start_pfn,
					   unsigned long end_pfn)
{
	return 0;
}

static inline bool hugepage_migration_supported(struct hstate *h)
{
	return false;
}

static inline bool hugepage_movable_supported(struct hstate *h)
{
	return false;
}

static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
					   struct mm_struct *mm, pte_t *pte)
{
	return &mm->page_table_lock;
}

static inline void hugetlb_report_usage(struct seq_file *f, struct mm_struct *m)
{
}

static inline void hugetlb_count_sub(long l, struct mm_struct *mm)
{
}

static inline void set_huge_swap_pte_at(struct mm_struct *mm, unsigned long addr,
					pte_t *ptep, pte_t pte, unsigned long sz)
{
}
#endif	/* CONFIG_HUGETLB_PAGE */

static inline spinlock_t *huge_pte_lock(struct hstate *h,
					struct mm_struct *mm, pte_t *pte)
{
	spinlock_t *ptl;

	ptl = huge_pte_lockptr(h, mm, pte);
	spin_lock(ptl);
	return ptl;
}

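/*
 * Illustrative sketch, not part of this header: the usual pattern around
 * huge_pte_lock() -- take the lock covering a huge PTE, inspect or
 * modify the entry, then unlock. huge_pte_is_none() is a hypothetical
 * helper; huge_ptep_get() and huge_pte_none() come from <asm/hugetlb.h>.
 */
#if 0
static bool huge_pte_is_none(struct hstate *h, struct mm_struct *mm,
			     pte_t *ptep)
{
	spinlock_t *ptl;
	bool ret;

	ptl = huge_pte_lock(h, mm, ptep);	/* returns the held lock */
	ret = huge_pte_none(huge_ptep_get(ptep));
	spin_unlock(ptl);

	return ret;
}
#endif
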
#if defined(CONFIG_HUGETLB_PAGE) && defined(CONFIG_CMA)
extern void __init hugetlb_cma_reserve(int order);
extern void __init hugetlb_cma_check(void);
#else
static inline __init void hugetlb_cma_reserve(int order)
{
}
static inline __init void hugetlb_cma_check(void)
{
}
#endif

#endif /* _LINUX_HUGETLB_H */