/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_HUGETLB_H
#define _LINUX_HUGETLB_H

#include <linux/mm_types.h>
#include <linux/mmdebug.h>
#include <linux/fs.h>
#include <linux/hugetlb_inline.h>
#include <linux/cgroup.h>
#include <linux/list.h>
#include <linux/kref.h>
#include <asm/pgtable.h>

struct ctl_table;
struct user_struct;
struct mmu_gather;

#ifndef is_hugepd
/*
 * Some architectures require a hugepage directory format in order to
 * support multiple hugepage sizes. For example, commit
 * a4fe3ce76 "powerpc/mm: Allow more flexible layouts for hugepage pagetables"
 * introduced such a format on powerpc, allowing for a more flexible
 * hugepage pagetable layout.
 */
typedef struct { unsigned long pd; } hugepd_t;
#define is_hugepd(hugepd) (0)
#define __hugepd(x) ((hugepd_t) { (x) })
static inline int gup_huge_pd(hugepd_t hugepd, unsigned long addr,
			      unsigned pdshift, unsigned long end,
			      int write, struct page **pages, int *nr)
{
	return 0;
}
#else
extern int gup_huge_pd(hugepd_t hugepd, unsigned long addr,
		       unsigned pdshift, unsigned long end,
		       int write, struct page **pages, int *nr);
#endif

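/*
 * Illustrative caller sketch (not part of this header; loosely modelled on
 * the generic fast-GUP page table walkers, so names such as "pgd", "next"
 * and "nr" are assumed from that context):
 *
 *	if (unlikely(is_hugepd(__hugepd(pgd_val(pgd)))))
 *		if (!gup_huge_pd(__hugepd(pgd_val(pgd)), addr, PGDIR_SHIFT,
 *				 next, write, pages, nr))
 *			return 0;
 *
 * On architectures without hugepage directories the stub definitions above
 * let the is_hugepd() test compile away to nothing.
 */
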
#ifdef CONFIG_HUGETLB_PAGE

#include <linux/mempolicy.h>
#include <linux/shm.h>
#include <asm/tlbflush.h>

struct hugepage_subpool {
	spinlock_t lock;
	long count;
	long max_hpages;	/* Maximum huge pages or -1 if no maximum. */
	long used_hpages;	/* Used count against maximum, includes */
				/* both allocated and reserved pages. */
	struct hstate *hstate;
	long min_hpages;	/* Minimum huge pages or -1 if no minimum. */
	long rsv_hpages;	/* Pages reserved against global pool to */
				/* satisfy minimum size. */
};

struct resv_map {
	struct kref refs;
	spinlock_t lock;
	struct list_head regions;
	long adds_in_progress;
	struct list_head region_cache;
	long region_cache_count;
};
extern struct resv_map *resv_map_alloc(void);
void resv_map_release(struct kref *ref);

extern spinlock_t hugetlb_lock;
extern int hugetlb_max_hstate __read_mostly;
#define for_each_hstate(h) \
	for ((h) = hstates; (h) < &hstates[hugetlb_max_hstate]; (h)++)

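/*
 * Minimal usage sketch (not part of this header): walking every registered
 * huge page size, e.g. to report per-hstate counters.
 *
 *	struct hstate *h;
 *
 *	for_each_hstate(h)
 *		pr_info("%s: %lu free\n", h->name, h->free_huge_pages);
 */
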
struct hugepage_subpool *hugepage_new_subpool(struct hstate *h, long max_hpages,
						long min_hpages);
void hugepage_put_subpool(struct hugepage_subpool *spool);

void reset_vma_resv_huge_pages(struct vm_area_struct *vma);
int hugetlb_sysctl_handler(struct ctl_table *, int, void __user *, size_t *, loff_t *);
int hugetlb_overcommit_handler(struct ctl_table *, int, void __user *, size_t *, loff_t *);
int hugetlb_treat_movable_handler(struct ctl_table *, int, void __user *, size_t *, loff_t *);

#ifdef CONFIG_NUMA
int hugetlb_mempolicy_sysctl_handler(struct ctl_table *, int,
					void __user *, size_t *, loff_t *);
#endif

int copy_hugetlb_page_range(struct mm_struct *, struct mm_struct *, struct vm_area_struct *);
long follow_hugetlb_page(struct mm_struct *, struct vm_area_struct *,
			 struct page **, struct vm_area_struct **,
			 unsigned long *, unsigned long *, long, unsigned int,
			 int *);
void unmap_hugepage_range(struct vm_area_struct *,
			  unsigned long, unsigned long, struct page *);
void __unmap_hugepage_range_final(struct mmu_gather *tlb,
			  struct vm_area_struct *vma,
			  unsigned long start, unsigned long end,
			  struct page *ref_page);
void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
				unsigned long start, unsigned long end,
				struct page *ref_page);
void hugetlb_report_meminfo(struct seq_file *);
int hugetlb_report_node_meminfo(int, char *);
void hugetlb_show_meminfo(void);
unsigned long hugetlb_total_pages(void);
vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
			unsigned long address, unsigned int flags);
int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm, pte_t *dst_pte,
				struct vm_area_struct *dst_vma,
				unsigned long dst_addr,
				unsigned long src_addr,
				struct page **pagep);
int hugetlb_reserve_pages(struct inode *inode, long from, long to,
						struct vm_area_struct *vma,
						vm_flags_t vm_flags);
long hugetlb_unreserve_pages(struct inode *inode, long start, long end,
						long freed);
bool isolate_huge_page(struct page *page, struct list_head *list);
void putback_active_hugepage(struct page *page);
void move_hugetlb_state(struct page *oldpage, struct page *newpage, int reason);
void free_huge_page(struct page *page);
void hugetlb_fix_reserve_counts(struct inode *inode);
extern struct mutex *hugetlb_fault_mutex_table;
u32 hugetlb_fault_mutex_hash(struct hstate *h, struct mm_struct *mm,
				struct vm_area_struct *vma,
				struct address_space *mapping,
				pgoff_t idx, unsigned long address);

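/*
 * Illustrative serialization sketch (not part of this header): callers such
 * as the hugetlb fault path hash the faulting mapping/index to pick one
 * mutex out of the table, so concurrent faults on the same huge page are
 * serialized. "h", "mapping", "idx" and "address" are assumed to come from
 * the surrounding fault handler.
 *
 *	u32 hash;
 *
 *	hash = hugetlb_fault_mutex_hash(h, mm, vma, mapping, idx, address);
 *	mutex_lock(&hugetlb_fault_mutex_table[hash]);
 *	... handle the fault ...
 *	mutex_unlock(&hugetlb_fault_mutex_table[hash]);
 */
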
pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud);

extern int sysctl_hugetlb_shm_group;
extern struct list_head huge_boot_pages;

/* arch callbacks */

pte_t *huge_pte_alloc(struct mm_struct *mm,
			unsigned long addr, unsigned long sz);
pte_t *huge_pte_offset(struct mm_struct *mm,
		       unsigned long addr, unsigned long sz);
int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep);
void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma,
				unsigned long *start, unsigned long *end);
struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address,
			      int write);
struct page *follow_huge_pd(struct vm_area_struct *vma,
			    unsigned long address, hugepd_t hpd,
			    int flags, int pdshift);
struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
				pmd_t *pmd, int flags);
struct page *follow_huge_pud(struct mm_struct *mm, unsigned long address,
				pud_t *pud, int flags);
struct page *follow_huge_pgd(struct mm_struct *mm, unsigned long address,
			     pgd_t *pgd, int flags);

int pmd_huge(pmd_t pmd);
int pud_huge(pud_t pud);
unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
		unsigned long address, unsigned long end, pgprot_t newprot);

bool is_hugetlb_entry_migration(pte_t pte);

#else /* !CONFIG_HUGETLB_PAGE */

static inline void reset_vma_resv_huge_pages(struct vm_area_struct *vma)
{
}

static inline unsigned long hugetlb_total_pages(void)
{
	return 0;
}

static inline int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr,
					pte_t *ptep)
{
	return 0;
}

static inline void adjust_range_if_pmd_sharing_possible(
				struct vm_area_struct *vma,
				unsigned long *start, unsigned long *end)
{
}

#define follow_hugetlb_page(m,v,p,vs,a,b,i,w,n)	({ BUG(); 0; })
#define follow_huge_addr(mm, addr, write)	ERR_PTR(-EINVAL)
#define copy_hugetlb_page_range(src, dst, vma)	({ BUG(); 0; })
static inline void hugetlb_report_meminfo(struct seq_file *m)
{
}
#define hugetlb_report_node_meminfo(n, buf)	0
static inline void hugetlb_show_meminfo(void)
{
}
#define follow_huge_pd(vma, addr, hpd, flags, pdshift) NULL
#define follow_huge_pmd(mm, addr, pmd, flags)	NULL
#define follow_huge_pud(mm, addr, pud, flags)	NULL
#define follow_huge_pgd(mm, addr, pgd, flags)	NULL
#define prepare_hugepage_range(file, addr, len)	(-EINVAL)
#define pmd_huge(x)	0
#define pud_huge(x)	0
#define is_hugepage_only_range(mm, addr, len)	0
#define hugetlb_free_pgd_range(tlb, addr, end, floor, ceiling) ({BUG(); 0; })
#define hugetlb_fault(mm, vma, addr, flags)	({ BUG(); 0; })
#define hugetlb_mcopy_atomic_pte(dst_mm, dst_pte, dst_vma, dst_addr, \
				src_addr, pagep)	({ BUG(); 0; })
#define huge_pte_offset(mm, address, sz)	0

static inline bool isolate_huge_page(struct page *page, struct list_head *list)
{
	return false;
}
#define putback_active_hugepage(p)	do {} while (0)
#define move_hugetlb_state(old, new, reason)	do {} while (0)

static inline unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
		unsigned long address, unsigned long end, pgprot_t newprot)
{
	return 0;
}

static inline void __unmap_hugepage_range_final(struct mmu_gather *tlb,
			struct vm_area_struct *vma, unsigned long start,
			unsigned long end, struct page *ref_page)
{
	BUG();
}

static inline void __unmap_hugepage_range(struct mmu_gather *tlb,
			struct vm_area_struct *vma, unsigned long start,
			unsigned long end, struct page *ref_page)
{
	BUG();
}

#endif /* !CONFIG_HUGETLB_PAGE */
/*
 * hugepages at the page global directory. If an arch supports
 * hugepages at the pgd level, it needs to define this.
 */
#ifndef pgd_huge
#define pgd_huge(x)	0
#endif
#ifndef p4d_huge
#define p4d_huge(x)	0
#endif

#ifndef pgd_write
static inline int pgd_write(pgd_t pgd)
{
	BUG();
	return 0;
}
#endif

#define HUGETLB_ANON_FILE	"anon_hugepage"

enum {
	/*
	 * The file will be used as a shm file, so shmfs accounting rules
	 * apply.
	 */
	HUGETLB_SHMFS_INODE = 1,
	/*
	 * The file is being created on the internal vfs mount and shmfs
	 * accounting rules do not apply.
	 */
	HUGETLB_ANONHUGE_INODE = 2,
};

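/*
 * Usage note (not part of this header): these values are passed as the
 * creat_flags argument of hugetlb_file_setup(); SysV shared memory segments
 * are expected to use HUGETLB_SHMFS_INODE, while anonymous MAP_HUGETLB
 * mappings on the internal mount use HUGETLB_ANONHUGE_INODE, e.g.:
 *
 *	file = hugetlb_file_setup(HUGETLB_ANON_FILE, len, VM_NORESERVE,
 *				  &user, HUGETLB_ANONHUGE_INODE,
 *				  (flags >> MAP_HUGE_SHIFT) & MAP_HUGE_MASK);
 */
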
#ifdef CONFIG_HUGETLBFS
struct hugetlbfs_sb_info {
	long	max_inodes;   /* inodes allowed */
	long	free_inodes;  /* inodes free */
	spinlock_t	stat_lock;
	struct hstate *hstate;
	struct hugepage_subpool *spool;
	kuid_t	uid;
	kgid_t	gid;
	umode_t mode;
};

static inline struct hugetlbfs_sb_info *HUGETLBFS_SB(struct super_block *sb)
{
	return sb->s_fs_info;
}

struct hugetlbfs_inode_info {
	struct shared_policy policy;
	struct inode vfs_inode;
	unsigned int seals;
};

static inline struct hugetlbfs_inode_info *HUGETLBFS_I(struct inode *inode)
{
	return container_of(inode, struct hugetlbfs_inode_info, vfs_inode);
}

extern const struct file_operations hugetlbfs_file_operations;
extern const struct vm_operations_struct hugetlb_vm_ops;
struct file *hugetlb_file_setup(const char *name, size_t size, vm_flags_t acct,
				struct user_struct **user, int creat_flags,
				int page_size_log);

static inline bool is_file_hugepages(struct file *file)
{
	if (file->f_op == &hugetlbfs_file_operations)
		return true;

	return is_file_shm_hugepages(file);
}

#else /* !CONFIG_HUGETLBFS */

#define is_file_hugepages(file)			false
static inline struct file *
hugetlb_file_setup(const char *name, size_t size, vm_flags_t acctflag,
		struct user_struct **user, int creat_flags,
		int page_size_log)
{
	return ERR_PTR(-ENOSYS);
}

#endif /* !CONFIG_HUGETLBFS */

#ifdef HAVE_ARCH_HUGETLB_UNMAPPED_AREA
unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
					unsigned long len, unsigned long pgoff,
					unsigned long flags);
#endif /* HAVE_ARCH_HUGETLB_UNMAPPED_AREA */

#ifdef CONFIG_HUGETLB_PAGE

#define HSTATE_NAME_LEN 32
/* Defines one hugetlb page size */
struct hstate {
	int next_nid_to_alloc;
	int next_nid_to_free;
	unsigned int order;
	unsigned long mask;
	unsigned long max_huge_pages;
	unsigned long nr_huge_pages;
	unsigned long free_huge_pages;
	unsigned long resv_huge_pages;
	unsigned long surplus_huge_pages;
	unsigned long nr_overcommit_huge_pages;
	struct list_head hugepage_activelist;
	struct list_head hugepage_freelists[MAX_NUMNODES];
	unsigned int nr_huge_pages_node[MAX_NUMNODES];
	unsigned int free_huge_pages_node[MAX_NUMNODES];
	unsigned int surplus_huge_pages_node[MAX_NUMNODES];
#ifdef CONFIG_CGROUP_HUGETLB
	/* cgroup control files */
	struct cftype cgroup_files[5];
#endif
	char name[HSTATE_NAME_LEN];
};

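/*
 * Orientation note (not part of this header): the global counters above are
 * what hugetlb_report_meminfo() exposes in /proc/meminfo, roughly
 *
 *	HugePages_Total	<- nr_huge_pages
 *	HugePages_Free	<- free_huge_pages
 *	HugePages_Rsvd	<- resv_huge_pages
 *	HugePages_Surp	<- surplus_huge_pages
 *
 * with the *_node[] arrays holding the per-NUMA-node breakdown.
 */
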
struct huge_bootmem_page {
	struct list_head list;
	struct hstate *hstate;
};

struct page *alloc_huge_page(struct vm_area_struct *vma,
				unsigned long addr, int avoid_reserve);
struct page *alloc_huge_page_node(struct hstate *h, int nid);
struct page *alloc_huge_page_nodemask(struct hstate *h, int preferred_nid,
				nodemask_t *nmask);
struct page *alloc_huge_page_vma(struct hstate *h, struct vm_area_struct *vma,
				unsigned long address);
struct page *alloc_migrate_huge_page(struct hstate *h, gfp_t gfp_mask,
				     int nid, nodemask_t *nmask);
int huge_add_to_page_cache(struct page *page, struct address_space *mapping,
			pgoff_t idx);

53ba51d2 | 379 | /* arch callback */ |
e24a1307 | 380 | int __init __alloc_bootmem_huge_page(struct hstate *h); |
53ba51d2 JT |
381 | int __init alloc_bootmem_huge_page(struct hstate *h); |
382 | ||
9fee021d | 383 | void __init hugetlb_bad_size(void); |
e5ff2159 AK |
384 | void __init hugetlb_add_hstate(unsigned order); |
385 | struct hstate *size_to_hstate(unsigned long size); | |
386 | ||
387 | #ifndef HUGE_MAX_HSTATE | |
388 | #define HUGE_MAX_HSTATE 1 | |
389 | #endif | |
390 | ||
391 | extern struct hstate hstates[HUGE_MAX_HSTATE]; | |
392 | extern unsigned int default_hstate_idx; | |
393 | ||
394 | #define default_hstate (hstates[default_hstate_idx]) | |
a5516438 | 395 | |
a137e1cc | 396 | static inline struct hstate *hstate_inode(struct inode *i) |
a5516438 | 397 | { |
7fab358d | 398 | return HUGETLBFS_SB(i->i_sb)->hstate; |
a5516438 AK |
399 | } |
400 | ||
401 | static inline struct hstate *hstate_file(struct file *f) | |
402 | { | |
496ad9aa | 403 | return hstate_inode(file_inode(f)); |
a5516438 AK |
404 | } |
405 | ||
af73e4d9 NH |
406 | static inline struct hstate *hstate_sizelog(int page_size_log) |
407 | { | |
408 | if (!page_size_log) | |
409 | return &default_hstate; | |
97ad2be1 SL |
410 | |
411 | return size_to_hstate(1UL << page_size_log); | |
af73e4d9 NH |
412 | } |
413 | ||
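/*
 * Worked example (not part of this header): a page_size_log of 0 means "use
 * the default huge page size", while e.g. page_size_log == 21, as encoded by
 * MAP_HUGE_2MB in mmap()/shmget() flags, resolves to the hstate for
 * 1UL << 21 == 2 MiB pages, provided that size was registered via
 * hugetlb_add_hstate(); otherwise size_to_hstate() returns NULL.
 */
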
static inline struct hstate *hstate_vma(struct vm_area_struct *vma)
{
	return hstate_file(vma->vm_file);
}

static inline unsigned long huge_page_size(struct hstate *h)
{
	return (unsigned long)PAGE_SIZE << h->order;
}

extern unsigned long vma_kernel_pagesize(struct vm_area_struct *vma);

extern unsigned long vma_mmu_pagesize(struct vm_area_struct *vma);

static inline unsigned long huge_page_mask(struct hstate *h)
{
	return h->mask;
}

static inline unsigned int huge_page_order(struct hstate *h)
{
	return h->order;
}

static inline unsigned huge_page_shift(struct hstate *h)
{
	return h->order + PAGE_SHIFT;
}

static inline bool hstate_is_gigantic(struct hstate *h)
{
	return huge_page_order(h) >= MAX_ORDER;
}

static inline unsigned int pages_per_huge_page(struct hstate *h)
{
	return 1 << h->order;
}

static inline unsigned int blocks_per_huge_page(struct hstate *h)
{
	return huge_page_size(h) / 512;
}

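/*
 * Worked example (not part of this header), assuming x86-64 with
 * PAGE_SHIFT == 12 and a 2 MiB hstate (order == 9):
 *
 *	huge_page_size(h)	== 4096 << 9	== 2 MiB
 *	huge_page_shift(h)	== 9 + 12	== 21
 *	pages_per_huge_page(h)	== 1 << 9	== 512 base pages
 *	blocks_per_huge_page(h)	== 2 MiB / 512	== 4096 512-byte blocks
 *
 * A 1 GiB hstate (order == 18) would additionally be reported as gigantic,
 * since 18 >= MAX_ORDER (11 on that configuration).
 */
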
#include <asm/hugetlb.h>

#ifndef arch_make_huge_pte
static inline pte_t arch_make_huge_pte(pte_t entry, struct vm_area_struct *vma,
				       struct page *page, int writable)
{
	return entry;
}
#endif

static inline struct hstate *page_hstate(struct page *page)
{
	VM_BUG_ON_PAGE(!PageHuge(page), page);
	return size_to_hstate(PAGE_SIZE << compound_order(page));
}

static inline unsigned hstate_index_to_shift(unsigned index)
{
	return hstates[index].order + PAGE_SHIFT;
}

static inline int hstate_index(struct hstate *h)
{
	return h - hstates;
}

pgoff_t __basepage_index(struct page *page);

/* Return page->index in PAGE_SIZE units */
static inline pgoff_t basepage_index(struct page *page)
{
	if (!PageCompound(page))
		return page->index;

	return __basepage_index(page);
}

extern int dissolve_free_huge_page(struct page *page);
extern int dissolve_free_huge_pages(unsigned long start_pfn,
				    unsigned long end_pfn);

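/*
 * Worked example (not part of this header, assuming 4 KiB base pages and a
 * 2 MiB huge page): hugetlbfs keeps page->index in huge-page-size units, so
 * the third huge page of a file has page->index == 2 and
 * basepage_index() rescales that to 2 * 512 == 1024 in PAGE_SIZE units.
 */
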
#ifdef CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION
#ifndef arch_hugetlb_migration_supported
static inline bool arch_hugetlb_migration_supported(struct hstate *h)
{
	if ((huge_page_shift(h) == PMD_SHIFT) ||
		(huge_page_shift(h) == PUD_SHIFT) ||
			(huge_page_shift(h) == PGDIR_SHIFT))
		return true;
	else
		return false;
}
#endif
#else
static inline bool arch_hugetlb_migration_supported(struct hstate *h)
{
	return false;
}
#endif

static inline bool hugepage_migration_supported(struct hstate *h)
{
	return arch_hugetlb_migration_supported(h);
}

/*
 * The movability check is different from the migration check.
 * It determines whether or not a huge page should be placed in
 * a movable zone. Movability of any huge page needs to be checked
 * only if huge page sizes are supported for migration: there is no
 * reason for a huge page to be movable if it is not migratable to
 * start with. Also, the huge page should be large enough to be placed
 * under a movable zone and still feasible to migrate. Mere presence
 * in a movable zone does not make the migration feasible.
 *
 * So even though large huge page sizes like the gigantic ones are
 * migratable, they should not be movable because it is not feasible
 * to migrate them from a movable zone.
 */
static inline bool hugepage_movable_supported(struct hstate *h)
{
	if (!hugepage_migration_supported(h))
		return false;

	if (hstate_is_gigantic(h))
		return false;
	return true;
}

static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
					struct mm_struct *mm, pte_t *pte)
{
	if (huge_page_size(h) == PMD_SIZE)
		return pmd_lockptr(mm, (pmd_t *) pte);
	VM_BUG_ON(huge_page_size(h) == PAGE_SIZE);
	return &mm->page_table_lock;
}

#ifndef hugepages_supported
/*
 * Some platforms decide whether they support huge pages at boot
 * time. Some of them, such as powerpc, set HPAGE_SHIFT to 0
 * when there is no such support.
 */
#define hugepages_supported() (HPAGE_SHIFT != 0)
#endif

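/*
 * Usage note (not part of this header): callers typically bail out early
 * when huge pages were disabled at boot, for example a sysctl handler might
 * do:
 *
 *	if (!hugepages_supported())
 *		return -EOPNOTSUPP;
 */
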
void hugetlb_report_usage(struct seq_file *m, struct mm_struct *mm);

static inline void hugetlb_count_add(long l, struct mm_struct *mm)
{
	atomic_long_add(l, &mm->hugetlb_usage);
}

static inline void hugetlb_count_sub(long l, struct mm_struct *mm)
{
	atomic_long_sub(l, &mm->hugetlb_usage);
}

#ifndef set_huge_swap_pte_at
static inline void set_huge_swap_pte_at(struct mm_struct *mm, unsigned long addr,
					pte_t *ptep, pte_t pte, unsigned long sz)
{
	set_huge_pte_at(mm, addr, ptep, pte);
}
#endif

#ifndef huge_ptep_modify_prot_start
#define huge_ptep_modify_prot_start huge_ptep_modify_prot_start
static inline pte_t huge_ptep_modify_prot_start(struct vm_area_struct *vma,
						unsigned long addr, pte_t *ptep)
{
	return huge_ptep_get_and_clear(vma->vm_mm, addr, ptep);
}
#endif

#ifndef huge_ptep_modify_prot_commit
#define huge_ptep_modify_prot_commit huge_ptep_modify_prot_commit
static inline void huge_ptep_modify_prot_commit(struct vm_area_struct *vma,
						unsigned long addr, pte_t *ptep,
						pte_t old_pte, pte_t pte)
{
	set_huge_pte_at(vma->vm_mm, addr, ptep, pte);
}
#endif

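/*
 * Illustrative usage sketch (not part of this header): protection changes
 * are expected to bracket the PTE update with the start/commit pair, roughly
 * as hugetlb_change_protection() does. "newprot" is assumed from that
 * caller's context.
 *
 *	pte_t old_pte, pte;
 *
 *	old_pte = huge_ptep_modify_prot_start(vma, addr, ptep);
 *	pte = pte_mkhuge(huge_pte_modify(old_pte, newprot));
 *	huge_ptep_modify_prot_commit(vma, addr, ptep, old_pte, pte);
 */
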
#else	/* CONFIG_HUGETLB_PAGE */
struct hstate {};
#define alloc_huge_page(v, a, r) NULL
#define alloc_huge_page_node(h, nid) NULL
#define alloc_huge_page_nodemask(h, preferred_nid, nmask) NULL
#define alloc_huge_page_vma(h, vma, address) NULL
#define alloc_bootmem_huge_page(h) NULL
#define hstate_file(f) NULL
#define hstate_sizelog(s) NULL
#define hstate_vma(v) NULL
#define hstate_inode(i) NULL
#define page_hstate(page) NULL
#define huge_page_size(h) PAGE_SIZE
#define huge_page_mask(h) PAGE_MASK
#define vma_kernel_pagesize(v) PAGE_SIZE
#define vma_mmu_pagesize(v) PAGE_SIZE
#define huge_page_order(h) 0
#define huge_page_shift(h) PAGE_SHIFT
static inline bool hstate_is_gigantic(struct hstate *h)
{
	return false;
}

static inline unsigned int pages_per_huge_page(struct hstate *h)
{
	return 1;
}

static inline unsigned hstate_index_to_shift(unsigned index)
{
	return 0;
}

static inline int hstate_index(struct hstate *h)
{
	return 0;
}

static inline pgoff_t basepage_index(struct page *page)
{
	return page->index;
}

static inline int dissolve_free_huge_page(struct page *page)
{
	return 0;
}

static inline int dissolve_free_huge_pages(unsigned long start_pfn,
					   unsigned long end_pfn)
{
	return 0;
}

static inline bool hugepage_migration_supported(struct hstate *h)
{
	return false;
}

static inline bool hugepage_movable_supported(struct hstate *h)
{
	return false;
}

static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
					struct mm_struct *mm, pte_t *pte)
{
	return &mm->page_table_lock;
}

static inline void hugetlb_report_usage(struct seq_file *f, struct mm_struct *m)
{
}

static inline void hugetlb_count_sub(long l, struct mm_struct *mm)
{
}

static inline void set_huge_swap_pte_at(struct mm_struct *mm, unsigned long addr,
					pte_t *ptep, pte_t pte, unsigned long sz)
{
}
#endif	/* CONFIG_HUGETLB_PAGE */

static inline spinlock_t *huge_pte_lock(struct hstate *h,
					struct mm_struct *mm, pte_t *pte)
{
	spinlock_t *ptl;

	ptl = huge_pte_lockptr(h, mm, pte);
	spin_lock(ptl);
	return ptl;
}

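/*
 * Minimal locking sketch (not part of this header): take the per-PMD or
 * per-mm lock around a huge PTE update and drop it with spin_unlock().
 *
 *	spinlock_t *ptl;
 *
 *	ptl = huge_pte_lock(h, mm, ptep);
 *	set_huge_pte_at(mm, addr, ptep, entry);
 *	spin_unlock(ptl);
 */
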
#endif /* _LINUX_HUGETLB_H */