/*
 * hugetlbpage-backed filesystem.  Based on ramfs.
 *
 * Nadia Yvette Chambers, 2002
 *
 * Copyright (C) 2002 Linus Torvalds.
 * License: GPL
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/thread_info.h>
#include <asm/current.h>
#include <linux/falloc.h>
#include <linux/fs.h>
#include <linux/mount.h>
#include <linux/file.h>
#include <linux/kernel.h>
#include <linux/writeback.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/capability.h>
#include <linux/ctype.h>
#include <linux/backing-dev.h>
#include <linux/hugetlb.h>
#include <linux/pagevec.h>
#include <linux/fs_parser.h>
#include <linux/mman.h>
#include <linux/slab.h>
#include <linux/dnotify.h>
#include <linux/statfs.h>
#include <linux/security.h>
#include <linux/magic.h>
#include <linux/migrate.h>
#include <linux/uio.h>

#include <linux/uaccess.h>
#include <linux/sched/mm.h>

static const struct address_space_operations hugetlbfs_aops;
const struct file_operations hugetlbfs_file_operations;
static const struct inode_operations hugetlbfs_dir_inode_operations;
static const struct inode_operations hugetlbfs_inode_operations;

enum hugetlbfs_size_type { NO_SIZE, SIZE_STD, SIZE_PERCENT };

struct hugetlbfs_fs_context {
	struct hstate		*hstate;
	unsigned long long	max_size_opt;
	unsigned long long	min_size_opt;
	long			max_hpages;
	long			nr_inodes;
	long			min_hpages;
	enum hugetlbfs_size_type max_val_type;
	enum hugetlbfs_size_type min_val_type;
	kuid_t			uid;
	kgid_t			gid;
	umode_t			mode;
};

int sysctl_hugetlb_shm_group;

enum hugetlb_param {
	Opt_gid,
	Opt_min_size,
	Opt_mode,
	Opt_nr_inodes,
	Opt_pagesize,
	Opt_size,
	Opt_uid,
};

static const struct fs_parameter_spec hugetlb_fs_parameters[] = {
	fsparam_u32   ("gid",		Opt_gid),
	fsparam_string("min_size",	Opt_min_size),
	fsparam_u32oct("mode",		Opt_mode),
	fsparam_string("nr_inodes",	Opt_nr_inodes),
	fsparam_string("pagesize",	Opt_pagesize),
	fsparam_string("size",		Opt_size),
	fsparam_u32   ("uid",		Opt_uid),
	{}
};

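/*
 * Example usage (editor's illustration; the mount point and values are
 * assumed, the option names are the ones in the table above):
 *
 *   mount -t hugetlbfs -o uid=1000,gid=1000,mode=0700,pagesize=2M, \
 *         size=1G,min_size=512M,nr_inodes=64 none /mnt/huge
 *
 * "size", "min_size" and "nr_inodes" are parsed with memparse(), so K/M/G
 * suffixes work, and "size"/"min_size" may also be given as a percentage
 * of the huge page pool (e.g. size=50%); "mode" is octal, while "uid" and
 * "gid" are plain 32-bit values.
 */
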
#ifdef CONFIG_NUMA
static inline void hugetlb_set_vma_policy(struct vm_area_struct *vma,
					struct inode *inode, pgoff_t index)
{
	vma->vm_policy = mpol_shared_policy_lookup(&HUGETLBFS_I(inode)->policy,
							index);
}

static inline void hugetlb_drop_vma_policy(struct vm_area_struct *vma)
{
	mpol_cond_put(vma->vm_policy);
}
#else
static inline void hugetlb_set_vma_policy(struct vm_area_struct *vma,
					struct inode *inode, pgoff_t index)
{
}

static inline void hugetlb_drop_vma_policy(struct vm_area_struct *vma)
{
}
#endif

/*
 * Mask used when checking the page offset value passed in via system
 * calls.  This value will be converted to a loff_t which is signed.
 * Therefore, we want to check the upper PAGE_SHIFT + 1 bits of the
 * value.  The extra bit (- 1 in the shift value) is to take the sign
 * bit into account.
 */
#define PGOFF_LOFFT_MAX \
	(((1UL << (PAGE_SHIFT + 1)) - 1) <<  (BITS_PER_LONG - (PAGE_SHIFT + 1)))

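/*
 * Worked example (editor's note, assuming a 64-bit arch with 4 KiB base
 * pages): PAGE_SHIFT == 12 and BITS_PER_LONG == 64 give a mask covering
 * the top 13 bits of the word, ((1UL << 13) - 1) << 51.  If any of those
 * bits were set in vm_pgoff, (loff_t)vm_pgoff << PAGE_SHIFT would reach
 * the sign bit, which is why hugetlbfs_file_mmap() rejects such offsets.
 */
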
static int hugetlbfs_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct inode *inode = file_inode(file);
	struct hugetlbfs_inode_info *info = HUGETLBFS_I(inode);
	loff_t len, vma_len;
	int ret;
	struct hstate *h = hstate_file(file);

	/*
	 * vma address alignment (but not the pgoff alignment) has
	 * already been checked by prepare_hugepage_range.  If you add
	 * any error returns here, do so after setting VM_HUGETLB, so
	 * is_vm_hugetlb_page tests below unmap_region go the right
	 * way when do_mmap unwinds (may be important on powerpc
	 * and ia64).
	 */
	vm_flags_set(vma, VM_HUGETLB | VM_DONTEXPAND);
	vma->vm_ops = &hugetlb_vm_ops;

	ret = seal_check_future_write(info->seals, vma);
	if (ret)
		return ret;

	/*
	 * page based offset in vm_pgoff could be sufficiently large to
	 * overflow a loff_t when converted to byte offset.  This can
	 * only happen on architectures where sizeof(loff_t) ==
	 * sizeof(unsigned long).  So, only check in those instances.
	 */
	if (sizeof(unsigned long) == sizeof(loff_t)) {
		if (vma->vm_pgoff & PGOFF_LOFFT_MAX)
			return -EINVAL;
	}

	/* must be huge page aligned */
	if (vma->vm_pgoff & (~huge_page_mask(h) >> PAGE_SHIFT))
		return -EINVAL;

	vma_len = (loff_t)(vma->vm_end - vma->vm_start);
	len = vma_len + ((loff_t)vma->vm_pgoff << PAGE_SHIFT);
	/* check for overflow */
	if (len < vma_len)
		return -EINVAL;

	inode_lock(inode);
	file_accessed(file);

	ret = -ENOMEM;
	if (!hugetlb_reserve_pages(inode,
				vma->vm_pgoff >> huge_page_order(h),
				len >> huge_page_shift(h), vma,
				vma->vm_flags))
		goto out;

	ret = 0;
	if (vma->vm_flags & VM_WRITE && inode->i_size < len)
		i_size_write(inode, len);
out:
	inode_unlock(inode);

	return ret;
}

/*
 * Called under mmap_write_lock(mm).
 */

static unsigned long
hugetlb_get_unmapped_area_bottomup(struct file *file, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct hstate *h = hstate_file(file);
	struct vm_unmapped_area_info info;

	info.flags = 0;
	info.length = len;
	info.low_limit = current->mm->mmap_base;
	info.high_limit = arch_get_mmap_end(addr, len, flags);
	info.align_mask = PAGE_MASK & ~huge_page_mask(h);
	info.align_offset = 0;
	return vm_unmapped_area(&info);
}

static unsigned long
hugetlb_get_unmapped_area_topdown(struct file *file, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct hstate *h = hstate_file(file);
	struct vm_unmapped_area_info info;

	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
	info.length = len;
	info.low_limit = PAGE_SIZE;
	info.high_limit = arch_get_mmap_base(addr, current->mm->mmap_base);
	info.align_mask = PAGE_MASK & ~huge_page_mask(h);
	info.align_offset = 0;
	addr = vm_unmapped_area(&info);

	/*
	 * A failed mmap() very likely causes application failure,
	 * so fall back to the bottom-up function here.  This scenario
	 * can happen with large stack limits and large mmap()
	 * allocations.
	 */
	if (unlikely(offset_in_page(addr))) {
		VM_BUG_ON(addr != -ENOMEM);
		info.flags = 0;
		info.low_limit = current->mm->mmap_base;
		info.high_limit = arch_get_mmap_end(addr, len, flags);
		addr = vm_unmapped_area(&info);
	}

	return addr;
}

unsigned long
generic_hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
				  unsigned long len, unsigned long pgoff,
				  unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	struct hstate *h = hstate_file(file);
	const unsigned long mmap_end = arch_get_mmap_end(addr, len, flags);

	if (len & ~huge_page_mask(h))
		return -EINVAL;
	if (len > TASK_SIZE)
		return -ENOMEM;

	if (flags & MAP_FIXED) {
		if (prepare_hugepage_range(file, addr, len))
			return -EINVAL;
		return addr;
	}

	if (addr) {
		addr = ALIGN(addr, huge_page_size(h));
		vma = find_vma(mm, addr);
		if (mmap_end - len >= addr &&
		    (!vma || addr + len <= vm_start_gap(vma)))
			return addr;
	}

	/*
	 * Use mm->get_unmapped_area value as a hint to use topdown routine.
	 * If architectures have special needs, they should define their own
	 * version of hugetlb_get_unmapped_area.
	 */
	if (mm->get_unmapped_area == arch_get_unmapped_area_topdown)
		return hugetlb_get_unmapped_area_topdown(file, addr, len,
				pgoff, flags);
	return hugetlb_get_unmapped_area_bottomup(file, addr, len,
			pgoff, flags);
}

#ifndef HAVE_ARCH_HUGETLB_UNMAPPED_AREA
static unsigned long
hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
			  unsigned long len, unsigned long pgoff,
			  unsigned long flags)
{
	return generic_hugetlb_get_unmapped_area(file, addr, len, pgoff, flags);
}
#endif

/*
 * Someone wants to read @bytes from a HWPOISON hugetlb @page from @offset.
 * Returns the maximum number of bytes one can read without touching the 1st raw
 * HWPOISON subpage.
 *
 * The implementation borrows the iteration logic from copy_page_to_iter*.
 */
static size_t adjust_range_hwpoison(struct page *page, size_t offset, size_t bytes)
{
	size_t n = 0;
	size_t res = 0;

	/* First subpage to start the loop. */
	page += offset / PAGE_SIZE;
	offset %= PAGE_SIZE;
	while (1) {
		if (is_raw_hwpoison_page_in_hugepage(page))
			break;

		/* Safe to read n bytes without touching HWPOISON subpage. */
		n = min(bytes, (size_t)PAGE_SIZE - offset);
		res += n;
		bytes -= n;
		if (!bytes || !n)
			break;
		offset += n;
		if (offset == PAGE_SIZE) {
			page++;
			offset = 0;
		}
	}

	return res;
}

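/*
 * Worked example (editor's note, assumed numbers): with 4 KiB subpages,
 * a request of bytes = 0x3000 at offset = 0x1800 starts in subpage 1 at
 * in-page offset 0x800.  If subpage 2 is the first raw HWPOISON subpage,
 * the loop accumulates the remaining 0x800 bytes of subpage 1 and stops,
 * so the caller may safely copy only 0x800 bytes.
 */
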
/*
 * Support for read() - Find the page attached to f_mapping and copy out the
 * data.  This provides functionality similar to filemap_read().
 */
static ssize_t hugetlbfs_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	struct file *file = iocb->ki_filp;
	struct hstate *h = hstate_file(file);
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	unsigned long index = iocb->ki_pos >> huge_page_shift(h);
	unsigned long offset = iocb->ki_pos & ~huge_page_mask(h);
	unsigned long end_index;
	loff_t isize;
	ssize_t retval = 0;

	while (iov_iter_count(to)) {
		struct page *page;
		size_t nr, copied, want;

		/* nr is the maximum number of bytes to copy from this page */
		nr = huge_page_size(h);
		isize = i_size_read(inode);
		if (!isize)
			break;
		end_index = (isize - 1) >> huge_page_shift(h);
		if (index > end_index)
			break;
		if (index == end_index) {
			nr = ((isize - 1) & ~huge_page_mask(h)) + 1;
			if (nr <= offset)
				break;
		}
		nr = nr - offset;

		/* Find the page */
		page = find_lock_page(mapping, index);
		if (unlikely(page == NULL)) {
			/*
			 * We have a HOLE, zero out the user-buffer for the
			 * length of the hole or request.
			 */
			copied = iov_iter_zero(nr, to);
		} else {
			unlock_page(page);

			if (!PageHWPoison(page))
				want = nr;
			else {
				/*
				 * Adjust how many bytes safe to read without
				 * touching the 1st raw HWPOISON subpage after
				 * offset.
				 */
				want = adjust_range_hwpoison(page, offset, nr);
				if (want == 0) {
					put_page(page);
					retval = -EIO;
					break;
				}
			}

			/*
			 * We have the page, copy it to user space buffer.
			 */
			copied = copy_page_to_iter(page, offset, want, to);
			put_page(page);
		}
		offset += copied;
		retval += copied;
		if (copied != nr && iov_iter_count(to)) {
			if (!retval)
				retval = -EFAULT;
			break;
		}
		index += offset >> huge_page_shift(h);
		offset &= ~huge_page_mask(h);
	}
	iocb->ki_pos = ((loff_t)index << huge_page_shift(h)) + offset;
	return retval;
}

static int hugetlbfs_write_begin(struct file *file,
			struct address_space *mapping,
			loff_t pos, unsigned len,
			struct page **pagep, void **fsdata)
{
	return -EINVAL;
}

static int hugetlbfs_write_end(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned copied,
			struct page *page, void *fsdata)
{
	BUG();
	return -EINVAL;
}

static void hugetlb_delete_from_page_cache(struct folio *folio)
{
	folio_clear_dirty(folio);
	folio_clear_uptodate(folio);
	filemap_remove_folio(folio);
}

/*
 * Called with i_mmap_rwsem held for inode based vma maps.  This makes
 * sure vma (and vm_mm) will not go away.  We also hold the hugetlb fault
 * mutex for the page in the mapping.  So, we can not race with page being
 * faulted into the vma.
 */
static bool hugetlb_vma_maps_page(struct vm_area_struct *vma,
				unsigned long addr, struct page *page)
{
	pte_t *ptep, pte;

	ptep = hugetlb_walk(vma, addr, huge_page_size(hstate_vma(vma)));
	if (!ptep)
		return false;

	pte = huge_ptep_get(ptep);
	if (huge_pte_none(pte) || !pte_present(pte))
		return false;

	if (pte_page(pte) == page)
		return true;

	return false;
}

/*
 * Can vma_offset_start/vma_offset_end overflow on 32-bit arches?
 * No, because the interval tree returns us only those vmas
 * which overlap the truncated area starting at pgoff,
 * and no vma on a 32-bit arch can span beyond the 4GB.
 */
static unsigned long vma_offset_start(struct vm_area_struct *vma, pgoff_t start)
{
	unsigned long offset = 0;

	if (vma->vm_pgoff < start)
		offset = (start - vma->vm_pgoff) << PAGE_SHIFT;

	return vma->vm_start + offset;
}

static unsigned long vma_offset_end(struct vm_area_struct *vma, pgoff_t end)
{
	unsigned long t_end;

	if (!end)
		return vma->vm_end;

	t_end = ((end - vma->vm_pgoff) << PAGE_SHIFT) + vma->vm_start;
	if (t_end > vma->vm_end)
		t_end = vma->vm_end;
	return t_end;
}

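/*
 * Illustration (editor's note, assumed values): a vma with vm_pgoff == 8
 * covering file pages 8..24, clipped against a range starting at
 * start == 16, yields v_start = vm_start + ((16 - 8) << PAGE_SHIFT),
 * i.e. the first user address that actually maps the affected file range.
 */
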
/*
 * Called with hugetlb fault mutex held.  Therefore, no more mappings to
 * this folio can be created while executing the routine.
 */
static void hugetlb_unmap_file_folio(struct hstate *h,
					struct address_space *mapping,
					struct folio *folio, pgoff_t index)
{
	struct rb_root_cached *root = &mapping->i_mmap;
	struct hugetlb_vma_lock *vma_lock;
	struct page *page = &folio->page;
	struct vm_area_struct *vma;
	unsigned long v_start;
	unsigned long v_end;
	pgoff_t start, end;

	start = index * pages_per_huge_page(h);
	end = (index + 1) * pages_per_huge_page(h);

	i_mmap_lock_write(mapping);
retry:
	vma_lock = NULL;
	vma_interval_tree_foreach(vma, root, start, end - 1) {
		v_start = vma_offset_start(vma, start);
		v_end = vma_offset_end(vma, end);

		if (!hugetlb_vma_maps_page(vma, v_start, page))
			continue;

		if (!hugetlb_vma_trylock_write(vma)) {
			vma_lock = vma->vm_private_data;
			/*
			 * If we can not get vma lock, we need to drop
			 * i_mmap_rwsem and take locks in order.  First,
			 * take a ref on the vma_lock structure so that
			 * we can be guaranteed it will not go away when
			 * dropping i_mmap_rwsem.
			 */
			kref_get(&vma_lock->refs);
			break;
		}

		unmap_hugepage_range(vma, v_start, v_end, NULL,
				     ZAP_FLAG_DROP_MARKER);
		hugetlb_vma_unlock_write(vma);
	}

	i_mmap_unlock_write(mapping);

	if (vma_lock) {
		/*
		 * Wait on vma_lock.  We know it is still valid as we have
		 * a reference.  We must 'open code' vma locking as we do
		 * not know if vma_lock is still attached to vma.
		 */
		down_write(&vma_lock->rw_sema);
		i_mmap_lock_write(mapping);

		vma = vma_lock->vma;
		if (!vma) {
			/*
			 * If lock is no longer attached to vma, then just
			 * unlock, drop our reference and retry looking for
			 * other vmas.
			 */
			up_write(&vma_lock->rw_sema);
			kref_put(&vma_lock->refs, hugetlb_vma_lock_release);
			goto retry;
		}

		/*
		 * vma_lock is still attached to vma.  Check to see if vma
		 * still maps page and if so, unmap.
		 */
		v_start = vma_offset_start(vma, start);
		v_end = vma_offset_end(vma, end);
		if (hugetlb_vma_maps_page(vma, v_start, page))
			unmap_hugepage_range(vma, v_start, v_end, NULL,
					     ZAP_FLAG_DROP_MARKER);

		kref_put(&vma_lock->refs, hugetlb_vma_lock_release);
		hugetlb_vma_unlock_write(vma);

		goto retry;
	}
}

static void
hugetlb_vmdelete_list(struct rb_root_cached *root, pgoff_t start, pgoff_t end,
		      zap_flags_t zap_flags)
{
	struct vm_area_struct *vma;

	/*
	 * end == 0 indicates that the entire range after start should be
	 * unmapped.  Note, end is exclusive, whereas the interval tree takes
	 * an inclusive "last".
	 */
	vma_interval_tree_foreach(vma, root, start, end ? end - 1 : ULONG_MAX) {
		unsigned long v_start;
		unsigned long v_end;

		if (!hugetlb_vma_trylock_write(vma))
			continue;

		v_start = vma_offset_start(vma, start);
		v_end = vma_offset_end(vma, end);

		unmap_hugepage_range(vma, v_start, v_end, NULL, zap_flags);

		/*
		 * Note that vma lock only exists for shared/non-private
		 * vmas.  Therefore, lock is not held when calling
		 * unmap_hugepage_range for private vmas.
		 */
		hugetlb_vma_unlock_write(vma);
	}
}

/*
 * Called with hugetlb fault mutex held.
 * Returns true if page was actually removed, false otherwise.
 */
static bool remove_inode_single_folio(struct hstate *h, struct inode *inode,
					struct address_space *mapping,
					struct folio *folio, pgoff_t index,
					bool truncate_op)
{
	bool ret = false;

	/*
	 * If folio is mapped, it was faulted in after being
	 * unmapped in caller.  Unmap (again) while holding
	 * the fault mutex.  The mutex will prevent faults
	 * until we finish removing the folio.
	 */
	if (unlikely(folio_mapped(folio)))
		hugetlb_unmap_file_folio(h, mapping, folio, index);

	folio_lock(folio);
	/*
	 * We must remove the folio from page cache before removing
	 * the region/reserve map (hugetlb_unreserve_pages).  In
	 * rare out of memory conditions, removal of the region/reserve
	 * map could fail.  Correspondingly, the subpool and global
	 * reserve usage count can need to be adjusted.
	 */
	VM_BUG_ON_FOLIO(folio_test_hugetlb_restore_reserve(folio), folio);
	hugetlb_delete_from_page_cache(folio);
	ret = true;
	if (!truncate_op) {
		if (unlikely(hugetlb_unreserve_pages(inode, index,
							index + 1, 1)))
			hugetlb_fix_reserve_counts(inode);
	}

	folio_unlock(folio);
	return ret;
}

/*
 * remove_inode_hugepages handles two distinct cases: truncation and hole
 * punch.  There are subtle differences in operation for each case.
 *
 * truncation is indicated by end of range being LLONG_MAX
 *	In this case, we first scan the range and release found pages.
 *	After releasing pages, hugetlb_unreserve_pages cleans up region/reserve
 *	maps and global counts.  Page faults can race with truncation.
 *	During faults, hugetlb_no_page() checks i_size before page allocation,
 *	and again after obtaining page table lock.  It will 'back out'
 *	allocations in the truncated range.
 * hole punch is indicated if end is not LLONG_MAX
 *	In the hole punch case we scan the range and release found pages.
 *	Only when releasing a page is the associated region/reserve map
 *	deleted.  Region/reserve map entries for ranges without associated
 *	pages are not modified.  Page faults can race with hole punch.
 *	This is indicated if we find a mapped page.
 * Note: If the passed end of range value is beyond the end of file, but
 * not LLONG_MAX this routine still performs a hole punch operation.
 */
static void remove_inode_hugepages(struct inode *inode, loff_t lstart,
				   loff_t lend)
{
	struct hstate *h = hstate_inode(inode);
	struct address_space *mapping = &inode->i_data;
	const pgoff_t start = lstart >> huge_page_shift(h);
	const pgoff_t end = lend >> huge_page_shift(h);
	struct folio_batch fbatch;
	pgoff_t next, index;
	int i, freed = 0;
	bool truncate_op = (lend == LLONG_MAX);

	folio_batch_init(&fbatch);
	next = start;
	while (filemap_get_folios(mapping, &next, end - 1, &fbatch)) {
		for (i = 0; i < folio_batch_count(&fbatch); ++i) {
			struct folio *folio = fbatch.folios[i];
			u32 hash = 0;

			index = folio->index;
			hash = hugetlb_fault_mutex_hash(mapping, index);
			mutex_lock(&hugetlb_fault_mutex_table[hash]);

			/*
			 * Remove folio that was part of folio_batch.
			 */
			if (remove_inode_single_folio(h, inode, mapping, folio,
							index, truncate_op))
				freed++;

			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
		}
		folio_batch_release(&fbatch);
		cond_resched();
	}

	if (truncate_op)
		(void)hugetlb_unreserve_pages(inode, start, LONG_MAX, freed);
}

static void hugetlbfs_evict_inode(struct inode *inode)
{
	struct resv_map *resv_map;

	remove_inode_hugepages(inode, 0, LLONG_MAX);

	/*
	 * Get the resv_map from the address space embedded in the inode.
	 * This is the address space which points to any resv_map allocated
	 * at inode creation time.  If this is a device special inode,
	 * i_mapping may not point to the original address space.
	 */
	resv_map = (struct resv_map *)(&inode->i_data)->private_data;
	/* Only regular and link inodes have associated reserve maps */
	if (resv_map)
		resv_map_release(&resv_map->refs);
	clear_inode(inode);
}

static void hugetlb_vmtruncate(struct inode *inode, loff_t offset)
{
	pgoff_t pgoff;
	struct address_space *mapping = inode->i_mapping;
	struct hstate *h = hstate_inode(inode);

	BUG_ON(offset & ~huge_page_mask(h));
	pgoff = offset >> PAGE_SHIFT;

	i_size_write(inode, offset);
	i_mmap_lock_write(mapping);
	if (!RB_EMPTY_ROOT(&mapping->i_mmap.rb_root))
		hugetlb_vmdelete_list(&mapping->i_mmap, pgoff, 0,
				      ZAP_FLAG_DROP_MARKER);
	i_mmap_unlock_write(mapping);
	remove_inode_hugepages(inode, offset, LLONG_MAX);
}

static void hugetlbfs_zero_partial_page(struct hstate *h,
					struct address_space *mapping,
					loff_t start,
					loff_t end)
{
	pgoff_t idx = start >> huge_page_shift(h);
	struct folio *folio;

	folio = filemap_lock_folio(mapping, idx);
	if (IS_ERR(folio))
		return;

	start = start & ~huge_page_mask(h);
	end = end & ~huge_page_mask(h);
	if (!end)
		end = huge_page_size(h);

	folio_zero_segment(folio, (size_t)start, (size_t)end);

	folio_unlock(folio);
	folio_put(folio);
}

static long hugetlbfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
{
	struct hugetlbfs_inode_info *info = HUGETLBFS_I(inode);
	struct address_space *mapping = inode->i_mapping;
	struct hstate *h = hstate_inode(inode);
	loff_t hpage_size = huge_page_size(h);
	loff_t hole_start, hole_end;

	/*
	 * hole_start and hole_end indicate the full pages within the hole.
	 */
	hole_start = round_up(offset, hpage_size);
	hole_end = round_down(offset + len, hpage_size);

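	/*
	 * Worked example (editor's note, assuming 2 MiB huge pages):
	 * punching offset = 1 MiB, len = 4 MiB gives hole_start = 2 MiB
	 * and hole_end = 4 MiB.  The partial ranges 1..2 MiB and 4..5 MiB
	 * are only zeroed below; just the full page spanning 2..4 MiB is
	 * unmapped and removed from the file.
	 */
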
	inode_lock(inode);

	/* protected by i_rwsem */
	if (info->seals & (F_SEAL_WRITE | F_SEAL_FUTURE_WRITE)) {
		inode_unlock(inode);
		return -EPERM;
	}

	i_mmap_lock_write(mapping);

	/* If range starts before first full page, zero partial page. */
	if (offset < hole_start)
		hugetlbfs_zero_partial_page(h, mapping,
				offset, min(offset + len, hole_start));

	/* Unmap users of full pages in the hole. */
	if (hole_end > hole_start) {
		if (!RB_EMPTY_ROOT(&mapping->i_mmap.rb_root))
			hugetlb_vmdelete_list(&mapping->i_mmap,
					      hole_start >> PAGE_SHIFT,
					      hole_end >> PAGE_SHIFT, 0);
	}

	/* If range extends beyond last full page, zero partial page. */
	if ((offset + len) > hole_end && (offset + len) > hole_start)
		hugetlbfs_zero_partial_page(h, mapping,
				hole_end, offset + len);

	i_mmap_unlock_write(mapping);

	/* Remove full pages from the file. */
	if (hole_end > hole_start)
		remove_inode_hugepages(inode, hole_start, hole_end);

	inode_unlock(inode);

	return 0;
}

static long hugetlbfs_fallocate(struct file *file, int mode, loff_t offset,
				loff_t len)
{
	struct inode *inode = file_inode(file);
	struct hugetlbfs_inode_info *info = HUGETLBFS_I(inode);
	struct address_space *mapping = inode->i_mapping;
	struct hstate *h = hstate_inode(inode);
	struct vm_area_struct pseudo_vma;
	struct mm_struct *mm = current->mm;
	loff_t hpage_size = huge_page_size(h);
	unsigned long hpage_shift = huge_page_shift(h);
	pgoff_t start, index, end;
	int error;
	u32 hash;

	if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
		return -EOPNOTSUPP;

	if (mode & FALLOC_FL_PUNCH_HOLE)
		return hugetlbfs_punch_hole(inode, offset, len);

	/*
	 * Default preallocate case.
	 * For this range, start is rounded down and end is rounded up
	 * as well as being converted to page offsets.
	 */
	start = offset >> hpage_shift;
	end = (offset + len + hpage_size - 1) >> hpage_shift;

	inode_lock(inode);

	/* We need to check rlimit even when FALLOC_FL_KEEP_SIZE */
	error = inode_newsize_ok(inode, offset + len);
	if (error)
		goto out;

	if ((info->seals & F_SEAL_GROW) && offset + len > inode->i_size) {
		error = -EPERM;
		goto out;
	}

	/*
	 * Initialize a pseudo vma as this is required by the huge page
	 * allocation routines.  If NUMA is configured, use page index
	 * as input to create an allocation policy.
	 */
	vma_init(&pseudo_vma, mm);
	vm_flags_init(&pseudo_vma, VM_HUGETLB | VM_MAYSHARE | VM_SHARED);
	pseudo_vma.vm_file = file;

	for (index = start; index < end; index++) {
		/*
		 * This is supposed to be the vaddr where the page is being
		 * faulted in, but we have no vaddr here.
		 */
		struct folio *folio;
		unsigned long addr;

		cond_resched();

		/*
		 * fallocate(2) manpage permits EINTR; we may have been
		 * interrupted because we are using up too much memory.
		 */
		if (signal_pending(current)) {
			error = -EINTR;
			break;
		}

		/* addr is the offset within the file (zero based) */
		addr = index * hpage_size;

		/* mutex taken here, fault path and hole punch */
		hash = hugetlb_fault_mutex_hash(mapping, index);
		mutex_lock(&hugetlb_fault_mutex_table[hash]);

		/* See if already present in mapping to avoid alloc/free */
		folio = filemap_get_folio(mapping, index);
		if (!IS_ERR(folio)) {
			folio_put(folio);
			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
			continue;
		}

		/*
		 * Allocate folio without setting the avoid_reserve argument.
		 * There certainly are no reserves associated with the
		 * pseudo_vma.  However, there could be shared mappings with
		 * reserves for the file at the inode level.  If we fallocate
		 * folios in these areas, we need to consume the reserves
		 * to keep reservation accounting consistent.
		 */
		hugetlb_set_vma_policy(&pseudo_vma, inode, index);
		folio = alloc_hugetlb_folio(&pseudo_vma, addr, 0);
		hugetlb_drop_vma_policy(&pseudo_vma);
		if (IS_ERR(folio)) {
			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
			error = PTR_ERR(folio);
			goto out;
		}
		clear_huge_page(&folio->page, addr, pages_per_huge_page(h));
		__folio_mark_uptodate(folio);
		error = hugetlb_add_to_page_cache(folio, mapping, index);
		if (unlikely(error)) {
			restore_reserve_on_error(h, &pseudo_vma, addr, folio);
			folio_put(folio);
			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
			goto out;
		}

		mutex_unlock(&hugetlb_fault_mutex_table[hash]);

		folio_set_hugetlb_migratable(folio);
		/*
		 * folio_unlock because locked by hugetlb_add_to_page_cache()
		 * folio_put() due to reference from alloc_hugetlb_folio()
		 */
		folio_unlock(folio);
		folio_put(folio);
	}

	if (!(mode & FALLOC_FL_KEEP_SIZE) && offset + len > inode->i_size)
		i_size_write(inode, offset + len);
	inode_set_ctime_current(inode);
out:
	inode_unlock(inode);
	return error;
}

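/*
 * Example from userspace (editor's illustration, assumed fd and sizes):
 * on a 2 MiB hugetlbfs file, fallocate(fd, 0, 0, 4 << 20) preallocates
 * two huge pages into the page cache and extends i_size to 4 MiB, while
 * fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE, 0, 2 << 20)
 * releases the first page again via hugetlbfs_punch_hole().
 */
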
static int hugetlbfs_setattr(struct mnt_idmap *idmap,
			     struct dentry *dentry, struct iattr *attr)
{
	struct inode *inode = d_inode(dentry);
	struct hstate *h = hstate_inode(inode);
	int error;
	unsigned int ia_valid = attr->ia_valid;
	struct hugetlbfs_inode_info *info = HUGETLBFS_I(inode);

	error = setattr_prepare(&nop_mnt_idmap, dentry, attr);
	if (error)
		return error;

	if (ia_valid & ATTR_SIZE) {
		loff_t oldsize = inode->i_size;
		loff_t newsize = attr->ia_size;

		if (newsize & ~huge_page_mask(h))
			return -EINVAL;
		/* protected by i_rwsem */
		if ((newsize < oldsize && (info->seals & F_SEAL_SHRINK)) ||
		    (newsize > oldsize && (info->seals & F_SEAL_GROW)))
			return -EPERM;
		hugetlb_vmtruncate(inode, newsize);
	}

	setattr_copy(&nop_mnt_idmap, inode, attr);
	mark_inode_dirty(inode);
	return 0;
}

static struct inode *hugetlbfs_get_root(struct super_block *sb,
					struct hugetlbfs_fs_context *ctx)
{
	struct inode *inode;

	inode = new_inode(sb);
	if (inode) {
		inode->i_ino = get_next_ino();
		inode->i_mode = S_IFDIR | ctx->mode;
		inode->i_uid = ctx->uid;
		inode->i_gid = ctx->gid;
		inode->i_atime = inode->i_mtime = inode_set_ctime_current(inode);
		inode->i_op = &hugetlbfs_dir_inode_operations;
		inode->i_fop = &simple_dir_operations;
		/* directory inodes start off with i_nlink == 2 (for "." entry) */
		inc_nlink(inode);
		lockdep_annotate_inode_mutex_key(inode);
	}
	return inode;
}

/*
 * Hugetlbfs is not reclaimable; therefore its i_mmap_rwsem will never
 * be taken from reclaim -- unlike regular filesystems. This needs an
 * annotation because huge_pmd_share() does an allocation under hugetlb's
 * i_mmap_rwsem.
 */
static struct lock_class_key hugetlbfs_i_mmap_rwsem_key;

static struct inode *hugetlbfs_get_inode(struct super_block *sb,
					struct inode *dir,
					umode_t mode, dev_t dev)
{
	struct inode *inode;
	struct resv_map *resv_map = NULL;

	/*
	 * Reserve maps are only needed for inodes that can have associated
	 * page allocations.
	 */
	if (S_ISREG(mode) || S_ISLNK(mode)) {
		resv_map = resv_map_alloc();
		if (!resv_map)
			return NULL;
	}

	inode = new_inode(sb);
	if (inode) {
		struct hugetlbfs_inode_info *info = HUGETLBFS_I(inode);

		inode->i_ino = get_next_ino();
		inode_init_owner(&nop_mnt_idmap, inode, dir, mode);
		lockdep_set_class(&inode->i_mapping->i_mmap_rwsem,
				&hugetlbfs_i_mmap_rwsem_key);
		inode->i_mapping->a_ops = &hugetlbfs_aops;
		inode->i_atime = inode->i_mtime = inode_set_ctime_current(inode);
		inode->i_mapping->private_data = resv_map;
		info->seals = F_SEAL_SEAL;
		switch (mode & S_IFMT) {
		default:
			init_special_inode(inode, mode, dev);
			break;
		case S_IFREG:
			inode->i_op = &hugetlbfs_inode_operations;
			inode->i_fop = &hugetlbfs_file_operations;
			break;
		case S_IFDIR:
			inode->i_op = &hugetlbfs_dir_inode_operations;
			inode->i_fop = &simple_dir_operations;

			/* directory inodes start off with i_nlink == 2 (for "." entry) */
			inc_nlink(inode);
			break;
		case S_IFLNK:
			inode->i_op = &page_symlink_inode_operations;
			inode_nohighmem(inode);
			break;
		}
		lockdep_annotate_inode_mutex_key(inode);
	} else {
		if (resv_map)
			kref_put(&resv_map->refs, resv_map_release);
	}

	return inode;
}

/*
 * File creation.  Allocate an inode, and we're done.
 */
static int hugetlbfs_mknod(struct mnt_idmap *idmap, struct inode *dir,
			   struct dentry *dentry, umode_t mode, dev_t dev)
{
	struct inode *inode;

	inode = hugetlbfs_get_inode(dir->i_sb, dir, mode, dev);
	if (!inode)
		return -ENOSPC;
	dir->i_mtime = inode_set_ctime_current(dir);
	d_instantiate(dentry, inode);
	dget(dentry);	/* Extra count - pin the dentry in core */
	return 0;
}

static int hugetlbfs_mkdir(struct mnt_idmap *idmap, struct inode *dir,
			   struct dentry *dentry, umode_t mode)
{
	int retval = hugetlbfs_mknod(&nop_mnt_idmap, dir, dentry,
				     mode | S_IFDIR, 0);
	if (!retval)
		inc_nlink(dir);
	return retval;
}

static int hugetlbfs_create(struct mnt_idmap *idmap,
			    struct inode *dir, struct dentry *dentry,
			    umode_t mode, bool excl)
{
	return hugetlbfs_mknod(&nop_mnt_idmap, dir, dentry, mode | S_IFREG, 0);
}

static int hugetlbfs_tmpfile(struct mnt_idmap *idmap,
			     struct inode *dir, struct file *file,
			     umode_t mode)
{
	struct inode *inode;

	inode = hugetlbfs_get_inode(dir->i_sb, dir, mode | S_IFREG, 0);
	if (!inode)
		return -ENOSPC;
	dir->i_mtime = inode_set_ctime_current(dir);
	d_tmpfile(file, inode);
	return finish_open_simple(file, 0);
}

static int hugetlbfs_symlink(struct mnt_idmap *idmap,
			     struct inode *dir, struct dentry *dentry,
			     const char *symname)
{
	struct inode *inode;
	int error = -ENOSPC;

	inode = hugetlbfs_get_inode(dir->i_sb, dir, S_IFLNK|S_IRWXUGO, 0);
	if (inode) {
		int l = strlen(symname)+1;
		error = page_symlink(inode, symname, l);
		if (!error) {
			d_instantiate(dentry, inode);
			dget(dentry);
		} else
			iput(inode);
	}
	dir->i_mtime = inode_set_ctime_current(dir);

	return error;
}

#ifdef CONFIG_MIGRATION
static int hugetlbfs_migrate_folio(struct address_space *mapping,
				struct folio *dst, struct folio *src,
				enum migrate_mode mode)
{
	int rc;

	rc = migrate_huge_page_move_mapping(mapping, dst, src);
	if (rc != MIGRATEPAGE_SUCCESS)
		return rc;

	if (hugetlb_folio_subpool(src)) {
		hugetlb_set_folio_subpool(dst,
					hugetlb_folio_subpool(src));
		hugetlb_set_folio_subpool(src, NULL);
	}

	if (mode != MIGRATE_SYNC_NO_COPY)
		folio_migrate_copy(dst, src);
	else
		folio_migrate_flags(dst, src);

	return MIGRATEPAGE_SUCCESS;
}
#else
#define hugetlbfs_migrate_folio NULL
#endif

static int hugetlbfs_error_remove_page(struct address_space *mapping,
				struct page *page)
{
	return 0;
}

/*
 * Display the mount options in /proc/mounts.
 */
static int hugetlbfs_show_options(struct seq_file *m, struct dentry *root)
{
	struct hugetlbfs_sb_info *sbinfo = HUGETLBFS_SB(root->d_sb);
	struct hugepage_subpool *spool = sbinfo->spool;
	unsigned long hpage_size = huge_page_size(sbinfo->hstate);
	unsigned hpage_shift = huge_page_shift(sbinfo->hstate);
	char mod;

	if (!uid_eq(sbinfo->uid, GLOBAL_ROOT_UID))
		seq_printf(m, ",uid=%u",
			   from_kuid_munged(&init_user_ns, sbinfo->uid));
	if (!gid_eq(sbinfo->gid, GLOBAL_ROOT_GID))
		seq_printf(m, ",gid=%u",
			   from_kgid_munged(&init_user_ns, sbinfo->gid));
	if (sbinfo->mode != 0755)
		seq_printf(m, ",mode=%o", sbinfo->mode);
	if (sbinfo->max_inodes != -1)
		seq_printf(m, ",nr_inodes=%lu", sbinfo->max_inodes);

	hpage_size /= 1024;
	mod = 'K';
	if (hpage_size >= 1024) {
		hpage_size /= 1024;
		mod = 'M';
	}
	seq_printf(m, ",pagesize=%lu%c", hpage_size, mod);
	if (spool) {
		if (spool->max_hpages != -1)
			seq_printf(m, ",size=%llu",
				   (unsigned long long)spool->max_hpages << hpage_shift);
		if (spool->min_hpages != -1)
			seq_printf(m, ",min_size=%llu",
				   (unsigned long long)spool->min_hpages << hpage_shift);
	}
	return 0;
}

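/*
 * Sample output (editor's illustration, assumed configuration): a mount
 * with uid=1000, 2 MiB pages and a 512-page subpool limit would append
 * ",uid=1000,pagesize=2M,size=1073741824" to its /proc/mounts entry,
 * since "size" and "min_size" are reported in bytes.
 */
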
static int hugetlbfs_statfs(struct dentry *dentry, struct kstatfs *buf)
{
	struct hugetlbfs_sb_info *sbinfo = HUGETLBFS_SB(dentry->d_sb);
	struct hstate *h = hstate_inode(d_inode(dentry));

	buf->f_type = HUGETLBFS_MAGIC;
	buf->f_bsize = huge_page_size(h);
	if (sbinfo) {
		spin_lock(&sbinfo->stat_lock);
		/*
		 * If no limits set, just report 0 or -1 for max/free/used
		 * blocks, like simple_statfs()
		 */
		if (sbinfo->spool) {
			long free_pages;

			spin_lock_irq(&sbinfo->spool->lock);
			buf->f_blocks = sbinfo->spool->max_hpages;
			free_pages = sbinfo->spool->max_hpages
				- sbinfo->spool->used_hpages;
			buf->f_bavail = buf->f_bfree = free_pages;
			spin_unlock_irq(&sbinfo->spool->lock);
			buf->f_files = sbinfo->max_inodes;
			buf->f_ffree = sbinfo->free_inodes;
		}
		spin_unlock(&sbinfo->stat_lock);
	}
	buf->f_namelen = NAME_MAX;
	return 0;
}

static void hugetlbfs_put_super(struct super_block *sb)
{
	struct hugetlbfs_sb_info *sbi = HUGETLBFS_SB(sb);

	if (sbi) {
		sb->s_fs_info = NULL;

		if (sbi->spool)
			hugepage_put_subpool(sbi->spool);

		kfree(sbi);
	}
}

static inline int hugetlbfs_dec_free_inodes(struct hugetlbfs_sb_info *sbinfo)
{
	if (sbinfo->free_inodes >= 0) {
		spin_lock(&sbinfo->stat_lock);
		if (unlikely(!sbinfo->free_inodes)) {
			spin_unlock(&sbinfo->stat_lock);
			return 0;
		}
		sbinfo->free_inodes--;
		spin_unlock(&sbinfo->stat_lock);
	}

	return 1;
}

static void hugetlbfs_inc_free_inodes(struct hugetlbfs_sb_info *sbinfo)
{
	if (sbinfo->free_inodes >= 0) {
		spin_lock(&sbinfo->stat_lock);
		sbinfo->free_inodes++;
		spin_unlock(&sbinfo->stat_lock);
	}
}


static struct kmem_cache *hugetlbfs_inode_cachep;

static struct inode *hugetlbfs_alloc_inode(struct super_block *sb)
{
	struct hugetlbfs_sb_info *sbinfo = HUGETLBFS_SB(sb);
	struct hugetlbfs_inode_info *p;

	if (unlikely(!hugetlbfs_dec_free_inodes(sbinfo)))
		return NULL;
	p = alloc_inode_sb(sb, hugetlbfs_inode_cachep, GFP_KERNEL);
	if (unlikely(!p)) {
		hugetlbfs_inc_free_inodes(sbinfo);
		return NULL;
	}

	/*
	 * Any time after allocation, hugetlbfs_destroy_inode can be called
	 * for the inode.  mpol_free_shared_policy is unconditionally called
	 * as part of hugetlbfs_destroy_inode.  So, initialize policy here
	 * in case of a quick call to destroy.
	 *
	 * Note that the policy is initialized even if we are creating a
	 * private inode.  This simplifies hugetlbfs_destroy_inode.
	 */
	mpol_shared_policy_init(&p->policy, NULL);

	return &p->vfs_inode;
}

static void hugetlbfs_free_inode(struct inode *inode)
{
	kmem_cache_free(hugetlbfs_inode_cachep, HUGETLBFS_I(inode));
}

static void hugetlbfs_destroy_inode(struct inode *inode)
{
	hugetlbfs_inc_free_inodes(HUGETLBFS_SB(inode->i_sb));
	mpol_free_shared_policy(&HUGETLBFS_I(inode)->policy);
}

static const struct address_space_operations hugetlbfs_aops = {
	.write_begin	= hugetlbfs_write_begin,
	.write_end	= hugetlbfs_write_end,
	.dirty_folio	= noop_dirty_folio,
	.migrate_folio	= hugetlbfs_migrate_folio,
	.error_remove_page	= hugetlbfs_error_remove_page,
};


static void init_once(void *foo)
{
	struct hugetlbfs_inode_info *ei = foo;

	inode_init_once(&ei->vfs_inode);
}

const struct file_operations hugetlbfs_file_operations = {
	.read_iter		= hugetlbfs_read_iter,
	.mmap			= hugetlbfs_file_mmap,
	.fsync			= noop_fsync,
	.get_unmapped_area	= hugetlb_get_unmapped_area,
	.llseek			= default_llseek,
	.fallocate		= hugetlbfs_fallocate,
};

static const struct inode_operations hugetlbfs_dir_inode_operations = {
	.create		= hugetlbfs_create,
	.lookup		= simple_lookup,
	.link		= simple_link,
	.unlink		= simple_unlink,
	.symlink	= hugetlbfs_symlink,
	.mkdir		= hugetlbfs_mkdir,
	.rmdir		= simple_rmdir,
	.mknod		= hugetlbfs_mknod,
	.rename		= simple_rename,
	.setattr	= hugetlbfs_setattr,
	.tmpfile	= hugetlbfs_tmpfile,
};

static const struct inode_operations hugetlbfs_inode_operations = {
	.setattr	= hugetlbfs_setattr,
};

static const struct super_operations hugetlbfs_ops = {
	.alloc_inode	= hugetlbfs_alloc_inode,
	.free_inode	= hugetlbfs_free_inode,
	.destroy_inode	= hugetlbfs_destroy_inode,
	.evict_inode	= hugetlbfs_evict_inode,
	.statfs		= hugetlbfs_statfs,
	.put_super	= hugetlbfs_put_super,
	.show_options	= hugetlbfs_show_options,
};

/*
 * Convert size option passed from command line to number of huge pages
 * in the pool specified by hstate.  Size option could be in bytes
 * (val_type == SIZE_STD) or percentage of the pool (val_type == SIZE_PERCENT).
 */
static long
hugetlbfs_size_to_hpages(struct hstate *h, unsigned long long size_opt,
			 enum hugetlbfs_size_type val_type)
{
	if (val_type == NO_SIZE)
		return -1;

	if (val_type == SIZE_PERCENT) {
		size_opt <<= huge_page_shift(h);
		size_opt *= h->max_huge_pages;
		do_div(size_opt, 100);
	}

	size_opt >>= huge_page_shift(h);
	return size_opt;
}

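/*
 * Worked example (editor's note, assumed pool): with 2 MiB huge pages
 * and max_huge_pages == 1024, "size=50%" arrives as size_opt == 50 with
 * SIZE_PERCENT and becomes ((50 << 21) * 1024 / 100) >> 21 = 512 pages;
 * "size=1G" arrives as size_opt == 1 << 30 with SIZE_STD and likewise
 * converts to (1 << 30) >> 21 = 512 pages.
 */
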
32021982 DH |
1386 | /* |
1387 | * Parse one mount parameter. | |
1388 | */ | |
1389 | static int hugetlbfs_parse_param(struct fs_context *fc, struct fs_parameter *param) | |
1da177e4 | 1390 | { |
32021982 DH |
1391 | struct hugetlbfs_fs_context *ctx = fc->fs_private; |
1392 | struct fs_parse_result result; | |
1393 | char *rest; | |
1394 | unsigned long ps; | |
1395 | int opt; | |
1396 | ||
d7167b14 | 1397 | opt = fs_parse(fc, hugetlb_fs_parameters, param, &result); |
32021982 DH |
1398 | if (opt < 0) |
1399 | return opt; | |
1400 | ||
1401 | switch (opt) { | |
1402 | case Opt_uid: | |
1403 | ctx->uid = make_kuid(current_user_ns(), result.uint_32); | |
1404 | if (!uid_valid(ctx->uid)) | |
1405 | goto bad_val; | |
1da177e4 | 1406 | return 0; |
1da177e4 | 1407 | |
32021982 DH |
1408 | case Opt_gid: |
1409 | ctx->gid = make_kgid(current_user_ns(), result.uint_32); | |
1410 | if (!gid_valid(ctx->gid)) | |
1411 | goto bad_val; | |
1412 | return 0; | |
e73a75fa | 1413 | |
32021982 DH |
1414 | case Opt_mode: |
1415 | ctx->mode = result.uint_32 & 01777U; | |
1416 | return 0; | |
e73a75fa | 1417 | |
32021982 DH |
1418 | case Opt_size: |
1419 | /* memparse() will accept a K/M/G without a digit */ | |
26215b7e | 1420 | if (!param->string || !isdigit(param->string[0])) |
32021982 DH |
1421 | goto bad_val; |
1422 | ctx->max_size_opt = memparse(param->string, &rest); | |
1423 | ctx->max_val_type = SIZE_STD; | |
1424 | if (*rest == '%') | |
1425 | ctx->max_val_type = SIZE_PERCENT; | |
1426 | return 0; | |
e73a75fa | 1427 | |
32021982 DH |
1428 | case Opt_nr_inodes: |
1429 | /* memparse() will accept a K/M/G without a digit */ | |
26215b7e | 1430 | if (!param->string || !isdigit(param->string[0])) |
32021982 DH |
1431 | goto bad_val; |
1432 | ctx->nr_inodes = memparse(param->string, &rest); | |
1433 | return 0; | |
e73a75fa | 1434 | |
32021982 DH |
1435 | case Opt_pagesize: |
1436 | ps = memparse(param->string, &rest); | |
1437 | ctx->hstate = size_to_hstate(ps); | |
1438 | if (!ctx->hstate) { | |
d0036517 | 1439 | pr_err("Unsupported page size %lu MB\n", ps / SZ_1M); |
32021982 | 1440 | return -EINVAL; |
e73a75fa | 1441 | } |
32021982 | 1442 | return 0; |
1da177e4 | 1443 | |
32021982 DH |
1444 | case Opt_min_size: |
1445 | /* memparse() will accept a K/M/G without a digit */ | |
26215b7e | 1446 | if (!param->string || !isdigit(param->string[0])) |
32021982 DH |
1447 | goto bad_val; |
1448 | ctx->min_size_opt = memparse(param->string, &rest); | |
1449 | ctx->min_val_type = SIZE_STD; | |
1450 | if (*rest == '%') | |
1451 | ctx->min_val_type = SIZE_PERCENT; | |
1452 | return 0; | |
e73a75fa | 1453 | |
32021982 DH |
1454 | default: |
1455 | return -EINVAL; | |
1456 | } | |
a137e1cc | 1457 | |
32021982 | 1458 | bad_val: |
b5db30cf | 1459 | return invalfc(fc, "Bad value '%s' for mount option '%s'\n", |
32021982 DH |
1460 | param->string, param->key); |
1461 | } | |
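
/*
 * Example usage (editor's illustration): the options handled above
 * correspond to mount invocations such as
 *
 *	mount -t hugetlbfs -o uid=1000,gid=1000,mode=0700 none /mnt/huge
 *	mount -t hugetlbfs -o pagesize=2M,size=1G,min_size=50% none /mnt/huge
 */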

/*
 * Validate the parsed options.
 */
static int hugetlbfs_validate(struct fs_context *fc)
{
	struct hugetlbfs_fs_context *ctx = fc->fs_private;

	/*
	 * Use huge page pool size (in hstate) to convert the size
	 * options to number of huge pages.  If NO_SIZE, -1 is returned.
	 */
	ctx->max_hpages = hugetlbfs_size_to_hpages(ctx->hstate,
						   ctx->max_size_opt,
						   ctx->max_val_type);
	ctx->min_hpages = hugetlbfs_size_to_hpages(ctx->hstate,
						   ctx->min_size_opt,
						   ctx->min_val_type);

	/*
	 * If max_size was specified, then min_size must not be larger.
	 */
	if (ctx->max_val_type > NO_SIZE &&
	    ctx->min_hpages > ctx->max_hpages) {
		pr_err("Minimum size can not be greater than maximum size\n");
		return -EINVAL;
	}

	return 0;
}
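
/*
 * For illustration (editor's example, assuming 2MB huge pages):
 * "size=1G,min_size=2G" parses to max_hpages = 512 and min_hpages = 1024,
 * so the check above fails the mount with -EINVAL.
 */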

static int
hugetlbfs_fill_super(struct super_block *sb, struct fs_context *fc)
{
	struct hugetlbfs_fs_context *ctx = fc->fs_private;
	struct hugetlbfs_sb_info *sbinfo;

	sbinfo = kmalloc(sizeof(struct hugetlbfs_sb_info), GFP_KERNEL);
	if (!sbinfo)
		return -ENOMEM;
	sb->s_fs_info = sbinfo;
	spin_lock_init(&sbinfo->stat_lock);
	sbinfo->hstate		= ctx->hstate;
	sbinfo->max_inodes	= ctx->nr_inodes;
	sbinfo->free_inodes	= ctx->nr_inodes;
	sbinfo->spool		= NULL;
	sbinfo->uid		= ctx->uid;
	sbinfo->gid		= ctx->gid;
	sbinfo->mode		= ctx->mode;

	/*
	 * Allocate and initialize subpool if maximum or minimum size is
	 * specified.  Any needed reservations (for minimum size) are taken
	 * when the subpool is created.
	 */
	if (ctx->max_hpages != -1 || ctx->min_hpages != -1) {
		sbinfo->spool = hugepage_new_subpool(ctx->hstate,
						     ctx->max_hpages,
						     ctx->min_hpages);
		if (!sbinfo->spool)
			goto out_free;
	}
	sb->s_maxbytes = MAX_LFS_FILESIZE;
	sb->s_blocksize = huge_page_size(ctx->hstate);
	sb->s_blocksize_bits = huge_page_shift(ctx->hstate);
	sb->s_magic = HUGETLBFS_MAGIC;
	sb->s_op = &hugetlbfs_ops;
	sb->s_time_gran = 1;

	/*
	 * Due to the special and limited functionality of hugetlbfs, it does
	 * not work well as a stacking filesystem.
	 */
	sb->s_stack_depth = FILESYSTEM_MAX_STACK_DEPTH;
	sb->s_root = d_make_root(hugetlbfs_get_root(sb, ctx));
	if (!sb->s_root)
		goto out_free;
	return 0;
out_free:
	kfree(sbinfo->spool);
	kfree(sbinfo);
	return -ENOMEM;
}
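
/*
 * Illustration (editor's note): with "pagesize=2M", s_blocksize above is
 * 2097152, so statfs() on the mount reports f_bsize = 2M and block counts
 * in units of huge pages.
 */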

static int hugetlbfs_get_tree(struct fs_context *fc)
{
	int err = hugetlbfs_validate(fc);
	if (err)
		return err;
	return get_tree_nodev(fc, hugetlbfs_fill_super);
}

static void hugetlbfs_fs_context_free(struct fs_context *fc)
{
	kfree(fc->fs_private);
}

static const struct fs_context_operations hugetlbfs_fs_context_ops = {
	.free			= hugetlbfs_fs_context_free,
	.parse_param		= hugetlbfs_parse_param,
	.get_tree		= hugetlbfs_get_tree,
};

static int hugetlbfs_init_fs_context(struct fs_context *fc)
{
	struct hugetlbfs_fs_context *ctx;

	ctx = kzalloc(sizeof(struct hugetlbfs_fs_context), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	ctx->max_hpages	= -1; /* No limit on size by default */
	ctx->nr_inodes	= -1; /* No limit on number of inodes by default */
	ctx->uid	= current_fsuid();
	ctx->gid	= current_fsgid();
	ctx->mode	= 0755;
	ctx->hstate	= &default_hstate;
	ctx->min_hpages	= -1; /* No default minimum size */
	ctx->max_val_type = NO_SIZE;
	ctx->min_val_type = NO_SIZE;
	fc->fs_private = ctx;
	fc->ops	= &hugetlbfs_fs_context_ops;
	return 0;
}

static struct file_system_type hugetlbfs_fs_type = {
	.name			= "hugetlbfs",
	.init_fs_context	= hugetlbfs_init_fs_context,
	.parameters		= hugetlb_fs_parameters,
	.kill_sb		= kill_litter_super,
};

static struct vfsmount *hugetlbfs_vfsmount[HUGE_MAX_HSTATE];

static int can_do_hugetlb_shm(void)
{
	kgid_t shm_group;
	shm_group = make_kgid(&init_user_ns, sysctl_hugetlb_shm_group);
	return capable(CAP_IPC_LOCK) || in_group_p(shm_group);
}
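
/*
 * Note (editor's illustration): the group checked above is configured via
 * the vm.hugetlb_shm_group sysctl, e.g.
 *
 *	sysctl -w vm.hugetlb_shm_group=1001
 *
 * so members of that group may use SHM_HUGETLB without CAP_IPC_LOCK.
 */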

static int get_hstate_idx(int page_size_log)
{
	struct hstate *h = hstate_sizelog(page_size_log);

	if (!h)
		return -1;
	return hstate_index(h);
}

/*
 * Note that size should be aligned to the proper hugepage size on the
 * caller's side; otherwise hugetlb_reserve_pages() reserves one huge page
 * fewer than intended.
 */
struct file *hugetlb_file_setup(const char *name, size_t size,
				vm_flags_t acctflag, int creat_flags,
				int page_size_log)
{
	struct inode *inode;
	struct vfsmount *mnt;
	int hstate_idx;
	struct file *file;

	hstate_idx = get_hstate_idx(page_size_log);
	if (hstate_idx < 0)
		return ERR_PTR(-ENODEV);

	mnt = hugetlbfs_vfsmount[hstate_idx];
	if (!mnt)
		return ERR_PTR(-ENOENT);

	if (creat_flags == HUGETLB_SHMFS_INODE && !can_do_hugetlb_shm()) {
		struct ucounts *ucounts = current_ucounts();

		if (user_shm_lock(size, ucounts)) {
			pr_warn_once("%s (%d): Using mlock ulimits for SHM_HUGETLB is obsolete\n",
				current->comm, current->pid);
			user_shm_unlock(size, ucounts);
		}
		return ERR_PTR(-EPERM);
	}

	file = ERR_PTR(-ENOSPC);
	inode = hugetlbfs_get_inode(mnt->mnt_sb, NULL, S_IFREG | S_IRWXUGO, 0);
	if (!inode)
		goto out;
	if (creat_flags == HUGETLB_SHMFS_INODE)
		inode->i_flags |= S_PRIVATE;

	inode->i_size = size;
	clear_nlink(inode);

	if (!hugetlb_reserve_pages(inode, 0,
			size >> huge_page_shift(hstate_inode(inode)), NULL,
			acctflag))
		file = ERR_PTR(-ENOMEM);
	else
		file = alloc_file_pseudo(inode, mnt, name, O_RDWR,
					&hugetlbfs_file_operations);
	if (!IS_ERR(file))
		return file;

	iput(inode);
out:
	return file;
}
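
/*
 * Illustrative user-visible paths that end up here (editor's examples,
 * not exhaustive):
 *
 *	shmget(key, size, IPC_CREAT | SHM_HUGETLB | 0600);
 *	mmap(NULL, size, PROT_READ | PROT_WRITE,
 *	     MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0);
 *	memfd_create("name", MFD_HUGETLB);
 *
 * Only the SHM_HUGETLB path uses HUGETLB_SHMFS_INODE and is therefore
 * subject to the can_do_hugetlb_shm() check above.
 */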

static struct vfsmount *__init mount_one_hugetlbfs(struct hstate *h)
{
	struct fs_context *fc;
	struct vfsmount *mnt;

	fc = fs_context_for_mount(&hugetlbfs_fs_type, SB_KERNMOUNT);
	if (IS_ERR(fc)) {
		mnt = ERR_CAST(fc);
	} else {
		struct hugetlbfs_fs_context *ctx = fc->fs_private;
		ctx->hstate = h;
		mnt = fc_mount(fc);
		put_fs_context(fc);
	}
	if (IS_ERR(mnt))
		pr_err("Cannot mount internal hugetlbfs for page size %luK\n",
		       huge_page_size(h) / SZ_1K);
	return mnt;
}

static int __init init_hugetlbfs_fs(void)
{
	struct vfsmount *mnt;
	struct hstate *h;
	int error;
	int i;

	if (!hugepages_supported()) {
		pr_info("disabling because there are no supported hugepage sizes\n");
		return -ENOTSUPP;
	}

	error = -ENOMEM;
	hugetlbfs_inode_cachep = kmem_cache_create("hugetlbfs_inode_cache",
					sizeof(struct hugetlbfs_inode_info),
					0, SLAB_ACCOUNT, init_once);
	if (hugetlbfs_inode_cachep == NULL)
		goto out;

	error = register_filesystem(&hugetlbfs_fs_type);
	if (error)
		goto out_free;

	/* default hstate mount is required */
	mnt = mount_one_hugetlbfs(&default_hstate);
	if (IS_ERR(mnt)) {
		error = PTR_ERR(mnt);
		goto out_unreg;
	}
	hugetlbfs_vfsmount[default_hstate_idx] = mnt;

	/* other hstates are optional */
	i = 0;
	for_each_hstate(h) {
		if (i == default_hstate_idx) {
			i++;
			continue;
		}

		mnt = mount_one_hugetlbfs(h);
		if (IS_ERR(mnt))
			hugetlbfs_vfsmount[i] = NULL;
		else
			hugetlbfs_vfsmount[i] = mnt;
		i++;
	}

	return 0;

 out_unreg:
	(void)unregister_filesystem(&hugetlbfs_fs_type);
 out_free:
	kmem_cache_destroy(hugetlbfs_inode_cachep);
 out:
	return error;
}
fs_initcall(init_hugetlbfs_fs)