2 * hugetlbpage-backed filesystem. Based on ramfs.
4 * Nadia Yvette Chambers, 2002
6 * Copyright (C) 2002 Linus Torvalds.
10 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
12 #include <linux/thread_info.h>
13 #include <asm/current.h>
14 #include <linux/falloc.h>
16 #include <linux/mount.h>
17 #include <linux/file.h>
18 #include <linux/kernel.h>
19 #include <linux/writeback.h>
20 #include <linux/pagemap.h>
21 #include <linux/highmem.h>
22 #include <linux/init.h>
23 #include <linux/string.h>
24 #include <linux/capability.h>
25 #include <linux/ctype.h>
26 #include <linux/backing-dev.h>
27 #include <linux/hugetlb.h>
28 #include <linux/pagevec.h>
29 #include <linux/fs_parser.h>
30 #include <linux/mman.h>
31 #include <linux/slab.h>
32 #include <linux/dnotify.h>
33 #include <linux/statfs.h>
34 #include <linux/security.h>
35 #include <linux/magic.h>
36 #include <linux/migrate.h>
37 #include <linux/uio.h>
39 #include <linux/uaccess.h>
40 #include <linux/sched/mm.h>
42 static const struct address_space_operations hugetlbfs_aops;
43 static const struct file_operations hugetlbfs_file_operations;
44 static const struct inode_operations hugetlbfs_dir_inode_operations;
45 static const struct inode_operations hugetlbfs_inode_operations;
47 enum hugetlbfs_size_type { NO_SIZE, SIZE_STD, SIZE_PERCENT };
49 struct hugetlbfs_fs_context {
50 struct hstate *hstate;
51 unsigned long long max_size_opt;
52 unsigned long long min_size_opt;
56 enum hugetlbfs_size_type max_val_type;
57 enum hugetlbfs_size_type min_val_type;
63 int sysctl_hugetlb_shm_group;
75 static const struct fs_parameter_spec hugetlb_fs_parameters[] = {
76 fsparam_u32 ("gid", Opt_gid),
77 fsparam_string("min_size", Opt_min_size),
78 fsparam_u32oct("mode", Opt_mode),
79 fsparam_string("nr_inodes", Opt_nr_inodes),
80 fsparam_string("pagesize", Opt_pagesize),
81 fsparam_string("size", Opt_size),
82 fsparam_u32 ("uid", Opt_uid),
87 * Mask used when checking the page offset value passed in via system
88 * calls. This value will be converted to a loff_t which is signed.
89 * Therefore, we want to check the upper PAGE_SHIFT + 1 bits of the
90 * value. The extra bit (- 1 in the shift value) is to take the sign bit into account.
93 #define PGOFF_LOFFT_MAX \
94 (((1UL << (PAGE_SHIFT + 1)) - 1) << (BITS_PER_LONG - (PAGE_SHIFT + 1)))
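/*
 * Worked example (illustration only, assuming a 32-bit arch with 4K pages,
 * i.e. PAGE_SHIFT = 12 and BITS_PER_LONG = 32):
 *
 *   PGOFF_LOFFT_MAX = ((1UL << 13) - 1) << (32 - 13) = 0xfff80000
 *
 * i.e. the top PAGE_SHIFT + 1 bits. A vm_pgoff with any of these bits set
 * would overflow (or turn negative) once shifted left by PAGE_SHIFT and
 * stored in a loff_t of the same width, which is what the check below
 * guards against.
 */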
96 static int hugetlbfs_file_mmap(struct file *file, struct vm_area_struct *vma)
98 struct inode *inode = file_inode(file);
99 struct hugetlbfs_inode_info *info = HUGETLBFS_I(inode);
102 struct hstate *h = hstate_file(file);
106 * vma address alignment (but not the pgoff alignment) has
107 * already been checked by prepare_hugepage_range. If you add
108 * any error returns here, do so after setting VM_HUGETLB, so
109 * is_vm_hugetlb_page tests below unmap_region go the right
110 * way when do_mmap unwinds (may be important on powerpc 64).
113 vm_flags_set(vma, VM_HUGETLB | VM_DONTEXPAND);
114 vma->vm_ops = &hugetlb_vm_ops;
116 ret = seal_check_write(info->seals, vma);
121 * page based offset in vm_pgoff could be sufficiently large to
122 * overflow a loff_t when converted to byte offset. This can
123 * only happen on architectures where sizeof(loff_t) ==
124 * sizeof(unsigned long). So, only check in those instances.
126 if (sizeof(unsigned long) == sizeof(loff_t)) {
127 if (vma->vm_pgoff & PGOFF_LOFFT_MAX)
131 /* must be huge page aligned */
132 if (vma->vm_pgoff & (~huge_page_mask(h) >> PAGE_SHIFT))
135 vma_len = (loff_t)(vma->vm_end - vma->vm_start);
136 len = vma_len + ((loff_t)vma->vm_pgoff << PAGE_SHIFT);
137 /* check for overflow */
146 vm_flags = vma->vm_flags;
148 * for SHM_HUGETLB, the pages are reserved in the shmget() call so skip
149 * reserving here. Note: the inode flag S_PRIVATE is set only for
150 * SHM hugetlbfs files.
152 if (inode->i_flags & S_PRIVATE)
153 vm_flags |= VM_NORESERVE;
155 if (!hugetlb_reserve_pages(inode,
156 vma->vm_pgoff >> huge_page_order(h),
157 len >> huge_page_shift(h), vma,
162 if (vma->vm_flags & VM_WRITE && inode->i_size < len)
163 i_size_write(inode, len);
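/*
 * Userspace usage sketch (illustrative, not part of the original source;
 * assumes a 2MB hstate mount at /mnt/huge): mappings of hugetlbfs files
 * must be huge page aligned in both length and file offset, e.g.
 *
 *   int fd = open("/mnt/huge/buf", O_CREAT | O_RDWR, 0600);
 *   size_t len = 8 * 2 * 1024 * 1024;                  // 8 huge pages
 *   void *p = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 *
 * An unaligned length or pgoff is rejected by the checks above, and
 * hugetlb_reserve_pages() reserves the huge pages backing the mapped
 * range (unless VM_NORESERVE applies).
 */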
171 * Called under mmap_write_lock(mm).
175 hugetlb_get_unmapped_area_bottomup(struct file *file, unsigned long addr,
176 unsigned long len, unsigned long pgoff, unsigned long flags)
178 struct hstate *h = hstate_file(file);
179 struct vm_unmapped_area_info info = {};
182 info.low_limit = current->mm->mmap_base;
183 info.high_limit = arch_get_mmap_end(addr, len, flags);
184 info.align_mask = PAGE_MASK & ~huge_page_mask(h);
185 return vm_unmapped_area(&info);
189 hugetlb_get_unmapped_area_topdown(struct file *file, unsigned long addr,
190 unsigned long len, unsigned long pgoff, unsigned long flags)
192 struct hstate *h = hstate_file(file);
193 struct vm_unmapped_area_info info = {};
195 info.flags = VM_UNMAPPED_AREA_TOPDOWN;
197 info.low_limit = PAGE_SIZE;
198 info.high_limit = arch_get_mmap_base(addr, current->mm->mmap_base);
199 info.align_mask = PAGE_MASK & ~huge_page_mask(h);
200 addr = vm_unmapped_area(&info);
203 * A failed mmap() very likely causes application failure,
204 * so fall back to the bottom-up function here. This scenario
205 * can happen with large stack limits and large mmap() allocations.
208 if (unlikely(offset_in_page(addr))) {
209 VM_BUG_ON(addr != -ENOMEM);
211 info.low_limit = current->mm->mmap_base;
212 info.high_limit = arch_get_mmap_end(addr, len, flags);
213 addr = vm_unmapped_area(&info);
220 generic_hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
221 unsigned long len, unsigned long pgoff,
224 struct mm_struct *mm = current->mm;
225 struct vm_area_struct *vma;
226 struct hstate *h = hstate_file(file);
227 const unsigned long mmap_end = arch_get_mmap_end(addr, len, flags);
229 if (len & ~huge_page_mask(h))
234 if (flags & MAP_FIXED) {
235 if (prepare_hugepage_range(file, addr, len))
241 addr = ALIGN(addr, huge_page_size(h));
242 vma = find_vma(mm, addr);
243 if (mmap_end - len >= addr &&
244 (!vma || addr + len <= vm_start_gap(vma)))
249 * Use MMF_TOPDOWN flag as a hint to use topdown routine.
250 * If architectures have special needs, they should define their own
251 * version of hugetlb_get_unmapped_area.
253 if (test_bit(MMF_TOPDOWN, &mm->flags))
254 return hugetlb_get_unmapped_area_topdown(file, addr, len,
256 return hugetlb_get_unmapped_area_bottomup(file, addr, len,
260 #ifndef HAVE_ARCH_HUGETLB_UNMAPPED_AREA
262 hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
263 unsigned long len, unsigned long pgoff,
266 return generic_hugetlb_get_unmapped_area(file, addr, len, pgoff, flags);
271 * Someone wants to read @bytes from a HWPOISON hugetlb @page from @offset.
272 * Returns the maximum number of bytes one can read without touching the 1st raw HWPOISON subpage.
275 * The implementation borrows the iteration logic from copy_page_to_iter*.
277 static size_t adjust_range_hwpoison(struct page *page, size_t offset, size_t bytes)
282 /* First subpage to start the loop. */
283 page = nth_page(page, offset / PAGE_SIZE);
286 if (is_raw_hwpoison_page_in_hugepage(page))
289 /* Safe to read n bytes without touching HWPOISON subpage. */
290 n = min(bytes, (size_t)PAGE_SIZE - offset);
296 if (offset == PAGE_SIZE) {
297 page = nth_page(page, 1);
306 * Support for read() - Find the page attached to f_mapping and copy out the
307 * data. This provides functionality similar to filemap_read().
309 static ssize_t hugetlbfs_read_iter(struct kiocb *iocb, struct iov_iter *to)
311 struct file *file = iocb->ki_filp;
312 struct hstate *h = hstate_file(file);
313 struct address_space *mapping = file->f_mapping;
314 struct inode *inode = mapping->host;
315 unsigned long index = iocb->ki_pos >> huge_page_shift(h);
316 unsigned long offset = iocb->ki_pos & ~huge_page_mask(h);
317 unsigned long end_index;
321 while (iov_iter_count(to)) {
323 size_t nr, copied, want;
325 /* nr is the maximum number of bytes to copy from this page */
326 nr = huge_page_size(h);
327 isize = i_size_read(inode);
330 end_index = (isize - 1) >> huge_page_shift(h);
331 if (index > end_index)
333 if (index == end_index) {
334 nr = ((isize - 1) & ~huge_page_mask(h)) + 1;
341 folio = filemap_lock_hugetlb_folio(h, mapping, index);
344 * We have a HOLE, zero out the user-buffer for the
345 * length of the hole or request.
347 copied = iov_iter_zero(nr, to);
351 if (!folio_test_hwpoison(folio))
355 * Adjust how many bytes are safe to read without
356 * touching the 1st raw HWPOISON subpage after offset.
359 want = adjust_range_hwpoison(&folio->page, offset, nr);
368 * We have the folio, copy it to user space buffer.
370 copied = copy_folio_to_iter(folio, offset, want, to);
375 if (copied != nr && iov_iter_count(to)) {
380 index += offset >> huge_page_shift(h);
381 offset &= ~huge_page_mask(h);
383 iocb->ki_pos = ((loff_t)index << huge_page_shift(h)) + offset;
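/*
 * Example of the index/offset bookkeeping above (illustration only): with
 * a 2MB hstate, a read starting at ki_pos = 5MB begins at index = 2 with
 * offset = 1MB into that folio, so at most 1MB is copied from the first
 * folio; offset then wraps to 0 and index advances for the next iteration.
 */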
387 static int hugetlbfs_write_begin(struct file *file,
388 struct address_space *mapping,
389 loff_t pos, unsigned len,
390 struct page **pagep, void **fsdata)
395 static int hugetlbfs_write_end(struct file *file, struct address_space *mapping,
396 loff_t pos, unsigned len, unsigned copied,
397 struct page *page, void *fsdata)
403 static void hugetlb_delete_from_page_cache(struct folio *folio)
405 folio_clear_dirty(folio);
406 folio_clear_uptodate(folio);
407 filemap_remove_folio(folio);
411 * Called with i_mmap_rwsem held for inode based vma maps. This makes
412 * sure vma (and vm_mm) will not go away. We also hold the hugetlb fault
413 * mutex for the page in the mapping. So, we can not race with page being
414 * faulted into the vma.
416 static bool hugetlb_vma_maps_page(struct vm_area_struct *vma,
417 unsigned long addr, struct page *page)
421 ptep = hugetlb_walk(vma, addr, huge_page_size(hstate_vma(vma)));
425 pte = huge_ptep_get(ptep);
426 if (huge_pte_none(pte) || !pte_present(pte))
429 if (pte_page(pte) == page)
436 * Can vma_offset_start/vma_offset_end overflow on 32-bit arches?
437 * No, because the interval tree returns us only those vmas
438 * which overlap the truncated area starting at pgoff,
439 * and no vma on a 32-bit arch can span beyond the 4GB boundary.
441 static unsigned long vma_offset_start(struct vm_area_struct *vma, pgoff_t start)
443 unsigned long offset = 0;
445 if (vma->vm_pgoff < start)
446 offset = (start - vma->vm_pgoff) << PAGE_SHIFT;
448 return vma->vm_start + offset;
451 static unsigned long vma_offset_end(struct vm_area_struct *vma, pgoff_t end)
458 t_end = ((end - vma->vm_pgoff) << PAGE_SHIFT) + vma->vm_start;
459 if (t_end > vma->vm_end)
465 * Called with hugetlb fault mutex held. Therefore, no more mappings to
466 * this folio can be created while executing the routine.
468 static void hugetlb_unmap_file_folio(struct hstate *h,
469 struct address_space *mapping,
470 struct folio *folio, pgoff_t index)
472 struct rb_root_cached *root = &mapping->i_mmap;
473 struct hugetlb_vma_lock *vma_lock;
474 struct page *page = &folio->page;
475 struct vm_area_struct *vma;
476 unsigned long v_start;
480 start = index * pages_per_huge_page(h);
481 end = (index + 1) * pages_per_huge_page(h);
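/*
 * Example (illustration only): start/end are expressed in base-page units,
 * so for a 2MB hstate (pages_per_huge_page() == 512) folio index 3 covers
 * base-page offsets [1536, 2048), matching what the vma interval tree
 * stores in vm_pgoff.
 */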
483 i_mmap_lock_write(mapping);
486 vma_interval_tree_foreach(vma, root, start, end - 1) {
487 v_start = vma_offset_start(vma, start);
488 v_end = vma_offset_end(vma, end);
490 if (!hugetlb_vma_maps_page(vma, v_start, page))
493 if (!hugetlb_vma_trylock_write(vma)) {
494 vma_lock = vma->vm_private_data;
496 * If we cannot get the vma lock, we need to drop
497 * i_mmap_rwsem and take the locks in order. First,
498 * take a ref on the vma_lock structure so that
499 * we can be guaranteed it will not go away when
500 * dropping i_mmap_rwsem.
502 kref_get(&vma_lock->refs);
506 unmap_hugepage_range(vma, v_start, v_end, NULL,
507 ZAP_FLAG_DROP_MARKER);
508 hugetlb_vma_unlock_write(vma);
511 i_mmap_unlock_write(mapping);
515 * Wait on vma_lock. We know it is still valid as we have
516 * a reference. We must 'open code' vma locking as we do
517 * not know if vma_lock is still attached to vma.
519 down_write(&vma_lock->rw_sema);
520 i_mmap_lock_write(mapping);
525 * If lock is no longer attached to vma, then just
526 * unlock, drop our reference and retry looking for the page.
529 up_write(&vma_lock->rw_sema);
530 kref_put(&vma_lock->refs, hugetlb_vma_lock_release);
535 * vma_lock is still attached to vma. Check to see if vma
536 * still maps page and if so, unmap.
538 v_start = vma_offset_start(vma, start);
539 v_end = vma_offset_end(vma, end);
540 if (hugetlb_vma_maps_page(vma, v_start, page))
541 unmap_hugepage_range(vma, v_start, v_end, NULL,
542 ZAP_FLAG_DROP_MARKER);
544 kref_put(&vma_lock->refs, hugetlb_vma_lock_release);
545 hugetlb_vma_unlock_write(vma);
552 hugetlb_vmdelete_list(struct rb_root_cached *root, pgoff_t start, pgoff_t end,
553 zap_flags_t zap_flags)
555 struct vm_area_struct *vma;
558 * end == 0 indicates that the entire range after start should be
559 * unmapped. Note, end is exclusive, whereas the interval tree takes
560 * an inclusive "last".
562 vma_interval_tree_foreach(vma, root, start, end ? end - 1 : ULONG_MAX) {
563 unsigned long v_start;
566 if (!hugetlb_vma_trylock_write(vma))
569 v_start = vma_offset_start(vma, start);
570 v_end = vma_offset_end(vma, end);
572 unmap_hugepage_range(vma, v_start, v_end, NULL, zap_flags);
575 * Note that vma lock only exists for shared/non-private
576 * vmas. Therefore, lock is not held when calling
577 * unmap_hugepage_range for private vmas.
579 hugetlb_vma_unlock_write(vma);
584 * Called with hugetlb fault mutex held.
585 * Returns true if page was actually removed, false otherwise.
587 static bool remove_inode_single_folio(struct hstate *h, struct inode *inode,
588 struct address_space *mapping,
589 struct folio *folio, pgoff_t index,
595 * If folio is mapped, it was faulted in after being
596 * unmapped in caller. Unmap (again) while holding
597 * the fault mutex. The mutex will prevent faults
598 * until we finish removing the folio.
600 if (unlikely(folio_mapped(folio)))
601 hugetlb_unmap_file_folio(h, mapping, folio, index);
605 * We must remove the folio from page cache before removing
606 * the region/reserve map (hugetlb_unreserve_pages). In
607 * rare out of memory conditions, removal of the region/reserve
608 * map could fail. Correspondingly, the subpool and global
609 * reserve usage counts may need to be adjusted.
611 VM_BUG_ON_FOLIO(folio_test_hugetlb_restore_reserve(folio), folio);
612 hugetlb_delete_from_page_cache(folio);
615 if (unlikely(hugetlb_unreserve_pages(inode, index,
617 hugetlb_fix_reserve_counts(inode);
625 * remove_inode_hugepages handles two distinct cases: truncation and hole
626 * punch. There are subtle differences in operation for each case.
628 * truncation is indicated by end of range being LLONG_MAX
629 * In this case, we first scan the range and release found pages.
630 * After releasing pages, hugetlb_unreserve_pages cleans up region/reserve
631 * maps and global counts. Page faults can race with truncation.
632 * During faults, hugetlb_no_page() checks i_size before page allocation,
633 * and again after obtaining page table lock. It will 'back out'
634 * allocations in the truncated range.
635 * hole punch is indicated if end is not LLONG_MAX
636 * In the hole punch case we scan the range and release found pages.
637 * Only when releasing a page is the associated region/reserve map
638 * deleted. The region/reserve map for ranges without associated
639 * pages are not modified. Page faults can race with hole punch.
640 * This is indicated if we find a mapped page.
641 * Note: If the passed end of range value is beyond the end of file, but
642 * not LLONG_MAX, this routine still performs a hole punch operation.
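/*
 * The two call patterns referred to above (sketch, drawn from the callers
 * later in this file): truncation uses
 *
 *   remove_inode_hugepages(inode, offset, LLONG_MAX);
 *
 * (see hugetlb_vmtruncate() and hugetlbfs_evict_inode()), while hole punch
 * passes an explicit end:
 *
 *   remove_inode_hugepages(inode, hole_start, hole_end);
 *
 * (see hugetlbfs_punch_hole()).
 */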
644 static void remove_inode_hugepages(struct inode *inode, loff_t lstart,
647 struct hstate *h = hstate_inode(inode);
648 struct address_space *mapping = &inode->i_data;
649 const pgoff_t end = lend >> PAGE_SHIFT;
650 struct folio_batch fbatch;
653 bool truncate_op = (lend == LLONG_MAX);
655 folio_batch_init(&fbatch);
656 next = lstart >> PAGE_SHIFT;
657 while (filemap_get_folios(mapping, &next, end - 1, &fbatch)) {
658 for (i = 0; i < folio_batch_count(&fbatch); ++i) {
659 struct folio *folio = fbatch.folios[i];
662 index = folio->index >> huge_page_order(h);
663 hash = hugetlb_fault_mutex_hash(mapping, index);
664 mutex_lock(&hugetlb_fault_mutex_table[hash]);
667 * Remove folio that was part of folio_batch.
669 if (remove_inode_single_folio(h, inode, mapping, folio,
673 mutex_unlock(&hugetlb_fault_mutex_table[hash]);
675 folio_batch_release(&fbatch);
680 (void)hugetlb_unreserve_pages(inode,
681 lstart >> huge_page_shift(h),
685 static void hugetlbfs_evict_inode(struct inode *inode)
687 struct resv_map *resv_map;
689 remove_inode_hugepages(inode, 0, LLONG_MAX);
692 * Get the resv_map from the address space embedded in the inode.
693 * This is the address space which points to any resv_map allocated
694 * at inode creation time. If this is a device special inode,
695 * i_mapping may not point to the original address space.
697 resv_map = (struct resv_map *)(&inode->i_data)->i_private_data;
698 /* Only regular and link inodes have associated reserve maps */
700 resv_map_release(&resv_map->refs);
704 static void hugetlb_vmtruncate(struct inode *inode, loff_t offset)
707 struct address_space *mapping = inode->i_mapping;
708 struct hstate *h = hstate_inode(inode);
710 BUG_ON(offset & ~huge_page_mask(h));
711 pgoff = offset >> PAGE_SHIFT;
713 i_size_write(inode, offset);
714 i_mmap_lock_write(mapping);
715 if (!RB_EMPTY_ROOT(&mapping->i_mmap.rb_root))
716 hugetlb_vmdelete_list(&mapping->i_mmap, pgoff, 0,
717 ZAP_FLAG_DROP_MARKER);
718 i_mmap_unlock_write(mapping);
719 remove_inode_hugepages(inode, offset, LLONG_MAX);
722 static void hugetlbfs_zero_partial_page(struct hstate *h,
723 struct address_space *mapping,
727 pgoff_t idx = start >> huge_page_shift(h);
730 folio = filemap_lock_hugetlb_folio(h, mapping, idx);
734 start = start & ~huge_page_mask(h);
735 end = end & ~huge_page_mask(h);
737 end = huge_page_size(h);
739 folio_zero_segment(folio, (size_t)start, (size_t)end);
745 static long hugetlbfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
747 struct hugetlbfs_inode_info *info = HUGETLBFS_I(inode);
748 struct address_space *mapping = inode->i_mapping;
749 struct hstate *h = hstate_inode(inode);
750 loff_t hpage_size = huge_page_size(h);
751 loff_t hole_start, hole_end;
754 * hole_start and hole_end indicate the full pages within the hole.
756 hole_start = round_up(offset, hpage_size);
757 hole_end = round_down(offset + len, hpage_size);
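/*
 * Worked example (illustration only): with 2MB huge pages, punching
 * offset = 3MB, len = 6MB gives hole_start = 4MB and hole_end = 8MB.
 * The partial ranges [3MB, 4MB) and [8MB, 9MB) are zeroed in place below,
 * and only the full pages in [4MB, 8MB) are removed from the file.
 */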
761 /* protected by i_rwsem */
762 if (info->seals & (F_SEAL_WRITE | F_SEAL_FUTURE_WRITE)) {
767 i_mmap_lock_write(mapping);
769 /* If range starts before first full page, zero partial page. */
770 if (offset < hole_start)
771 hugetlbfs_zero_partial_page(h, mapping,
772 offset, min(offset + len, hole_start));
774 /* Unmap users of full pages in the hole. */
775 if (hole_end > hole_start) {
776 if (!RB_EMPTY_ROOT(&mapping->i_mmap.rb_root))
777 hugetlb_vmdelete_list(&mapping->i_mmap,
778 hole_start >> PAGE_SHIFT,
779 hole_end >> PAGE_SHIFT, 0);
782 /* If range extends beyond last full page, zero partial page. */
783 if ((offset + len) > hole_end && (offset + len) > hole_start)
784 hugetlbfs_zero_partial_page(h, mapping,
785 hole_end, offset + len);
787 i_mmap_unlock_write(mapping);
789 /* Remove full pages from the file. */
790 if (hole_end > hole_start)
791 remove_inode_hugepages(inode, hole_start, hole_end);
798 static long hugetlbfs_fallocate(struct file *file, int mode, loff_t offset,
801 struct inode *inode = file_inode(file);
802 struct hugetlbfs_inode_info *info = HUGETLBFS_I(inode);
803 struct address_space *mapping = inode->i_mapping;
804 struct hstate *h = hstate_inode(inode);
805 struct vm_area_struct pseudo_vma;
806 struct mm_struct *mm = current->mm;
807 loff_t hpage_size = huge_page_size(h);
808 unsigned long hpage_shift = huge_page_shift(h);
809 pgoff_t start, index, end;
813 if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
816 if (mode & FALLOC_FL_PUNCH_HOLE)
817 return hugetlbfs_punch_hole(inode, offset, len);
820 * Default preallocate case.
821 * For this range, start is rounded down and end is rounded up
822 * as well as being converted to page offsets.
824 start = offset >> hpage_shift;
825 end = (offset + len + hpage_size - 1) >> hpage_shift;
829 /* We need to check rlimit even when FALLOC_FL_KEEP_SIZE */
830 error = inode_newsize_ok(inode, offset + len);
834 if ((info->seals & F_SEAL_GROW) && offset + len > inode->i_size) {
840 * Initialize a pseudo vma as this is required by the huge page
841 * allocation routines.
843 vma_init(&pseudo_vma, mm);
844 vm_flags_init(&pseudo_vma, VM_HUGETLB | VM_MAYSHARE | VM_SHARED);
845 pseudo_vma.vm_file = file;
847 for (index = start; index < end; index++) {
849 * This is supposed to be the vaddr where the page is being
850 * faulted in, but we have no vaddr here.
858 * fallocate(2) manpage permits EINTR; we may have been
859 * interrupted because we are using up too much memory.
861 if (signal_pending(current)) {
866 /* addr is the offset within the file (zero based) */
867 addr = index * hpage_size;
869 /* mutex taken here, fault path and hole punch */
870 hash = hugetlb_fault_mutex_hash(mapping, index);
871 mutex_lock(&hugetlb_fault_mutex_table[hash]);
873 /* See if already present in mapping to avoid alloc/free */
874 folio = filemap_get_folio(mapping, index << huge_page_order(h));
875 if (!IS_ERR(folio)) {
877 mutex_unlock(&hugetlb_fault_mutex_table[hash]);
882 * Allocate folio without setting the avoid_reserve argument.
883 * There certainly are no reserves associated with the
884 * pseudo_vma. However, there could be shared mappings with
885 * reserves for the file at the inode level. If we fallocate
886 * folios in these areas, we need to consume the reserves
887 * to keep reservation accounting consistent.
889 folio = alloc_hugetlb_folio(&pseudo_vma, addr, 0);
891 mutex_unlock(&hugetlb_fault_mutex_table[hash]);
892 error = PTR_ERR(folio);
895 clear_huge_page(&folio->page, addr, pages_per_huge_page(h));
896 __folio_mark_uptodate(folio);
897 error = hugetlb_add_to_page_cache(folio, mapping, index);
898 if (unlikely(error)) {
899 restore_reserve_on_error(h, &pseudo_vma, addr, folio);
901 mutex_unlock(&hugetlb_fault_mutex_table[hash]);
905 mutex_unlock(&hugetlb_fault_mutex_table[hash]);
907 folio_set_hugetlb_migratable(folio);
909 * folio_unlock() because the folio was locked by hugetlb_add_to_page_cache();
910 * folio_put() to drop the reference taken by alloc_hugetlb_folio().
916 if (!(mode & FALLOC_FL_KEEP_SIZE) && offset + len > inode->i_size)
917 i_size_write(inode, offset + len);
918 inode_set_ctime_current(inode);
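/*
 * Userspace usage sketch (illustrative, not part of the original source):
 * preallocating huge pages so that later faults find the folios already
 * instantiated, assuming a 2MB hstate mount:
 *
 *   int fd = open("/mnt/huge/buf", O_CREAT | O_RDWR, 0600);
 *   if (fallocate(fd, 0, 0, 16 * 2 * 1024 * 1024))     // 16 huge pages
 *           perror("fallocate");
 *
 * Passing FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE instead releases the
 * pages backing a range, handled by hugetlbfs_punch_hole() above.
 */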
924 static int hugetlbfs_setattr(struct mnt_idmap *idmap,
925 struct dentry *dentry, struct iattr *attr)
927 struct inode *inode = d_inode(dentry);
928 struct hstate *h = hstate_inode(inode);
930 unsigned int ia_valid = attr->ia_valid;
931 struct hugetlbfs_inode_info *info = HUGETLBFS_I(inode);
933 error = setattr_prepare(idmap, dentry, attr);
937 if (ia_valid & ATTR_SIZE) {
938 loff_t oldsize = inode->i_size;
939 loff_t newsize = attr->ia_size;
941 if (newsize & ~huge_page_mask(h))
943 /* protected by i_rwsem */
944 if ((newsize < oldsize && (info->seals & F_SEAL_SHRINK)) ||
945 (newsize > oldsize && (info->seals & F_SEAL_GROW)))
947 hugetlb_vmtruncate(inode, newsize);
950 setattr_copy(idmap, inode, attr);
951 mark_inode_dirty(inode);
955 static struct inode *hugetlbfs_get_root(struct super_block *sb,
956 struct hugetlbfs_fs_context *ctx)
960 inode = new_inode(sb);
962 inode->i_ino = get_next_ino();
963 inode->i_mode = S_IFDIR | ctx->mode;
964 inode->i_uid = ctx->uid;
965 inode->i_gid = ctx->gid;
966 simple_inode_init_ts(inode);
967 inode->i_op = &hugetlbfs_dir_inode_operations;
968 inode->i_fop = &simple_dir_operations;
969 /* directory inodes start off with i_nlink == 2 (for "." entry) */
971 lockdep_annotate_inode_mutex_key(inode);
977 * Hugetlbfs is not reclaimable; therefore its i_mmap_rwsem will never
978 * be taken from reclaim -- unlike regular filesystems. This needs an
979 * annotation because huge_pmd_share() does an allocation under hugetlb's i_mmap_rwsem.
982 static struct lock_class_key hugetlbfs_i_mmap_rwsem_key;
984 static struct inode *hugetlbfs_get_inode(struct super_block *sb,
985 struct mnt_idmap *idmap,
987 umode_t mode, dev_t dev)
990 struct resv_map *resv_map = NULL;
993 * Reserve maps are only needed for inodes that can have associated page allocations.
996 if (S_ISREG(mode) || S_ISLNK(mode)) {
997 resv_map = resv_map_alloc();
1002 inode = new_inode(sb);
1004 struct hugetlbfs_inode_info *info = HUGETLBFS_I(inode);
1006 inode->i_ino = get_next_ino();
1007 inode_init_owner(idmap, inode, dir, mode);
1008 lockdep_set_class(&inode->i_mapping->i_mmap_rwsem,
1009 &hugetlbfs_i_mmap_rwsem_key);
1010 inode->i_mapping->a_ops = &hugetlbfs_aops;
1011 simple_inode_init_ts(inode);
1012 inode->i_mapping->i_private_data = resv_map;
1013 info->seals = F_SEAL_SEAL;
1014 switch (mode & S_IFMT) {
1016 init_special_inode(inode, mode, dev);
1019 inode->i_op = &hugetlbfs_inode_operations;
1020 inode->i_fop = &hugetlbfs_file_operations;
1023 inode->i_op = &hugetlbfs_dir_inode_operations;
1024 inode->i_fop = &simple_dir_operations;
1026 /* directory inodes start off with i_nlink == 2 (for "." entry) */
1030 inode->i_op = &page_symlink_inode_operations;
1031 inode_nohighmem(inode);
1034 lockdep_annotate_inode_mutex_key(inode);
1037 kref_put(&resv_map->refs, resv_map_release);
1044 * File creation. Allocate an inode, and we're done..
1046 static int hugetlbfs_mknod(struct mnt_idmap *idmap, struct inode *dir,
1047 struct dentry *dentry, umode_t mode, dev_t dev)
1049 struct inode *inode;
1051 inode = hugetlbfs_get_inode(dir->i_sb, idmap, dir, mode, dev);
1054 inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir));
1055 d_instantiate(dentry, inode);
1056 dget(dentry);/* Extra count - pin the dentry in core */
1060 static int hugetlbfs_mkdir(struct mnt_idmap *idmap, struct inode *dir,
1061 struct dentry *dentry, umode_t mode)
1063 int retval = hugetlbfs_mknod(idmap, dir, dentry,
1070 static int hugetlbfs_create(struct mnt_idmap *idmap,
1071 struct inode *dir, struct dentry *dentry,
1072 umode_t mode, bool excl)
1074 return hugetlbfs_mknod(idmap, dir, dentry, mode | S_IFREG, 0);
1077 static int hugetlbfs_tmpfile(struct mnt_idmap *idmap,
1078 struct inode *dir, struct file *file,
1081 struct inode *inode;
1083 inode = hugetlbfs_get_inode(dir->i_sb, idmap, dir, mode | S_IFREG, 0);
1086 inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir));
1087 d_tmpfile(file, inode);
1088 return finish_open_simple(file, 0);
1091 static int hugetlbfs_symlink(struct mnt_idmap *idmap,
1092 struct inode *dir, struct dentry *dentry,
1093 const char *symname)
1095 const umode_t mode = S_IFLNK|S_IRWXUGO;
1096 struct inode *inode;
1097 int error = -ENOSPC;
1099 inode = hugetlbfs_get_inode(dir->i_sb, idmap, dir, mode, 0);
1101 int l = strlen(symname)+1;
1102 error = page_symlink(inode, symname, l);
1104 d_instantiate(dentry, inode);
1109 inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir));
1114 #ifdef CONFIG_MIGRATION
1115 static int hugetlbfs_migrate_folio(struct address_space *mapping,
1116 struct folio *dst, struct folio *src,
1117 enum migrate_mode mode)
1121 rc = migrate_huge_page_move_mapping(mapping, dst, src);
1122 if (rc != MIGRATEPAGE_SUCCESS)
1125 if (hugetlb_folio_subpool(src)) {
1126 hugetlb_set_folio_subpool(dst,
1127 hugetlb_folio_subpool(src));
1128 hugetlb_set_folio_subpool(src, NULL);
1131 if (mode != MIGRATE_SYNC_NO_COPY)
1132 folio_migrate_copy(dst, src);
1134 folio_migrate_flags(dst, src);
1136 return MIGRATEPAGE_SUCCESS;
1139 #define hugetlbfs_migrate_folio NULL
1142 static int hugetlbfs_error_remove_folio(struct address_space *mapping,
1143 struct folio *folio)
1149 * Display the mount options in /proc/mounts.
1151 static int hugetlbfs_show_options(struct seq_file *m, struct dentry *root)
1153 struct hugetlbfs_sb_info *sbinfo = HUGETLBFS_SB(root->d_sb);
1154 struct hugepage_subpool *spool = sbinfo->spool;
1155 unsigned long hpage_size = huge_page_size(sbinfo->hstate);
1156 unsigned hpage_shift = huge_page_shift(sbinfo->hstate);
1159 if (!uid_eq(sbinfo->uid, GLOBAL_ROOT_UID))
1160 seq_printf(m, ",uid=%u",
1161 from_kuid_munged(&init_user_ns, sbinfo->uid));
1162 if (!gid_eq(sbinfo->gid, GLOBAL_ROOT_GID))
1163 seq_printf(m, ",gid=%u",
1164 from_kgid_munged(&init_user_ns, sbinfo->gid));
1165 if (sbinfo->mode != 0755)
1166 seq_printf(m, ",mode=%o", sbinfo->mode);
1167 if (sbinfo->max_inodes != -1)
1168 seq_printf(m, ",nr_inodes=%lu", sbinfo->max_inodes);
1172 if (hpage_size >= 1024) {
1176 seq_printf(m, ",pagesize=%lu%c", hpage_size, mod);
1178 if (spool->max_hpages != -1)
1179 seq_printf(m, ",size=%llu",
1180 (unsigned long long)spool->max_hpages << hpage_shift);
1181 if (spool->min_hpages != -1)
1182 seq_printf(m, ",min_size=%llu",
1183 (unsigned long long)spool->min_hpages << hpage_shift);
1188 static int hugetlbfs_statfs(struct dentry *dentry, struct kstatfs *buf)
1190 struct hugetlbfs_sb_info *sbinfo = HUGETLBFS_SB(dentry->d_sb);
1191 struct hstate *h = hstate_inode(d_inode(dentry));
1192 u64 id = huge_encode_dev(dentry->d_sb->s_dev);
1194 buf->f_fsid = u64_to_fsid(id);
1195 buf->f_type = HUGETLBFS_MAGIC;
1196 buf->f_bsize = huge_page_size(h);
1198 spin_lock(&sbinfo->stat_lock);
1199 /* If no limits set, just report 0 or -1 for max/free/used
1200 * blocks, like simple_statfs() */
1201 if (sbinfo->spool) {
1204 spin_lock_irq(&sbinfo->spool->lock);
1205 buf->f_blocks = sbinfo->spool->max_hpages;
1206 free_pages = sbinfo->spool->max_hpages
1207 - sbinfo->spool->used_hpages;
1208 buf->f_bavail = buf->f_bfree = free_pages;
1209 spin_unlock_irq(&sbinfo->spool->lock);
1210 buf->f_files = sbinfo->max_inodes;
1211 buf->f_ffree = sbinfo->free_inodes;
1213 spin_unlock(&sbinfo->stat_lock);
1215 buf->f_namelen = NAME_MAX;
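/*
 * Reader's note (not in the original source): because f_bsize is set to the
 * huge page size, statfs(2) callers and tools such as df see block counts
 * in huge-page units, e.g. f_bsize = 2097152 on a 2MB hstate mount.
 */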
1219 static void hugetlbfs_put_super(struct super_block *sb)
1221 struct hugetlbfs_sb_info *sbi = HUGETLBFS_SB(sb);
1224 sb->s_fs_info = NULL;
1227 hugepage_put_subpool(sbi->spool);
1233 static inline int hugetlbfs_dec_free_inodes(struct hugetlbfs_sb_info *sbinfo)
1235 if (sbinfo->free_inodes >= 0) {
1236 spin_lock(&sbinfo->stat_lock);
1237 if (unlikely(!sbinfo->free_inodes)) {
1238 spin_unlock(&sbinfo->stat_lock);
1241 sbinfo->free_inodes--;
1242 spin_unlock(&sbinfo->stat_lock);
1248 static void hugetlbfs_inc_free_inodes(struct hugetlbfs_sb_info *sbinfo)
1250 if (sbinfo->free_inodes >= 0) {
1251 spin_lock(&sbinfo->stat_lock);
1252 sbinfo->free_inodes++;
1253 spin_unlock(&sbinfo->stat_lock);
1258 static struct kmem_cache *hugetlbfs_inode_cachep;
1260 static struct inode *hugetlbfs_alloc_inode(struct super_block *sb)
1262 struct hugetlbfs_sb_info *sbinfo = HUGETLBFS_SB(sb);
1263 struct hugetlbfs_inode_info *p;
1265 if (unlikely(!hugetlbfs_dec_free_inodes(sbinfo)))
1267 p = alloc_inode_sb(sb, hugetlbfs_inode_cachep, GFP_KERNEL);
1269 hugetlbfs_inc_free_inodes(sbinfo);
1272 return &p->vfs_inode;
1275 static void hugetlbfs_free_inode(struct inode *inode)
1277 kmem_cache_free(hugetlbfs_inode_cachep, HUGETLBFS_I(inode));
1280 static void hugetlbfs_destroy_inode(struct inode *inode)
1282 hugetlbfs_inc_free_inodes(HUGETLBFS_SB(inode->i_sb));
1285 static const struct address_space_operations hugetlbfs_aops = {
1286 .write_begin = hugetlbfs_write_begin,
1287 .write_end = hugetlbfs_write_end,
1288 .dirty_folio = noop_dirty_folio,
1289 .migrate_folio = hugetlbfs_migrate_folio,
1290 .error_remove_folio = hugetlbfs_error_remove_folio,
1294 static void init_once(void *foo)
1296 struct hugetlbfs_inode_info *ei = foo;
1298 inode_init_once(&ei->vfs_inode);
1301 static const struct file_operations hugetlbfs_file_operations = {
1302 .read_iter = hugetlbfs_read_iter,
1303 .mmap = hugetlbfs_file_mmap,
1304 .fsync = noop_fsync,
1305 .get_unmapped_area = hugetlb_get_unmapped_area,
1306 .llseek = default_llseek,
1307 .fallocate = hugetlbfs_fallocate,
1308 .fop_flags = FOP_HUGE_PAGES,
1311 static const struct inode_operations hugetlbfs_dir_inode_operations = {
1312 .create = hugetlbfs_create,
1313 .lookup = simple_lookup,
1314 .link = simple_link,
1315 .unlink = simple_unlink,
1316 .symlink = hugetlbfs_symlink,
1317 .mkdir = hugetlbfs_mkdir,
1318 .rmdir = simple_rmdir,
1319 .mknod = hugetlbfs_mknod,
1320 .rename = simple_rename,
1321 .setattr = hugetlbfs_setattr,
1322 .tmpfile = hugetlbfs_tmpfile,
1325 static const struct inode_operations hugetlbfs_inode_operations = {
1326 .setattr = hugetlbfs_setattr,
1329 static const struct super_operations hugetlbfs_ops = {
1330 .alloc_inode = hugetlbfs_alloc_inode,
1331 .free_inode = hugetlbfs_free_inode,
1332 .destroy_inode = hugetlbfs_destroy_inode,
1333 .evict_inode = hugetlbfs_evict_inode,
1334 .statfs = hugetlbfs_statfs,
1335 .put_super = hugetlbfs_put_super,
1336 .show_options = hugetlbfs_show_options,
1340 * Convert size option passed from command line to number of huge pages
1341 * in the pool specified by hstate. Size option could be in bytes
1342 * (val_type == SIZE_STD) or percentage of the pool (val_type == SIZE_PERCENT).
1345 hugetlbfs_size_to_hpages(struct hstate *h, unsigned long long size_opt,
1346 enum hugetlbfs_size_type val_type)
1348 if (val_type == NO_SIZE)
1351 if (val_type == SIZE_PERCENT) {
1352 size_opt <<= huge_page_shift(h);
1353 size_opt *= h->max_huge_pages;
1354 do_div(size_opt, 100);
1357 size_opt >>= huge_page_shift(h);
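/*
 * Worked example (illustration only): for a 2MB hstate whose pool has
 * max_huge_pages = 1024, "size=50%" reaches this function as size_opt = 50
 * with val_type == SIZE_PERCENT. The shift up, multiply and do_div() give
 * 50 * 1024 / 100 = 512 huge pages once the final shift down cancels the
 * earlier shift up. A plain "size=1G" takes the SIZE_STD path and becomes
 * 1G >> 21 = 512 huge pages.
 */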
1362 * Parse one mount parameter.
1364 static int hugetlbfs_parse_param(struct fs_context *fc, struct fs_parameter *param)
1366 struct hugetlbfs_fs_context *ctx = fc->fs_private;
1367 struct fs_parse_result result;
1373 opt = fs_parse(fc, hugetlb_fs_parameters, param, &result);
1379 ctx->uid = make_kuid(current_user_ns(), result.uint_32);
1380 if (!uid_valid(ctx->uid))
1385 ctx->gid = make_kgid(current_user_ns(), result.uint_32);
1386 if (!gid_valid(ctx->gid))
1391 ctx->mode = result.uint_32 & 01777U;
1395 /* memparse() will accept a K/M/G without a digit */
1396 if (!param->string || !isdigit(param->string[0]))
1398 ctx->max_size_opt = memparse(param->string, &rest);
1399 ctx->max_val_type = SIZE_STD;
1401 ctx->max_val_type = SIZE_PERCENT;
1405 /* memparse() will accept a K/M/G without a digit */
1406 if (!param->string || !isdigit(param->string[0]))
1408 ctx->nr_inodes = memparse(param->string, &rest);
1412 ps = memparse(param->string, &rest);
1413 h = size_to_hstate(ps);
1415 pr_err("Unsupported page size %lu MB\n", ps / SZ_1M);
1422 /* memparse() will accept a K/M/G without a digit */
1423 if (!param->string || !isdigit(param->string[0]))
1425 ctx->min_size_opt = memparse(param->string, &rest);
1426 ctx->min_val_type = SIZE_STD;
1428 ctx->min_val_type = SIZE_PERCENT;
1436 return invalfc(fc, "Bad value '%s' for mount option '%s'\n",
1437 param->string, param->key);
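/*
 * Example (illustrative, not part of the original source): a mount line
 * exercising most of the options parsed above:
 *
 *   mount -t hugetlbfs -o uid=1000,gid=1000,mode=0700,pagesize=2M,size=50%,min_size=64M,nr_inodes=64 none /mnt/huge
 *
 * "pagesize" selects the hstate, "size"/"min_size" take byte values (with
 * K/M/G suffixes via memparse()) or a percentage of that hstate's pool,
 * and "mode" is parsed as octal.
 */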
1441 * Validate the parsed options.
1443 static int hugetlbfs_validate(struct fs_context *fc)
1445 struct hugetlbfs_fs_context *ctx = fc->fs_private;
1448 * Use huge page pool size (in hstate) to convert the size
1449 * options to number of huge pages. If NO_SIZE, -1 is returned.
1451 ctx->max_hpages = hugetlbfs_size_to_hpages(ctx->hstate,
1454 ctx->min_hpages = hugetlbfs_size_to_hpages(ctx->hstate,
1459 * If max_size was specified, then min_size must be smaller
1461 if (ctx->max_val_type > NO_SIZE &&
1462 ctx->min_hpages > ctx->max_hpages) {
1463 pr_err("Minimum size can not be greater than maximum size\n");
1471 hugetlbfs_fill_super(struct super_block *sb, struct fs_context *fc)
1473 struct hugetlbfs_fs_context *ctx = fc->fs_private;
1474 struct hugetlbfs_sb_info *sbinfo;
1476 sbinfo = kmalloc(sizeof(struct hugetlbfs_sb_info), GFP_KERNEL);
1479 sb->s_fs_info = sbinfo;
1480 spin_lock_init(&sbinfo->stat_lock);
1481 sbinfo->hstate = ctx->hstate;
1482 sbinfo->max_inodes = ctx->nr_inodes;
1483 sbinfo->free_inodes = ctx->nr_inodes;
1484 sbinfo->spool = NULL;
1485 sbinfo->uid = ctx->uid;
1486 sbinfo->gid = ctx->gid;
1487 sbinfo->mode = ctx->mode;
1490 * Allocate and initialize subpool if maximum or minimum size is
1491 * specified. Any needed reservations (for minimum size) are taken
1492 * when the subpool is created.
1494 if (ctx->max_hpages != -1 || ctx->min_hpages != -1) {
1495 sbinfo->spool = hugepage_new_subpool(ctx->hstate,
1501 sb->s_maxbytes = MAX_LFS_FILESIZE;
1502 sb->s_blocksize = huge_page_size(ctx->hstate);
1503 sb->s_blocksize_bits = huge_page_shift(ctx->hstate);
1504 sb->s_magic = HUGETLBFS_MAGIC;
1505 sb->s_op = &hugetlbfs_ops;
1506 sb->s_time_gran = 1;
1509 * Due to the special and limited functionality of hugetlbfs, it does
1510 * not work well as a stacking filesystem.
1512 sb->s_stack_depth = FILESYSTEM_MAX_STACK_DEPTH;
1513 sb->s_root = d_make_root(hugetlbfs_get_root(sb, ctx));
1518 kfree(sbinfo->spool);
1523 static int hugetlbfs_get_tree(struct fs_context *fc)
1525 int err = hugetlbfs_validate(fc);
1528 return get_tree_nodev(fc, hugetlbfs_fill_super);
1531 static void hugetlbfs_fs_context_free(struct fs_context *fc)
1533 kfree(fc->fs_private);
1536 static const struct fs_context_operations hugetlbfs_fs_context_ops = {
1537 .free = hugetlbfs_fs_context_free,
1538 .parse_param = hugetlbfs_parse_param,
1539 .get_tree = hugetlbfs_get_tree,
1542 static int hugetlbfs_init_fs_context(struct fs_context *fc)
1544 struct hugetlbfs_fs_context *ctx;
1546 ctx = kzalloc(sizeof(struct hugetlbfs_fs_context), GFP_KERNEL);
1550 ctx->max_hpages = -1; /* No limit on size by default */
1551 ctx->nr_inodes = -1; /* No limit on number of inodes by default */
1552 ctx->uid = current_fsuid();
1553 ctx->gid = current_fsgid();
1555 ctx->hstate = &default_hstate;
1556 ctx->min_hpages = -1; /* No default minimum size */
1557 ctx->max_val_type = NO_SIZE;
1558 ctx->min_val_type = NO_SIZE;
1559 fc->fs_private = ctx;
1560 fc->ops = &hugetlbfs_fs_context_ops;
1564 static struct file_system_type hugetlbfs_fs_type = {
1565 .name = "hugetlbfs",
1566 .init_fs_context = hugetlbfs_init_fs_context,
1567 .parameters = hugetlb_fs_parameters,
1568 .kill_sb = kill_litter_super,
1569 .fs_flags = FS_ALLOW_IDMAP,
1572 static struct vfsmount *hugetlbfs_vfsmount[HUGE_MAX_HSTATE];
1574 static int can_do_hugetlb_shm(void)
1577 shm_group = make_kgid(&init_user_ns, sysctl_hugetlb_shm_group);
1578 return capable(CAP_IPC_LOCK) || in_group_p(shm_group);
1581 static int get_hstate_idx(int page_size_log)
1583 struct hstate *h = hstate_sizelog(page_size_log);
1587 return hstate_index(h);
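/*
 * Example (illustration only): callers such as shmget(SHM_HUGETLB) and
 * memfd_create(MFD_HUGETLB) encode the huge page size as log2 of the size,
 * so page_size_log = 21 selects the 2MB hstate and page_size_log = 30 the
 * 1GB hstate (when configured); page_size_log = 0 falls back to the
 * default hstate via hstate_sizelog().
 */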
1591 * Note that size should be aligned to the proper hugepage size by the caller;
1592 * otherwise hugetlb_reserve_pages() reserves one fewer huge page than intended.
1594 struct file *hugetlb_file_setup(const char *name, size_t size,
1595 vm_flags_t acctflag, int creat_flags,
1598 struct inode *inode;
1599 struct vfsmount *mnt;
1603 hstate_idx = get_hstate_idx(page_size_log);
1605 return ERR_PTR(-ENODEV);
1607 mnt = hugetlbfs_vfsmount[hstate_idx];
1609 return ERR_PTR(-ENOENT);
1611 if (creat_flags == HUGETLB_SHMFS_INODE && !can_do_hugetlb_shm()) {
1612 struct ucounts *ucounts = current_ucounts();
1614 if (user_shm_lock(size, ucounts)) {
1615 pr_warn_once("%s (%d): Using mlock ulimits for SHM_HUGETLB is obsolete\n",
1616 current->comm, current->pid);
1617 user_shm_unlock(size, ucounts);
1619 return ERR_PTR(-EPERM);
1622 file = ERR_PTR(-ENOSPC);
1623 /* hugetlbfs_vfsmount[] mounts do not use idmapped mounts. */
1624 inode = hugetlbfs_get_inode(mnt->mnt_sb, &nop_mnt_idmap, NULL,
1625 S_IFREG | S_IRWXUGO, 0);
1628 if (creat_flags == HUGETLB_SHMFS_INODE)
1629 inode->i_flags |= S_PRIVATE;
1631 inode->i_size = size;
1634 if (!hugetlb_reserve_pages(inode, 0,
1635 size >> huge_page_shift(hstate_inode(inode)), NULL,
1637 file = ERR_PTR(-ENOMEM);
1639 file = alloc_file_pseudo(inode, mnt, name, O_RDWR,
1640 &hugetlbfs_file_operations);
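/*
 * Usage sketch (illustrative): this is the backing store for SysV shared
 * memory with SHM_HUGETLB, created from ipc/shm.c roughly as
 *
 *   file = hugetlb_file_setup(name, hugetlb_size, acctflag,
 *                             HUGETLB_SHMFS_INODE, page_size_log);
 *
 * The size passed in must already be huge page aligned, as the comment
 * above this function notes.
 */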
1649 static struct vfsmount *__init mount_one_hugetlbfs(struct hstate *h)
1651 struct fs_context *fc;
1652 struct vfsmount *mnt;
1654 fc = fs_context_for_mount(&hugetlbfs_fs_type, SB_KERNMOUNT);
1658 struct hugetlbfs_fs_context *ctx = fc->fs_private;
1664 pr_err("Cannot mount internal hugetlbfs for page size %luK",
1665 huge_page_size(h) / SZ_1K);
1669 static int __init init_hugetlbfs_fs(void)
1671 struct vfsmount *mnt;
1676 if (!hugepages_supported()) {
1677 pr_info("disabling because there are no supported hugepage sizes\n");
1682 hugetlbfs_inode_cachep = kmem_cache_create("hugetlbfs_inode_cache",
1683 sizeof(struct hugetlbfs_inode_info),
1684 0, SLAB_ACCOUNT, init_once);
1685 if (hugetlbfs_inode_cachep == NULL)
1688 error = register_filesystem(&hugetlbfs_fs_type);
1692 /* default hstate mount is required */
1693 mnt = mount_one_hugetlbfs(&default_hstate);
1695 error = PTR_ERR(mnt);
1698 hugetlbfs_vfsmount[default_hstate_idx] = mnt;
1700 /* other hstates are optional */
1702 for_each_hstate(h) {
1703 if (i == default_hstate_idx) {
1708 mnt = mount_one_hugetlbfs(h);
1710 hugetlbfs_vfsmount[i] = NULL;
1712 hugetlbfs_vfsmount[i] = mnt;
1719 (void)unregister_filesystem(&hugetlbfs_fs_type);
1721 kmem_cache_destroy(hugetlbfs_inode_cachep);
1725 fs_initcall(init_hugetlbfs_fs)