/*
 * hugetlbpage-backed filesystem.  Based on ramfs.
 *
 * Nadia Yvette Chambers, 2002
 *
 * Copyright (C) 2002 Linus Torvalds.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/thread_info.h>
#include <asm/current.h>
#include <linux/sched/signal.h>		/* remove ASAP */
#include <linux/falloc.h>
#include <linux/fs.h>
#include <linux/mount.h>
#include <linux/file.h>
#include <linux/kernel.h>
#include <linux/writeback.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/capability.h>
#include <linux/ctype.h>
#include <linux/backing-dev.h>
#include <linux/hugetlb.h>
#include <linux/pagevec.h>
#include <linux/parser.h>
#include <linux/mman.h>
#include <linux/slab.h>
#include <linux/dnotify.h>
#include <linux/statfs.h>
#include <linux/security.h>
#include <linux/magic.h>
#include <linux/migrate.h>
#include <linux/uio.h>

#include <linux/uaccess.h>
static const struct super_operations hugetlbfs_ops;
static const struct address_space_operations hugetlbfs_aops;
const struct file_operations hugetlbfs_file_operations;
static const struct inode_operations hugetlbfs_dir_inode_operations;
static const struct inode_operations hugetlbfs_inode_operations;
struct hugetlbfs_config {
	struct hstate		*hstate;
	long			max_hpages;
	long			nr_inodes;
	long			min_hpages;
	kuid_t			uid;
	kgid_t			gid;
	umode_t			mode;
};

int sysctl_hugetlb_shm_group;
enum {
	Opt_size, Opt_nr_inodes,
	Opt_mode, Opt_uid, Opt_gid,
	Opt_pagesize, Opt_min_size,
	Opt_err,
};
static const match_table_t tokens = {
	{Opt_size,	"size=%s"},
	{Opt_nr_inodes,	"nr_inodes=%s"},
	{Opt_mode,	"mode=%o"},
	{Opt_uid,	"uid=%u"},
	{Opt_gid,	"gid=%u"},
	{Opt_pagesize,	"pagesize=%s"},
	{Opt_min_size,	"min_size=%s"},
	{Opt_err,	NULL},
};
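/*
 * Example (illustrative, not from this file): these options arrive via the
 * mount command line, e.g.
 *
 *	mount -t hugetlbfs -o pagesize=2M,size=1G,min_size=512M none /mnt/huge
 *
 * size= and min_size= accept byte values (K/M/G suffixes allowed) or a
 * percentage of the huge page pool; mode= is parsed as octal.
 */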
#ifdef CONFIG_NUMA
static inline void hugetlb_set_vma_policy(struct vm_area_struct *vma,
					struct inode *inode, pgoff_t index)
{
	vma->vm_policy = mpol_shared_policy_lookup(&HUGETLBFS_I(inode)->policy,
							index);
}

static inline void hugetlb_drop_vma_policy(struct vm_area_struct *vma)
{
	mpol_cond_put(vma->vm_policy);
}
#else
static inline void hugetlb_set_vma_policy(struct vm_area_struct *vma,
					struct inode *inode, pgoff_t index)
{
}

static inline void hugetlb_drop_vma_policy(struct vm_area_struct *vma)
{
}
#endif
static void huge_pagevec_release(struct pagevec *pvec)
{
	int i;

	for (i = 0; i < pagevec_count(pvec); ++i)
		put_page(pvec->pages[i]);

	pagevec_reinit(pvec);
}
/*
 * Mask used when checking the page offset value passed in via system
 * calls.  This value will be converted to a loff_t which is signed.
 * Therefore, we want to check the upper PAGE_SHIFT + 1 bits of the
 * value.  The extra bit (- 1 in the shift value) is to take the sign
 * bit into account.
 */
#define PGOFF_LOFFT_MAX \
	(((1UL << (PAGE_SHIFT + 1)) - 1) << (BITS_PER_LONG - (PAGE_SHIFT + 1)))
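/*
 * Worked example (illustrative): on a 64-bit build with 4 KiB base pages,
 * PAGE_SHIFT == 12 and BITS_PER_LONG == 64, so this mask is 0x1fff << 51.
 * A vm_pgoff with any of its top 13 bits set would overflow a signed
 * loff_t once shifted left by PAGE_SHIFT.
 */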
static int hugetlbfs_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct inode *inode = file_inode(file);
	loff_t len, vma_len;
	int ret = 0;
	struct hstate *h = hstate_file(file);

	/*
	 * vma address alignment (but not the pgoff alignment) has
	 * already been checked by prepare_hugepage_range.  If you add
	 * any error returns here, do so after setting VM_HUGETLB, so
	 * is_vm_hugetlb_page tests below unmap_region go the right
	 * way when do_mmap_pgoff unwinds (may be important on powerpc
	 * and ia64).
	 */
	vma->vm_flags |= VM_HUGETLB | VM_DONTEXPAND;
	vma->vm_ops = &hugetlb_vm_ops;

	/*
	 * page based offset in vm_pgoff could be sufficiently large to
	 * overflow a loff_t when converted to byte offset.
	 */
	if (vma->vm_pgoff & PGOFF_LOFFT_MAX)
		return -EINVAL;

	/* must be huge page aligned */
	if (vma->vm_pgoff & (~huge_page_mask(h) >> PAGE_SHIFT))
		return -EINVAL;

	vma_len = (loff_t)(vma->vm_end - vma->vm_start);
	len = vma_len + ((loff_t)vma->vm_pgoff << PAGE_SHIFT);
	/* check for overflow */
	if (len < vma_len)
		return -EINVAL;

	inode_lock(inode);
	file_accessed(file);

	ret = -ENOMEM;
	if (hugetlb_reserve_pages(inode,
				vma->vm_pgoff >> huge_page_order(h),
				len >> huge_page_shift(h), vma,
				vma->vm_flags))
		goto out;

	ret = 0;
	if (vma->vm_flags & VM_WRITE && inode->i_size < len)
		i_size_write(inode, len);
out:
	inode_unlock(inode);

	return ret;
}
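/*
 * Usage sketch (userspace, illustrative -- not part of this file):
 *
 *	fd = open("/dev/hugepages/buf", O_CREAT | O_RDWR, 0600);
 *	p = mmap(NULL, 4UL << 20, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 *
 * With 2 MiB huge pages, both the length and the file offset must be
 * 2 MiB multiples, and this mmap() reserves two huge pages up front.
 */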
/*
 * Called under down_write(mmap_sem).
 */

#ifndef HAVE_ARCH_HUGETLB_UNMAPPED_AREA
static unsigned long
hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	struct hstate *h = hstate_file(file);
	struct vm_unmapped_area_info info;

	if (len & ~huge_page_mask(h))
		return -EINVAL;
	if (len > TASK_SIZE)
		return -ENOMEM;

	if (flags & MAP_FIXED) {
		if (prepare_hugepage_range(file, addr, len))
			return -EINVAL;
		return addr;
	}

	if (addr) {
		addr = ALIGN(addr, huge_page_size(h));
		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr &&
		    (!vma || addr + len <= vm_start_gap(vma)))
			return addr;
	}

	info.flags = 0;
	info.length = len;
	info.low_limit = TASK_UNMAPPED_BASE;
	info.high_limit = TASK_SIZE;
	info.align_mask = PAGE_MASK & ~huge_page_mask(h);
	info.align_offset = 0;
	return vm_unmapped_area(&info);
}
#endif
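/*
 * Illustrative: with 2 MiB huge pages, align_mask above is
 * PAGE_MASK & ~huge_page_mask(h) == 0x1ff000, so vm_unmapped_area()
 * only returns addresses whose low 21 bits are zero.
 */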
static size_t
hugetlbfs_read_actor(struct page *page, unsigned long offset,
			struct iov_iter *to, unsigned long size)
{
	size_t copied = 0;
	int i, chunksize;

	/* Find which 4k chunk and offset within that chunk */
	i = offset >> PAGE_SHIFT;
	offset = offset & ~PAGE_MASK;

	while (size) {
		size_t n;
		chunksize = PAGE_SIZE;
		if (offset)
			chunksize -= offset;
		if (chunksize > size)
			chunksize = size;
		n = copy_page_to_iter(&page[i], offset, chunksize, to);
		copied += n;
		if (n != chunksize)
			return copied;
		offset = 0;
		size -= chunksize;
		i++;
	}
	return copied;
}
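/*
 * Illustrative: copying one full 2 MiB huge page with 4 KiB base pages
 * walks 512 consecutive subpages, calling copy_page_to_iter() on each.
 */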
/*
 * Support for read() - Find the page attached to f_mapping and copy out the
 * data. It's *very* similar to do_generic_mapping_read(), we can't use that
 * since it has PAGE_SIZE assumptions.
 */
static ssize_t hugetlbfs_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	struct file *file = iocb->ki_filp;
	struct hstate *h = hstate_file(file);
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	unsigned long index = iocb->ki_pos >> huge_page_shift(h);
	unsigned long offset = iocb->ki_pos & ~huge_page_mask(h);
	unsigned long end_index;
	loff_t isize;
	ssize_t retval = 0;

	while (iov_iter_count(to)) {
		struct page *page;
		size_t nr, copied;

		/* nr is the maximum number of bytes to copy from this page */
		nr = huge_page_size(h);
		isize = i_size_read(inode);
		if (!isize)
			break;
		end_index = (isize - 1) >> huge_page_shift(h);
		if (index > end_index)
			break;
		if (index == end_index) {
			nr = ((isize - 1) & ~huge_page_mask(h)) + 1;
			if (nr <= offset)
				break;
		}
		nr = nr - offset;

		/* Find the page */
		page = find_lock_page(mapping, index);
		if (unlikely(page == NULL)) {
			/*
			 * We have a HOLE, zero out the user-buffer for the
			 * length of the hole or request.
			 */
			copied = iov_iter_zero(nr, to);
		} else {
			unlock_page(page);

			/*
			 * We have the page, copy it to user space buffer.
			 */
			copied = hugetlbfs_read_actor(page, offset, to, nr);
			put_page(page);
		}
		offset += copied;
		retval += copied;
		if (copied != nr && iov_iter_count(to)) {
			if (!retval)
				retval = -EFAULT;
			break;
		}
		index += offset >> huge_page_shift(h);
		offset &= ~huge_page_mask(h);
	}
	iocb->ki_pos = ((loff_t)index << huge_page_shift(h)) + offset;
	return retval;
}
static int hugetlbfs_write_begin(struct file *file,
			struct address_space *mapping,
			loff_t pos, unsigned len, unsigned flags,
			struct page **pagep, void **fsdata)
{
	return -EINVAL;
}

static int hugetlbfs_write_end(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned copied,
			struct page *page, void *fsdata)
{
	BUG();
	return -EINVAL;
}
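/*
 * Note: the stubs above deliberately reject the generic buffered write
 * path; with no .write_iter in hugetlbfs_file_operations either, data
 * reaches a hugetlbfs file via mmap() (or fallocate() for allocation),
 * never via write(2).
 */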
static void remove_huge_page(struct page *page)
{
	ClearPageDirty(page);
	ClearPageUptodate(page);
	delete_from_page_cache(page);
}
static void
hugetlb_vmdelete_list(struct rb_root_cached *root, pgoff_t start, pgoff_t end)
{
	struct vm_area_struct *vma;

	/*
	 * end == 0 indicates that the entire range after
	 * start should be unmapped.
	 */
	vma_interval_tree_foreach(vma, root, start, end ? end : ULONG_MAX) {
		unsigned long v_offset;
		unsigned long v_end;

		/*
		 * Can the expression below overflow on 32-bit arches?
		 * No, because the interval tree returns us only those vmas
		 * which overlap the truncated area starting at pgoff,
		 * and no vma on a 32-bit arch can span beyond the 4GB.
		 */
		if (vma->vm_pgoff < start)
			v_offset = (start - vma->vm_pgoff) << PAGE_SHIFT;
		else
			v_offset = 0;

		if (!end)
			v_end = vma->vm_end;
		else {
			v_end = ((end - vma->vm_pgoff) << PAGE_SHIFT)
							+ vma->vm_start;
			if (v_end > vma->vm_end)
				v_end = vma->vm_end;
		}

		unmap_hugepage_range(vma, vma->vm_start + v_offset, v_end,
									NULL);
	}
}
/*
 * remove_inode_hugepages handles two distinct cases: truncation and hole
 * punch.  There are subtle differences in operation for each case.
 *
 * truncation is indicated by end of range being LLONG_MAX
 *	In this case, we first scan the range and release found pages.
 *	After releasing pages, hugetlb_unreserve_pages cleans up region/reserv
 *	maps and global counts.  Page faults can not race with truncation
 *	in this routine.  hugetlb_no_page() prevents page faults in the
 *	truncated range.  It checks i_size before allocation, and again after
 *	with the page table lock for the page held.  The same lock must be
 *	acquired to unmap a page.
 * hole punch is indicated if end is not LLONG_MAX
 *	In the hole punch case we scan the range and release found pages.
 *	Only when releasing a page is the associated region/reserv map
 *	deleted.  The region/reserv map for ranges without associated
 *	pages are not modified.  Page faults can race with hole punch.
 *	This is indicated if we find a mapped page.
 * Note: If the passed end of range value is beyond the end of file, but
 * not LLONG_MAX this routine still performs a hole punch operation.
 */
static void remove_inode_hugepages(struct inode *inode, loff_t lstart,
				   loff_t lend)
{
	struct hstate *h = hstate_inode(inode);
	struct address_space *mapping = &inode->i_data;
	const pgoff_t start = lstart >> huge_page_shift(h);
	const pgoff_t end = lend >> huge_page_shift(h);
	struct vm_area_struct pseudo_vma;
	struct pagevec pvec;
	pgoff_t next, index;
	int i, freed = 0;
	bool truncate_op = (lend == LLONG_MAX);

	memset(&pseudo_vma, 0, sizeof(struct vm_area_struct));
	pseudo_vma.vm_flags = (VM_HUGETLB | VM_MAYSHARE | VM_SHARED);
	pagevec_init(&pvec);
	next = start;
	while (next < end) {
		/*
		 * When no more pages are found, we are done.
		 */
		if (!pagevec_lookup_range(&pvec, mapping, &next, end - 1))
			break;

		for (i = 0; i < pagevec_count(&pvec); ++i) {
			struct page *page = pvec.pages[i];
			u32 hash;

			index = page->index;
			hash = hugetlb_fault_mutex_hash(h, current->mm,
							&pseudo_vma,
							mapping, index, 0);
			mutex_lock(&hugetlb_fault_mutex_table[hash]);

			/*
			 * If page is mapped, it was faulted in after being
			 * unmapped in caller.  Unmap (again) now after taking
			 * the fault mutex.  The mutex will prevent faults
			 * until we finish removing the page.
			 *
			 * This race can only happen in the hole punch case.
			 * Getting here in a truncate operation is a bug.
			 */
			if (unlikely(page_mapped(page))) {
				BUG_ON(truncate_op);

				i_mmap_lock_write(mapping);
				hugetlb_vmdelete_list(&mapping->i_mmap,
					index * pages_per_huge_page(h),
					(index + 1) * pages_per_huge_page(h));
				i_mmap_unlock_write(mapping);
			}

			lock_page(page);
			/*
			 * We must free the huge page and remove from page
			 * cache (remove_huge_page) BEFORE removing the
			 * region/reserve map (hugetlb_unreserve_pages).  In
			 * rare out of memory conditions, removal of the
			 * region/reserve map could fail.  Correspondingly,
			 * the subpool and global reserve usage count can need
			 * to be adjusted.
			 */
			VM_BUG_ON(PagePrivate(page));
			remove_huge_page(page);
			freed++;
			if (!truncate_op) {
				if (unlikely(hugetlb_unreserve_pages(inode,
							index, index + 1, 1)))
					hugetlb_fix_reserve_counts(inode);
			}

			unlock_page(page);
			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
		}
		huge_pagevec_release(&pvec);
		cond_resched();
	}

	if (truncate_op)
		(void)hugetlb_unreserve_pages(inode, start, LONG_MAX, freed);
}
static void hugetlbfs_evict_inode(struct inode *inode)
{
	struct resv_map *resv_map;

	remove_inode_hugepages(inode, 0, LLONG_MAX);
	resv_map = (struct resv_map *)inode->i_mapping->private_data;
	/* root inode doesn't have the resv_map, so we should check it */
	if (resv_map)
		resv_map_release(&resv_map->refs);
	clear_inode(inode);
}
static int hugetlb_vmtruncate(struct inode *inode, loff_t offset)
{
	pgoff_t pgoff;
	struct address_space *mapping = inode->i_mapping;
	struct hstate *h = hstate_inode(inode);

	BUG_ON(offset & ~huge_page_mask(h));
	pgoff = offset >> PAGE_SHIFT;

	i_size_write(inode, offset);
	i_mmap_lock_write(mapping);
	if (!RB_EMPTY_ROOT(&mapping->i_mmap.rb_root))
		hugetlb_vmdelete_list(&mapping->i_mmap, pgoff, 0);
	i_mmap_unlock_write(mapping);
	remove_inode_hugepages(inode, offset, LLONG_MAX);
	return 0;
}
static long hugetlbfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
{
	struct hstate *h = hstate_inode(inode);
	loff_t hpage_size = huge_page_size(h);
	loff_t hole_start, hole_end;

	/*
	 * For hole punch round up the beginning offset of the hole and
	 * round down the end.
	 */
	hole_start = round_up(offset, hpage_size);
	hole_end = round_down(offset + len, hpage_size);

	if (hole_end > hole_start) {
		struct address_space *mapping = inode->i_mapping;
		struct hugetlbfs_inode_info *info = HUGETLBFS_I(inode);

		inode_lock(inode);

		/* protected by i_mutex */
		if (info->seals & F_SEAL_WRITE) {
			inode_unlock(inode);
			return -EPERM;
		}

		i_mmap_lock_write(mapping);
		if (!RB_EMPTY_ROOT(&mapping->i_mmap.rb_root))
			hugetlb_vmdelete_list(&mapping->i_mmap,
						hole_start >> PAGE_SHIFT,
						hole_end >> PAGE_SHIFT);
		i_mmap_unlock_write(mapping);
		remove_inode_hugepages(inode, hole_start, hole_end);
		inode_unlock(inode);
	}

	return 0;
}
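/*
 * Worked example (illustrative): with 2 MiB huge pages,
 * fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE, 1M, 4M)
 * yields hole_start = 2M and hole_end = 4M, so only the one huge page
 * lying entirely inside [1M, 5M) is removed.
 */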
static long hugetlbfs_fallocate(struct file *file, int mode, loff_t offset,
				loff_t len)
{
	struct inode *inode = file_inode(file);
	struct hugetlbfs_inode_info *info = HUGETLBFS_I(inode);
	struct address_space *mapping = inode->i_mapping;
	struct hstate *h = hstate_inode(inode);
	struct vm_area_struct pseudo_vma;
	struct mm_struct *mm = current->mm;
	loff_t hpage_size = huge_page_size(h);
	unsigned long hpage_shift = huge_page_shift(h);
	pgoff_t start, index, end;
	int error;
	u32 hash;

	if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
		return -EOPNOTSUPP;

	if (mode & FALLOC_FL_PUNCH_HOLE)
		return hugetlbfs_punch_hole(inode, offset, len);

	/*
	 * Default preallocate case.
	 * For this range, start is rounded down and end is rounded up
	 * as well as being converted to page offsets.
	 */
	start = offset >> hpage_shift;
	end = (offset + len + hpage_size - 1) >> hpage_shift;

	inode_lock(inode);

	/* We need to check rlimit even when FALLOC_FL_KEEP_SIZE */
	error = inode_newsize_ok(inode, offset + len);
	if (error)
		goto out;

	if ((info->seals & F_SEAL_GROW) && offset + len > inode->i_size) {
		error = -EPERM;
		goto out;
	}

	/*
	 * Initialize a pseudo vma as this is required by the huge page
	 * allocation routines.  If NUMA is configured, use page index
	 * as input to create an allocation policy.
	 */
	memset(&pseudo_vma, 0, sizeof(struct vm_area_struct));
	pseudo_vma.vm_flags = (VM_HUGETLB | VM_MAYSHARE | VM_SHARED);
	pseudo_vma.vm_file = file;

	for (index = start; index < end; index++) {
		/*
		 * This is supposed to be the vaddr where the page is being
		 * faulted in, but we have no vaddr here.
		 */
		struct page *page;
		unsigned long addr;
		int avoid_reserve = 0;

		cond_resched();

		/*
		 * fallocate(2) manpage permits EINTR; we may have been
		 * interrupted because we are using up too much memory.
		 */
		if (signal_pending(current)) {
			error = -EINTR;
			break;
		}

		/* Set numa allocation policy based on index */
		hugetlb_set_vma_policy(&pseudo_vma, inode, index);

		/* addr is the offset within the file (zero based) */
		addr = index * hpage_size;

		/* mutex taken here, fault path and hole punch */
		hash = hugetlb_fault_mutex_hash(h, mm, &pseudo_vma, mapping,
						index, addr);
		mutex_lock(&hugetlb_fault_mutex_table[hash]);

		/* See if already present in mapping to avoid alloc/free */
		page = find_get_page(mapping, index);
		if (page) {
			put_page(page);
			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
			hugetlb_drop_vma_policy(&pseudo_vma);
			continue;
		}

		/* Allocate page and add to page cache */
		page = alloc_huge_page(&pseudo_vma, addr, avoid_reserve);
		hugetlb_drop_vma_policy(&pseudo_vma);
		if (IS_ERR(page)) {
			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
			error = PTR_ERR(page);
			goto out;
		}
		clear_huge_page(page, addr, pages_per_huge_page(h));
		__SetPageUptodate(page);
		error = huge_add_to_page_cache(page, mapping, index);
		if (unlikely(error)) {
			put_page(page);
			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
			goto out;
		}

		mutex_unlock(&hugetlb_fault_mutex_table[hash]);

		/*
		 * unlock_page because locked by add_to_page_cache()
		 * put_page due to reference from alloc_huge_page()
		 */
		unlock_page(page);
		put_page(page);
	}

	if (!(mode & FALLOC_FL_KEEP_SIZE) && offset + len > inode->i_size)
		i_size_write(inode, offset + len);
	inode->i_ctime = current_time(inode);
out:
	inode_unlock(inode);
	return error;
}
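/*
 * Worked example (illustrative): with 2 MiB huge pages, a preallocation of
 * offset = 3M, len = 2M covers [3M, 5M), so start = 1 and end = 3 above:
 * the huge pages at indices 1 and 2 are allocated.
 */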
static int hugetlbfs_setattr(struct dentry *dentry, struct iattr *attr)
{
	struct inode *inode = d_inode(dentry);
	struct hstate *h = hstate_inode(inode);
	int error;
	unsigned int ia_valid = attr->ia_valid;
	struct hugetlbfs_inode_info *info = HUGETLBFS_I(inode);

	BUG_ON(!inode);

	error = setattr_prepare(dentry, attr);
	if (error)
		return error;

	if (ia_valid & ATTR_SIZE) {
		loff_t oldsize = inode->i_size;
		loff_t newsize = attr->ia_size;

		if (newsize & ~huge_page_mask(h))
			return -EINVAL;
		/* protected by i_mutex */
		if ((newsize < oldsize && (info->seals & F_SEAL_SHRINK)) ||
		    (newsize > oldsize && (info->seals & F_SEAL_GROW)))
			return -EPERM;
		error = hugetlb_vmtruncate(inode, newsize);
		if (error)
			return error;
	}

	setattr_copy(inode, attr);
	mark_inode_dirty(inode);
	return 0;
}
static struct inode *hugetlbfs_get_root(struct super_block *sb,
					struct hugetlbfs_config *config)
{
	struct inode *inode;

	inode = new_inode(sb);
	if (inode) {
		inode->i_ino = get_next_ino();
		inode->i_mode = S_IFDIR | config->mode;
		inode->i_uid = config->uid;
		inode->i_gid = config->gid;
		inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode);
		inode->i_op = &hugetlbfs_dir_inode_operations;
		inode->i_fop = &simple_dir_operations;
		/* directory inodes start off with i_nlink == 2 (for "." entry) */
		inc_nlink(inode);
		lockdep_annotate_inode_mutex_key(inode);
	}
	return inode;
}
/*
 * Hugetlbfs is not reclaimable; therefore its i_mmap_rwsem will never
 * be taken from reclaim -- unlike regular filesystems. This needs an
 * annotation because huge_pmd_share() does an allocation under hugetlb's
 * i_mmap_rwsem.
 */
static struct lock_class_key hugetlbfs_i_mmap_rwsem_key;
static struct inode *hugetlbfs_get_inode(struct super_block *sb,
					struct inode *dir,
					umode_t mode, dev_t dev)
{
	struct inode *inode;
	struct resv_map *resv_map;

	resv_map = resv_map_alloc();
	if (!resv_map)
		return NULL;

	inode = new_inode(sb);
	if (inode) {
		struct hugetlbfs_inode_info *info = HUGETLBFS_I(inode);

		inode->i_ino = get_next_ino();
		inode_init_owner(inode, dir, mode);
		lockdep_set_class(&inode->i_mapping->i_mmap_rwsem,
				&hugetlbfs_i_mmap_rwsem_key);
		inode->i_mapping->a_ops = &hugetlbfs_aops;
		inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode);
		inode->i_mapping->private_data = resv_map;
		info->seals = F_SEAL_SEAL;
		switch (mode & S_IFMT) {
		default:
			init_special_inode(inode, mode, dev);
			break;
		case S_IFREG:
			inode->i_op = &hugetlbfs_inode_operations;
			inode->i_fop = &hugetlbfs_file_operations;
			break;
		case S_IFDIR:
			inode->i_op = &hugetlbfs_dir_inode_operations;
			inode->i_fop = &simple_dir_operations;

			/* directory inodes start off with i_nlink == 2 (for "." entry) */
			inc_nlink(inode);
			break;
		case S_IFLNK:
			inode->i_op = &page_symlink_inode_operations;
			inode_nohighmem(inode);
			break;
		}
		lockdep_annotate_inode_mutex_key(inode);
	} else
		kref_put(&resv_map->refs, resv_map_release);

	return inode;
}
/*
 * File creation.  Allocate an inode, and we're done.
 */
static int hugetlbfs_mknod(struct inode *dir,
			struct dentry *dentry, umode_t mode, dev_t dev)
{
	struct inode *inode;
	int error = -ENOSPC;

	inode = hugetlbfs_get_inode(dir->i_sb, dir, mode, dev);
	if (inode) {
		dir->i_ctime = dir->i_mtime = current_time(dir);
		d_instantiate(dentry, inode);
		dget(dentry);	/* Extra count - pin the dentry in core */
		error = 0;
	}
	return error;
}
static int hugetlbfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
{
	int retval = hugetlbfs_mknod(dir, dentry, mode | S_IFDIR, 0);
	if (!retval)
		inc_nlink(dir);
	return retval;
}
static int hugetlbfs_create(struct inode *dir, struct dentry *dentry, umode_t mode, bool excl)
{
	return hugetlbfs_mknod(dir, dentry, mode | S_IFREG, 0);
}
static int hugetlbfs_symlink(struct inode *dir,
			struct dentry *dentry, const char *symname)
{
	struct inode *inode;
	int error = -ENOSPC;

	inode = hugetlbfs_get_inode(dir->i_sb, dir, S_IFLNK|S_IRWXUGO, 0);
	if (inode) {
		int l = strlen(symname)+1;
		error = page_symlink(inode, symname, l);
		if (!error) {
			d_instantiate(dentry, inode);
			dget(dentry);
		} else
			iput(inode);
	}
	dir->i_ctime = dir->i_mtime = current_time(dir);

	return error;
}
/*
 * mark the head page dirty
 */
static int hugetlbfs_set_page_dirty(struct page *page)
{
	struct page *head = compound_head(page);

	SetPageDirty(head);
	return 0;
}
static int hugetlbfs_migrate_page(struct address_space *mapping,
				struct page *newpage, struct page *page,
				enum migrate_mode mode)
{
	int rc;

	rc = migrate_huge_page_move_mapping(mapping, newpage, page);
	if (rc != MIGRATEPAGE_SUCCESS)
		return rc;
	if (mode != MIGRATE_SYNC_NO_COPY)
		migrate_page_copy(newpage, page);
	else
		migrate_page_states(newpage, page);

	return MIGRATEPAGE_SUCCESS;
}
static int hugetlbfs_error_remove_page(struct address_space *mapping,
				struct page *page)
{
	struct inode *inode = mapping->host;
	pgoff_t index = page->index;

	remove_huge_page(page);
	if (unlikely(hugetlb_unreserve_pages(inode, index, index + 1, 1)))
		hugetlb_fix_reserve_counts(inode);

	return 0;
}
/*
 * Display the mount options in /proc/mounts.
 */
static int hugetlbfs_show_options(struct seq_file *m, struct dentry *root)
{
	struct hugetlbfs_sb_info *sbinfo = HUGETLBFS_SB(root->d_sb);
	struct hugepage_subpool *spool = sbinfo->spool;
	unsigned long hpage_size = huge_page_size(sbinfo->hstate);
	unsigned hpage_shift = huge_page_shift(sbinfo->hstate);
	char mod;

	if (!uid_eq(sbinfo->uid, GLOBAL_ROOT_UID))
		seq_printf(m, ",uid=%u",
			   from_kuid_munged(&init_user_ns, sbinfo->uid));
	if (!gid_eq(sbinfo->gid, GLOBAL_ROOT_GID))
		seq_printf(m, ",gid=%u",
			   from_kgid_munged(&init_user_ns, sbinfo->gid));
	if (sbinfo->mode != 0755)
		seq_printf(m, ",mode=%o", sbinfo->mode);
	if (sbinfo->max_inodes != -1)
		seq_printf(m, ",nr_inodes=%lu", sbinfo->max_inodes);

	hpage_size /= 1024;
	mod = 'K';
	if (hpage_size >= 1024) {
		hpage_size /= 1024;
		mod = 'M';
	}
	seq_printf(m, ",pagesize=%lu%c", hpage_size, mod);
	if (spool) {
		if (spool->max_hpages != -1)
			seq_printf(m, ",size=%llu",
				   (unsigned long long)spool->max_hpages << hpage_shift);
		if (spool->min_hpages != -1)
			seq_printf(m, ",min_size=%llu",
				   (unsigned long long)spool->min_hpages << hpage_shift);
	}
	return 0;
}
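/*
 * Illustrative /proc/mounts line produced by the above for a mount with a
 * 2 MiB page size and otherwise default options:
 *
 *	none /mnt/huge hugetlbfs rw,relatime,pagesize=2M 0 0
 */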
static int hugetlbfs_statfs(struct dentry *dentry, struct kstatfs *buf)
{
	struct hugetlbfs_sb_info *sbinfo = HUGETLBFS_SB(dentry->d_sb);
	struct hstate *h = hstate_inode(d_inode(dentry));

	buf->f_type = HUGETLBFS_MAGIC;
	buf->f_bsize = huge_page_size(h);
	if (sbinfo) {
		spin_lock(&sbinfo->stat_lock);
		/* If no limits set, just report 0 for max/free/used
		 * blocks, like simple_statfs() */
		if (sbinfo->spool) {
			long free_pages;

			spin_lock(&sbinfo->spool->lock);
			buf->f_blocks = sbinfo->spool->max_hpages;
			free_pages = sbinfo->spool->max_hpages
				- sbinfo->spool->used_hpages;
			buf->f_bavail = buf->f_bfree = free_pages;
			spin_unlock(&sbinfo->spool->lock);
			buf->f_files = sbinfo->max_inodes;
			buf->f_ffree = sbinfo->free_inodes;
		}
		spin_unlock(&sbinfo->stat_lock);
	}
	buf->f_namelen = NAME_MAX;
	return 0;
}
static void hugetlbfs_put_super(struct super_block *sb)
{
	struct hugetlbfs_sb_info *sbi = HUGETLBFS_SB(sb);

	if (sbi) {
		sb->s_fs_info = NULL;

		if (sbi->spool)
			hugepage_put_subpool(sbi->spool);

		kfree(sbi);
	}
}
static inline int hugetlbfs_dec_free_inodes(struct hugetlbfs_sb_info *sbinfo)
{
	if (sbinfo->free_inodes >= 0) {
		spin_lock(&sbinfo->stat_lock);
		if (unlikely(!sbinfo->free_inodes)) {
			spin_unlock(&sbinfo->stat_lock);
			return 0;
		}
		sbinfo->free_inodes--;
		spin_unlock(&sbinfo->stat_lock);
	}

	return 1;
}

static void hugetlbfs_inc_free_inodes(struct hugetlbfs_sb_info *sbinfo)
{
	if (sbinfo->free_inodes >= 0) {
		spin_lock(&sbinfo->stat_lock);
		sbinfo->free_inodes++;
		spin_unlock(&sbinfo->stat_lock);
	}
}
static struct kmem_cache *hugetlbfs_inode_cachep;

static struct inode *hugetlbfs_alloc_inode(struct super_block *sb)
{
	struct hugetlbfs_sb_info *sbinfo = HUGETLBFS_SB(sb);
	struct hugetlbfs_inode_info *p;

	if (unlikely(!hugetlbfs_dec_free_inodes(sbinfo)))
		return NULL;
	p = kmem_cache_alloc(hugetlbfs_inode_cachep, GFP_KERNEL);
	if (unlikely(!p)) {
		hugetlbfs_inc_free_inodes(sbinfo);
		return NULL;
	}

	/*
	 * Any time after allocation, hugetlbfs_destroy_inode can be called
	 * for the inode.  mpol_free_shared_policy is unconditionally called
	 * as part of hugetlbfs_destroy_inode.  So, initialize policy here
	 * in case of a quick call to destroy.
	 *
	 * Note that the policy is initialized even if we are creating a
	 * private inode.  This simplifies hugetlbfs_destroy_inode.
	 */
	mpol_shared_policy_init(&p->policy, NULL);

	return &p->vfs_inode;
}
static void hugetlbfs_i_callback(struct rcu_head *head)
{
	struct inode *inode = container_of(head, struct inode, i_rcu);
	kmem_cache_free(hugetlbfs_inode_cachep, HUGETLBFS_I(inode));
}

static void hugetlbfs_destroy_inode(struct inode *inode)
{
	hugetlbfs_inc_free_inodes(HUGETLBFS_SB(inode->i_sb));
	mpol_free_shared_policy(&HUGETLBFS_I(inode)->policy);
	call_rcu(&inode->i_rcu, hugetlbfs_i_callback);
}
static const struct address_space_operations hugetlbfs_aops = {
	.write_begin		= hugetlbfs_write_begin,
	.write_end		= hugetlbfs_write_end,
	.set_page_dirty		= hugetlbfs_set_page_dirty,
	.migratepage		= hugetlbfs_migrate_page,
	.error_remove_page	= hugetlbfs_error_remove_page,
};

static void init_once(void *foo)
{
	struct hugetlbfs_inode_info *ei = (struct hugetlbfs_inode_info *)foo;

	inode_init_once(&ei->vfs_inode);
}

const struct file_operations hugetlbfs_file_operations = {
	.read_iter		= hugetlbfs_read_iter,
	.mmap			= hugetlbfs_file_mmap,
	.fsync			= noop_fsync,
	.get_unmapped_area	= hugetlb_get_unmapped_area,
	.llseek			= default_llseek,
	.fallocate		= hugetlbfs_fallocate,
};
static const struct inode_operations hugetlbfs_dir_inode_operations = {
	.create		= hugetlbfs_create,
	.lookup		= simple_lookup,
	.link		= simple_link,
	.unlink		= simple_unlink,
	.symlink	= hugetlbfs_symlink,
	.mkdir		= hugetlbfs_mkdir,
	.rmdir		= simple_rmdir,
	.mknod		= hugetlbfs_mknod,
	.rename		= simple_rename,
	.setattr	= hugetlbfs_setattr,
};

static const struct inode_operations hugetlbfs_inode_operations = {
	.setattr	= hugetlbfs_setattr,
};

static const struct super_operations hugetlbfs_ops = {
	.alloc_inode	= hugetlbfs_alloc_inode,
	.destroy_inode	= hugetlbfs_destroy_inode,
	.evict_inode	= hugetlbfs_evict_inode,
	.statfs		= hugetlbfs_statfs,
	.put_super	= hugetlbfs_put_super,
	.show_options	= hugetlbfs_show_options,
};
enum hugetlbfs_size_type { NO_SIZE, SIZE_STD, SIZE_PERCENT };

/*
 * Convert size option passed from command line to number of huge pages
 * in the pool specified by hstate.  Size option could be in bytes
 * (val_type == SIZE_STD) or percentage of the pool (val_type == SIZE_PERCENT).
 */
static long
hugetlbfs_size_to_hpages(struct hstate *h, unsigned long long size_opt,
			 enum hugetlbfs_size_type val_type)
{
	if (val_type == NO_SIZE)
		return -1;

	if (val_type == SIZE_PERCENT) {
		size_opt <<= huge_page_shift(h);
		size_opt *= h->max_huge_pages;
		do_div(size_opt, 100);
	}

	size_opt >>= huge_page_shift(h);
	return size_opt;
}
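/*
 * Worked example (illustrative): "size=50%" with 2 MiB huge pages and
 * max_huge_pages == 1024: size_opt = 50 becomes 50 << 21, is multiplied
 * by 1024 and divided by 100 (== 1 GiB), and the final shift right by
 * huge_page_shift yields 512 huge pages.
 */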
static int
hugetlbfs_parse_options(char *options, struct hugetlbfs_config *pconfig)
{
	char *p, *rest;
	substring_t args[MAX_OPT_ARGS];
	int option;
	unsigned long long max_size_opt = 0, min_size_opt = 0;
	enum hugetlbfs_size_type max_val_type = NO_SIZE, min_val_type = NO_SIZE;

	if (!options)
		return 0;

	while ((p = strsep(&options, ",")) != NULL) {
		int token;
		if (!*p)
			continue;

		token = match_token(p, tokens, args);
		switch (token) {
		case Opt_uid:
			if (match_int(&args[0], &option))
				goto bad_val;
			pconfig->uid = make_kuid(current_user_ns(), option);
			if (!uid_valid(pconfig->uid))
				goto bad_val;
			break;

		case Opt_gid:
			if (match_int(&args[0], &option))
				goto bad_val;
			pconfig->gid = make_kgid(current_user_ns(), option);
			if (!gid_valid(pconfig->gid))
				goto bad_val;
			break;

		case Opt_mode:
			if (match_octal(&args[0], &option))
				goto bad_val;
			pconfig->mode = option & 01777U;
			break;

		case Opt_size:
			/* memparse() will accept a K/M/G without a digit */
			if (!isdigit(*args[0].from))
				goto bad_val;
			max_size_opt = memparse(args[0].from, &rest);
			max_val_type = SIZE_STD;
			if (*rest == '%')
				max_val_type = SIZE_PERCENT;
			break;

		case Opt_nr_inodes:
			/* memparse() will accept a K/M/G without a digit */
			if (!isdigit(*args[0].from))
				goto bad_val;
			pconfig->nr_inodes = memparse(args[0].from, &rest);
			break;

		case Opt_pagesize: {
			unsigned long ps;
			ps = memparse(args[0].from, &rest);
			pconfig->hstate = size_to_hstate(ps);
			if (!pconfig->hstate) {
				pr_err("Unsupported page size %lu MB\n",
					ps >> 20);
				return -EINVAL;
			}
			break;
		}

		case Opt_min_size:
			/* memparse() will accept a K/M/G without a digit */
			if (!isdigit(*args[0].from))
				goto bad_val;
			min_size_opt = memparse(args[0].from, &rest);
			min_val_type = SIZE_STD;
			if (*rest == '%')
				min_val_type = SIZE_PERCENT;
			break;

		default:
			pr_err("Bad mount option: \"%s\"\n", p);
			return -EINVAL;
		}
	}

	/*
	 * Use huge page pool size (in hstate) to convert the size
	 * options to number of huge pages.  If NO_SIZE, -1 is returned.
	 */
	pconfig->max_hpages = hugetlbfs_size_to_hpages(pconfig->hstate,
						max_size_opt, max_val_type);
	pconfig->min_hpages = hugetlbfs_size_to_hpages(pconfig->hstate,
						min_size_opt, min_val_type);

	/*
	 * If max_size was specified, then min_size must be smaller
	 */
	if (max_val_type > NO_SIZE &&
	    pconfig->min_hpages > pconfig->max_hpages) {
		pr_err("minimum size can not be greater than maximum size\n");
		return -EINVAL;
	}

	return 0;

bad_val:
	pr_err("Bad value '%s' for mount option '%s'\n", args[0].from, p);
	return -EINVAL;
}
static int
hugetlbfs_fill_super(struct super_block *sb, void *data, int silent)
{
	int ret;
	struct hugetlbfs_config config;
	struct hugetlbfs_sb_info *sbinfo;

	config.max_hpages = -1; /* No limit on size by default */
	config.nr_inodes = -1; /* No limit on number of inodes by default */
	config.uid = current_fsuid();
	config.gid = current_fsgid();
	config.mode = 0755;
	config.hstate = &default_hstate;
	config.min_hpages = -1; /* No default minimum size */
	ret = hugetlbfs_parse_options(data, &config);
	if (ret)
		return ret;

	sbinfo = kmalloc(sizeof(struct hugetlbfs_sb_info), GFP_KERNEL);
	if (!sbinfo)
		return -ENOMEM;
	sb->s_fs_info = sbinfo;
	sbinfo->hstate = config.hstate;
	spin_lock_init(&sbinfo->stat_lock);
	sbinfo->max_inodes = config.nr_inodes;
	sbinfo->free_inodes = config.nr_inodes;
	sbinfo->spool = NULL;
	sbinfo->uid = config.uid;
	sbinfo->gid = config.gid;
	sbinfo->mode = config.mode;

	/*
	 * Allocate and initialize subpool if maximum or minimum size is
	 * specified.  Any needed reservations (for minimum size) are taken
	 * when the subpool is created.
	 */
	if (config.max_hpages != -1 || config.min_hpages != -1) {
		sbinfo->spool = hugepage_new_subpool(config.hstate,
							config.max_hpages,
							config.min_hpages);
		if (!sbinfo->spool)
			goto out_free;
	}
	sb->s_maxbytes = MAX_LFS_FILESIZE;
	sb->s_blocksize = huge_page_size(config.hstate);
	sb->s_blocksize_bits = huge_page_shift(config.hstate);
	sb->s_magic = HUGETLBFS_MAGIC;
	sb->s_op = &hugetlbfs_ops;
	sb->s_time_gran = 1;
	sb->s_root = d_make_root(hugetlbfs_get_root(sb, &config));
	if (!sb->s_root)
		goto out_free;
	return 0;

out_free:
	kfree(sbinfo->spool);
	kfree(sbinfo);
	return -ENOMEM;
}
static struct dentry *hugetlbfs_mount(struct file_system_type *fs_type,
	int flags, const char *dev_name, void *data)
{
	return mount_nodev(fs_type, flags, data, hugetlbfs_fill_super);
}

static struct file_system_type hugetlbfs_fs_type = {
	.name		= "hugetlbfs",
	.mount		= hugetlbfs_mount,
	.kill_sb	= kill_litter_super,
};
static struct vfsmount *hugetlbfs_vfsmount[HUGE_MAX_HSTATE];

static int can_do_hugetlb_shm(void)
{
	kgid_t shm_group;
	shm_group = make_kgid(&init_user_ns, sysctl_hugetlb_shm_group);
	return capable(CAP_IPC_LOCK) || in_group_p(shm_group);
}

static int get_hstate_idx(int page_size_log)
{
	struct hstate *h = hstate_sizelog(page_size_log);

	if (!h)
		return -1;
	return h - hstates;
}
static const struct dentry_operations anon_ops = {
	.d_dname = simple_dname
};
/*
 * Note that size should be aligned to proper hugepage size in caller side,
 * otherwise hugetlb_reserve_pages reserves one less hugepage than intended.
 */
struct file *hugetlb_file_setup(const char *name, size_t size,
				vm_flags_t acctflag, struct user_struct **user,
				int creat_flags, int page_size_log)
{
	struct file *file = ERR_PTR(-ENOMEM);
	struct inode *inode;
	struct path path;
	struct super_block *sb;
	struct qstr quick_string;
	int hstate_idx;

	hstate_idx = get_hstate_idx(page_size_log);
	if (hstate_idx < 0)
		return ERR_PTR(-ENODEV);

	*user = NULL;
	if (!hugetlbfs_vfsmount[hstate_idx])
		return ERR_PTR(-ENOENT);

	if (creat_flags == HUGETLB_SHMFS_INODE && !can_do_hugetlb_shm()) {
		*user = current_user();
		if (user_shm_lock(size, *user)) {
			task_lock(current);
			pr_warn_once("%s (%d): Using mlock ulimits for SHM_HUGETLB is deprecated\n",
				current->comm, current->pid);
			task_unlock(current);
		} else {
			*user = NULL;
			return ERR_PTR(-EPERM);
		}
	}

	sb = hugetlbfs_vfsmount[hstate_idx]->mnt_sb;
	quick_string.name = name;
	quick_string.len = strlen(quick_string.name);
	quick_string.hash = 0;
	path.dentry = d_alloc_pseudo(sb, &quick_string);
	if (!path.dentry)
		goto out_shm_unlock;

	d_set_d_op(path.dentry, &anon_ops);
	path.mnt = mntget(hugetlbfs_vfsmount[hstate_idx]);
	file = ERR_PTR(-ENOSPC);
	inode = hugetlbfs_get_inode(sb, NULL, S_IFREG | S_IRWXUGO, 0);
	if (!inode)
		goto out_dentry;
	if (creat_flags == HUGETLB_SHMFS_INODE)
		inode->i_flags |= S_PRIVATE;

	file = ERR_PTR(-ENOMEM);
	if (hugetlb_reserve_pages(inode, 0,
			size >> huge_page_shift(hstate_inode(inode)), NULL,
			acctflag))
		goto out_inode;

	d_instantiate(path.dentry, inode);
	inode->i_size = size;
	clear_nlink(inode);

	file = alloc_file(&path, FMODE_WRITE | FMODE_READ,
			&hugetlbfs_file_operations);
	if (IS_ERR(file))
		goto out_dentry; /* inode is already attached */

	return file;

out_inode:
	iput(inode);
out_dentry:
	path_put(&path);
out_shm_unlock:
	if (*user) {
		user_shm_unlock(size, *user);
		*user = NULL;
	}
	return file;
}
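/*
 * Illustrative: shmget(key, 4UL << 20, SHM_HUGETLB | IPC_CREAT | 0600)
 * lands here with 2 MiB pages and reserves exactly 2 huge pages; an
 * unaligned 3 MiB request would reserve only 1, because the shift above
 * truncates -- hence the alignment note preceding this function.
 */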
static int __init init_hugetlbfs_fs(void)
{
	struct hstate *h;
	int error;
	int i;

	if (!hugepages_supported()) {
		pr_info("disabling because there are no supported hugepage sizes\n");
		return -ENOTSUPP;
	}

	error = -ENOMEM;
	hugetlbfs_inode_cachep = kmem_cache_create("hugetlbfs_inode_cache",
					sizeof(struct hugetlbfs_inode_info),
					0, SLAB_ACCOUNT, init_once);
	if (hugetlbfs_inode_cachep == NULL)
		goto out2;

	error = register_filesystem(&hugetlbfs_fs_type);
	if (error)
		goto out;

	i = 0;
	for_each_hstate(h) {
		char buf[50];
		unsigned ps_kb = 1U << (h->order + PAGE_SHIFT - 10);

		snprintf(buf, sizeof(buf), "pagesize=%uK", ps_kb);
		hugetlbfs_vfsmount[i] = kern_mount_data(&hugetlbfs_fs_type,
							buf);

		if (IS_ERR(hugetlbfs_vfsmount[i])) {
			pr_err("Cannot mount internal hugetlbfs for page size %uK",
			       ps_kb);
			error = PTR_ERR(hugetlbfs_vfsmount[i]);
			hugetlbfs_vfsmount[i] = NULL;
		}

		i++;
	}
	/* Non default hstates are optional */
	if (!IS_ERR_OR_NULL(hugetlbfs_vfsmount[default_hstate_idx]))
		return 0;

 out:
	kmem_cache_destroy(hugetlbfs_inode_cachep);
 out2:
	return error;
}
fs_initcall(init_hugetlbfs_fs)