/*
 * hugetlbpage-backed filesystem.  Based on ramfs.
 *
 * Nadia Yvette Chambers, 2002
 *
 * Copyright (C) 2002 Linus Torvalds.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/thread_info.h>
#include <asm/current.h>
#include <linux/sched/signal.h>	/* remove ASAP */
#include <linux/falloc.h>
#include <linux/fs.h>
#include <linux/mount.h>
#include <linux/file.h>
#include <linux/kernel.h>
#include <linux/writeback.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/capability.h>
#include <linux/ctype.h>
#include <linux/backing-dev.h>
#include <linux/hugetlb.h>
#include <linux/pagevec.h>
#include <linux/parser.h>
#include <linux/mman.h>
#include <linux/slab.h>
#include <linux/dnotify.h>
#include <linux/statfs.h>
#include <linux/security.h>
#include <linux/magic.h>
#include <linux/migrate.h>
#include <linux/uio.h>

#include <linux/uaccess.h>
static const struct super_operations hugetlbfs_ops;
static const struct address_space_operations hugetlbfs_aops;
const struct file_operations hugetlbfs_file_operations;
static const struct inode_operations hugetlbfs_dir_inode_operations;
static const struct inode_operations hugetlbfs_inode_operations;
struct hugetlbfs_config {
	struct hstate		*hstate;
	long			max_hpages;
	long			min_hpages;
	long			nr_inodes;
	kuid_t			uid;
	kgid_t			gid;
	umode_t			mode;
};
struct hugetlbfs_inode_info {
	struct shared_policy policy;
	struct inode vfs_inode;
};

static inline struct hugetlbfs_inode_info *HUGETLBFS_I(struct inode *inode)
{
	return container_of(inode, struct hugetlbfs_inode_info, vfs_inode);
}
int sysctl_hugetlb_shm_group;
enum {
	Opt_size, Opt_nr_inodes,
	Opt_mode, Opt_uid, Opt_gid,
	Opt_pagesize, Opt_min_size,
	Opt_err,
};
static const match_table_t tokens = {
	{Opt_size,	"size=%s"},
	{Opt_nr_inodes,	"nr_inodes=%s"},
	{Opt_mode,	"mode=%o"},
	{Opt_uid,	"uid=%u"},
	{Opt_gid,	"gid=%u"},
	{Opt_pagesize,	"pagesize=%s"},
	{Opt_min_size,	"min_size=%s"},
	{Opt_err,	NULL},
};
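
/*
 * Illustrative example (not part of the original source): the options in
 * the table above correspond to mount invocations such as
 *
 *	mount -t hugetlbfs -o size=1G,min_size=512M,pagesize=2M,mode=0770 \
 *		none /mnt/huge
 *
 * where the mount point /mnt/huge is an assumed path.
 */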
#ifdef CONFIG_NUMA
static inline void hugetlb_set_vma_policy(struct vm_area_struct *vma,
					struct inode *inode, pgoff_t index)
{
	vma->vm_policy = mpol_shared_policy_lookup(&HUGETLBFS_I(inode)->policy,
							index);
}

static inline void hugetlb_drop_vma_policy(struct vm_area_struct *vma)
{
	mpol_cond_put(vma->vm_policy);
}
#else
static inline void hugetlb_set_vma_policy(struct vm_area_struct *vma,
					struct inode *inode, pgoff_t index)
{
}

static inline void hugetlb_drop_vma_policy(struct vm_area_struct *vma)
{
}
#endif
static void huge_pagevec_release(struct pagevec *pvec)
{
	int i;

	for (i = 0; i < pagevec_count(pvec); ++i)
		put_page(pvec->pages[i]);

	pagevec_reinit(pvec);
}
static int hugetlbfs_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct inode *inode = file_inode(file);
	loff_t len, vma_len;
	int ret;
	struct hstate *h = hstate_file(file);

	/*
	 * vma address alignment (but not the pgoff alignment) has
	 * already been checked by prepare_hugepage_range.  If you add
	 * any error returns here, do so after setting VM_HUGETLB, so
	 * is_vm_hugetlb_page tests below unmap_region go the right
	 * way when do_mmap_pgoff unwinds (may be important on powerpc
	 * and ia64).
	 */
	vma->vm_flags |= VM_HUGETLB | VM_DONTEXPAND;
	vma->vm_ops = &hugetlb_vm_ops;

	/*
	 * Offset passed to mmap (before page shift) could have been
	 * negative when represented as an (l)off_t.
	 */
	if (((loff_t)vma->vm_pgoff << PAGE_SHIFT) < 0)
		return -EINVAL;

	if (vma->vm_pgoff & (~huge_page_mask(h) >> PAGE_SHIFT))
		return -EINVAL;

	vma_len = (loff_t)(vma->vm_end - vma->vm_start);
	len = vma_len + ((loff_t)vma->vm_pgoff << PAGE_SHIFT);
	/* check for overflow */
	if (len < vma_len)
		return -EINVAL;

	inode_lock(inode);
	file_accessed(file);

	ret = -ENOMEM;
	if (hugetlb_reserve_pages(inode,
				vma->vm_pgoff >> huge_page_order(h),
				len >> huge_page_shift(h), vma,
				vma->vm_flags))
		goto out;

	ret = 0;
	if (vma->vm_flags & VM_WRITE && inode->i_size < len)
		i_size_write(inode, len);
out:
	inode_unlock(inode);

	return ret;
}
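
/*
 * Illustrative userspace sketch (assumes /mnt/huge is a hugetlbfs mount
 * with 2MB pages; not part of the original source).  Both the length and
 * the file offset handed to mmap() must be multiples of the huge page
 * size, or the pgoff check above fails with -EINVAL:
 *
 *	int fd = open("/mnt/huge/file", O_CREAT | O_RDWR, 0600);
 *	void *p = mmap(NULL, 2 * 1024 * 1024, PROT_READ | PROT_WRITE,
 *		       MAP_SHARED, fd, 0);
 */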
/*
 * Called under down_write(mmap_sem).
 */

#ifndef HAVE_ARCH_HUGETLB_UNMAPPED_AREA
static unsigned long
hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	struct hstate *h = hstate_file(file);
	struct vm_unmapped_area_info info;

	if (len & ~huge_page_mask(h))
		return -EINVAL;
	if (len > TASK_SIZE)
		return -ENOMEM;

	if (flags & MAP_FIXED) {
		if (prepare_hugepage_range(file, addr, len))
			return -EINVAL;
		return addr;
	}

	if (addr) {
		addr = ALIGN(addr, huge_page_size(h));
		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr &&
		    (!vma || addr + len <= vm_start_gap(vma)))
			return addr;
	}

	info.flags = 0;
	info.length = len;
	info.low_limit = TASK_UNMAPPED_BASE;
	info.high_limit = TASK_SIZE;
	info.align_mask = PAGE_MASK & ~huge_page_mask(h);
	info.align_offset = 0;
	return vm_unmapped_area(&info);
}
#endif
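
/*
 * Worked example (illustrative, not part of the original source): with
 * 2MB huge pages, PAGE_MASK is ~0xfff and huge_page_mask(h) is ~0x1fffff,
 * so align_mask above is 0x1ff000 and vm_unmapped_area() hands back a
 * 2MB-aligned address.
 */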
static size_t
hugetlbfs_read_actor(struct page *page, unsigned long offset,
			struct iov_iter *to, unsigned long size)
{
	size_t copied = 0;
	int i, chunksize;

	/* Find which 4k chunk and offset within that chunk */
	i = offset >> PAGE_SHIFT;
	offset = offset & ~PAGE_MASK;

	while (size) {
		size_t n;

		chunksize = PAGE_SIZE;
		if (offset)
			chunksize -= offset;
		if (chunksize > size)
			chunksize = size;
		n = copy_page_to_iter(&page[i], offset, chunksize, to);
		copied += n;
		if (n != chunksize)
			return copied;
		offset = 0;
		size -= chunksize;
		i++;
	}
	return copied;
}
/*
 * Support for read() - Find the page attached to f_mapping and copy out the
 * data.  It's *very* similar to do_generic_mapping_read(); we can't use that
 * since it has PAGE_SIZE assumptions.
 */
static ssize_t hugetlbfs_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	struct file *file = iocb->ki_filp;
	struct hstate *h = hstate_file(file);
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	unsigned long index = iocb->ki_pos >> huge_page_shift(h);
	unsigned long offset = iocb->ki_pos & ~huge_page_mask(h);
	unsigned long end_index;
	loff_t isize;
	ssize_t retval = 0;

	while (iov_iter_count(to)) {
		struct page *page;
		size_t nr, copied;

		/* nr is the maximum number of bytes to copy from this page */
		nr = huge_page_size(h);
		isize = i_size_read(inode);
		if (!isize)
			break;
		end_index = (isize - 1) >> huge_page_shift(h);
		if (index > end_index)
			break;
		if (index == end_index) {
			nr = ((isize - 1) & ~huge_page_mask(h)) + 1;
			if (nr <= offset)
				break;
		}
		nr = nr - offset;

		/* Find the page */
		page = find_lock_page(mapping, index);
		if (unlikely(page == NULL)) {
			/*
			 * We have a HOLE, zero out the user-buffer for the
			 * length of the hole or request.
			 */
			copied = iov_iter_zero(nr, to);
		} else {
			unlock_page(page);

			/*
			 * We have the page, copy it to user space buffer.
			 */
			copied = hugetlbfs_read_actor(page, offset, to, nr);
			put_page(page);
		}
		offset += copied;
		retval += copied;
		if (copied != nr && iov_iter_count(to)) {
			if (!retval)
				retval = -EFAULT;
			break;
		}
		index += offset >> huge_page_shift(h);
		offset &= ~huge_page_mask(h);
	}
	iocb->ki_pos = ((loff_t)index << huge_page_shift(h)) + offset;
	return retval;
}
static int hugetlbfs_write_begin(struct file *file,
			struct address_space *mapping,
			loff_t pos, unsigned len, unsigned flags,
			struct page **pagep, void **fsdata)
{
	return -EINVAL;
}

static int hugetlbfs_write_end(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned copied,
			struct page *page, void *fsdata)
{
	BUG();
	return -EINVAL;
}
static void remove_huge_page(struct page *page)
{
	ClearPageDirty(page);
	ClearPageUptodate(page);
	delete_from_page_cache(page);
}
static void
hugetlb_vmdelete_list(struct rb_root_cached *root, pgoff_t start, pgoff_t end)
{
	struct vm_area_struct *vma;

	/*
	 * end == 0 indicates that the entire range after
	 * start should be unmapped.
	 */
	vma_interval_tree_foreach(vma, root, start, end ? end : ULONG_MAX) {
		unsigned long v_offset;
		unsigned long v_end;

		/*
		 * Can the expression below overflow on 32-bit arches?
		 * No, because the interval tree returns us only those vmas
		 * which overlap the truncated area starting at pgoff,
		 * and no vma on a 32-bit arch can span beyond 4GB.
		 */
		if (vma->vm_pgoff < start)
			v_offset = (start - vma->vm_pgoff) << PAGE_SHIFT;
		else
			v_offset = 0;

		if (!end)
			v_end = vma->vm_end;
		else {
			v_end = ((end - vma->vm_pgoff) << PAGE_SHIFT)
							+ vma->vm_start;
			if (v_end > vma->vm_end)
				v_end = vma->vm_end;
		}

		unmap_hugepage_range(vma, vma->vm_start + v_offset, v_end,
									NULL);
	}
}
/*
 * remove_inode_hugepages handles two distinct cases: truncation and hole
 * punch.  There are subtle differences in operation for each case.
 *
 * truncation is indicated by end of range being LLONG_MAX
 *	In this case, we first scan the range and release found pages.
 *	After releasing pages, hugetlb_unreserve_pages cleans up region/reserv
 *	maps and global counts.  Page faults cannot race with truncation
 *	in this routine.  hugetlb_no_page() prevents page faults in the
 *	truncated range.  It checks i_size before allocation, and again after
 *	with the page table lock for the page held.  The same lock must be
 *	acquired to unmap a page.
 * hole punch is indicated if end is not LLONG_MAX
 *	In the hole punch case we scan the range and release found pages.
 *	Only when releasing a page is the associated region/reserv map
 *	deleted.  The region/reserv map for ranges without associated
 *	pages are not modified.  Page faults can race with hole punch.
 *	This is indicated if we find a mapped page.
 * Note: If the passed end of range value is beyond the end of file, but
 * not LLONG_MAX, this routine still performs a hole punch operation.
 */
static void remove_inode_hugepages(struct inode *inode, loff_t lstart,
				   loff_t lend)
{
	struct hstate *h = hstate_inode(inode);
	struct address_space *mapping = &inode->i_data;
	const pgoff_t start = lstart >> huge_page_shift(h);
	const pgoff_t end = lend >> huge_page_shift(h);
	struct vm_area_struct pseudo_vma;
	struct pagevec pvec;
	pgoff_t next, index;
	int i, freed = 0;
	bool truncate_op = (lend == LLONG_MAX);

	memset(&pseudo_vma, 0, sizeof(struct vm_area_struct));
	pseudo_vma.vm_flags = (VM_HUGETLB | VM_MAYSHARE | VM_SHARED);
	pagevec_init(&pvec, 0);
	next = start;
	while (next < end) {
		/*
		 * When no more pages are found, we are done.
		 */
		if (!pagevec_lookup_range(&pvec, mapping, &next, end - 1))
			break;

		for (i = 0; i < pagevec_count(&pvec); ++i) {
			struct page *page = pvec.pages[i];
			u32 hash;

			index = page->index;
			hash = hugetlb_fault_mutex_hash(h, current->mm,
							&pseudo_vma,
							mapping, index, 0);
			mutex_lock(&hugetlb_fault_mutex_table[hash]);

			/*
			 * If page is mapped, it was faulted in after being
			 * unmapped in caller.  Unmap (again) now after taking
			 * the fault mutex.  The mutex will prevent faults
			 * until we finish removing the page.
			 *
			 * This race can only happen in the hole punch case.
			 * Getting here in a truncate operation is a bug.
			 */
			if (unlikely(page_mapped(page))) {
				BUG_ON(truncate_op);

				i_mmap_lock_write(mapping);
				hugetlb_vmdelete_list(&mapping->i_mmap,
					index * pages_per_huge_page(h),
					(index + 1) * pages_per_huge_page(h));
				i_mmap_unlock_write(mapping);
			}

			lock_page(page);
			/*
			 * We must free the huge page and remove from page
			 * cache (remove_huge_page) BEFORE removing the
			 * region/reserve map (hugetlb_unreserve_pages).  In
			 * rare out of memory conditions, removal of the
			 * region/reserve map could fail.  Correspondingly,
			 * the subpool and global reserve usage count can need
			 * to be adjusted.
			 */
			VM_BUG_ON(PagePrivate(page));
			remove_huge_page(page);
			freed++;
			if (!truncate_op) {
				if (unlikely(hugetlb_unreserve_pages(inode,
							index, index + 1, 1)))
					hugetlb_fix_reserve_counts(inode);
			}

			unlock_page(page);
			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
		}
		huge_pagevec_release(&pvec);
		cond_resched();
	}

	if (truncate_op)
		(void)hugetlb_unreserve_pages(inode, start, LONG_MAX, freed);
}
static void hugetlbfs_evict_inode(struct inode *inode)
{
	struct resv_map *resv_map;

	remove_inode_hugepages(inode, 0, LLONG_MAX);
	resv_map = (struct resv_map *)inode->i_mapping->private_data;
	/* root inode doesn't have the resv_map, so we should check it */
	if (resv_map)
		resv_map_release(&resv_map->refs);
	clear_inode(inode);
}
static int hugetlb_vmtruncate(struct inode *inode, loff_t offset)
{
	pgoff_t pgoff;
	struct address_space *mapping = inode->i_mapping;
	struct hstate *h = hstate_inode(inode);

	BUG_ON(offset & ~huge_page_mask(h));
	pgoff = offset >> PAGE_SHIFT;

	i_size_write(inode, offset);
	i_mmap_lock_write(mapping);
	if (!RB_EMPTY_ROOT(&mapping->i_mmap.rb_root))
		hugetlb_vmdelete_list(&mapping->i_mmap, pgoff, 0);
	i_mmap_unlock_write(mapping);
	remove_inode_hugepages(inode, offset, LLONG_MAX);
	return 0;
}
static long hugetlbfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
{
	struct hstate *h = hstate_inode(inode);
	loff_t hpage_size = huge_page_size(h);
	loff_t hole_start, hole_end;

	/*
	 * For hole punch, round up the beginning offset of the hole and
	 * round down the end.
	 */
	hole_start = round_up(offset, hpage_size);
	hole_end = round_down(offset + len, hpage_size);

	if (hole_end > hole_start) {
		struct address_space *mapping = inode->i_mapping;

		inode_lock(inode);
		i_mmap_lock_write(mapping);
		if (!RB_EMPTY_ROOT(&mapping->i_mmap.rb_root))
			hugetlb_vmdelete_list(&mapping->i_mmap,
						hole_start >> PAGE_SHIFT,
						hole_end >> PAGE_SHIFT);
		i_mmap_unlock_write(mapping);
		remove_inode_hugepages(inode, hole_start, hole_end);
		inode_unlock(inode);
	}

	return 0;
}
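
/*
 * Illustrative userspace sketch (assumed fd on a hugetlbfs file with 2MB
 * pages; not part of the original source).  Offsets that are not hugepage
 * aligned are rounded inward, so this frees exactly one page:
 *
 *	fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
 *		  2 * 1024 * 1024, 2 * 1024 * 1024);
 */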
static long hugetlbfs_fallocate(struct file *file, int mode, loff_t offset,
				loff_t len)
{
	struct inode *inode = file_inode(file);
	struct address_space *mapping = inode->i_mapping;
	struct hstate *h = hstate_inode(inode);
	struct vm_area_struct pseudo_vma;
	struct mm_struct *mm = current->mm;
	loff_t hpage_size = huge_page_size(h);
	unsigned long hpage_shift = huge_page_shift(h);
	pgoff_t start, index, end;
	int error;
	u32 hash;

	if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
		return -EOPNOTSUPP;

	if (mode & FALLOC_FL_PUNCH_HOLE)
		return hugetlbfs_punch_hole(inode, offset, len);

	/*
	 * Default preallocate case.
	 * For this range, start is rounded down and end is rounded up
	 * as well as being converted to page offsets.
	 */
	start = offset >> hpage_shift;
	end = (offset + len + hpage_size - 1) >> hpage_shift;

	inode_lock(inode);

	/* We need to check rlimit even when FALLOC_FL_KEEP_SIZE */
	error = inode_newsize_ok(inode, offset + len);
	if (error)
		goto out;

	/*
	 * Initialize a pseudo vma as this is required by the huge page
	 * allocation routines.  If NUMA is configured, use page index
	 * as input to create an allocation policy.
	 */
	memset(&pseudo_vma, 0, sizeof(struct vm_area_struct));
	pseudo_vma.vm_flags = (VM_HUGETLB | VM_MAYSHARE | VM_SHARED);
	pseudo_vma.vm_file = file;

	for (index = start; index < end; index++) {
		/*
		 * This is supposed to be the vaddr where the page is being
		 * faulted in, but we have no vaddr here.
		 */
		struct page *page;
		unsigned long addr;
		int avoid_reserve = 0;

		cond_resched();

		/*
		 * fallocate(2) manpage permits EINTR; we may have been
		 * interrupted because we are using up too much memory.
		 */
		if (signal_pending(current)) {
			error = -EINTR;
			break;
		}

		/* Set numa allocation policy based on index */
		hugetlb_set_vma_policy(&pseudo_vma, inode, index);

		/* addr is the offset within the file (zero based) */
		addr = index * hpage_size;

		/* mutex taken here, fault path and hole punch */
		hash = hugetlb_fault_mutex_hash(h, mm, &pseudo_vma, mapping,
						index, addr);
		mutex_lock(&hugetlb_fault_mutex_table[hash]);

		/* See if already present in mapping to avoid alloc/free */
		page = find_get_page(mapping, index);
		if (page) {
			put_page(page);
			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
			hugetlb_drop_vma_policy(&pseudo_vma);
			continue;
		}

		/* Allocate page and add to page cache */
		page = alloc_huge_page(&pseudo_vma, addr, avoid_reserve);
		hugetlb_drop_vma_policy(&pseudo_vma);
		if (IS_ERR(page)) {
			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
			error = PTR_ERR(page);
			goto out;
		}
		clear_huge_page(page, addr, pages_per_huge_page(h));
		__SetPageUptodate(page);
		error = huge_add_to_page_cache(page, mapping, index);
		if (unlikely(error)) {
			put_page(page);
			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
			goto out;
		}

		mutex_unlock(&hugetlb_fault_mutex_table[hash]);

		/*
		 * put_page due to reference from alloc_huge_page()
		 * unlock_page because locked by add_to_page_cache()
		 */
		unlock_page(page);
		put_page(page);
	}

	if (!(mode & FALLOC_FL_KEEP_SIZE) && offset + len > inode->i_size)
		i_size_write(inode, offset + len);
	inode->i_ctime = current_time(inode);
out:
	inode_unlock(inode);
	return error;
}
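
/*
 * Illustrative userspace sketch (assumed 2MB-page hugetlbfs fd; not part
 * of the original source).  Preallocating 8MB up front makes later faults
 * cheap and turns pool exhaustion into an ENOSPC here rather than a
 * SIGBUS at fault time:
 *
 *	if (fallocate(fd, 0, 0, 8 * 1024 * 1024) != 0)
 *		perror("fallocate");
 */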
static int hugetlbfs_setattr(struct dentry *dentry, struct iattr *attr)
{
	struct inode *inode = d_inode(dentry);
	struct hstate *h = hstate_inode(inode);
	int error;
	unsigned int ia_valid = attr->ia_valid;

	error = setattr_prepare(dentry, attr);
	if (error)
		return error;

	if (ia_valid & ATTR_SIZE) {
		if (attr->ia_size & ~huge_page_mask(h))
			return -EINVAL;
		error = hugetlb_vmtruncate(inode, attr->ia_size);
		if (error)
			return error;
	}

	setattr_copy(inode, attr);
	mark_inode_dirty(inode);
	return 0;
}
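
/*
 * Illustrative note (not part of the original source): because of the
 * huge_page_mask() check above, ftruncate() on a hugetlbfs file only
 * accepts hugepage-aligned sizes, e.g. with 2MB pages:
 *
 *	ftruncate(fd, 4 * 1024 * 1024);		// OK
 *	ftruncate(fd, 1 * 1024 * 1024);		// fails with EINVAL
 */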
static struct inode *hugetlbfs_get_root(struct super_block *sb,
					struct hugetlbfs_config *config)
{
	struct inode *inode;

	inode = new_inode(sb);
	if (inode) {
		inode->i_ino = get_next_ino();
		inode->i_mode = S_IFDIR | config->mode;
		inode->i_uid = config->uid;
		inode->i_gid = config->gid;
		inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode);
		inode->i_op = &hugetlbfs_dir_inode_operations;
		inode->i_fop = &simple_dir_operations;
		/* directory inodes start off with i_nlink == 2 (for "." entry) */
		inc_nlink(inode);
		lockdep_annotate_inode_mutex_key(inode);
	}
	return inode;
}
/*
 * Hugetlbfs is not reclaimable; therefore its i_mmap_rwsem will never
 * be taken from reclaim -- unlike regular filesystems. This needs an
 * annotation because huge_pmd_share() does an allocation under hugetlb's
 * i_mmap_rwsem.
 */
static struct lock_class_key hugetlbfs_i_mmap_rwsem_key;
static struct inode *hugetlbfs_get_inode(struct super_block *sb,
					struct inode *dir,
					umode_t mode, dev_t dev)
{
	struct inode *inode;
	struct resv_map *resv_map;

	resv_map = resv_map_alloc();
	if (!resv_map)
		return NULL;

	inode = new_inode(sb);
	if (inode) {
		inode->i_ino = get_next_ino();
		inode_init_owner(inode, dir, mode);
		lockdep_set_class(&inode->i_mapping->i_mmap_rwsem,
				&hugetlbfs_i_mmap_rwsem_key);
		inode->i_mapping->a_ops = &hugetlbfs_aops;
		inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode);
		inode->i_mapping->private_data = resv_map;
		switch (mode & S_IFMT) {
		default:
			init_special_inode(inode, mode, dev);
			break;
		case S_IFREG:
			inode->i_op = &hugetlbfs_inode_operations;
			inode->i_fop = &hugetlbfs_file_operations;
			break;
		case S_IFDIR:
			inode->i_op = &hugetlbfs_dir_inode_operations;
			inode->i_fop = &simple_dir_operations;

			/* directory inodes start off with i_nlink == 2 (for "." entry) */
			inc_nlink(inode);
			break;
		case S_IFLNK:
			inode->i_op = &page_symlink_inode_operations;
			inode_nohighmem(inode);
			break;
		}
		lockdep_annotate_inode_mutex_key(inode);
	} else
		kref_put(&resv_map->refs, resv_map_release);

	return inode;
}
/*
 * File creation. Allocate an inode, and we're done.
 */
static int hugetlbfs_mknod(struct inode *dir,
			struct dentry *dentry, umode_t mode, dev_t dev)
{
	struct inode *inode;
	int error = -ENOSPC;

	inode = hugetlbfs_get_inode(dir->i_sb, dir, mode, dev);
	if (inode) {
		dir->i_ctime = dir->i_mtime = current_time(dir);
		d_instantiate(dentry, inode);
		dget(dentry);	/* Extra count - pin the dentry in core */
		error = 0;
	}
	return error;
}
static int hugetlbfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
{
	int retval = hugetlbfs_mknod(dir, dentry, mode | S_IFDIR, 0);
	if (!retval)
		inc_nlink(dir);
	return retval;
}

static int hugetlbfs_create(struct inode *dir, struct dentry *dentry, umode_t mode, bool excl)
{
	return hugetlbfs_mknod(dir, dentry, mode | S_IFREG, 0);
}
static int hugetlbfs_symlink(struct inode *dir,
			struct dentry *dentry, const char *symname)
{
	struct inode *inode;
	int error = -ENOSPC;

	inode = hugetlbfs_get_inode(dir->i_sb, dir, S_IFLNK|S_IRWXUGO, 0);
	if (inode) {
		int l = strlen(symname)+1;
		error = page_symlink(inode, symname, l);
		if (!error) {
			d_instantiate(dentry, inode);
			dget(dentry);
		} else
			iput(inode);
	}
	dir->i_ctime = dir->i_mtime = current_time(dir);

	return error;
}
/*
 * mark the head page dirty
 */
static int hugetlbfs_set_page_dirty(struct page *page)
{
	struct page *head = compound_head(page);

	SetPageDirty(head);
	return 0;
}
static int hugetlbfs_migrate_page(struct address_space *mapping,
				struct page *newpage, struct page *page,
				enum migrate_mode mode)
{
	int rc;

	rc = migrate_huge_page_move_mapping(mapping, newpage, page);
	if (rc != MIGRATEPAGE_SUCCESS)
		return rc;
	if (mode != MIGRATE_SYNC_NO_COPY)
		migrate_page_copy(newpage, page);
	else
		migrate_page_states(newpage, page);

	return MIGRATEPAGE_SUCCESS;
}
static int hugetlbfs_error_remove_page(struct address_space *mapping,
				struct page *page)
{
	struct inode *inode = mapping->host;
	pgoff_t index = page->index;

	remove_huge_page(page);
	if (unlikely(hugetlb_unreserve_pages(inode, index, index + 1, 1)))
		hugetlb_fix_reserve_counts(inode);

	return 0;
}
/*
 * Display the mount options in /proc/mounts.
 */
static int hugetlbfs_show_options(struct seq_file *m, struct dentry *root)
{
	struct hugetlbfs_sb_info *sbinfo = HUGETLBFS_SB(root->d_sb);
	struct hugepage_subpool *spool = sbinfo->spool;
	unsigned long hpage_size = huge_page_size(sbinfo->hstate);
	unsigned hpage_shift = huge_page_shift(sbinfo->hstate);
	char mod;

	if (!uid_eq(sbinfo->uid, GLOBAL_ROOT_UID))
		seq_printf(m, ",uid=%u",
			   from_kuid_munged(&init_user_ns, sbinfo->uid));
	if (!gid_eq(sbinfo->gid, GLOBAL_ROOT_GID))
		seq_printf(m, ",gid=%u",
			   from_kgid_munged(&init_user_ns, sbinfo->gid));
	if (sbinfo->mode != 0755)
		seq_printf(m, ",mode=%o", sbinfo->mode);
	if (sbinfo->max_inodes != -1)
		seq_printf(m, ",nr_inodes=%lu", sbinfo->max_inodes);

	hpage_size /= 1024;
	mod = 'K';
	if (hpage_size >= 1024) {
		hpage_size /= 1024;
		mod = 'M';
	}
	seq_printf(m, ",pagesize=%lu%c", hpage_size, mod);
	if (spool) {
		if (spool->max_hpages != -1)
			seq_printf(m, ",size=%llu",
				   (unsigned long long)spool->max_hpages << hpage_shift);
		if (spool->min_hpages != -1)
			seq_printf(m, ",min_size=%llu",
				   (unsigned long long)spool->min_hpages << hpage_shift);
	}
	return 0;
}
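
/*
 * Illustrative /proc/mounts line (assumed 2MB-page mount with a size
 * limit; not part of the original source):
 *
 *	none /mnt/huge hugetlbfs rw,relatime,pagesize=2M,size=1073741824 0 0
 */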
static int hugetlbfs_statfs(struct dentry *dentry, struct kstatfs *buf)
{
	struct hugetlbfs_sb_info *sbinfo = HUGETLBFS_SB(dentry->d_sb);
	struct hstate *h = hstate_inode(d_inode(dentry));

	buf->f_type = HUGETLBFS_MAGIC;
	buf->f_bsize = huge_page_size(h);
	if (sbinfo) {
		spin_lock(&sbinfo->stat_lock);
		/* If no limits set, just report 0 for max/free/used
		 * blocks, like simple_statfs() */
		if (sbinfo->spool) {
			long free_pages;

			spin_lock(&sbinfo->spool->lock);
			buf->f_blocks = sbinfo->spool->max_hpages;
			free_pages = sbinfo->spool->max_hpages
				- sbinfo->spool->used_hpages;
			buf->f_bavail = buf->f_bfree = free_pages;
			spin_unlock(&sbinfo->spool->lock);
			buf->f_files = sbinfo->max_inodes;
			buf->f_ffree = sbinfo->free_inodes;
		}
		spin_unlock(&sbinfo->stat_lock);
	}
	buf->f_namelen = NAME_MAX;
	return 0;
}
static void hugetlbfs_put_super(struct super_block *sb)
{
	struct hugetlbfs_sb_info *sbi = HUGETLBFS_SB(sb);

	if (sbi) {
		sb->s_fs_info = NULL;

		if (sbi->spool)
			hugepage_put_subpool(sbi->spool);

		kfree(sbi);
	}
}
static inline int hugetlbfs_dec_free_inodes(struct hugetlbfs_sb_info *sbinfo)
{
	if (sbinfo->free_inodes >= 0) {
		spin_lock(&sbinfo->stat_lock);
		if (unlikely(!sbinfo->free_inodes)) {
			spin_unlock(&sbinfo->stat_lock);
			return 0;
		}
		sbinfo->free_inodes--;
		spin_unlock(&sbinfo->stat_lock);
	}

	return 1;
}
static void hugetlbfs_inc_free_inodes(struct hugetlbfs_sb_info *sbinfo)
{
	if (sbinfo->free_inodes >= 0) {
		spin_lock(&sbinfo->stat_lock);
		sbinfo->free_inodes++;
		spin_unlock(&sbinfo->stat_lock);
	}
}
static struct kmem_cache *hugetlbfs_inode_cachep;
static struct inode *hugetlbfs_alloc_inode(struct super_block *sb)
{
	struct hugetlbfs_sb_info *sbinfo = HUGETLBFS_SB(sb);
	struct hugetlbfs_inode_info *p;

	if (unlikely(!hugetlbfs_dec_free_inodes(sbinfo)))
		return NULL;
	p = kmem_cache_alloc(hugetlbfs_inode_cachep, GFP_KERNEL);
	if (unlikely(!p)) {
		hugetlbfs_inc_free_inodes(sbinfo);
		return NULL;
	}

	/*
	 * Any time after allocation, hugetlbfs_destroy_inode can be called
	 * for the inode.  mpol_free_shared_policy is unconditionally called
	 * as part of hugetlbfs_destroy_inode.  So, initialize policy here
	 * in case of a quick call to destroy.
	 *
	 * Note that the policy is initialized even if we are creating a
	 * private inode.  This simplifies hugetlbfs_destroy_inode.
	 */
	mpol_shared_policy_init(&p->policy, NULL);

	return &p->vfs_inode;
}
static void hugetlbfs_i_callback(struct rcu_head *head)
{
	struct inode *inode = container_of(head, struct inode, i_rcu);
	kmem_cache_free(hugetlbfs_inode_cachep, HUGETLBFS_I(inode));
}

static void hugetlbfs_destroy_inode(struct inode *inode)
{
	hugetlbfs_inc_free_inodes(HUGETLBFS_SB(inode->i_sb));
	mpol_free_shared_policy(&HUGETLBFS_I(inode)->policy);
	call_rcu(&inode->i_rcu, hugetlbfs_i_callback);
}
static const struct address_space_operations hugetlbfs_aops = {
	.write_begin		= hugetlbfs_write_begin,
	.write_end		= hugetlbfs_write_end,
	.set_page_dirty		= hugetlbfs_set_page_dirty,
	.migratepage		= hugetlbfs_migrate_page,
	.error_remove_page	= hugetlbfs_error_remove_page,
};
static void init_once(void *foo)
{
	struct hugetlbfs_inode_info *ei = (struct hugetlbfs_inode_info *)foo;

	inode_init_once(&ei->vfs_inode);
}
const struct file_operations hugetlbfs_file_operations = {
	.read_iter		= hugetlbfs_read_iter,
	.mmap			= hugetlbfs_file_mmap,
	.fsync			= noop_fsync,
	.get_unmapped_area	= hugetlb_get_unmapped_area,
	.llseek			= default_llseek,
	.fallocate		= hugetlbfs_fallocate,
};
static const struct inode_operations hugetlbfs_dir_inode_operations = {
	.create		= hugetlbfs_create,
	.lookup		= simple_lookup,
	.link		= simple_link,
	.unlink		= simple_unlink,
	.symlink	= hugetlbfs_symlink,
	.mkdir		= hugetlbfs_mkdir,
	.rmdir		= simple_rmdir,
	.mknod		= hugetlbfs_mknod,
	.rename		= simple_rename,
	.setattr	= hugetlbfs_setattr,
};

static const struct inode_operations hugetlbfs_inode_operations = {
	.setattr	= hugetlbfs_setattr,
};
static const struct super_operations hugetlbfs_ops = {
	.alloc_inode	= hugetlbfs_alloc_inode,
	.destroy_inode	= hugetlbfs_destroy_inode,
	.evict_inode	= hugetlbfs_evict_inode,
	.statfs		= hugetlbfs_statfs,
	.put_super	= hugetlbfs_put_super,
	.show_options	= hugetlbfs_show_options,
};
enum hugetlbfs_size_type { NO_SIZE, SIZE_STD, SIZE_PERCENT };
/*
 * Convert size option passed from command line to number of huge pages
 * in the pool specified by hstate.  Size option could be in bytes
 * (val_type == SIZE_STD) or percentage of the pool (val_type == SIZE_PERCENT).
 */
static long
hugetlbfs_size_to_hpages(struct hstate *h, unsigned long long size_opt,
			 enum hugetlbfs_size_type val_type)
{
	if (val_type == NO_SIZE)
		return -1;

	if (val_type == SIZE_PERCENT) {
		size_opt <<= huge_page_shift(h);
		size_opt *= h->max_huge_pages;
		do_div(size_opt, 100);
	}

	size_opt >>= huge_page_shift(h);
	return size_opt;
}
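
/*
 * Worked example (illustrative, not part of the original source): with
 * 2MB pages (huge_page_shift == 21) and a pool of 512 huge pages,
 * "size=50%" arrives here as size_opt == 50 and SIZE_PERCENT, giving
 * ((50 << 21) * 512 / 100) >> 21 == 256 huge pages.  A byte count such
 * as "size=1G" is simply 1073741824 >> 21 == 512.
 */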
static int
hugetlbfs_parse_options(char *options, struct hugetlbfs_config *pconfig)
{
	char *p, *rest;
	substring_t args[MAX_OPT_ARGS];
	int option;
	unsigned long long max_size_opt = 0, min_size_opt = 0;
	enum hugetlbfs_size_type max_val_type = NO_SIZE, min_val_type = NO_SIZE;

	if (!options)
		return 0;

	while ((p = strsep(&options, ",")) != NULL) {
		int token;
		if (!*p)
			continue;

		token = match_token(p, tokens, args);
		switch (token) {
		case Opt_uid:
			if (match_int(&args[0], &option))
				goto bad_val;
			pconfig->uid = make_kuid(current_user_ns(), option);
			if (!uid_valid(pconfig->uid))
				goto bad_val;
			break;

		case Opt_gid:
			if (match_int(&args[0], &option))
				goto bad_val;
			pconfig->gid = make_kgid(current_user_ns(), option);
			if (!gid_valid(pconfig->gid))
				goto bad_val;
			break;

		case Opt_mode:
			if (match_octal(&args[0], &option))
				goto bad_val;
			pconfig->mode = option & 01777U;
			break;

		case Opt_size: {
			/* memparse() will accept a K/M/G without a digit */
			if (!isdigit(*args[0].from))
				goto bad_val;
			max_size_opt = memparse(args[0].from, &rest);
			max_val_type = SIZE_STD;
			if (*rest == '%')
				max_val_type = SIZE_PERCENT;
			break;
		}

		case Opt_nr_inodes:
			/* memparse() will accept a K/M/G without a digit */
			if (!isdigit(*args[0].from))
				goto bad_val;
			pconfig->nr_inodes = memparse(args[0].from, &rest);
			break;

		case Opt_pagesize: {
			unsigned long ps;
			ps = memparse(args[0].from, &rest);
			pconfig->hstate = size_to_hstate(ps);
			if (!pconfig->hstate) {
				pr_err("Unsupported page size %lu MB\n",
					ps >> 20);
				return -EINVAL;
			}
			break;
		}

		case Opt_min_size: {
			/* memparse() will accept a K/M/G without a digit */
			if (!isdigit(*args[0].from))
				goto bad_val;
			min_size_opt = memparse(args[0].from, &rest);
			min_val_type = SIZE_STD;
			if (*rest == '%')
				min_val_type = SIZE_PERCENT;
			break;
		}

		default:
			pr_err("Bad mount option: \"%s\"\n", p);
			return -EINVAL;
		}
	}

	/*
	 * Use huge page pool size (in hstate) to convert the size
	 * options to number of huge pages.  If NO_SIZE, -1 is returned.
	 */
	pconfig->max_hpages = hugetlbfs_size_to_hpages(pconfig->hstate,
						max_size_opt, max_val_type);
	pconfig->min_hpages = hugetlbfs_size_to_hpages(pconfig->hstate,
						min_size_opt, min_val_type);

	/*
	 * If max_size was specified, then min_size must be smaller
	 */
	if (max_val_type > NO_SIZE &&
	    pconfig->min_hpages > pconfig->max_hpages) {
		pr_err("minimum size can not be greater than maximum size\n");
		return -EINVAL;
	}

	return 0;

bad_val:
	pr_err("Bad value '%s' for mount option '%s'\n", args[0].from, p);
	return -EINVAL;
}
static int
hugetlbfs_fill_super(struct super_block *sb, void *data, int silent)
{
	int ret;
	struct hugetlbfs_config config;
	struct hugetlbfs_sb_info *sbinfo;

	config.max_hpages = -1; /* No limit on size by default */
	config.nr_inodes = -1; /* No limit on number of inodes by default */
	config.uid = current_fsuid();
	config.gid = current_fsgid();
	config.mode = 0755;
	config.hstate = &default_hstate;
	config.min_hpages = -1; /* No default minimum size */
	ret = hugetlbfs_parse_options(data, &config);
	if (ret)
		return ret;

	sbinfo = kmalloc(sizeof(struct hugetlbfs_sb_info), GFP_KERNEL);
	if (!sbinfo)
		return -ENOMEM;
	sb->s_fs_info = sbinfo;
	sbinfo->hstate = config.hstate;
	spin_lock_init(&sbinfo->stat_lock);
	sbinfo->max_inodes = config.nr_inodes;
	sbinfo->free_inodes = config.nr_inodes;
	sbinfo->spool = NULL;
	sbinfo->uid = config.uid;
	sbinfo->gid = config.gid;
	sbinfo->mode = config.mode;

	/*
	 * Allocate and initialize subpool if maximum or minimum size is
	 * specified.  Any needed reservations (for minimum size) are taken
	 * when the subpool is created.
	 */
	if (config.max_hpages != -1 || config.min_hpages != -1) {
		sbinfo->spool = hugepage_new_subpool(config.hstate,
							config.max_hpages,
							config.min_hpages);
		if (!sbinfo->spool)
			goto out_free;
	}
	sb->s_maxbytes = MAX_LFS_FILESIZE;
	sb->s_blocksize = huge_page_size(config.hstate);
	sb->s_blocksize_bits = huge_page_shift(config.hstate);
	sb->s_magic = HUGETLBFS_MAGIC;
	sb->s_op = &hugetlbfs_ops;
	sb->s_time_gran = 1;
	sb->s_root = d_make_root(hugetlbfs_get_root(sb, &config));
	if (!sb->s_root)
		goto out_free;
	return 0;
out_free:
	kfree(sbinfo->spool);
	kfree(sbinfo);
	return -ENOMEM;
}
static struct dentry *hugetlbfs_mount(struct file_system_type *fs_type,
	int flags, const char *dev_name, void *data)
{
	return mount_nodev(fs_type, flags, data, hugetlbfs_fill_super);
}

static struct file_system_type hugetlbfs_fs_type = {
	.name		= "hugetlbfs",
	.mount		= hugetlbfs_mount,
	.kill_sb	= kill_litter_super,
};
static struct vfsmount *hugetlbfs_vfsmount[HUGE_MAX_HSTATE];
static int can_do_hugetlb_shm(void)
{
	kgid_t shm_group;
	shm_group = make_kgid(&init_user_ns, sysctl_hugetlb_shm_group);
	return capable(CAP_IPC_LOCK) || in_group_p(shm_group);
}
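
/*
 * Illustrative note (not part of the original source): the group checked
 * above is configured via the vm.hugetlb_shm_group sysctl, e.g.
 *
 *	sysctl -w vm.hugetlb_shm_group=1001
 *
 * after which members of gid 1001 (an assumed value) may use SHM_HUGETLB
 * without CAP_IPC_LOCK.
 */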
static int get_hstate_idx(int page_size_log)
{
	struct hstate *h = hstate_sizelog(page_size_log);

	if (!h)
		return -1;
	return h - hstates;
}
static const struct dentry_operations anon_ops = {
	.d_dname = simple_dname
};
/*
 * Note that size should be aligned to the proper hugepage size by the
 * caller; otherwise hugetlb_reserve_pages reserves one fewer hugepage
 * than intended.
 */
struct file *hugetlb_file_setup(const char *name, size_t size,
				vm_flags_t acctflag, struct user_struct **user,
				int creat_flags, int page_size_log)
{
	struct file *file = ERR_PTR(-ENOMEM);
	struct inode *inode;
	struct path path;
	struct super_block *sb;
	struct qstr quick_string;
	int hstate_idx;

	hstate_idx = get_hstate_idx(page_size_log);
	if (hstate_idx < 0)
		return ERR_PTR(-ENODEV);

	*user = NULL;
	if (!hugetlbfs_vfsmount[hstate_idx])
		return ERR_PTR(-ENOENT);

	if (creat_flags == HUGETLB_SHMFS_INODE && !can_do_hugetlb_shm()) {
		*user = current_user();
		if (user_shm_lock(size, *user)) {
			task_lock(current);
			pr_warn_once("%s (%d): Using mlock ulimits for SHM_HUGETLB is deprecated\n",
				current->comm, current->pid);
			task_unlock(current);
		} else {
			*user = NULL;
			return ERR_PTR(-EPERM);
		}
	}

	sb = hugetlbfs_vfsmount[hstate_idx]->mnt_sb;
	quick_string.name = name;
	quick_string.len = strlen(quick_string.name);
	quick_string.hash = 0;
	path.dentry = d_alloc_pseudo(sb, &quick_string);
	if (!path.dentry)
		goto out_shm_unlock;

	d_set_d_op(path.dentry, &anon_ops);
	path.mnt = mntget(hugetlbfs_vfsmount[hstate_idx]);
	file = ERR_PTR(-ENOSPC);
	inode = hugetlbfs_get_inode(sb, NULL, S_IFREG | S_IRWXUGO, 0);
	if (!inode)
		goto out_dentry;
	if (creat_flags == HUGETLB_SHMFS_INODE)
		inode->i_flags |= S_PRIVATE;

	file = ERR_PTR(-ENOMEM);
	if (hugetlb_reserve_pages(inode, 0,
			size >> huge_page_shift(hstate_inode(inode)), NULL,
			acctflag))
		goto out_inode;

	d_instantiate(path.dentry, inode);
	inode->i_size = size;
	clear_nlink(inode);

	file = alloc_file(&path, FMODE_WRITE | FMODE_READ,
			&hugetlbfs_file_operations);
	if (IS_ERR(file))
		goto out_dentry; /* inode is already attached */

	return file;

out_inode:
	iput(inode);
out_dentry:
	path_put(&path);
out_shm_unlock:
	if (*user) {
		user_shm_unlock(size, *user);
		*user = NULL;
	}
	return file;
}
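
/*
 * Illustrative callers (not part of the original source): the kernel uses
 * this helper for SysV shared memory and anonymous huge mappings, e.g.
 *
 *	shmget(key, 8 * 1024 * 1024, IPC_CREAT | SHM_HUGETLB | 0600);
 *	mmap(NULL, len, prot, MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0);
 *
 * both of which end up backed by one of the internal mounts set up below.
 */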
static int __init init_hugetlbfs_fs(void)
{
	struct hstate *h;
	int error;
	int i;

	if (!hugepages_supported()) {
		pr_info("disabling because there are no supported hugepage sizes\n");
		return -ENOTSUPP;
	}

	error = -ENOMEM;
	hugetlbfs_inode_cachep = kmem_cache_create("hugetlbfs_inode_cache",
					sizeof(struct hugetlbfs_inode_info),
					0, SLAB_ACCOUNT, init_once);
	if (hugetlbfs_inode_cachep == NULL)
		goto out2;

	error = register_filesystem(&hugetlbfs_fs_type);
	if (error)
		goto out;

	i = 0;
	for_each_hstate(h) {
		char buf[50];
		unsigned ps_kb = 1U << (h->order + PAGE_SHIFT - 10);

		snprintf(buf, sizeof(buf), "pagesize=%uK", ps_kb);
		hugetlbfs_vfsmount[i] = kern_mount_data(&hugetlbfs_fs_type,
							buf);

		if (IS_ERR(hugetlbfs_vfsmount[i])) {
			pr_err("Cannot mount internal hugetlbfs for page size %uK",
			       ps_kb);
			error = PTR_ERR(hugetlbfs_vfsmount[i]);
			hugetlbfs_vfsmount[i] = NULL;
		}
		i++;
	}
	/* Non default hstates are optional */
	if (!IS_ERR_OR_NULL(hugetlbfs_vfsmount[default_hstate_idx]))
		return 0;

 out:
	kmem_cache_destroy(hugetlbfs_inode_cachep);
 out2:
	return error;
}
fs_initcall(init_hugetlbfs_fs)