fs/hugetlbfs/inode.c
/*
 * hugetlbpage-backed filesystem.  Based on ramfs.
 *
 * Nadia Yvette Chambers, 2002
 *
 * Copyright (C) 2002 Linus Torvalds.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/thread_info.h>
#include <asm/current.h>
#include <linux/sched.h>		/* remove ASAP */
#include <linux/fs.h>
#include <linux/mount.h>
#include <linux/file.h>
#include <linux/kernel.h>
#include <linux/writeback.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/capability.h>
#include <linux/ctype.h>
#include <linux/backing-dev.h>
#include <linux/hugetlb.h>
#include <linux/pagevec.h>
#include <linux/parser.h>
#include <linux/mman.h>
#include <linux/slab.h>
#include <linux/dnotify.h>
#include <linux/statfs.h>
#include <linux/security.h>
#include <linux/magic.h>
#include <linux/migrate.h>
#include <linux/uio.h>

#include <asm/uaccess.h>

static const struct super_operations hugetlbfs_ops;
static const struct address_space_operations hugetlbfs_aops;
const struct file_operations hugetlbfs_file_operations;
static const struct inode_operations hugetlbfs_dir_inode_operations;
static const struct inode_operations hugetlbfs_inode_operations;

struct hugetlbfs_config {
	kuid_t		uid;
	kgid_t		gid;
	umode_t		mode;
	long		max_hpages;
	long		nr_inodes;
	struct hstate	*hstate;
	long		min_hpages;
};

struct hugetlbfs_inode_info {
	struct shared_policy policy;
	struct inode vfs_inode;
};

static inline struct hugetlbfs_inode_info *HUGETLBFS_I(struct inode *inode)
{
	return container_of(inode, struct hugetlbfs_inode_info, vfs_inode);
}

int sysctl_hugetlb_shm_group;

enum {
	Opt_size, Opt_nr_inodes,
	Opt_mode, Opt_uid, Opt_gid,
	Opt_pagesize, Opt_min_size,
	Opt_err,
};

static const match_table_t tokens = {
	{Opt_size,	"size=%s"},
	{Opt_nr_inodes,	"nr_inodes=%s"},
	{Opt_mode,	"mode=%o"},
	{Opt_uid,	"uid=%u"},
	{Opt_gid,	"gid=%u"},
	{Opt_pagesize,	"pagesize=%s"},
	{Opt_min_size,	"min_size=%s"},
	{Opt_err,	NULL},
};

static void huge_pagevec_release(struct pagevec *pvec)
{
	int i;

	for (i = 0; i < pagevec_count(pvec); ++i)
		put_page(pvec->pages[i]);

	pagevec_reinit(pvec);
}

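/*
 * mmap() a hugetlbfs file: check the page offset alignment, install the
 * hugetlb vm_ops, reserve huge pages for the mapping and, for writable
 * mappings, grow i_size to cover the mapped range.
 */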
static int hugetlbfs_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct inode *inode = file_inode(file);
	loff_t len, vma_len;
	int ret;
	struct hstate *h = hstate_file(file);

	/*
	 * vma address alignment (but not the pgoff alignment) has
	 * already been checked by prepare_hugepage_range.  If you add
	 * any error returns here, do so after setting VM_HUGETLB, so
	 * is_vm_hugetlb_page tests below unmap_region go the right
	 * way when do_mmap_pgoff unwinds (may be important on powerpc
	 * and ia64).
	 */
	vma->vm_flags |= VM_HUGETLB | VM_DONTEXPAND;
	vma->vm_ops = &hugetlb_vm_ops;

	if (vma->vm_pgoff & (~huge_page_mask(h) >> PAGE_SHIFT))
		return -EINVAL;

	vma_len = (loff_t)(vma->vm_end - vma->vm_start);

	mutex_lock(&inode->i_mutex);
	file_accessed(file);

	ret = -ENOMEM;
	len = vma_len + ((loff_t)vma->vm_pgoff << PAGE_SHIFT);

	if (hugetlb_reserve_pages(inode,
				vma->vm_pgoff >> huge_page_order(h),
				len >> huge_page_shift(h), vma,
				vma->vm_flags))
		goto out;

	ret = 0;
	if (vma->vm_flags & VM_WRITE && inode->i_size < len)
		inode->i_size = len;
out:
	mutex_unlock(&inode->i_mutex);

	return ret;
}

/*
 * Called under down_write(mmap_sem).
 */

#ifndef HAVE_ARCH_HUGETLB_UNMAPPED_AREA
static unsigned long
hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	struct hstate *h = hstate_file(file);
	struct vm_unmapped_area_info info;

	if (len & ~huge_page_mask(h))
		return -EINVAL;
	if (len > TASK_SIZE)
		return -ENOMEM;

	if (flags & MAP_FIXED) {
		if (prepare_hugepage_range(file, addr, len))
			return -EINVAL;
		return addr;
	}

	if (addr) {
		addr = ALIGN(addr, huge_page_size(h));
		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr &&
		    (!vma || addr + len <= vma->vm_start))
			return addr;
	}

	info.flags = 0;
	info.length = len;
	info.low_limit = TASK_UNMAPPED_BASE;
	info.high_limit = TASK_SIZE;
	info.align_mask = PAGE_MASK & ~huge_page_mask(h);
	info.align_offset = 0;
	return vm_unmapped_area(&info);
}
#endif

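/*
 * Copy data from a huge page into the iov_iter, working through the page
 * in PAGE_CACHE_SIZE chunks.  Returns the number of bytes copied.
 */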
static size_t
hugetlbfs_read_actor(struct page *page, unsigned long offset,
			struct iov_iter *to, unsigned long size)
{
	size_t copied = 0;
	int i, chunksize;

	/* Find which 4k chunk and offset within that chunk */
	i = offset >> PAGE_CACHE_SHIFT;
	offset = offset & ~PAGE_CACHE_MASK;

	while (size) {
		size_t n;
		chunksize = PAGE_CACHE_SIZE;
		if (offset)
			chunksize -= offset;
		if (chunksize > size)
			chunksize = size;
		n = copy_page_to_iter(&page[i], offset, chunksize, to);
		copied += n;
		if (n != chunksize)
			return copied;
		offset = 0;
		size -= chunksize;
		i++;
	}
	return copied;
}

/*
 * Support for read() - Find the page attached to f_mapping and copy out the
 * data.  It's *very* similar to do_generic_mapping_read(), but we can't use
 * that since it has PAGE_CACHE_SIZE assumptions.
 */
static ssize_t hugetlbfs_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	struct file *file = iocb->ki_filp;
	struct hstate *h = hstate_file(file);
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	unsigned long index = iocb->ki_pos >> huge_page_shift(h);
	unsigned long offset = iocb->ki_pos & ~huge_page_mask(h);
	unsigned long end_index;
	loff_t isize;
	ssize_t retval = 0;

	while (iov_iter_count(to)) {
		struct page *page;
		size_t nr, copied;

		/* nr is the maximum number of bytes to copy from this page */
		nr = huge_page_size(h);
		isize = i_size_read(inode);
		if (!isize)
			break;
		end_index = (isize - 1) >> huge_page_shift(h);
		if (index > end_index)
			break;
		if (index == end_index) {
			nr = ((isize - 1) & ~huge_page_mask(h)) + 1;
			if (nr <= offset)
				break;
		}
		nr = nr - offset;

		/* Find the page */
		page = find_lock_page(mapping, index);
		if (unlikely(page == NULL)) {
			/*
			 * We have a HOLE, zero out the user-buffer for the
			 * length of the hole or request.
			 */
			copied = iov_iter_zero(nr, to);
		} else {
			unlock_page(page);

			/*
			 * We have the page, copy it to user space buffer.
			 */
			copied = hugetlbfs_read_actor(page, offset, to, nr);
			page_cache_release(page);
		}
		offset += copied;
		retval += copied;
		if (copied != nr && iov_iter_count(to)) {
			if (!retval)
				retval = -EFAULT;
			break;
		}
		index += offset >> huge_page_shift(h);
		offset &= ~huge_page_mask(h);
	}
	iocb->ki_pos = ((loff_t)index << huge_page_shift(h)) + offset;
	return retval;
}

static int hugetlbfs_write_begin(struct file *file,
			struct address_space *mapping,
			loff_t pos, unsigned len, unsigned flags,
			struct page **pagep, void **fsdata)
{
	return -EINVAL;
}

static int hugetlbfs_write_end(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned copied,
			struct page *page, void *fsdata)
{
	BUG();
	return -EINVAL;
}

static void truncate_huge_page(struct page *page)
{
	ClearPageDirty(page);
	ClearPageUptodate(page);
	delete_from_page_cache(page);
}

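/*
 * Remove every page from lstart onwards from the page cache and return
 * any unused huge page reservations for this inode.
 */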
static void truncate_hugepages(struct inode *inode, loff_t lstart)
{
	struct hstate *h = hstate_inode(inode);
	struct address_space *mapping = &inode->i_data;
	const pgoff_t start = lstart >> huge_page_shift(h);
	struct pagevec pvec;
	pgoff_t next;
	int i, freed = 0;

	pagevec_init(&pvec, 0);
	next = start;
	while (1) {
		if (!pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) {
			if (next == start)
				break;
			next = start;
			continue;
		}

		for (i = 0; i < pagevec_count(&pvec); ++i) {
			struct page *page = pvec.pages[i];

			lock_page(page);
			if (page->index > next)
				next = page->index;
			++next;
			truncate_huge_page(page);
			unlock_page(page);
			freed++;
		}
		huge_pagevec_release(&pvec);
	}
	BUG_ON(!lstart && mapping->nrpages);
	hugetlb_unreserve_pages(inode, start, freed);
}

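/*
 * Final inode teardown: drop all cached pages and release the reservation
 * map attached to the address space (the root inode has none).
 */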
static void hugetlbfs_evict_inode(struct inode *inode)
{
	struct resv_map *resv_map;

	truncate_hugepages(inode, 0);
	resv_map = (struct resv_map *)inode->i_mapping->private_data;
	/* root inode doesn't have the resv_map, so we should check it */
	if (resv_map)
		resv_map_release(&resv_map->refs);
	clear_inode(inode);
}

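/*
 * Unmap the truncated range, starting at pgoff, from every vma in the
 * interval tree that still maps part of it.
 */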
static inline void
hugetlb_vmtruncate_list(struct rb_root *root, pgoff_t pgoff)
{
	struct vm_area_struct *vma;

	vma_interval_tree_foreach(vma, root, pgoff, ULONG_MAX) {
		unsigned long v_offset;

		/*
		 * Can the expression below overflow on 32-bit arches?
		 * No, because the interval tree returns us only those vmas
		 * which overlap the truncated area starting at pgoff,
		 * and no vma on a 32-bit arch can span beyond the 4GB.
		 */
		if (vma->vm_pgoff < pgoff)
			v_offset = (pgoff - vma->vm_pgoff) << PAGE_SHIFT;
		else
			v_offset = 0;

		unmap_hugepage_range(vma, vma->vm_start + v_offset,
				     vma->vm_end, NULL);
	}
}

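/*
 * Truncate the inode to offset: update i_size, unmap the truncated range
 * from all mappings and drop the now unused pages and reservations.
 */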
static int hugetlb_vmtruncate(struct inode *inode, loff_t offset)
{
	pgoff_t pgoff;
	struct address_space *mapping = inode->i_mapping;
	struct hstate *h = hstate_inode(inode);

	BUG_ON(offset & ~huge_page_mask(h));
	pgoff = offset >> PAGE_SHIFT;

	i_size_write(inode, offset);
	i_mmap_lock_write(mapping);
	if (!RB_EMPTY_ROOT(&mapping->i_mmap))
		hugetlb_vmtruncate_list(&mapping->i_mmap, pgoff);
	i_mmap_unlock_write(mapping);
	truncate_hugepages(inode, offset);
	return 0;
}

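/*
 * Attribute changes.  Size changes must be aligned to the huge page size
 * and are carried out by hugetlb_vmtruncate().
 */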
static int hugetlbfs_setattr(struct dentry *dentry, struct iattr *attr)
{
	struct inode *inode = d_inode(dentry);
	struct hstate *h = hstate_inode(inode);
	int error;
	unsigned int ia_valid = attr->ia_valid;

	BUG_ON(!inode);

	error = inode_change_ok(inode, attr);
	if (error)
		return error;

	if (ia_valid & ATTR_SIZE) {
		error = -EINVAL;
		if (attr->ia_size & ~huge_page_mask(h))
			return -EINVAL;
		error = hugetlb_vmtruncate(inode, attr->ia_size);
		if (error)
			return error;
	}

	setattr_copy(inode, attr);
	mark_inode_dirty(inode);
	return 0;
}

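/* Allocate and set up the root directory inode for a hugetlbfs mount. */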
static struct inode *hugetlbfs_get_root(struct super_block *sb,
					struct hugetlbfs_config *config)
{
	struct inode *inode;

	inode = new_inode(sb);
	if (inode) {
		struct hugetlbfs_inode_info *info;
		inode->i_ino = get_next_ino();
		inode->i_mode = S_IFDIR | config->mode;
		inode->i_uid = config->uid;
		inode->i_gid = config->gid;
		inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
		info = HUGETLBFS_I(inode);
		mpol_shared_policy_init(&info->policy, NULL);
		inode->i_op = &hugetlbfs_dir_inode_operations;
		inode->i_fop = &simple_dir_operations;
		/* directory inodes start off with i_nlink == 2 (for "." entry) */
		inc_nlink(inode);
		lockdep_annotate_inode_mutex_key(inode);
	}
	return inode;
}

/*
 * Hugetlbfs is not reclaimable; therefore its i_mmap_rwsem will never
 * be taken from reclaim -- unlike regular filesystems. This needs an
 * annotation because huge_pmd_share() does an allocation under
 * i_mmap_rwsem.
 */
static struct lock_class_key hugetlbfs_i_mmap_rwsem_key;

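/*
 * Allocate an inode for a regular file, directory, symlink or special
 * node in a hugetlbfs mount, attaching a fresh reservation map to its
 * address space.
 */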
static struct inode *hugetlbfs_get_inode(struct super_block *sb,
					struct inode *dir,
					umode_t mode, dev_t dev)
{
	struct inode *inode;
	struct resv_map *resv_map;

	resv_map = resv_map_alloc();
	if (!resv_map)
		return NULL;

	inode = new_inode(sb);
	if (inode) {
		struct hugetlbfs_inode_info *info;
		inode->i_ino = get_next_ino();
		inode_init_owner(inode, dir, mode);
		lockdep_set_class(&inode->i_mapping->i_mmap_rwsem,
				&hugetlbfs_i_mmap_rwsem_key);
		inode->i_mapping->a_ops = &hugetlbfs_aops;
		inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
		inode->i_mapping->private_data = resv_map;
		info = HUGETLBFS_I(inode);
		/*
		 * The policy is initialized here even if we are creating a
		 * private inode because initialization simply creates an
		 * empty rb tree and calls spin_lock_init(), later when we
		 * call mpol_free_shared_policy() it will just return because
		 * the rb tree will still be empty.
		 */
		mpol_shared_policy_init(&info->policy, NULL);
		switch (mode & S_IFMT) {
		default:
			init_special_inode(inode, mode, dev);
			break;
		case S_IFREG:
			inode->i_op = &hugetlbfs_inode_operations;
			inode->i_fop = &hugetlbfs_file_operations;
			break;
		case S_IFDIR:
			inode->i_op = &hugetlbfs_dir_inode_operations;
			inode->i_fop = &simple_dir_operations;

			/* directory inodes start off with i_nlink == 2 (for "." entry) */
			inc_nlink(inode);
			break;
		case S_IFLNK:
			inode->i_op = &page_symlink_inode_operations;
			break;
		}
		lockdep_annotate_inode_mutex_key(inode);
	} else
		kref_put(&resv_map->refs, resv_map_release);

	return inode;
}

/*
 * File creation. Allocate an inode, and we're done..
 */
static int hugetlbfs_mknod(struct inode *dir,
			struct dentry *dentry, umode_t mode, dev_t dev)
{
	struct inode *inode;
	int error = -ENOSPC;

	inode = hugetlbfs_get_inode(dir->i_sb, dir, mode, dev);
	if (inode) {
		dir->i_ctime = dir->i_mtime = CURRENT_TIME;
		d_instantiate(dentry, inode);
		dget(dentry);	/* Extra count - pin the dentry in core */
		error = 0;
	}
	return error;
}

static int hugetlbfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
{
	int retval = hugetlbfs_mknod(dir, dentry, mode | S_IFDIR, 0);
	if (!retval)
		inc_nlink(dir);
	return retval;
}

static int hugetlbfs_create(struct inode *dir, struct dentry *dentry, umode_t mode, bool excl)
{
	return hugetlbfs_mknod(dir, dentry, mode | S_IFREG, 0);
}

static int hugetlbfs_symlink(struct inode *dir,
			struct dentry *dentry, const char *symname)
{
	struct inode *inode;
	int error = -ENOSPC;

	inode = hugetlbfs_get_inode(dir->i_sb, dir, S_IFLNK|S_IRWXUGO, 0);
	if (inode) {
		int l = strlen(symname)+1;
		error = page_symlink(inode, symname, l);
		if (!error) {
			d_instantiate(dentry, inode);
			dget(dentry);
		} else
			iput(inode);
	}
	dir->i_ctime = dir->i_mtime = CURRENT_TIME;

	return error;
}

/*
 * mark the head page dirty
 */
static int hugetlbfs_set_page_dirty(struct page *page)
{
	struct page *head = compound_head(page);

	SetPageDirty(head);
	return 0;
}

static int hugetlbfs_migrate_page(struct address_space *mapping,
				struct page *newpage, struct page *page,
				enum migrate_mode mode)
{
	int rc;

	rc = migrate_huge_page_move_mapping(mapping, newpage, page);
	if (rc != MIGRATEPAGE_SUCCESS)
		return rc;
	migrate_page_copy(newpage, page);

	return MIGRATEPAGE_SUCCESS;
}

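/*
 * Report filesystem statistics in units of huge pages; block and inode
 * limits are only reported when the mount has a page subpool (size= or
 * min_size=).
 */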
static int hugetlbfs_statfs(struct dentry *dentry, struct kstatfs *buf)
{
	struct hugetlbfs_sb_info *sbinfo = HUGETLBFS_SB(dentry->d_sb);
	struct hstate *h = hstate_inode(d_inode(dentry));

	buf->f_type = HUGETLBFS_MAGIC;
	buf->f_bsize = huge_page_size(h);
	if (sbinfo) {
		spin_lock(&sbinfo->stat_lock);
		/* If no limits set, just report 0 for max/free/used
		 * blocks, like simple_statfs() */
		if (sbinfo->spool) {
			long free_pages;

			spin_lock(&sbinfo->spool->lock);
			buf->f_blocks = sbinfo->spool->max_hpages;
			free_pages = sbinfo->spool->max_hpages
				- sbinfo->spool->used_hpages;
			buf->f_bavail = buf->f_bfree = free_pages;
			spin_unlock(&sbinfo->spool->lock);
			buf->f_files = sbinfo->max_inodes;
			buf->f_ffree = sbinfo->free_inodes;
		}
		spin_unlock(&sbinfo->stat_lock);
	}
	buf->f_namelen = NAME_MAX;
	return 0;
}

static void hugetlbfs_put_super(struct super_block *sb)
{
	struct hugetlbfs_sb_info *sbi = HUGETLBFS_SB(sb);

	if (sbi) {
		sb->s_fs_info = NULL;

		if (sbi->spool)
			hugepage_put_subpool(sbi->spool);

		kfree(sbi);
	}
}

static inline int hugetlbfs_dec_free_inodes(struct hugetlbfs_sb_info *sbinfo)
{
	if (sbinfo->free_inodes >= 0) {
		spin_lock(&sbinfo->stat_lock);
		if (unlikely(!sbinfo->free_inodes)) {
			spin_unlock(&sbinfo->stat_lock);
			return 0;
		}
		sbinfo->free_inodes--;
		spin_unlock(&sbinfo->stat_lock);
	}

	return 1;
}

static void hugetlbfs_inc_free_inodes(struct hugetlbfs_sb_info *sbinfo)
{
	if (sbinfo->free_inodes >= 0) {
		spin_lock(&sbinfo->stat_lock);
		sbinfo->free_inodes++;
		spin_unlock(&sbinfo->stat_lock);
	}
}


static struct kmem_cache *hugetlbfs_inode_cachep;

static struct inode *hugetlbfs_alloc_inode(struct super_block *sb)
{
	struct hugetlbfs_sb_info *sbinfo = HUGETLBFS_SB(sb);
	struct hugetlbfs_inode_info *p;

	if (unlikely(!hugetlbfs_dec_free_inodes(sbinfo)))
		return NULL;
	p = kmem_cache_alloc(hugetlbfs_inode_cachep, GFP_KERNEL);
	if (unlikely(!p)) {
		hugetlbfs_inc_free_inodes(sbinfo);
		return NULL;
	}
	return &p->vfs_inode;
}

static void hugetlbfs_i_callback(struct rcu_head *head)
{
	struct inode *inode = container_of(head, struct inode, i_rcu);
	kmem_cache_free(hugetlbfs_inode_cachep, HUGETLBFS_I(inode));
}

static void hugetlbfs_destroy_inode(struct inode *inode)
{
	hugetlbfs_inc_free_inodes(HUGETLBFS_SB(inode->i_sb));
	mpol_free_shared_policy(&HUGETLBFS_I(inode)->policy);
	call_rcu(&inode->i_rcu, hugetlbfs_i_callback);
}

static const struct address_space_operations hugetlbfs_aops = {
	.write_begin	= hugetlbfs_write_begin,
	.write_end	= hugetlbfs_write_end,
	.set_page_dirty	= hugetlbfs_set_page_dirty,
	.migratepage	= hugetlbfs_migrate_page,
};


static void init_once(void *foo)
{
	struct hugetlbfs_inode_info *ei = (struct hugetlbfs_inode_info *)foo;

	inode_init_once(&ei->vfs_inode);
}

const struct file_operations hugetlbfs_file_operations = {
	.read_iter		= hugetlbfs_read_iter,
	.mmap			= hugetlbfs_file_mmap,
	.fsync			= noop_fsync,
	.get_unmapped_area	= hugetlb_get_unmapped_area,
	.llseek			= default_llseek,
};

static const struct inode_operations hugetlbfs_dir_inode_operations = {
	.create		= hugetlbfs_create,
	.lookup		= simple_lookup,
	.link		= simple_link,
	.unlink		= simple_unlink,
	.symlink	= hugetlbfs_symlink,
	.mkdir		= hugetlbfs_mkdir,
	.rmdir		= simple_rmdir,
	.mknod		= hugetlbfs_mknod,
	.rename		= simple_rename,
	.setattr	= hugetlbfs_setattr,
};

static const struct inode_operations hugetlbfs_inode_operations = {
	.setattr	= hugetlbfs_setattr,
};

static const struct super_operations hugetlbfs_ops = {
	.alloc_inode	= hugetlbfs_alloc_inode,
	.destroy_inode	= hugetlbfs_destroy_inode,
	.evict_inode	= hugetlbfs_evict_inode,
	.statfs		= hugetlbfs_statfs,
	.put_super	= hugetlbfs_put_super,
	.show_options	= generic_show_options,
};

enum { NO_SIZE, SIZE_STD, SIZE_PERCENT };

/*
 * Convert size option passed from command line to number of huge pages
 * in the pool specified by hstate.  Size option could be in bytes
 * (val_type == SIZE_STD) or percentage of the pool (val_type == SIZE_PERCENT).
 */
static long long
hugetlbfs_size_to_hpages(struct hstate *h, unsigned long long size_opt,
								int val_type)
{
	if (val_type == NO_SIZE)
		return -1;

	if (val_type == SIZE_PERCENT) {
		size_opt <<= huge_page_shift(h);
		size_opt *= h->max_huge_pages;
		do_div(size_opt, 100);
	}

	size_opt >>= huge_page_shift(h);
	return size_opt;
}

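/*
 * Parse the mount options (size, nr_inodes, mode, uid, gid, pagesize,
 * min_size) into *pconfig.  Returns 0 on success, -EINVAL on a bad
 * option or value.
 */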
static int
hugetlbfs_parse_options(char *options, struct hugetlbfs_config *pconfig)
{
	char *p, *rest;
	substring_t args[MAX_OPT_ARGS];
	int option;
	unsigned long long max_size_opt = 0, min_size_opt = 0;
	int max_val_type = NO_SIZE, min_val_type = NO_SIZE;

	if (!options)
		return 0;

	while ((p = strsep(&options, ",")) != NULL) {
		int token;
		if (!*p)
			continue;

		token = match_token(p, tokens, args);
		switch (token) {
		case Opt_uid:
			if (match_int(&args[0], &option))
				goto bad_val;
			pconfig->uid = make_kuid(current_user_ns(), option);
			if (!uid_valid(pconfig->uid))
				goto bad_val;
			break;

		case Opt_gid:
			if (match_int(&args[0], &option))
				goto bad_val;
			pconfig->gid = make_kgid(current_user_ns(), option);
			if (!gid_valid(pconfig->gid))
				goto bad_val;
			break;

		case Opt_mode:
			if (match_octal(&args[0], &option))
				goto bad_val;
			pconfig->mode = option & 01777U;
			break;

		case Opt_size: {
			/* memparse() will accept a K/M/G without a digit */
			if (!isdigit(*args[0].from))
				goto bad_val;
			max_size_opt = memparse(args[0].from, &rest);
			max_val_type = SIZE_STD;
			if (*rest == '%')
				max_val_type = SIZE_PERCENT;
			break;
		}

		case Opt_nr_inodes:
			/* memparse() will accept a K/M/G without a digit */
			if (!isdigit(*args[0].from))
				goto bad_val;
			pconfig->nr_inodes = memparse(args[0].from, &rest);
			break;

		case Opt_pagesize: {
			unsigned long ps;
			ps = memparse(args[0].from, &rest);
			pconfig->hstate = size_to_hstate(ps);
			if (!pconfig->hstate) {
				pr_err("Unsupported page size %lu MB\n",
					ps >> 20);
				return -EINVAL;
			}
			break;
		}

		case Opt_min_size: {
			/* memparse() will accept a K/M/G without a digit */
			if (!isdigit(*args[0].from))
				goto bad_val;
			min_size_opt = memparse(args[0].from, &rest);
			min_val_type = SIZE_STD;
			if (*rest == '%')
				min_val_type = SIZE_PERCENT;
			break;
		}

		default:
			pr_err("Bad mount option: \"%s\"\n", p);
			return -EINVAL;
			break;
		}
	}

	/*
	 * Use huge page pool size (in hstate) to convert the size
	 * options to number of huge pages.  If NO_SIZE, -1 is returned.
	 */
	pconfig->max_hpages = hugetlbfs_size_to_hpages(pconfig->hstate,
						max_size_opt, max_val_type);
	pconfig->min_hpages = hugetlbfs_size_to_hpages(pconfig->hstate,
						min_size_opt, min_val_type);

	/*
	 * If max_size was specified, then min_size must be smaller
	 */
	if (max_val_type > NO_SIZE &&
	    pconfig->min_hpages > pconfig->max_hpages) {
		pr_err("minimum size can not be greater than maximum size\n");
		return -EINVAL;
	}

	return 0;

bad_val:
	pr_err("Bad value '%s' for mount option '%s'\n", args[0].from, p);
	return -EINVAL;
}

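/*
 * Set up a hugetlbfs superblock: parse the mount options, create the
 * page subpool when a size limit was requested, and allocate the root
 * inode.
 */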
static int
hugetlbfs_fill_super(struct super_block *sb, void *data, int silent)
{
	int ret;
	struct hugetlbfs_config config;
	struct hugetlbfs_sb_info *sbinfo;

	save_mount_options(sb, data);

	config.max_hpages = -1; /* No limit on size by default */
	config.nr_inodes = -1; /* No limit on number of inodes by default */
	config.uid = current_fsuid();
	config.gid = current_fsgid();
	config.mode = 0755;
	config.hstate = &default_hstate;
	config.min_hpages = -1; /* No default minimum size */
	ret = hugetlbfs_parse_options(data, &config);
	if (ret)
		return ret;

	sbinfo = kmalloc(sizeof(struct hugetlbfs_sb_info), GFP_KERNEL);
	if (!sbinfo)
		return -ENOMEM;
	sb->s_fs_info = sbinfo;
	sbinfo->hstate = config.hstate;
	spin_lock_init(&sbinfo->stat_lock);
	sbinfo->max_inodes = config.nr_inodes;
	sbinfo->free_inodes = config.nr_inodes;
	sbinfo->spool = NULL;
	/*
	 * Allocate and initialize subpool if maximum or minimum size is
	 * specified.  Any needed reservations (for minimum size) are taken
	 * when the subpool is created.
	 */
	if (config.max_hpages != -1 || config.min_hpages != -1) {
		sbinfo->spool = hugepage_new_subpool(config.hstate,
							config.max_hpages,
							config.min_hpages);
		if (!sbinfo->spool)
			goto out_free;
	}
	sb->s_maxbytes = MAX_LFS_FILESIZE;
	sb->s_blocksize = huge_page_size(config.hstate);
	sb->s_blocksize_bits = huge_page_shift(config.hstate);
	sb->s_magic = HUGETLBFS_MAGIC;
	sb->s_op = &hugetlbfs_ops;
	sb->s_time_gran = 1;
	sb->s_root = d_make_root(hugetlbfs_get_root(sb, &config));
	if (!sb->s_root)
		goto out_free;
	return 0;
out_free:
	kfree(sbinfo->spool);
	kfree(sbinfo);
	return -ENOMEM;
}

static struct dentry *hugetlbfs_mount(struct file_system_type *fs_type,
	int flags, const char *dev_name, void *data)
{
	return mount_nodev(fs_type, flags, data, hugetlbfs_fill_super);
}

static struct file_system_type hugetlbfs_fs_type = {
	.name		= "hugetlbfs",
	.mount		= hugetlbfs_mount,
	.kill_sb	= kill_litter_super,
};
MODULE_ALIAS_FS("hugetlbfs");

static struct vfsmount *hugetlbfs_vfsmount[HUGE_MAX_HSTATE];

static int can_do_hugetlb_shm(void)
{
	kgid_t shm_group;
	shm_group = make_kgid(&init_user_ns, sysctl_hugetlb_shm_group);
	return capable(CAP_IPC_LOCK) || in_group_p(shm_group);
}

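/*
 * Convert a huge page size log2 value (0 selects the default huge page
 * size) into an index into hstates[], or -1 if no matching hstate exists.
 */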
static int get_hstate_idx(int page_size_log)
{
	struct hstate *h = hstate_sizelog(page_size_log);

	if (!h)
		return -1;
	return h - hstates;
}

static const struct dentry_operations anon_ops = {
	.d_dname = simple_dname
};

/*
 * Note that size should be aligned to the proper hugepage size by the
 * caller, otherwise hugetlb_reserve_pages reserves one fewer hugepage
 * than intended.
 */
struct file *hugetlb_file_setup(const char *name, size_t size,
				vm_flags_t acctflag, struct user_struct **user,
				int creat_flags, int page_size_log)
{
	struct file *file = ERR_PTR(-ENOMEM);
	struct inode *inode;
	struct path path;
	struct super_block *sb;
	struct qstr quick_string;
	int hstate_idx;

	hstate_idx = get_hstate_idx(page_size_log);
	if (hstate_idx < 0)
		return ERR_PTR(-ENODEV);

	*user = NULL;
	if (!hugetlbfs_vfsmount[hstate_idx])
		return ERR_PTR(-ENOENT);

	if (creat_flags == HUGETLB_SHMFS_INODE && !can_do_hugetlb_shm()) {
		*user = current_user();
		if (user_shm_lock(size, *user)) {
			task_lock(current);
			pr_warn_once("%s (%d): Using mlock ulimits for SHM_HUGETLB is deprecated\n",
				current->comm, current->pid);
			task_unlock(current);
		} else {
			*user = NULL;
			return ERR_PTR(-EPERM);
		}
	}

	sb = hugetlbfs_vfsmount[hstate_idx]->mnt_sb;
	quick_string.name = name;
	quick_string.len = strlen(quick_string.name);
	quick_string.hash = 0;
	path.dentry = d_alloc_pseudo(sb, &quick_string);
	if (!path.dentry)
		goto out_shm_unlock;

	d_set_d_op(path.dentry, &anon_ops);
	path.mnt = mntget(hugetlbfs_vfsmount[hstate_idx]);
	file = ERR_PTR(-ENOSPC);
	inode = hugetlbfs_get_inode(sb, NULL, S_IFREG | S_IRWXUGO, 0);
	if (!inode)
		goto out_dentry;
	if (creat_flags == HUGETLB_SHMFS_INODE)
		inode->i_flags |= S_PRIVATE;

	file = ERR_PTR(-ENOMEM);
	if (hugetlb_reserve_pages(inode, 0,
			size >> huge_page_shift(hstate_inode(inode)), NULL,
			acctflag))
		goto out_inode;

	d_instantiate(path.dentry, inode);
	inode->i_size = size;
	clear_nlink(inode);

	file = alloc_file(&path, FMODE_WRITE | FMODE_READ,
			&hugetlbfs_file_operations);
	if (IS_ERR(file))
		goto out_dentry; /* inode is already attached */

	return file;

out_inode:
	iput(inode);
out_dentry:
	path_put(&path);
out_shm_unlock:
	if (*user) {
		user_shm_unlock(size, *user);
		*user = NULL;
	}
	return file;
}

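/*
 * Module init: create the inode cache, register the filesystem and set up
 * one internal vfsmount per supported huge page size.
 */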
static int __init init_hugetlbfs_fs(void)
{
	struct hstate *h;
	int error;
	int i;

	if (!hugepages_supported()) {
		pr_info("disabling because there are no supported hugepage sizes\n");
		return -ENOTSUPP;
	}

	error = -ENOMEM;
	hugetlbfs_inode_cachep = kmem_cache_create("hugetlbfs_inode_cache",
					sizeof(struct hugetlbfs_inode_info),
					0, 0, init_once);
	if (hugetlbfs_inode_cachep == NULL)
		goto out2;

	error = register_filesystem(&hugetlbfs_fs_type);
	if (error)
		goto out;

	i = 0;
	for_each_hstate(h) {
		char buf[50];
		unsigned ps_kb = 1U << (h->order + PAGE_SHIFT - 10);

		snprintf(buf, sizeof(buf), "pagesize=%uK", ps_kb);
		hugetlbfs_vfsmount[i] = kern_mount_data(&hugetlbfs_fs_type,
							buf);

		if (IS_ERR(hugetlbfs_vfsmount[i])) {
			pr_err("Cannot mount internal hugetlbfs for "
				"page size %uK", ps_kb);
			error = PTR_ERR(hugetlbfs_vfsmount[i]);
			hugetlbfs_vfsmount[i] = NULL;
		}
		i++;
	}
	/* Non default hstates are optional */
	if (!IS_ERR_OR_NULL(hugetlbfs_vfsmount[default_hstate_idx]))
		return 0;

 out:
	kmem_cache_destroy(hugetlbfs_inode_cachep);
 out2:
	return error;
}

static void __exit exit_hugetlbfs_fs(void)
{
	struct hstate *h;
	int i;

	/*
	 * Make sure all delayed rcu free inodes are flushed before we
	 * destroy cache.
	 */
	rcu_barrier();
	kmem_cache_destroy(hugetlbfs_inode_cachep);
	i = 0;
	for_each_hstate(h)
		kern_unmount(hugetlbfs_vfsmount[i++]);
	unregister_filesystem(&hugetlbfs_fs_type);
}

module_init(init_hugetlbfs_fs)
module_exit(exit_hugetlbfs_fs)

MODULE_LICENSE("GPL");