/*
 * hugetlbpage-backed filesystem.  Based on ramfs.
 *
 * Nadia Yvette Chambers, 2002
 *
 * Copyright (C) 2002 Linus Torvalds.
 * License: GPL
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/thread_info.h>
#include <asm/current.h>
#include <linux/sched/signal.h>		/* remove ASAP */
#include <linux/falloc.h>
#include <linux/fs.h>
#include <linux/mount.h>
#include <linux/file.h>
#include <linux/kernel.h>
#include <linux/writeback.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/capability.h>
#include <linux/ctype.h>
#include <linux/backing-dev.h>
#include <linux/hugetlb.h>
#include <linux/pagevec.h>
#include <linux/fs_parser.h>
#include <linux/mman.h>
#include <linux/slab.h>
#include <linux/dnotify.h>
#include <linux/statfs.h>
#include <linux/security.h>
#include <linux/magic.h>
#include <linux/migrate.h>
#include <linux/uio.h>

#include <linux/uaccess.h>

static const struct super_operations hugetlbfs_ops;
static const struct address_space_operations hugetlbfs_aops;
const struct file_operations hugetlbfs_file_operations;
static const struct inode_operations hugetlbfs_dir_inode_operations;
static const struct inode_operations hugetlbfs_inode_operations;

enum hugetlbfs_size_type { NO_SIZE, SIZE_STD, SIZE_PERCENT };

struct hugetlbfs_fs_context {
	struct hstate		*hstate;
	unsigned long long	max_size_opt;
	unsigned long long	min_size_opt;
	long			max_hpages;
	long			nr_inodes;
	long			min_hpages;
	enum hugetlbfs_size_type max_val_type;
	enum hugetlbfs_size_type min_val_type;
	kuid_t			uid;
	kgid_t			gid;
	umode_t			mode;
};

int sysctl_hugetlb_shm_group;

enum hugetlb_param {
	Opt_gid,
	Opt_min_size,
	Opt_mode,
	Opt_nr_inodes,
	Opt_pagesize,
	Opt_size,
	Opt_uid,
};

static const struct fs_parameter_spec hugetlb_param_specs[] = {
	fsparam_u32   ("gid",		Opt_gid),
	fsparam_string("min_size",	Opt_min_size),
	fsparam_u32   ("mode",		Opt_mode),
	fsparam_string("nr_inodes",	Opt_nr_inodes),
	fsparam_string("pagesize",	Opt_pagesize),
	fsparam_string("size",		Opt_size),
	fsparam_u32   ("uid",		Opt_uid),
	{}
};

static const struct fs_parameter_description hugetlb_fs_parameters = {
	.name		= "hugetlbfs",
	.specs		= hugetlb_param_specs,
};

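/*
 * Illustrative usage (assumes a 2MB default huge page size): the
 * parameters above correspond to mount options such as
 *
 *	mount -t hugetlbfs -o uid=1000,gid=1000,mode=1770,pagesize=2M,\
 *		size=1G,min_size=512M,nr_inodes=64 none /mnt/huge
 *
 * "size" and "min_size" may also be given as a percentage of the huge
 * page pool, e.g. size=50%.
 */
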
#ifdef CONFIG_NUMA
static inline void hugetlb_set_vma_policy(struct vm_area_struct *vma,
					struct inode *inode, pgoff_t index)
{
	vma->vm_policy = mpol_shared_policy_lookup(&HUGETLBFS_I(inode)->policy,
							index);
}

static inline void hugetlb_drop_vma_policy(struct vm_area_struct *vma)
{
	mpol_cond_put(vma->vm_policy);
}
#else
static inline void hugetlb_set_vma_policy(struct vm_area_struct *vma,
					struct inode *inode, pgoff_t index)
{
}

static inline void hugetlb_drop_vma_policy(struct vm_area_struct *vma)
{
}
#endif

static void huge_pagevec_release(struct pagevec *pvec)
{
	int i;

	for (i = 0; i < pagevec_count(pvec); ++i)
		put_page(pvec->pages[i]);

	pagevec_reinit(pvec);
}

/*
 * Mask used when checking the page offset value passed in via system
 * calls.  This value will be converted to a loff_t which is signed.
 * Therefore, we want to check the upper PAGE_SHIFT + 1 bits of the
 * value.  The extra bit (- 1 in the shift value) is to take the sign
 * bit into account.
 */
#define PGOFF_LOFFT_MAX \
	(((1UL << (PAGE_SHIFT + 1)) - 1) <<  (BITS_PER_LONG - (PAGE_SHIFT + 1)))

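/*
 * Worked example (illustrative): on a 64-bit arch with 4K base pages,
 * PAGE_SHIFT == 12 and BITS_PER_LONG == 64, so PGOFF_LOFFT_MAX is
 * ((1UL << 13) - 1) << 51, i.e. bits 51..63.  Any vm_pgoff with one of
 * those bits set would, after the << PAGE_SHIFT conversion to a byte
 * offset, reach or overflow the loff_t sign bit (bit 63), so such an
 * offset must be rejected.
 */
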
static int hugetlbfs_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct inode *inode = file_inode(file);
	loff_t len, vma_len;
	int ret;
	struct hstate *h = hstate_file(file);

	/*
	 * vma address alignment (but not the pgoff alignment) has
	 * already been checked by prepare_hugepage_range.  If you add
	 * any error returns here, do so after setting VM_HUGETLB, so
	 * that the is_vm_hugetlb_page() tests below unmap_region() go
	 * the right way when do_mmap_pgoff() unwinds (this may be
	 * important on powerpc and ia64).
	 */
	vma->vm_flags |= VM_HUGETLB | VM_DONTEXPAND;
	vma->vm_ops = &hugetlb_vm_ops;

	/*
	 * The page-based offset in vm_pgoff can be large enough to
	 * overflow a loff_t when converted to a byte offset.  This can
	 * only happen on architectures where sizeof(loff_t) ==
	 * sizeof(unsigned long).  So, only check in those instances.
	 */
	if (sizeof(unsigned long) == sizeof(loff_t)) {
		if (vma->vm_pgoff & PGOFF_LOFFT_MAX)
			return -EINVAL;
	}

	/* must be huge page aligned */
	if (vma->vm_pgoff & (~huge_page_mask(h) >> PAGE_SHIFT))
		return -EINVAL;

	vma_len = (loff_t)(vma->vm_end - vma->vm_start);
	len = vma_len + ((loff_t)vma->vm_pgoff << PAGE_SHIFT);
	/* check for overflow */
	if (len < vma_len)
		return -EINVAL;

	inode_lock(inode);
	file_accessed(file);

	ret = -ENOMEM;
	if (hugetlb_reserve_pages(inode,
				vma->vm_pgoff >> huge_page_order(h),
				len >> huge_page_shift(h), vma,
				vma->vm_flags))
		goto out;

	ret = 0;
	if (vma->vm_flags & VM_WRITE && inode->i_size < len)
		i_size_write(inode, len);
out:
	inode_unlock(inode);

	return ret;
}

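/*
 * Illustrative userspace counterpart (assumes a hugetlbfs file on a
 * 2MB-page mount at /mnt/huge):
 *
 *	int fd = open("/mnt/huge/buf", O_CREAT | O_RDWR, 0600);
 *	void *p = mmap(NULL, 2 * 1024 * 1024, PROT_READ | PROT_WRITE,
 *		       MAP_SHARED, fd, 0);
 *
 * Both the file offset and (via hugetlb_get_unmapped_area() below) the
 * length must be multiples of the huge page size, or the mmap() fails
 * with -EINVAL.
 */
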
/*
 * Called under down_write(mmap_sem).
 */

#ifndef HAVE_ARCH_HUGETLB_UNMAPPED_AREA
static unsigned long
hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	struct hstate *h = hstate_file(file);
	struct vm_unmapped_area_info info;

	if (len & ~huge_page_mask(h))
		return -EINVAL;
	if (len > TASK_SIZE)
		return -ENOMEM;

	if (flags & MAP_FIXED) {
		if (prepare_hugepage_range(file, addr, len))
			return -EINVAL;
		return addr;
	}

	if (addr) {
		addr = ALIGN(addr, huge_page_size(h));
		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr &&
		    (!vma || addr + len <= vm_start_gap(vma)))
			return addr;
	}

	info.flags = 0;
	info.length = len;
	info.low_limit = TASK_UNMAPPED_BASE;
	info.high_limit = TASK_SIZE;
	info.align_mask = PAGE_MASK & ~huge_page_mask(h);
	info.align_offset = 0;
	return vm_unmapped_area(&info);
}
#endif

static size_t
hugetlbfs_read_actor(struct page *page, unsigned long offset,
			struct iov_iter *to, unsigned long size)
{
	size_t copied = 0;
	int i, chunksize;

	/* Find which 4K chunk and the offset within that chunk */
	i = offset >> PAGE_SHIFT;
	offset = offset & ~PAGE_MASK;

	while (size) {
		size_t n;
		chunksize = PAGE_SIZE;
		if (offset)
			chunksize -= offset;
		if (chunksize > size)
			chunksize = size;
		n = copy_page_to_iter(&page[i], offset, chunksize, to);
		copied += n;
		if (n != chunksize)
			return copied;
		offset = 0;
		size -= chunksize;
		i++;
	}
	return copied;
}

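/*
 * Worked example (illustrative): with 4K base pages, offset == 5000
 * gives i == 1 (5000 >> 12) and an in-page offset of 904
 * (5000 & ~PAGE_MASK).  The first iteration copies at most
 * 4096 - 904 bytes, then the loop walks the remaining base pages of
 * the compound page until "size" bytes have been copied.
 */
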
/*
 * Support for read() - Find the page attached to f_mapping and copy out the
 * data.  It's *very* similar to do_generic_mapping_read(), but we can't use
 * that since it has PAGE_SIZE assumptions.
 */
static ssize_t hugetlbfs_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	struct file *file = iocb->ki_filp;
	struct hstate *h = hstate_file(file);
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	unsigned long index = iocb->ki_pos >> huge_page_shift(h);
	unsigned long offset = iocb->ki_pos & ~huge_page_mask(h);
	unsigned long end_index;
	loff_t isize;
	ssize_t retval = 0;

	while (iov_iter_count(to)) {
		struct page *page;
		size_t nr, copied;

		/* nr is the maximum number of bytes to copy from this page */
		nr = huge_page_size(h);
		isize = i_size_read(inode);
		if (!isize)
			break;
		end_index = (isize - 1) >> huge_page_shift(h);
		if (index > end_index)
			break;
		if (index == end_index) {
			nr = ((isize - 1) & ~huge_page_mask(h)) + 1;
			if (nr <= offset)
				break;
		}
		nr = nr - offset;

		/* Find the page */
		page = find_lock_page(mapping, index);
		if (unlikely(page == NULL)) {
			/*
			 * We have a HOLE, zero out the user-buffer for the
			 * length of the hole or request.
			 */
			copied = iov_iter_zero(nr, to);
		} else {
			unlock_page(page);

			/*
			 * We have the page, copy it to the user space buffer.
			 */
			copied = hugetlbfs_read_actor(page, offset, to, nr);
			put_page(page);
		}
		offset += copied;
		retval += copied;
		if (copied != nr && iov_iter_count(to)) {
			if (!retval)
				retval = -EFAULT;
			break;
		}
		index += offset >> huge_page_shift(h);
		offset &= ~huge_page_mask(h);
	}
	iocb->ki_pos = ((loff_t)index << huge_page_shift(h)) + offset;
	return retval;
}

static int hugetlbfs_write_begin(struct file *file,
			struct address_space *mapping,
			loff_t pos, unsigned len, unsigned flags,
			struct page **pagep, void **fsdata)
{
	return -EINVAL;
}

static int hugetlbfs_write_end(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned copied,
			struct page *page, void *fsdata)
{
	BUG();
	return -EINVAL;
}

static void remove_huge_page(struct page *page)
{
	ClearPageDirty(page);
	ClearPageUptodate(page);
	delete_from_page_cache(page);
}

static void
hugetlb_vmdelete_list(struct rb_root_cached *root, pgoff_t start, pgoff_t end)
{
	struct vm_area_struct *vma;

	/*
	 * end == 0 indicates that the entire range after
	 * start should be unmapped.
	 */
	vma_interval_tree_foreach(vma, root, start, end ? end : ULONG_MAX) {
		unsigned long v_offset;
		unsigned long v_end;

		/*
		 * Can the expression below overflow on 32-bit arches?
		 * No, because the interval tree returns us only those vmas
		 * which overlap the truncated area starting at pgoff,
		 * and no vma on a 32-bit arch can span beyond 4GB.
		 */
		if (vma->vm_pgoff < start)
			v_offset = (start - vma->vm_pgoff) << PAGE_SHIFT;
		else
			v_offset = 0;

		if (!end)
			v_end = vma->vm_end;
		else {
			v_end = ((end - vma->vm_pgoff) << PAGE_SHIFT)
							+ vma->vm_start;
			if (v_end > vma->vm_end)
				v_end = vma->vm_end;
		}

		unmap_hugepage_range(vma, vma->vm_start + v_offset, v_end,
									NULL);
	}
}
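
/*
 * Worked example (illustrative): truncating a file at 4MB (4K base
 * pages, so start == 1024) while a vma with vm_pgoff == 0 maps the
 * first 8MB gives v_offset == 1024 << PAGE_SHIFT == 4MB, and the vma
 * is unmapped from vma->vm_start + 4MB up to vma->vm_end.
 */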

/*
 * remove_inode_hugepages handles two distinct cases: truncation and hole
 * punch.  There are subtle differences in operation for each case.
 *
 * truncation is indicated by end of range being LLONG_MAX
 *	In this case, we first scan the range and release found pages.
 *	After releasing pages, hugetlb_unreserve_pages cleans up region/reserv
 *	maps and global counts.  Page faults cannot race with truncation
 *	in this routine.  hugetlb_no_page() prevents page faults in the
 *	truncated range.  It checks i_size before allocation, and again after
 *	with the page table lock for the page held.  The same lock must be
 *	acquired to unmap a page.
 * hole punch is indicated if end is not LLONG_MAX
 *	In the hole punch case we scan the range and release found pages.
 *	Only when releasing a page is the associated region/reserv map
 *	deleted.  The region/reserv maps for ranges without associated
 *	pages are not modified.  Page faults can race with hole punch.
 *	This is indicated if we find a mapped page.
 * Note: If the passed end of range value is beyond the end of file, but
 * not LLONG_MAX this routine still performs a hole punch operation.
 */
static void remove_inode_hugepages(struct inode *inode, loff_t lstart,
				   loff_t lend)
{
	struct hstate *h = hstate_inode(inode);
	struct address_space *mapping = &inode->i_data;
	const pgoff_t start = lstart >> huge_page_shift(h);
	const pgoff_t end = lend >> huge_page_shift(h);
	struct vm_area_struct pseudo_vma;
	struct pagevec pvec;
	pgoff_t next, index;
	int i, freed = 0;
	bool truncate_op = (lend == LLONG_MAX);

	vma_init(&pseudo_vma, current->mm);
	pseudo_vma.vm_flags = (VM_HUGETLB | VM_MAYSHARE | VM_SHARED);
	pagevec_init(&pvec);
	next = start;
	while (next < end) {
		/*
		 * When no more pages are found, we are done.
		 */
		if (!pagevec_lookup_range(&pvec, mapping, &next, end - 1))
			break;

		for (i = 0; i < pagevec_count(&pvec); ++i) {
			struct page *page = pvec.pages[i];
			u32 hash;

			index = page->index;
			hash = hugetlb_fault_mutex_hash(h, current->mm,
							&pseudo_vma,
							mapping, index, 0);
			mutex_lock(&hugetlb_fault_mutex_table[hash]);

			/*
			 * If page is mapped, it was faulted in after being
			 * unmapped in caller.  Unmap (again) now after taking
			 * the fault mutex.  The mutex will prevent faults
			 * until we finish removing the page.
			 *
			 * This race can only happen in the hole punch case.
			 * Getting here in a truncate operation is a bug.
			 */
			if (unlikely(page_mapped(page))) {
				BUG_ON(truncate_op);

				i_mmap_lock_write(mapping);
				hugetlb_vmdelete_list(&mapping->i_mmap,
					index * pages_per_huge_page(h),
					(index + 1) * pages_per_huge_page(h));
				i_mmap_unlock_write(mapping);
			}

			lock_page(page);
			/*
			 * We must free the huge page and remove it from page
			 * cache (remove_huge_page) BEFORE removing the
			 * region/reserve map (hugetlb_unreserve_pages).  In
			 * rare out of memory conditions, removal of the
			 * region/reserve map could fail.  Correspondingly,
			 * the subpool and global reserve usage counts may
			 * need to be adjusted.
			 */
			VM_BUG_ON(PagePrivate(page));
			remove_huge_page(page);
			freed++;
			if (!truncate_op) {
				if (unlikely(hugetlb_unreserve_pages(inode,
							index, index + 1, 1)))
					hugetlb_fix_reserve_counts(inode);
			}

			unlock_page(page);
			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
		}
		huge_pagevec_release(&pvec);
		cond_resched();
	}

	if (truncate_op)
		(void)hugetlb_unreserve_pages(inode, start, LONG_MAX, freed);
}

static void hugetlbfs_evict_inode(struct inode *inode)
{
	struct resv_map *resv_map;

	remove_inode_hugepages(inode, 0, LLONG_MAX);
	resv_map = (struct resv_map *)inode->i_mapping->private_data;
	/* the root inode doesn't have a resv_map, so we must check for it */
	if (resv_map)
		resv_map_release(&resv_map->refs);
	clear_inode(inode);
}

static int hugetlb_vmtruncate(struct inode *inode, loff_t offset)
{
	pgoff_t pgoff;
	struct address_space *mapping = inode->i_mapping;
	struct hstate *h = hstate_inode(inode);

	BUG_ON(offset & ~huge_page_mask(h));
	pgoff = offset >> PAGE_SHIFT;

	i_size_write(inode, offset);
	i_mmap_lock_write(mapping);
	if (!RB_EMPTY_ROOT(&mapping->i_mmap.rb_root))
		hugetlb_vmdelete_list(&mapping->i_mmap, pgoff, 0);
	i_mmap_unlock_write(mapping);
	remove_inode_hugepages(inode, offset, LLONG_MAX);
	return 0;
}

static long hugetlbfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
{
	struct hstate *h = hstate_inode(inode);
	loff_t hpage_size = huge_page_size(h);
	loff_t hole_start, hole_end;

	/*
	 * For hole punch round up the beginning offset of the hole and
	 * round down the end.
	 */
	hole_start = round_up(offset, hpage_size);
	hole_end = round_down(offset + len, hpage_size);

	if (hole_end > hole_start) {
		struct address_space *mapping = inode->i_mapping;
		struct hugetlbfs_inode_info *info = HUGETLBFS_I(inode);

		inode_lock(inode);

		/* protected by i_mutex */
		if (info->seals & (F_SEAL_WRITE | F_SEAL_FUTURE_WRITE)) {
			inode_unlock(inode);
			return -EPERM;
		}

		i_mmap_lock_write(mapping);
		if (!RB_EMPTY_ROOT(&mapping->i_mmap.rb_root))
			hugetlb_vmdelete_list(&mapping->i_mmap,
						hole_start >> PAGE_SHIFT,
						hole_end >> PAGE_SHIFT);
		i_mmap_unlock_write(mapping);
		remove_inode_hugepages(inode, hole_start, hole_end);
		inode_unlock(inode);
	}

	return 0;
}

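/*
 * Worked example (illustrative): on a 2MB-page file, punching a hole
 * at offset 1MB with len 4MB rounds hole_start up to 2MB and hole_end
 * down to 4MB (round_down(5MB, 2MB)), so only the second huge page is
 * unmapped and removed; the partially covered edges are left intact.
 */
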
static long hugetlbfs_fallocate(struct file *file, int mode, loff_t offset,
				loff_t len)
{
	struct inode *inode = file_inode(file);
	struct hugetlbfs_inode_info *info = HUGETLBFS_I(inode);
	struct address_space *mapping = inode->i_mapping;
	struct hstate *h = hstate_inode(inode);
	struct vm_area_struct pseudo_vma;
	struct mm_struct *mm = current->mm;
	loff_t hpage_size = huge_page_size(h);
	unsigned long hpage_shift = huge_page_shift(h);
	pgoff_t start, index, end;
	int error;
	u32 hash;

	if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
		return -EOPNOTSUPP;

	if (mode & FALLOC_FL_PUNCH_HOLE)
		return hugetlbfs_punch_hole(inode, offset, len);

	/*
	 * Default preallocate case.
	 * For this range, start is rounded down and end is rounded up
	 * as well as being converted to page offsets.
	 */
	start = offset >> hpage_shift;
	end = (offset + len + hpage_size - 1) >> hpage_shift;

	inode_lock(inode);

	/* We need to check rlimit even when FALLOC_FL_KEEP_SIZE */
	error = inode_newsize_ok(inode, offset + len);
	if (error)
		goto out;

	if ((info->seals & F_SEAL_GROW) && offset + len > inode->i_size) {
		error = -EPERM;
		goto out;
	}

	/*
	 * Initialize a pseudo vma as this is required by the huge page
	 * allocation routines.  If NUMA is configured, use the page index
	 * as input to create an allocation policy.
	 */
	vma_init(&pseudo_vma, mm);
	pseudo_vma.vm_flags = (VM_HUGETLB | VM_MAYSHARE | VM_SHARED);
	pseudo_vma.vm_file = file;

	for (index = start; index < end; index++) {
		/*
		 * This is supposed to be the vaddr where the page is being
		 * faulted in, but we have no vaddr here.
		 */
		struct page *page;
		unsigned long addr;
		int avoid_reserve = 0;

		cond_resched();

		/*
		 * The fallocate(2) manpage permits EINTR; we may have been
		 * interrupted because we are using up too much memory.
		 */
		if (signal_pending(current)) {
			error = -EINTR;
			break;
		}

		/* Set numa allocation policy based on index */
		hugetlb_set_vma_policy(&pseudo_vma, inode, index);

		/* addr is the offset within the file (zero based) */
		addr = index * hpage_size;

		/* mutex taken here, as in the fault path and hole punch */
		hash = hugetlb_fault_mutex_hash(h, mm, &pseudo_vma, mapping,
						index, addr);
		mutex_lock(&hugetlb_fault_mutex_table[hash]);

		/* See if already present in mapping to avoid alloc/free */
		page = find_get_page(mapping, index);
		if (page) {
			put_page(page);
			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
			hugetlb_drop_vma_policy(&pseudo_vma);
			continue;
		}

		/* Allocate page and add to page cache */
		page = alloc_huge_page(&pseudo_vma, addr, avoid_reserve);
		hugetlb_drop_vma_policy(&pseudo_vma);
		if (IS_ERR(page)) {
			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
			error = PTR_ERR(page);
			goto out;
		}
		clear_huge_page(page, addr, pages_per_huge_page(h));
		__SetPageUptodate(page);
		error = huge_add_to_page_cache(page, mapping, index);
		if (unlikely(error)) {
			put_page(page);
			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
			goto out;
		}

		mutex_unlock(&hugetlb_fault_mutex_table[hash]);

		/*
		 * unlock_page because the page was locked by
		 * add_to_page_cache(); put_page drops the extra
		 * reference taken by alloc_huge_page().
		 */
		unlock_page(page);
		put_page(page);
	}

	if (!(mode & FALLOC_FL_KEEP_SIZE) && offset + len > inode->i_size)
		i_size_write(inode, offset + len);
	inode->i_ctime = current_time(inode);
out:
	inode_unlock(inode);
	return error;
}

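/*
 * Illustrative userspace usage for the two supported modes (assumes a
 * file on a 2MB-page hugetlbfs mount):
 *
 *	fallocate(fd, 0, 0, 4 * 1024 * 1024);
 *		preallocates two huge pages and extends i_size to 4MB;
 *	fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
 *		  0, 2 * 1024 * 1024);
 *		releases the first huge page while leaving i_size alone.
 */
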
static int hugetlbfs_setattr(struct dentry *dentry, struct iattr *attr)
{
	struct inode *inode = d_inode(dentry);
	struct hstate *h = hstate_inode(inode);
	int error;
	unsigned int ia_valid = attr->ia_valid;
	struct hugetlbfs_inode_info *info = HUGETLBFS_I(inode);

	BUG_ON(!inode);

	error = setattr_prepare(dentry, attr);
	if (error)
		return error;

	if (ia_valid & ATTR_SIZE) {
		loff_t oldsize = inode->i_size;
		loff_t newsize = attr->ia_size;

		if (newsize & ~huge_page_mask(h))
			return -EINVAL;
		/* protected by i_mutex */
		if ((newsize < oldsize && (info->seals & F_SEAL_SHRINK)) ||
		    (newsize > oldsize && (info->seals & F_SEAL_GROW)))
			return -EPERM;
		error = hugetlb_vmtruncate(inode, newsize);
		if (error)
			return error;
	}

	setattr_copy(inode, attr);
	mark_inode_dirty(inode);
	return 0;
}

static struct inode *hugetlbfs_get_root(struct super_block *sb,
					struct hugetlbfs_fs_context *ctx)
{
	struct inode *inode;

	inode = new_inode(sb);
	if (inode) {
		inode->i_ino = get_next_ino();
		inode->i_mode = S_IFDIR | ctx->mode;
		inode->i_uid = ctx->uid;
		inode->i_gid = ctx->gid;
		inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode);
		inode->i_op = &hugetlbfs_dir_inode_operations;
		inode->i_fop = &simple_dir_operations;
		/* directory inodes start off with i_nlink == 2 (for "." entry) */
		inc_nlink(inode);
		lockdep_annotate_inode_mutex_key(inode);
	}
	return inode;
}

/*
 * Hugetlbfs is not reclaimable; therefore its i_mmap_rwsem will never
 * be taken from reclaim -- unlike regular filesystems.  This needs an
 * annotation because huge_pmd_share() does an allocation under hugetlb's
 * i_mmap_rwsem.
 */
static struct lock_class_key hugetlbfs_i_mmap_rwsem_key;

static struct inode *hugetlbfs_get_inode(struct super_block *sb,
					struct inode *dir,
					umode_t mode, dev_t dev)
{
	struct inode *inode;
	struct resv_map *resv_map = NULL;

	/*
	 * Reserve maps are only needed for inodes that can have associated
	 * page allocations.
	 */
	if (S_ISREG(mode) || S_ISLNK(mode)) {
		resv_map = resv_map_alloc();
		if (!resv_map)
			return NULL;
	}

	inode = new_inode(sb);
	if (inode) {
		struct hugetlbfs_inode_info *info = HUGETLBFS_I(inode);

		inode->i_ino = get_next_ino();
		inode_init_owner(inode, dir, mode);
		lockdep_set_class(&inode->i_mapping->i_mmap_rwsem,
				&hugetlbfs_i_mmap_rwsem_key);
		inode->i_mapping->a_ops = &hugetlbfs_aops;
		inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode);
		inode->i_mapping->private_data = resv_map;
		info->seals = F_SEAL_SEAL;
		switch (mode & S_IFMT) {
		default:
			init_special_inode(inode, mode, dev);
			break;
		case S_IFREG:
			inode->i_op = &hugetlbfs_inode_operations;
			inode->i_fop = &hugetlbfs_file_operations;
			break;
		case S_IFDIR:
			inode->i_op = &hugetlbfs_dir_inode_operations;
			inode->i_fop = &simple_dir_operations;

			/* directory inodes start off with i_nlink == 2 (for "." entry) */
			inc_nlink(inode);
			break;
		case S_IFLNK:
			inode->i_op = &page_symlink_inode_operations;
			inode_nohighmem(inode);
			break;
		}
		lockdep_annotate_inode_mutex_key(inode);
	} else {
		if (resv_map)
			kref_put(&resv_map->refs, resv_map_release);
	}

	return inode;
}

/*
 * File creation.  Allocate an inode, and we're done.
 */
static int hugetlbfs_mknod(struct inode *dir,
			struct dentry *dentry, umode_t mode, dev_t dev)
{
	struct inode *inode;
	int error = -ENOSPC;

	inode = hugetlbfs_get_inode(dir->i_sb, dir, mode, dev);
	if (inode) {
		dir->i_ctime = dir->i_mtime = current_time(dir);
		d_instantiate(dentry, inode);
		dget(dentry);	/* Extra count - pin the dentry in core */
		error = 0;
	}
	return error;
}

static int hugetlbfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
{
	int retval = hugetlbfs_mknod(dir, dentry, mode | S_IFDIR, 0);
	if (!retval)
		inc_nlink(dir);
	return retval;
}

static int hugetlbfs_create(struct inode *dir, struct dentry *dentry, umode_t mode, bool excl)
{
	return hugetlbfs_mknod(dir, dentry, mode | S_IFREG, 0);
}

static int hugetlbfs_symlink(struct inode *dir,
			struct dentry *dentry, const char *symname)
{
	struct inode *inode;
	int error = -ENOSPC;

	inode = hugetlbfs_get_inode(dir->i_sb, dir, S_IFLNK|S_IRWXUGO, 0);
	if (inode) {
		int l = strlen(symname)+1;
		error = page_symlink(inode, symname, l);
		if (!error) {
			d_instantiate(dentry, inode);
			dget(dentry);
		} else
			iput(inode);
	}
	dir->i_ctime = dir->i_mtime = current_time(dir);

	return error;
}

/*
 * mark the head page dirty
 */
static int hugetlbfs_set_page_dirty(struct page *page)
{
	struct page *head = compound_head(page);

	SetPageDirty(head);
	return 0;
}

static int hugetlbfs_migrate_page(struct address_space *mapping,
				struct page *newpage, struct page *page,
				enum migrate_mode mode)
{
	int rc;

	rc = migrate_huge_page_move_mapping(mapping, newpage, page);
	if (rc != MIGRATEPAGE_SUCCESS)
		return rc;

	/*
	 * page_private is the subpool pointer in hugetlb pages.  Transfer
	 * it to the new page.  PagePrivate is not associated with
	 * page_private for hugetlb pages and cannot be set here, as only
	 * page_huge_active pages can be migrated.
	 */
	if (page_private(page)) {
		set_page_private(newpage, page_private(page));
		set_page_private(page, 0);
	}

	if (mode != MIGRATE_SYNC_NO_COPY)
		migrate_page_copy(newpage, page);
	else
		migrate_page_states(newpage, page);

	return MIGRATEPAGE_SUCCESS;
}

static int hugetlbfs_error_remove_page(struct address_space *mapping,
				struct page *page)
{
	struct inode *inode = mapping->host;
	pgoff_t index = page->index;

	remove_huge_page(page);
	if (unlikely(hugetlb_unreserve_pages(inode, index, index + 1, 1)))
		hugetlb_fix_reserve_counts(inode);

	return 0;
}

/*
 * Display the mount options in /proc/mounts.
 */
static int hugetlbfs_show_options(struct seq_file *m, struct dentry *root)
{
	struct hugetlbfs_sb_info *sbinfo = HUGETLBFS_SB(root->d_sb);
	struct hugepage_subpool *spool = sbinfo->spool;
	unsigned long hpage_size = huge_page_size(sbinfo->hstate);
	unsigned hpage_shift = huge_page_shift(sbinfo->hstate);
	char mod;

	if (!uid_eq(sbinfo->uid, GLOBAL_ROOT_UID))
		seq_printf(m, ",uid=%u",
			   from_kuid_munged(&init_user_ns, sbinfo->uid));
	if (!gid_eq(sbinfo->gid, GLOBAL_ROOT_GID))
		seq_printf(m, ",gid=%u",
			   from_kgid_munged(&init_user_ns, sbinfo->gid));
	if (sbinfo->mode != 0755)
		seq_printf(m, ",mode=%o", sbinfo->mode);
	if (sbinfo->max_inodes != -1)
		seq_printf(m, ",nr_inodes=%lu", sbinfo->max_inodes);

	hpage_size /= 1024;
	mod = 'K';
	if (hpage_size >= 1024) {
		hpage_size /= 1024;
		mod = 'M';
	}
	seq_printf(m, ",pagesize=%lu%c", hpage_size, mod);
	if (spool) {
		if (spool->max_hpages != -1)
			seq_printf(m, ",size=%llu",
				   (unsigned long long)spool->max_hpages << hpage_shift);
		if (spool->min_hpages != -1)
			seq_printf(m, ",min_size=%llu",
				   (unsigned long long)spool->min_hpages << hpage_shift);
	}
	return 0;
}

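/*
 * Example output (illustrative): a 2MB-page mount limited to 1GB might
 * show up in /proc/mounts roughly as
 *
 *	none /mnt/huge hugetlbfs rw,relatime,pagesize=2M,size=1073741824 0 0
 */
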
static int hugetlbfs_statfs(struct dentry *dentry, struct kstatfs *buf)
{
	struct hugetlbfs_sb_info *sbinfo = HUGETLBFS_SB(dentry->d_sb);
	struct hstate *h = hstate_inode(d_inode(dentry));

	buf->f_type = HUGETLBFS_MAGIC;
	buf->f_bsize = huge_page_size(h);
	if (sbinfo) {
		spin_lock(&sbinfo->stat_lock);
		/* If no limits set, just report 0 for max/free/used
		 * blocks, like simple_statfs() */
		if (sbinfo->spool) {
			long free_pages;

			spin_lock(&sbinfo->spool->lock);
			buf->f_blocks = sbinfo->spool->max_hpages;
			free_pages = sbinfo->spool->max_hpages
				- sbinfo->spool->used_hpages;
			buf->f_bavail = buf->f_bfree = free_pages;
			spin_unlock(&sbinfo->spool->lock);
			buf->f_files = sbinfo->max_inodes;
			buf->f_ffree = sbinfo->free_inodes;
		}
		spin_unlock(&sbinfo->stat_lock);
	}
	buf->f_namelen = NAME_MAX;
	return 0;
}

static void hugetlbfs_put_super(struct super_block *sb)
{
	struct hugetlbfs_sb_info *sbi = HUGETLBFS_SB(sb);

	if (sbi) {
		sb->s_fs_info = NULL;

		if (sbi->spool)
			hugepage_put_subpool(sbi->spool);

		kfree(sbi);
	}
}

static inline int hugetlbfs_dec_free_inodes(struct hugetlbfs_sb_info *sbinfo)
{
	if (sbinfo->free_inodes >= 0) {
		spin_lock(&sbinfo->stat_lock);
		if (unlikely(!sbinfo->free_inodes)) {
			spin_unlock(&sbinfo->stat_lock);
			return 0;
		}
		sbinfo->free_inodes--;
		spin_unlock(&sbinfo->stat_lock);
	}

	return 1;
}

static void hugetlbfs_inc_free_inodes(struct hugetlbfs_sb_info *sbinfo)
{
	if (sbinfo->free_inodes >= 0) {
		spin_lock(&sbinfo->stat_lock);
		sbinfo->free_inodes++;
		spin_unlock(&sbinfo->stat_lock);
	}
}


static struct kmem_cache *hugetlbfs_inode_cachep;

static struct inode *hugetlbfs_alloc_inode(struct super_block *sb)
{
	struct hugetlbfs_sb_info *sbinfo = HUGETLBFS_SB(sb);
	struct hugetlbfs_inode_info *p;

	if (unlikely(!hugetlbfs_dec_free_inodes(sbinfo)))
		return NULL;
	p = kmem_cache_alloc(hugetlbfs_inode_cachep, GFP_KERNEL);
	if (unlikely(!p)) {
		hugetlbfs_inc_free_inodes(sbinfo);
		return NULL;
	}

	/*
	 * Any time after allocation, hugetlbfs_destroy_inode can be called
	 * for the inode.  mpol_free_shared_policy is unconditionally called
	 * as part of hugetlbfs_destroy_inode.  So, initialize policy here
	 * in case of a quick call to destroy.
	 *
	 * Note that the policy is initialized even if we are creating a
	 * private inode.  This simplifies hugetlbfs_destroy_inode.
	 */
	mpol_shared_policy_init(&p->policy, NULL);

	return &p->vfs_inode;
}

static void hugetlbfs_i_callback(struct rcu_head *head)
{
	struct inode *inode = container_of(head, struct inode, i_rcu);
	kmem_cache_free(hugetlbfs_inode_cachep, HUGETLBFS_I(inode));
}

static void hugetlbfs_destroy_inode(struct inode *inode)
{
	hugetlbfs_inc_free_inodes(HUGETLBFS_SB(inode->i_sb));
	mpol_free_shared_policy(&HUGETLBFS_I(inode)->policy);
	call_rcu(&inode->i_rcu, hugetlbfs_i_callback);
}

static const struct address_space_operations hugetlbfs_aops = {
	.write_begin	= hugetlbfs_write_begin,
	.write_end	= hugetlbfs_write_end,
	.set_page_dirty	= hugetlbfs_set_page_dirty,
	.migratepage	= hugetlbfs_migrate_page,
	.error_remove_page	= hugetlbfs_error_remove_page,
};


static void init_once(void *foo)
{
	struct hugetlbfs_inode_info *ei = (struct hugetlbfs_inode_info *)foo;

	inode_init_once(&ei->vfs_inode);
}

const struct file_operations hugetlbfs_file_operations = {
	.read_iter		= hugetlbfs_read_iter,
	.mmap			= hugetlbfs_file_mmap,
	.fsync			= noop_fsync,
	.get_unmapped_area	= hugetlb_get_unmapped_area,
	.llseek			= default_llseek,
	.fallocate		= hugetlbfs_fallocate,
};

static const struct inode_operations hugetlbfs_dir_inode_operations = {
	.create		= hugetlbfs_create,
	.lookup		= simple_lookup,
	.link		= simple_link,
	.unlink		= simple_unlink,
	.symlink	= hugetlbfs_symlink,
	.mkdir		= hugetlbfs_mkdir,
	.rmdir		= simple_rmdir,
	.mknod		= hugetlbfs_mknod,
	.rename		= simple_rename,
	.setattr	= hugetlbfs_setattr,
};

static const struct inode_operations hugetlbfs_inode_operations = {
	.setattr	= hugetlbfs_setattr,
};

static const struct super_operations hugetlbfs_ops = {
	.alloc_inode	= hugetlbfs_alloc_inode,
	.destroy_inode	= hugetlbfs_destroy_inode,
	.evict_inode	= hugetlbfs_evict_inode,
	.statfs		= hugetlbfs_statfs,
	.put_super	= hugetlbfs_put_super,
	.show_options	= hugetlbfs_show_options,
};

/*
 * Convert the size option passed on the command line to a number of huge
 * pages in the pool specified by hstate.  The size option can be in bytes
 * (val_type == SIZE_STD) or a percentage of the pool (val_type == SIZE_PERCENT).
 */
static long
hugetlbfs_size_to_hpages(struct hstate *h, unsigned long long size_opt,
			 enum hugetlbfs_size_type val_type)
{
	if (val_type == NO_SIZE)
		return -1;

	if (val_type == SIZE_PERCENT) {
		size_opt <<= huge_page_shift(h);
		size_opt *= h->max_huge_pages;
		do_div(size_opt, 100);
	}

	size_opt >>= huge_page_shift(h);
	return size_opt;
}

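/*
 * Worked example (illustrative): with 2MB huge pages and a pool of
 * 1024 pages (h->max_huge_pages == 1024), "size=50%" arrives here as
 * size_opt == 50 with SIZE_PERCENT: 50 << 21 scaled by 1024/100 is
 * ~1GB in bytes, and the final >> 21 yields 512 huge pages.  A plain
 * "size=1G" (SIZE_STD) is simply 1G >> 21 == 512.
 */
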
/*
 * Parse one mount parameter.
 */
static int hugetlbfs_parse_param(struct fs_context *fc, struct fs_parameter *param)
{
	struct hugetlbfs_fs_context *ctx = fc->fs_private;
	struct fs_parse_result result;
	char *rest;
	unsigned long ps;
	int opt;

	opt = fs_parse(fc, &hugetlb_fs_parameters, param, &result);
	if (opt < 0)
		return opt;

	switch (opt) {
	case Opt_uid:
		ctx->uid = make_kuid(current_user_ns(), result.uint_32);
		if (!uid_valid(ctx->uid))
			goto bad_val;
		return 0;

	case Opt_gid:
		ctx->gid = make_kgid(current_user_ns(), result.uint_32);
		if (!gid_valid(ctx->gid))
			goto bad_val;
		return 0;

	case Opt_mode:
		ctx->mode = result.uint_32 & 01777U;
		return 0;

	case Opt_size:
		/* memparse() will accept a K/M/G without a digit */
		if (!isdigit(param->string[0]))
			goto bad_val;
		ctx->max_size_opt = memparse(param->string, &rest);
		ctx->max_val_type = SIZE_STD;
		if (*rest == '%')
			ctx->max_val_type = SIZE_PERCENT;
		return 0;

	case Opt_nr_inodes:
		/* memparse() will accept a K/M/G without a digit */
		if (!isdigit(param->string[0]))
			goto bad_val;
		ctx->nr_inodes = memparse(param->string, &rest);
		return 0;

	case Opt_pagesize:
		ps = memparse(param->string, &rest);
		ctx->hstate = size_to_hstate(ps);
		if (!ctx->hstate) {
			pr_err("Unsupported page size %lu MB\n", ps >> 20);
			return -EINVAL;
		}
		return 0;

	case Opt_min_size:
		/* memparse() will accept a K/M/G without a digit */
		if (!isdigit(param->string[0]))
			goto bad_val;
		ctx->min_size_opt = memparse(param->string, &rest);
		ctx->min_val_type = SIZE_STD;
		if (*rest == '%')
			ctx->min_val_type = SIZE_PERCENT;
		return 0;

	default:
		return -EINVAL;
	}

bad_val:
	return invalf(fc, "hugetlbfs: Bad value '%s' for mount option '%s'\n",
		      param->string, param->key);
}

/*
 * Validate the parsed options.
 */
static int hugetlbfs_validate(struct fs_context *fc)
{
	struct hugetlbfs_fs_context *ctx = fc->fs_private;

	/*
	 * Use the huge page pool size (in hstate) to convert the size
	 * options to a number of huge pages.  If NO_SIZE, -1 is returned.
	 */
	ctx->max_hpages = hugetlbfs_size_to_hpages(ctx->hstate,
						   ctx->max_size_opt,
						   ctx->max_val_type);
	ctx->min_hpages = hugetlbfs_size_to_hpages(ctx->hstate,
						   ctx->min_size_opt,
						   ctx->min_val_type);

	/*
	 * If max_size was specified, then min_size must be smaller
	 */
	if (ctx->max_val_type > NO_SIZE &&
	    ctx->min_hpages > ctx->max_hpages) {
		pr_err("Minimum size can not be greater than maximum size\n");
		return -EINVAL;
	}

	return 0;
}

static int
hugetlbfs_fill_super(struct super_block *sb, struct fs_context *fc)
{
	struct hugetlbfs_fs_context *ctx = fc->fs_private;
	struct hugetlbfs_sb_info *sbinfo;

	sbinfo = kmalloc(sizeof(struct hugetlbfs_sb_info), GFP_KERNEL);
	if (!sbinfo)
		return -ENOMEM;
	sb->s_fs_info = sbinfo;
	spin_lock_init(&sbinfo->stat_lock);
	sbinfo->hstate		= ctx->hstate;
	sbinfo->max_inodes	= ctx->nr_inodes;
	sbinfo->free_inodes	= ctx->nr_inodes;
	sbinfo->spool		= NULL;
	sbinfo->uid		= ctx->uid;
	sbinfo->gid		= ctx->gid;
	sbinfo->mode		= ctx->mode;

	/*
	 * Allocate and initialize a subpool if a maximum or minimum size is
	 * specified.  Any needed reservations (for the minimum size) are
	 * taken when the subpool is created.
	 */
	if (ctx->max_hpages != -1 || ctx->min_hpages != -1) {
		sbinfo->spool = hugepage_new_subpool(ctx->hstate,
						     ctx->max_hpages,
						     ctx->min_hpages);
		if (!sbinfo->spool)
			goto out_free;
	}
	sb->s_maxbytes = MAX_LFS_FILESIZE;
	sb->s_blocksize = huge_page_size(ctx->hstate);
	sb->s_blocksize_bits = huge_page_shift(ctx->hstate);
	sb->s_magic = HUGETLBFS_MAGIC;
	sb->s_op = &hugetlbfs_ops;
	sb->s_time_gran = 1;
	sb->s_root = d_make_root(hugetlbfs_get_root(sb, ctx));
	if (!sb->s_root)
		goto out_free;
	return 0;
out_free:
	kfree(sbinfo->spool);
	kfree(sbinfo);
	return -ENOMEM;
}

static int hugetlbfs_get_tree(struct fs_context *fc)
{
	int err = hugetlbfs_validate(fc);
	if (err)
		return err;
	return vfs_get_super(fc, vfs_get_independent_super, hugetlbfs_fill_super);
}

static void hugetlbfs_fs_context_free(struct fs_context *fc)
{
	kfree(fc->fs_private);
}

static const struct fs_context_operations hugetlbfs_fs_context_ops = {
	.free		= hugetlbfs_fs_context_free,
	.parse_param	= hugetlbfs_parse_param,
	.get_tree	= hugetlbfs_get_tree,
};

static int hugetlbfs_init_fs_context(struct fs_context *fc)
{
	struct hugetlbfs_fs_context *ctx;

	ctx = kzalloc(sizeof(struct hugetlbfs_fs_context), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	ctx->max_hpages	= -1; /* No limit on size by default */
	ctx->nr_inodes	= -1; /* No limit on number of inodes by default */
	ctx->uid	= current_fsuid();
	ctx->gid	= current_fsgid();
	ctx->mode	= 0755;
	ctx->hstate	= &default_hstate;
	ctx->min_hpages	= -1; /* No default minimum size */
	ctx->max_val_type = NO_SIZE;
	ctx->min_val_type = NO_SIZE;
	fc->fs_private = ctx;
	fc->ops	= &hugetlbfs_fs_context_ops;
	return 0;
}

static struct file_system_type hugetlbfs_fs_type = {
	.name			= "hugetlbfs",
	.init_fs_context	= hugetlbfs_init_fs_context,
	.parameters		= &hugetlb_fs_parameters,
	.kill_sb		= kill_litter_super,
};

static struct vfsmount *hugetlbfs_vfsmount[HUGE_MAX_HSTATE];

static int can_do_hugetlb_shm(void)
{
	kgid_t shm_group;
	shm_group = make_kgid(&init_user_ns, sysctl_hugetlb_shm_group);
	return capable(CAP_IPC_LOCK) || in_group_p(shm_group);
}

static int get_hstate_idx(int page_size_log)
{
	struct hstate *h = hstate_sizelog(page_size_log);

	if (!h)
		return -1;
	return h - hstates;
}

/*
 * Note that size should be aligned to the proper hugepage size by the
 * caller; otherwise hugetlb_reserve_pages() reserves one fewer hugepage
 * than intended.
 */
struct file *hugetlb_file_setup(const char *name, size_t size,
				vm_flags_t acctflag, struct user_struct **user,
				int creat_flags, int page_size_log)
{
	struct inode *inode;
	struct vfsmount *mnt;
	int hstate_idx;
	struct file *file;

	hstate_idx = get_hstate_idx(page_size_log);
	if (hstate_idx < 0)
		return ERR_PTR(-ENODEV);

	*user = NULL;
	mnt = hugetlbfs_vfsmount[hstate_idx];
	if (!mnt)
		return ERR_PTR(-ENOENT);

	if (creat_flags == HUGETLB_SHMFS_INODE && !can_do_hugetlb_shm()) {
		*user = current_user();
		if (user_shm_lock(size, *user)) {
			task_lock(current);
			pr_warn_once("%s (%d): Using mlock ulimits for SHM_HUGETLB is deprecated\n",
				current->comm, current->pid);
			task_unlock(current);
		} else {
			*user = NULL;
			return ERR_PTR(-EPERM);
		}
	}

	file = ERR_PTR(-ENOSPC);
	inode = hugetlbfs_get_inode(mnt->mnt_sb, NULL, S_IFREG | S_IRWXUGO, 0);
	if (!inode)
		goto out;
	if (creat_flags == HUGETLB_SHMFS_INODE)
		inode->i_flags |= S_PRIVATE;

	inode->i_size = size;
	clear_nlink(inode);

	if (hugetlb_reserve_pages(inode, 0,
			size >> huge_page_shift(hstate_inode(inode)), NULL,
			acctflag))
		file = ERR_PTR(-ENOMEM);
	else
		file = alloc_file_pseudo(inode, mnt, name, O_RDWR,
					&hugetlbfs_file_operations);
	if (!IS_ERR(file))
		return file;

	iput(inode);
out:
	if (*user) {
		user_shm_unlock(size, *user);
		*user = NULL;
	}
	return file;
}

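/*
 * Illustrative caller-side sketch: the SysV IPC path reaches this
 * helper via shmget(2) with SHM_HUGETLB, e.g.
 *
 *	int id = shmget(IPC_PRIVATE, 4 * 1024 * 1024,
 *			SHM_HUGETLB | IPC_CREAT | SHM_R | SHM_W);
 *	void *p = shmat(id, NULL, 0);
 *
 * A 4MB request is already aligned for 2MB pages, matching the
 * alignment note above.
 */
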
static struct vfsmount *__init mount_one_hugetlbfs(struct hstate *h)
{
	struct fs_context *fc;
	struct vfsmount *mnt;

	fc = fs_context_for_mount(&hugetlbfs_fs_type, SB_KERNMOUNT);
	if (IS_ERR(fc)) {
		mnt = ERR_CAST(fc);
	} else {
		struct hugetlbfs_fs_context *ctx = fc->fs_private;
		ctx->hstate = h;
		mnt = fc_mount(fc);
		put_fs_context(fc);
	}
	if (IS_ERR(mnt))
		pr_err("Cannot mount internal hugetlbfs for page size %uK",
		       1U << (h->order + PAGE_SHIFT - 10));
	return mnt;
}

static int __init init_hugetlbfs_fs(void)
{
	struct vfsmount *mnt;
	struct hstate *h;
	int error;
	int i;

	if (!hugepages_supported()) {
		pr_info("disabling because there are no supported hugepage sizes\n");
		return -ENOTSUPP;
	}

	error = -ENOMEM;
	hugetlbfs_inode_cachep = kmem_cache_create("hugetlbfs_inode_cache",
					sizeof(struct hugetlbfs_inode_info),
					0, SLAB_ACCOUNT, init_once);
	if (hugetlbfs_inode_cachep == NULL)
		goto out2;

	error = register_filesystem(&hugetlbfs_fs_type);
	if (error)
		goto out;

	i = 0;
	for_each_hstate(h) {
		mnt = mount_one_hugetlbfs(h);
		if (IS_ERR(mnt) && i == 0) {
			error = PTR_ERR(mnt);
			goto out;
		}
		hugetlbfs_vfsmount[i] = mnt;
		i++;
	}

	return 0;

 out:
	kmem_cache_destroy(hugetlbfs_inode_cachep);
 out2:
	return error;
}
fs_initcall(init_hugetlbfs_fs)