mm/shmem.c
1 /*
2  * Resizable virtual memory filesystem for Linux.
3  *
4  * Copyright (C) 2000 Linus Torvalds.
5  *               2000 Transmeta Corp.
6  *               2000-2001 Christoph Rohland
7  *               2000-2001 SAP AG
8  *               2002 Red Hat Inc.
9  * Copyright (C) 2002-2011 Hugh Dickins.
10  * Copyright (C) 2011 Google Inc.
11  * Copyright (C) 2002-2005 VERITAS Software Corporation.
12  * Copyright (C) 2004 Andi Kleen, SuSE Labs
13  *
14  * Extended attribute support for tmpfs:
15  * Copyright (c) 2004, Luke Kenneth Casson Leighton <lkcl@lkcl.net>
16  * Copyright (c) 2004 Red Hat, Inc., James Morris <jmorris@redhat.com>
17  *
18  * tiny-shmem:
19  * Copyright (c) 2004, 2008 Matt Mackall <mpm@selenic.com>
20  *
21  * This file is released under the GPL.
22  */
23
24 #include <linux/fs.h>
25 #include <linux/init.h>
26 #include <linux/vfs.h>
27 #include <linux/mount.h>
28 #include <linux/ramfs.h>
29 #include <linux/pagemap.h>
30 #include <linux/file.h>
31 #include <linux/mm.h>
32 #include <linux/random.h>
33 #include <linux/sched/signal.h>
34 #include <linux/export.h>
35 #include <linux/swap.h>
36 #include <linux/uio.h>
37 #include <linux/khugepaged.h>
38 #include <linux/hugetlb.h>
39 #include <linux/frontswap.h>
40 #include <linux/fs_parser.h>
41
42 #include <asm/tlbflush.h> /* for arch/microblaze update_mmu_cache() */
43
44 static struct vfsmount *shm_mnt;
45
46 #ifdef CONFIG_SHMEM
47 /*
48  * This virtual memory filesystem is heavily based on the ramfs. It
49  * extends ramfs by the ability to use swap and honor resource limits
50  * which makes it a completely usable filesystem.
51  */
52
53 #include <linux/xattr.h>
54 #include <linux/exportfs.h>
55 #include <linux/posix_acl.h>
56 #include <linux/posix_acl_xattr.h>
57 #include <linux/mman.h>
58 #include <linux/string.h>
59 #include <linux/slab.h>
60 #include <linux/backing-dev.h>
61 #include <linux/shmem_fs.h>
62 #include <linux/writeback.h>
63 #include <linux/blkdev.h>
64 #include <linux/pagevec.h>
65 #include <linux/percpu_counter.h>
66 #include <linux/falloc.h>
67 #include <linux/splice.h>
68 #include <linux/security.h>
69 #include <linux/swapops.h>
70 #include <linux/mempolicy.h>
71 #include <linux/namei.h>
72 #include <linux/ctype.h>
73 #include <linux/migrate.h>
74 #include <linux/highmem.h>
75 #include <linux/seq_file.h>
76 #include <linux/magic.h>
77 #include <linux/syscalls.h>
78 #include <linux/fcntl.h>
79 #include <uapi/linux/memfd.h>
80 #include <linux/userfaultfd_k.h>
81 #include <linux/rmap.h>
82 #include <linux/uuid.h>
83
84 #include <linux/uaccess.h>
85 #include <asm/pgtable.h>
86
87 #include "internal.h"
88
89 #define BLOCKS_PER_PAGE  (PAGE_SIZE/512)
90 #define VM_ACCT(size)    (PAGE_ALIGN(size) >> PAGE_SHIFT)
91
92 /* Pretend that each entry is of this size in directory's i_size */
93 #define BOGO_DIRENT_SIZE 20
94
95 /* Symlink up to this size is kmalloc'ed instead of using a swappable page */
96 #define SHORT_SYMLINK_LEN 128
97
98 /*
99  * shmem_fallocate communicates with shmem_fault or shmem_writepage via
100  * inode->i_private (with i_mutex making sure that it has only one user at
101  * a time): we would prefer not to enlarge the shmem inode just for that.
102  */
103 struct shmem_falloc {
104         wait_queue_head_t *waitq; /* faults into hole wait for punch to end */
105         pgoff_t start;          /* start of range currently being fallocated */
106         pgoff_t next;           /* the next page offset to be fallocated */
107         pgoff_t nr_falloced;    /* how many new pages have been fallocated */
108         pgoff_t nr_unswapped;   /* how often writepage refused to swap out */
109 };
110
111 struct shmem_options {
112         unsigned long long blocks;
113         unsigned long long inodes;
114         struct mempolicy *mpol;
115         kuid_t uid;
116         kgid_t gid;
117         umode_t mode;
118         int huge;
119         int seen;
120 #define SHMEM_SEEN_BLOCKS 1
121 #define SHMEM_SEEN_INODES 2
122 #define SHMEM_SEEN_HUGE 4
123 };
124
125 #ifdef CONFIG_TMPFS
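/*
 * Default limits when a tmpfs mount is given no "size" or "nr_inodes"
 * option: at most half of RAM for blocks, and inodes capped by both the
 * number of lowmem pages and half of the RAM pages.
 */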
126 static unsigned long shmem_default_max_blocks(void)
127 {
128         return totalram_pages() / 2;
129 }
130
131 static unsigned long shmem_default_max_inodes(void)
132 {
133         unsigned long nr_pages = totalram_pages();
134
135         return min(nr_pages - totalhigh_pages(), nr_pages / 2);
136 }
137 #endif
138
139 static bool shmem_should_replace_page(struct page *page, gfp_t gfp);
140 static int shmem_replace_page(struct page **pagep, gfp_t gfp,
141                                 struct shmem_inode_info *info, pgoff_t index);
142 static int shmem_swapin_page(struct inode *inode, pgoff_t index,
143                              struct page **pagep, enum sgp_type sgp,
144                              gfp_t gfp, struct vm_area_struct *vma,
145                              vm_fault_t *fault_type);
146 static int shmem_getpage_gfp(struct inode *inode, pgoff_t index,
147                 struct page **pagep, enum sgp_type sgp,
148                 gfp_t gfp, struct vm_area_struct *vma,
149                 struct vm_fault *vmf, vm_fault_t *fault_type);
150
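/*
 * shmem_getpage - convenience wrapper around shmem_getpage_gfp(), using the
 * mapping's default gfp mask and passing no vma or fault information.
 */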
151 int shmem_getpage(struct inode *inode, pgoff_t index,
152                 struct page **pagep, enum sgp_type sgp)
153 {
154         return shmem_getpage_gfp(inode, index, pagep, sgp,
155                 mapping_gfp_mask(inode->i_mapping), NULL, NULL, NULL);
156 }
157
158 static inline struct shmem_sb_info *SHMEM_SB(struct super_block *sb)
159 {
160         return sb->s_fs_info;
161 }
162
163 /*
164  * shmem_file_setup pre-accounts the whole fixed size of a VM object,
165  * for shared memory and for shared anonymous (/dev/zero) mappings
166  * (unless MAP_NORESERVE and sysctl_overcommit_memory <= 1),
167  * consistent with the pre-accounting of private mappings ...
168  */
169 static inline int shmem_acct_size(unsigned long flags, loff_t size)
170 {
171         return (flags & VM_NORESERVE) ?
172                 0 : security_vm_enough_memory_mm(current->mm, VM_ACCT(size));
173 }
174
175 static inline void shmem_unacct_size(unsigned long flags, loff_t size)
176 {
177         if (!(flags & VM_NORESERVE))
178                 vm_unacct_memory(VM_ACCT(size));
179 }
180
181 static inline int shmem_reacct_size(unsigned long flags,
182                 loff_t oldsize, loff_t newsize)
183 {
184         if (!(flags & VM_NORESERVE)) {
185                 if (VM_ACCT(newsize) > VM_ACCT(oldsize))
186                         return security_vm_enough_memory_mm(current->mm,
187                                         VM_ACCT(newsize) - VM_ACCT(oldsize));
188                 else if (VM_ACCT(newsize) < VM_ACCT(oldsize))
189                         vm_unacct_memory(VM_ACCT(oldsize) - VM_ACCT(newsize));
190         }
191         return 0;
192 }
193
194 /*
195  * ... whereas tmpfs objects are accounted incrementally as
196  * pages are allocated, in order to allow large sparse files.
197  * shmem_getpage reports shmem_acct_block failure as -ENOSPC not -ENOMEM,
198  * so that a failure on a sparse tmpfs mapping will give SIGBUS not OOM.
199  */
200 static inline int shmem_acct_block(unsigned long flags, long pages)
201 {
202         if (!(flags & VM_NORESERVE))
203                 return 0;
204
205         return security_vm_enough_memory_mm(current->mm,
206                         pages * VM_ACCT(PAGE_SIZE));
207 }
208
209 static inline void shmem_unacct_blocks(unsigned long flags, long pages)
210 {
211         if (flags & VM_NORESERVE)
212                 vm_unacct_memory(pages * VM_ACCT(PAGE_SIZE));
213 }
214
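/*
 * Charge the inode for @pages new blocks: against the memory commitment for
 * VM_NORESERVE objects (whose size was not pre-accounted), and against the
 * mount's block limit if one is set.  Returns false, undoing any charge,
 * when either limit would be exceeded.
 */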
215 static inline bool shmem_inode_acct_block(struct inode *inode, long pages)
216 {
217         struct shmem_inode_info *info = SHMEM_I(inode);
218         struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
219
220         if (shmem_acct_block(info->flags, pages))
221                 return false;
222
223         if (sbinfo->max_blocks) {
224                 if (percpu_counter_compare(&sbinfo->used_blocks,
225                                            sbinfo->max_blocks - pages) > 0)
226                         goto unacct;
227                 percpu_counter_add(&sbinfo->used_blocks, pages);
228         }
229
230         return true;
231
232 unacct:
233         shmem_unacct_blocks(info->flags, pages);
234         return false;
235 }
236
237 static inline void shmem_inode_unacct_blocks(struct inode *inode, long pages)
238 {
239         struct shmem_inode_info *info = SHMEM_I(inode);
240         struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
241
242         if (sbinfo->max_blocks)
243                 percpu_counter_sub(&sbinfo->used_blocks, pages);
244         shmem_unacct_blocks(info->flags, pages);
245 }
246
247 static const struct super_operations shmem_ops;
248 static const struct address_space_operations shmem_aops;
249 static const struct file_operations shmem_file_operations;
250 static const struct inode_operations shmem_inode_operations;
251 static const struct inode_operations shmem_dir_inode_operations;
252 static const struct inode_operations shmem_special_inode_operations;
253 static const struct vm_operations_struct shmem_vm_ops;
254 static struct file_system_type shmem_fs_type;
255
256 bool vma_is_shmem(struct vm_area_struct *vma)
257 {
258         return vma->vm_ops == &shmem_vm_ops;
259 }
260
261 static LIST_HEAD(shmem_swaplist);
262 static DEFINE_MUTEX(shmem_swaplist_mutex);
263
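/*
 * Reserve or release one inode against the mount's inode limit (if any);
 * shmem_reserve_inode() returns -ENOSPC once the limit has been reached.
 */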
264 static int shmem_reserve_inode(struct super_block *sb)
265 {
266         struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
267         if (sbinfo->max_inodes) {
268                 spin_lock(&sbinfo->stat_lock);
269                 if (!sbinfo->free_inodes) {
270                         spin_unlock(&sbinfo->stat_lock);
271                         return -ENOSPC;
272                 }
273                 sbinfo->free_inodes--;
274                 spin_unlock(&sbinfo->stat_lock);
275         }
276         return 0;
277 }
278
279 static void shmem_free_inode(struct super_block *sb)
280 {
281         struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
282         if (sbinfo->max_inodes) {
283                 spin_lock(&sbinfo->stat_lock);
284                 sbinfo->free_inodes++;
285                 spin_unlock(&sbinfo->stat_lock);
286         }
287 }
288
289 /**
290  * shmem_recalc_inode - recalculate the block usage of an inode
291  * @inode: inode to recalc
292  *
293  * We have to calculate the free blocks since the mm can drop
294  * undirtied hole pages behind our back.
295  *
296  * But normally   info->alloced == inode->i_mapping->nrpages + info->swapped
297  * So mm freed is info->alloced - (inode->i_mapping->nrpages + info->swapped)
298  *
299  * It has to be called with the spinlock held.
300  */
301 static void shmem_recalc_inode(struct inode *inode)
302 {
303         struct shmem_inode_info *info = SHMEM_I(inode);
304         long freed;
305
306         freed = info->alloced - info->swapped - inode->i_mapping->nrpages;
307         if (freed > 0) {
308                 info->alloced -= freed;
309                 inode->i_blocks -= freed * BLOCKS_PER_PAGE;
310                 shmem_inode_unacct_blocks(inode, freed);
311         }
312 }
313
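/*
 * Account for pages inserted into a shmem mapping by an outside party
 * (e.g. khugepaged collapsing the file into a huge page); returns false
 * if the blocks cannot be charged.
 */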
314 bool shmem_charge(struct inode *inode, long pages)
315 {
316         struct shmem_inode_info *info = SHMEM_I(inode);
317         unsigned long flags;
318
319         if (!shmem_inode_acct_block(inode, pages))
320                 return false;
321
322         /* nrpages adjustment first, then shmem_recalc_inode() when balanced */
323         inode->i_mapping->nrpages += pages;
324
325         spin_lock_irqsave(&info->lock, flags);
326         info->alloced += pages;
327         inode->i_blocks += pages * BLOCKS_PER_PAGE;
328         shmem_recalc_inode(inode);
329         spin_unlock_irqrestore(&info->lock, flags);
330
331         return true;
332 }
333
334 void shmem_uncharge(struct inode *inode, long pages)
335 {
336         struct shmem_inode_info *info = SHMEM_I(inode);
337         unsigned long flags;
338
339         /* nrpages adjustment done by __delete_from_page_cache() or caller */
340
341         spin_lock_irqsave(&info->lock, flags);
342         info->alloced -= pages;
343         inode->i_blocks -= pages * BLOCKS_PER_PAGE;
344         shmem_recalc_inode(inode);
345         spin_unlock_irqrestore(&info->lock, flags);
346
347         shmem_inode_unacct_blocks(inode, pages);
348 }
349
350 /*
351  * Replace item expected in xarray by a new item, while holding xa_lock.
352  */
353 static int shmem_replace_entry(struct address_space *mapping,
354                         pgoff_t index, void *expected, void *replacement)
355 {
356         XA_STATE(xas, &mapping->i_pages, index);
357         void *item;
358
359         VM_BUG_ON(!expected);
360         VM_BUG_ON(!replacement);
361         item = xas_load(&xas);
362         if (item != expected)
363                 return -ENOENT;
364         xas_store(&xas, replacement);
365         return 0;
366 }
367
368 /*
369  * Sometimes, before we decide whether to proceed or to fail, we must check
370  * that an entry was not already brought back from swap by a racing thread.
371  *
372  * Checking page is not enough: by the time a SwapCache page is locked, it
373  * might be reused, and again be SwapCache, using the same swap as before.
374  */
375 static bool shmem_confirm_swap(struct address_space *mapping,
376                                pgoff_t index, swp_entry_t swap)
377 {
378         return xa_load(&mapping->i_pages, index) == swp_to_radix_entry(swap);
379 }
380
381 /*
382  * Definitions for "huge tmpfs": tmpfs mounted with the huge= option
383  *
384  * SHMEM_HUGE_NEVER:
385  *      disables huge pages for the mount;
386  * SHMEM_HUGE_ALWAYS:
387  *      enables huge pages for the mount;
388  * SHMEM_HUGE_WITHIN_SIZE:
389  *      only allocate huge pages if the page will be fully within i_size,
390  *      also respect fadvise()/madvise() hints;
391  * SHMEM_HUGE_ADVISE:
392  *      only allocate huge pages if requested with fadvise()/madvise();
393  */
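/* e.g. mount -t tmpfs -o huge=within_size,size=1G tmpfs /mnt */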
394
395 #define SHMEM_HUGE_NEVER        0
396 #define SHMEM_HUGE_ALWAYS       1
397 #define SHMEM_HUGE_WITHIN_SIZE  2
398 #define SHMEM_HUGE_ADVISE       3
399
400 /*
401  * Special values.
402  * Can only be set via /sys/kernel/mm/transparent_hugepage/shmem_enabled:
403  *
404  * SHMEM_HUGE_DENY:
405  *      disables huge on shm_mnt and all mounts, for emergency use;
406  * SHMEM_HUGE_FORCE:
407  *      enables huge on shm_mnt and all mounts, w/o needing option, for testing;
408  *
409  */
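/* e.g. echo force > /sys/kernel/mm/transparent_hugepage/shmem_enabled */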
410 #define SHMEM_HUGE_DENY         (-1)
411 #define SHMEM_HUGE_FORCE        (-2)
412
413 #ifdef CONFIG_TRANSPARENT_HUGE_PAGECACHE
414 /* ifdef here to avoid bloating shmem.o when not necessary */
415
416 static int shmem_huge __read_mostly;
417
418 #if defined(CONFIG_SYSFS)
419 static int shmem_parse_huge(const char *str)
420 {
421         if (!strcmp(str, "never"))
422                 return SHMEM_HUGE_NEVER;
423         if (!strcmp(str, "always"))
424                 return SHMEM_HUGE_ALWAYS;
425         if (!strcmp(str, "within_size"))
426                 return SHMEM_HUGE_WITHIN_SIZE;
427         if (!strcmp(str, "advise"))
428                 return SHMEM_HUGE_ADVISE;
429         if (!strcmp(str, "deny"))
430                 return SHMEM_HUGE_DENY;
431         if (!strcmp(str, "force"))
432                 return SHMEM_HUGE_FORCE;
433         return -EINVAL;
434 }
435 #endif
436
437 #if defined(CONFIG_SYSFS) || defined(CONFIG_TMPFS)
438 static const char *shmem_format_huge(int huge)
439 {
440         switch (huge) {
441         case SHMEM_HUGE_NEVER:
442                 return "never";
443         case SHMEM_HUGE_ALWAYS:
444                 return "always";
445         case SHMEM_HUGE_WITHIN_SIZE:
446                 return "within_size";
447         case SHMEM_HUGE_ADVISE:
448                 return "advise";
449         case SHMEM_HUGE_DENY:
450                 return "deny";
451         case SHMEM_HUGE_FORCE:
452                 return "force";
453         default:
454                 VM_BUG_ON(1);
455                 return "bad_val";
456         }
457 }
458 #endif
459
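/*
 * Walk sbinfo->shrinklist and try to split huge pages which extend beyond
 * i_size, so that the tail pages past EOF can be reclaimed.  Inodes whose
 * size is already huge-page aligned are simply dropped from the list.
 * Returns the number of huge pages split, or SHRINK_STOP if the list is
 * empty.
 */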
460 static unsigned long shmem_unused_huge_shrink(struct shmem_sb_info *sbinfo,
461                 struct shrink_control *sc, unsigned long nr_to_split)
462 {
463         LIST_HEAD(list), *pos, *next;
464         LIST_HEAD(to_remove);
465         struct inode *inode;
466         struct shmem_inode_info *info;
467         struct page *page;
468         unsigned long batch = sc ? sc->nr_to_scan : 128;
469         int removed = 0, split = 0;
470
471         if (list_empty(&sbinfo->shrinklist))
472                 return SHRINK_STOP;
473
474         spin_lock(&sbinfo->shrinklist_lock);
475         list_for_each_safe(pos, next, &sbinfo->shrinklist) {
476                 info = list_entry(pos, struct shmem_inode_info, shrinklist);
477
478                 /* pin the inode */
479                 inode = igrab(&info->vfs_inode);
480
481                 /* inode is about to be evicted */
482                 if (!inode) {
483                         list_del_init(&info->shrinklist);
484                         removed++;
485                         goto next;
486                 }
487
488                 /* Check if there's anything to gain */
489                 if (round_up(inode->i_size, PAGE_SIZE) ==
490                                 round_up(inode->i_size, HPAGE_PMD_SIZE)) {
491                         list_move(&info->shrinklist, &to_remove);
492                         removed++;
493                         goto next;
494                 }
495
496                 list_move(&info->shrinklist, &list);
497 next:
498                 if (!--batch)
499                         break;
500         }
501         spin_unlock(&sbinfo->shrinklist_lock);
502
503         list_for_each_safe(pos, next, &to_remove) {
504                 info = list_entry(pos, struct shmem_inode_info, shrinklist);
505                 inode = &info->vfs_inode;
506                 list_del_init(&info->shrinklist);
507                 iput(inode);
508         }
509
510         list_for_each_safe(pos, next, &list) {
511                 int ret;
512
513                 info = list_entry(pos, struct shmem_inode_info, shrinklist);
514                 inode = &info->vfs_inode;
515
516                 if (nr_to_split && split >= nr_to_split)
517                         goto leave;
518
519                 page = find_get_page(inode->i_mapping,
520                                 (inode->i_size & HPAGE_PMD_MASK) >> PAGE_SHIFT);
521                 if (!page)
522                         goto drop;
523
524                 /* No huge page at the end of the file: nothing to split */
525                 if (!PageTransHuge(page)) {
526                         put_page(page);
527                         goto drop;
528                 }
529
530                 /*
531                  * Leave the inode on the list if we failed to lock
532                  * the page at this time.
533                  *
534                  * Waiting for the lock may lead to deadlock in the
535                  * reclaim path.
536                  */
537                 if (!trylock_page(page)) {
538                         put_page(page);
539                         goto leave;
540                 }
541
542                 ret = split_huge_page(page);
543                 unlock_page(page);
544                 put_page(page);
545
546                 /* If split failed leave the inode on the list */
547                 if (ret)
548                         goto leave;
549
550                 split++;
551 drop:
552                 list_del_init(&info->shrinklist);
553                 removed++;
554 leave:
555                 iput(inode);
556         }
557
558         spin_lock(&sbinfo->shrinklist_lock);
559         list_splice_tail(&list, &sbinfo->shrinklist);
560         sbinfo->shrinklist_len -= removed;
561         spin_unlock(&sbinfo->shrinklist_lock);
562
563         return split;
564 }
565
566 static long shmem_unused_huge_scan(struct super_block *sb,
567                 struct shrink_control *sc)
568 {
569         struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
570
571         if (!READ_ONCE(sbinfo->shrinklist_len))
572                 return SHRINK_STOP;
573
574         return shmem_unused_huge_shrink(sbinfo, sc, 0);
575 }
576
577 static long shmem_unused_huge_count(struct super_block *sb,
578                 struct shrink_control *sc)
579 {
580         struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
581         return READ_ONCE(sbinfo->shrinklist_len);
582 }
583 #else /* !CONFIG_TRANSPARENT_HUGE_PAGECACHE */
584
585 #define shmem_huge SHMEM_HUGE_DENY
586
587 static unsigned long shmem_unused_huge_shrink(struct shmem_sb_info *sbinfo,
588                 struct shrink_control *sc, unsigned long nr_to_split)
589 {
590         return 0;
591 }
592 #endif /* CONFIG_TRANSPARENT_HUGE_PAGECACHE */
593
594 static inline bool is_huge_enabled(struct shmem_sb_info *sbinfo)
595 {
596         if (IS_ENABLED(CONFIG_TRANSPARENT_HUGE_PAGECACHE) &&
597             (shmem_huge == SHMEM_HUGE_FORCE || sbinfo->huge) &&
598             shmem_huge != SHMEM_HUGE_DENY)
599                 return true;
600         return false;
601 }
602
603 /*
604  * Like add_to_page_cache_locked, but error if expected item has gone.
605  */
606 static int shmem_add_to_page_cache(struct page *page,
607                                    struct address_space *mapping,
608                                    pgoff_t index, void *expected, gfp_t gfp)
609 {
610         XA_STATE_ORDER(xas, &mapping->i_pages, index, compound_order(page));
611         unsigned long i = 0;
612         unsigned long nr = compound_nr(page);
613
614         VM_BUG_ON_PAGE(PageTail(page), page);
615         VM_BUG_ON_PAGE(index != round_down(index, nr), page);
616         VM_BUG_ON_PAGE(!PageLocked(page), page);
617         VM_BUG_ON_PAGE(!PageSwapBacked(page), page);
618         VM_BUG_ON(expected && PageTransHuge(page));
619
620         page_ref_add(page, nr);
621         page->mapping = mapping;
622         page->index = index;
623
624         do {
625                 void *entry;
626                 xas_lock_irq(&xas);
627                 entry = xas_find_conflict(&xas);
628                 if (entry != expected)
629                         xas_set_err(&xas, -EEXIST);
630                 xas_create_range(&xas);
631                 if (xas_error(&xas))
632                         goto unlock;
633 next:
634                 xas_store(&xas, page);
635                 if (++i < nr) {
636                         xas_next(&xas);
637                         goto next;
638                 }
639                 if (PageTransHuge(page)) {
640                         count_vm_event(THP_FILE_ALLOC);
641                         __inc_node_page_state(page, NR_SHMEM_THPS);
642                 }
643                 mapping->nrpages += nr;
644                 __mod_node_page_state(page_pgdat(page), NR_FILE_PAGES, nr);
645                 __mod_node_page_state(page_pgdat(page), NR_SHMEM, nr);
646 unlock:
647                 xas_unlock_irq(&xas);
648         } while (xas_nomem(&xas, gfp));
649
650         if (xas_error(&xas)) {
651                 page->mapping = NULL;
652                 page_ref_sub(page, nr);
653                 return xas_error(&xas);
654         }
655
656         return 0;
657 }
658
659 /*
660  * Like delete_from_page_cache, but substitutes swap for page.
661  */
662 static void shmem_delete_from_page_cache(struct page *page, void *radswap)
663 {
664         struct address_space *mapping = page->mapping;
665         int error;
666
667         VM_BUG_ON_PAGE(PageCompound(page), page);
668
669         xa_lock_irq(&mapping->i_pages);
670         error = shmem_replace_entry(mapping, page->index, page, radswap);
671         page->mapping = NULL;
672         mapping->nrpages--;
673         __dec_node_page_state(page, NR_FILE_PAGES);
674         __dec_node_page_state(page, NR_SHMEM);
675         xa_unlock_irq(&mapping->i_pages);
676         put_page(page);
677         BUG_ON(error);
678 }
679
680 /*
681  * Remove swap entry from page cache, free the swap and its page cache.
682  */
683 static int shmem_free_swap(struct address_space *mapping,
684                            pgoff_t index, void *radswap)
685 {
686         void *old;
687
688         old = xa_cmpxchg_irq(&mapping->i_pages, index, radswap, NULL, 0);
689         if (old != radswap)
690                 return -ENOENT;
691         free_swap_and_cache(radix_to_swp_entry(radswap));
692         return 0;
693 }
694
695 /*
696  * Determine (in bytes) how many of the shmem object's pages mapped by the
697  * given offsets are swapped out.
698  *
699  * This is safe to call without i_mutex or the i_pages lock thanks to RCU,
700  * as long as the inode doesn't go away and racy results are not a problem.
701  */
702 unsigned long shmem_partial_swap_usage(struct address_space *mapping,
703                                                 pgoff_t start, pgoff_t end)
704 {
705         XA_STATE(xas, &mapping->i_pages, start);
706         struct page *page;
707         unsigned long swapped = 0;
708
709         rcu_read_lock();
710         xas_for_each(&xas, page, end - 1) {
711                 if (xas_retry(&xas, page))
712                         continue;
713                 if (xa_is_value(page))
714                         swapped++;
715
716                 if (need_resched()) {
717                         xas_pause(&xas);
718                         cond_resched_rcu();
719                 }
720         }
721
722         rcu_read_unlock();
723
724         return swapped << PAGE_SHIFT;
725 }
726
727 /*
728  * Determine (in bytes) how many of the shmem object's pages mapped by the
729  * given vma are swapped out.
730  *
731  * This is safe to call without i_mutex or the i_pages lock thanks to RCU,
732  * as long as the inode doesn't go away and racy results are not a problem.
733  */
734 unsigned long shmem_swap_usage(struct vm_area_struct *vma)
735 {
736         struct inode *inode = file_inode(vma->vm_file);
737         struct shmem_inode_info *info = SHMEM_I(inode);
738         struct address_space *mapping = inode->i_mapping;
739         unsigned long swapped;
740
741         /* Be careful as we don't hold info->lock */
742         swapped = READ_ONCE(info->swapped);
743
744         /*
745          * The easier cases are when the shmem object has nothing in swap, or
746          * the vma maps it whole. Then we can simply use the stats that we
747          * already track.
748          */
749         if (!swapped)
750                 return 0;
751
752         if (!vma->vm_pgoff && vma->vm_end - vma->vm_start >= inode->i_size)
753                 return swapped << PAGE_SHIFT;
754
755         /* Here comes the more involved part */
756         return shmem_partial_swap_usage(mapping,
757                         linear_page_index(vma, vma->vm_start),
758                         linear_page_index(vma, vma->vm_end));
759 }
760
761 /*
762  * SysV IPC SHM_UNLOCK restores Unevictable pages to their evictable lists.
763  */
764 void shmem_unlock_mapping(struct address_space *mapping)
765 {
766         struct pagevec pvec;
767         pgoff_t indices[PAGEVEC_SIZE];
768         pgoff_t index = 0;
769
770         pagevec_init(&pvec);
771         /*
772          * Minor point, but we might as well stop if someone else SHM_LOCKs it.
773          */
774         while (!mapping_unevictable(mapping)) {
775                 /*
776                  * Avoid pagevec_lookup(): find_get_pages() returns 0 as if it
777                  * has finished, if it hits a row of PAGEVEC_SIZE swap entries.
778                  */
779                 pvec.nr = find_get_entries(mapping, index,
780                                            PAGEVEC_SIZE, pvec.pages, indices);
781                 if (!pvec.nr)
782                         break;
783                 index = indices[pvec.nr - 1] + 1;
784                 pagevec_remove_exceptionals(&pvec);
785                 check_move_unevictable_pages(&pvec);
786                 pagevec_release(&pvec);
787                 cond_resched();
788         }
789 }
790
791 /*
792  * Remove range of pages and swap entries from page cache, and free them.
793  * If !unfalloc, truncate or punch hole; if unfalloc, undo failed fallocate.
794  */
795 static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
796                                                                  bool unfalloc)
797 {
798         struct address_space *mapping = inode->i_mapping;
799         struct shmem_inode_info *info = SHMEM_I(inode);
800         pgoff_t start = (lstart + PAGE_SIZE - 1) >> PAGE_SHIFT;
801         pgoff_t end = (lend + 1) >> PAGE_SHIFT;
802         unsigned int partial_start = lstart & (PAGE_SIZE - 1);
803         unsigned int partial_end = (lend + 1) & (PAGE_SIZE - 1);
804         struct pagevec pvec;
805         pgoff_t indices[PAGEVEC_SIZE];
806         long nr_swaps_freed = 0;
807         pgoff_t index;
808         int i;
809
810         if (lend == -1)
811                 end = -1;       /* unsigned, so actually very big */
812
813         pagevec_init(&pvec);
814         index = start;
815         while (index < end) {
816                 pvec.nr = find_get_entries(mapping, index,
817                         min(end - index, (pgoff_t)PAGEVEC_SIZE),
818                         pvec.pages, indices);
819                 if (!pvec.nr)
820                         break;
821                 for (i = 0; i < pagevec_count(&pvec); i++) {
822                         struct page *page = pvec.pages[i];
823
824                         index = indices[i];
825                         if (index >= end)
826                                 break;
827
828                         if (xa_is_value(page)) {
829                                 if (unfalloc)
830                                         continue;
831                                 nr_swaps_freed += !shmem_free_swap(mapping,
832                                                                 index, page);
833                                 continue;
834                         }
835
836                         VM_BUG_ON_PAGE(page_to_pgoff(page) != index, page);
837
838                         if (!trylock_page(page))
839                                 continue;
840
841                         if (PageTransTail(page)) {
842                                 /* Middle of THP: zero out the page */
843                                 clear_highpage(page);
844                                 unlock_page(page);
845                                 continue;
846                         } else if (PageTransHuge(page)) {
847                                 if (index == round_down(end, HPAGE_PMD_NR)) {
848                                         /*
849                                          * Range ends in the middle of THP:
850                                          * zero out the page
851                                          */
852                                         clear_highpage(page);
853                                         unlock_page(page);
854                                         continue;
855                                 }
856                                 index += HPAGE_PMD_NR - 1;
857                                 i += HPAGE_PMD_NR - 1;
858                         }
859
860                         if (!unfalloc || !PageUptodate(page)) {
861                                 VM_BUG_ON_PAGE(PageTail(page), page);
862                                 if (page_mapping(page) == mapping) {
863                                         VM_BUG_ON_PAGE(PageWriteback(page), page);
864                                         truncate_inode_page(mapping, page);
865                                 }
866                         }
867                         unlock_page(page);
868                 }
869                 pagevec_remove_exceptionals(&pvec);
870                 pagevec_release(&pvec);
871                 cond_resched();
872                 index++;
873         }
874
875         if (partial_start) {
876                 struct page *page = NULL;
877                 shmem_getpage(inode, start - 1, &page, SGP_READ);
878                 if (page) {
879                         unsigned int top = PAGE_SIZE;
880                         if (start > end) {
881                                 top = partial_end;
882                                 partial_end = 0;
883                         }
884                         zero_user_segment(page, partial_start, top);
885                         set_page_dirty(page);
886                         unlock_page(page);
887                         put_page(page);
888                 }
889         }
890         if (partial_end) {
891                 struct page *page = NULL;
892                 shmem_getpage(inode, end, &page, SGP_READ);
893                 if (page) {
894                         zero_user_segment(page, 0, partial_end);
895                         set_page_dirty(page);
896                         unlock_page(page);
897                         put_page(page);
898                 }
899         }
900         if (start >= end)
901                 return;
902
903         index = start;
904         while (index < end) {
905                 cond_resched();
906
907                 pvec.nr = find_get_entries(mapping, index,
908                                 min(end - index, (pgoff_t)PAGEVEC_SIZE),
909                                 pvec.pages, indices);
910                 if (!pvec.nr) {
911                         /* If all gone or hole-punch or unfalloc, we're done */
912                         if (index == start || end != -1)
913                                 break;
914                         /* But if truncating, restart to make sure all gone */
915                         index = start;
916                         continue;
917                 }
918                 for (i = 0; i < pagevec_count(&pvec); i++) {
919                         struct page *page = pvec.pages[i];
920
921                         index = indices[i];
922                         if (index >= end)
923                                 break;
924
925                         if (xa_is_value(page)) {
926                                 if (unfalloc)
927                                         continue;
928                                 if (shmem_free_swap(mapping, index, page)) {
929                                         /* Swap was replaced by page: retry */
930                                         index--;
931                                         break;
932                                 }
933                                 nr_swaps_freed++;
934                                 continue;
935                         }
936
937                         lock_page(page);
938
939                         if (PageTransTail(page)) {
940                                 /* Middle of THP: zero out the page */
941                                 clear_highpage(page);
942                                 unlock_page(page);
943                                 /*
944                                  * Partial THP truncate due to 'start' falling in
945                                  * the middle of a THP: no need to look at these
946                                  * pages again on a !pvec.nr restart.
947                                  */
948                                 if (index != round_down(end, HPAGE_PMD_NR))
949                                         start++;
950                                 continue;
951                         } else if (PageTransHuge(page)) {
952                                 if (index == round_down(end, HPAGE_PMD_NR)) {
953                                         /*
954                                          * Range ends in the middle of THP:
955                                          * zero out the page
956                                          */
957                                         clear_highpage(page);
958                                         unlock_page(page);
959                                         continue;
960                                 }
961                                 index += HPAGE_PMD_NR - 1;
962                                 i += HPAGE_PMD_NR - 1;
963                         }
964
965                         if (!unfalloc || !PageUptodate(page)) {
966                                 VM_BUG_ON_PAGE(PageTail(page), page);
967                                 if (page_mapping(page) == mapping) {
968                                         VM_BUG_ON_PAGE(PageWriteback(page), page);
969                                         truncate_inode_page(mapping, page);
970                                 } else {
971                                         /* Page was replaced by swap: retry */
972                                         unlock_page(page);
973                                         index--;
974                                         break;
975                                 }
976                         }
977                         unlock_page(page);
978                 }
979                 pagevec_remove_exceptionals(&pvec);
980                 pagevec_release(&pvec);
981                 index++;
982         }
983
984         spin_lock_irq(&info->lock);
985         info->swapped -= nr_swaps_freed;
986         shmem_recalc_inode(inode);
987         spin_unlock_irq(&info->lock);
988 }
989
990 void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
991 {
992         shmem_undo_range(inode, lstart, lend, false);
993         inode->i_ctime = inode->i_mtime = current_time(inode);
994 }
995 EXPORT_SYMBOL_GPL(shmem_truncate_range);
996
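/*
 * stat() on a shmem inode: bring the block counts up to date first (the mm
 * may have dropped clean pages behind our back), and report the huge page
 * size as the preferred I/O blocksize when huge pages are enabled.
 */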
997 static int shmem_getattr(const struct path *path, struct kstat *stat,
998                          u32 request_mask, unsigned int query_flags)
999 {
1000         struct inode *inode = path->dentry->d_inode;
1001         struct shmem_inode_info *info = SHMEM_I(inode);
1002         struct shmem_sb_info *sb_info = SHMEM_SB(inode->i_sb);
1003
1004         if (info->alloced - info->swapped != inode->i_mapping->nrpages) {
1005                 spin_lock_irq(&info->lock);
1006                 shmem_recalc_inode(inode);
1007                 spin_unlock_irq(&info->lock);
1008         }
1009         generic_fillattr(inode, stat);
1010
1011         if (is_huge_enabled(sb_info))
1012                 stat->blksize = HPAGE_PMD_SIZE;
1013
1014         return 0;
1015 }
1016
1017 static int shmem_setattr(struct dentry *dentry, struct iattr *attr)
1018 {
1019         struct inode *inode = d_inode(dentry);
1020         struct shmem_inode_info *info = SHMEM_I(inode);
1021         struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
1022         int error;
1023
1024         error = setattr_prepare(dentry, attr);
1025         if (error)
1026                 return error;
1027
1028         if (S_ISREG(inode->i_mode) && (attr->ia_valid & ATTR_SIZE)) {
1029                 loff_t oldsize = inode->i_size;
1030                 loff_t newsize = attr->ia_size;
1031
1032                 /* protected by i_mutex */
1033                 if ((newsize < oldsize && (info->seals & F_SEAL_SHRINK)) ||
1034                     (newsize > oldsize && (info->seals & F_SEAL_GROW)))
1035                         return -EPERM;
1036
1037                 if (newsize != oldsize) {
1038                         error = shmem_reacct_size(SHMEM_I(inode)->flags,
1039                                         oldsize, newsize);
1040                         if (error)
1041                                 return error;
1042                         i_size_write(inode, newsize);
1043                         inode->i_ctime = inode->i_mtime = current_time(inode);
1044                 }
1045                 if (newsize <= oldsize) {
1046                         loff_t holebegin = round_up(newsize, PAGE_SIZE);
1047                         if (oldsize > holebegin)
1048                                 unmap_mapping_range(inode->i_mapping,
1049                                                         holebegin, 0, 1);
1050                         if (info->alloced)
1051                                 shmem_truncate_range(inode,
1052                                                         newsize, (loff_t)-1);
1053                         /* unmap again to remove racily COWed private pages */
1054                         if (oldsize > holebegin)
1055                                 unmap_mapping_range(inode->i_mapping,
1056                                                         holebegin, 0, 1);
1057
1058                         /*
1059                          * Part of the huge page can be beyond i_size: subject
1060                          * to shrink under memory pressure.
1061                          */
1062                         if (IS_ENABLED(CONFIG_TRANSPARENT_HUGE_PAGECACHE)) {
1063                                 spin_lock(&sbinfo->shrinklist_lock);
1064                                 /*
1065                                  * Be careful to defend against unlocked access to
1066                                  * ->shrinklist in shmem_unused_huge_shrink()
1067                                  */
1068                                 if (list_empty_careful(&info->shrinklist)) {
1069                                         list_add_tail(&info->shrinklist,
1070                                                         &sbinfo->shrinklist);
1071                                         sbinfo->shrinklist_len++;
1072                                 }
1073                                 spin_unlock(&sbinfo->shrinklist_lock);
1074                         }
1075                 }
1076         }
1077
1078         setattr_copy(inode, attr);
1079         if (attr->ia_valid & ATTR_MODE)
1080                 error = posix_acl_chmod(inode, inode->i_mode);
1081         return error;
1082 }
1083
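/*
 * Final cleanup when a shmem/tmpfs inode is evicted: truncate everything,
 * drop the inode from the huge-page shrink list and from the swap list
 * (waiting for any shmem_unuse() scan of it to finish), then release its
 * accounting and xattrs.
 */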
1084 static void shmem_evict_inode(struct inode *inode)
1085 {
1086         struct shmem_inode_info *info = SHMEM_I(inode);
1087         struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
1088
1089         if (inode->i_mapping->a_ops == &shmem_aops) {
1090                 shmem_unacct_size(info->flags, inode->i_size);
1091                 inode->i_size = 0;
1092                 shmem_truncate_range(inode, 0, (loff_t)-1);
1093                 if (!list_empty(&info->shrinklist)) {
1094                         spin_lock(&sbinfo->shrinklist_lock);
1095                         if (!list_empty(&info->shrinklist)) {
1096                                 list_del_init(&info->shrinklist);
1097                                 sbinfo->shrinklist_len--;
1098                         }
1099                         spin_unlock(&sbinfo->shrinklist_lock);
1100                 }
1101                 while (!list_empty(&info->swaplist)) {
1102                         /* Wait while shmem_unuse() is scanning this inode... */
1103                         wait_var_event(&info->stop_eviction,
1104                                        !atomic_read(&info->stop_eviction));
1105                         mutex_lock(&shmem_swaplist_mutex);
1106                         /* ...but beware of the race if we peeked too early */
1107                         if (!atomic_read(&info->stop_eviction))
1108                                 list_del_init(&info->swaplist);
1109                         mutex_unlock(&shmem_swaplist_mutex);
1110                 }
1111         }
1112
1113         simple_xattrs_free(&info->xattrs);
1114         WARN_ON(inode->i_blocks);
1115         shmem_free_inode(inode->i_sb);
1116         clear_inode(inode);
1117 }
1118
1119 extern struct swap_info_struct *swap_info[];
1120
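/*
 * Scan the mapping for swap entries belonging to swap device @type
 * (optionally only those present in frontswap), filling @entries and
 * @indices with up to @nr_entries matches starting at @start.  Returns
 * the number of entries found.
 */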
1121 static int shmem_find_swap_entries(struct address_space *mapping,
1122                                    pgoff_t start, unsigned int nr_entries,
1123                                    struct page **entries, pgoff_t *indices,
1124                                    unsigned int type, bool frontswap)
1125 {
1126         XA_STATE(xas, &mapping->i_pages, start);
1127         struct page *page;
1128         swp_entry_t entry;
1129         unsigned int ret = 0;
1130
1131         if (!nr_entries)
1132                 return 0;
1133
1134         rcu_read_lock();
1135         xas_for_each(&xas, page, ULONG_MAX) {
1136                 if (xas_retry(&xas, page))
1137                         continue;
1138
1139                 if (!xa_is_value(page))
1140                         continue;
1141
1142                 entry = radix_to_swp_entry(page);
1143                 if (swp_type(entry) != type)
1144                         continue;
1145                 if (frontswap &&
1146                     !frontswap_test(swap_info[type], swp_offset(entry)))
1147                         continue;
1148
1149                 indices[ret] = xas.xa_index;
1150                 entries[ret] = page;
1151
1152                 if (need_resched()) {
1153                         xas_pause(&xas);
1154                         cond_resched_rcu();
1155                 }
1156                 if (++ret == nr_entries)
1157                         break;
1158         }
1159         rcu_read_unlock();
1160
1161         return ret;
1162 }
1163
1164 /*
1165  * Move the swapped pages for an inode to page cache. Returns the count
1166  * of pages swapped in, or the error in case of failure.
1167  */
1168 static int shmem_unuse_swap_entries(struct inode *inode, struct pagevec pvec,
1169                                     pgoff_t *indices)
1170 {
1171         int i = 0;
1172         int ret = 0;
1173         int error = 0;
1174         struct address_space *mapping = inode->i_mapping;
1175
1176         for (i = 0; i < pvec.nr; i++) {
1177                 struct page *page = pvec.pages[i];
1178
1179                 if (!xa_is_value(page))
1180                         continue;
1181                 error = shmem_swapin_page(inode, indices[i],
1182                                           &page, SGP_CACHE,
1183                                           mapping_gfp_mask(mapping),
1184                                           NULL, NULL);
1185                 if (error == 0) {
1186                         unlock_page(page);
1187                         put_page(page);
1188                         ret++;
1189                 }
1190                 if (error == -ENOMEM)
1191                         break;
1192                 error = 0;
1193         }
1194         return error ? error : ret;
1195 }
1196
1197 /*
1198  * If swap found in inode, free it and move page from swapcache to filecache.
1199  */
1200 static int shmem_unuse_inode(struct inode *inode, unsigned int type,
1201                              bool frontswap, unsigned long *fs_pages_to_unuse)
1202 {
1203         struct address_space *mapping = inode->i_mapping;
1204         pgoff_t start = 0;
1205         struct pagevec pvec;
1206         pgoff_t indices[PAGEVEC_SIZE];
1207         bool frontswap_partial = (frontswap && *fs_pages_to_unuse > 0);
1208         int ret = 0;
1209
1210         pagevec_init(&pvec);
1211         do {
1212                 unsigned int nr_entries = PAGEVEC_SIZE;
1213
1214                 if (frontswap_partial && *fs_pages_to_unuse < PAGEVEC_SIZE)
1215                         nr_entries = *fs_pages_to_unuse;
1216
1217                 pvec.nr = shmem_find_swap_entries(mapping, start, nr_entries,
1218                                                   pvec.pages, indices,
1219                                                   type, frontswap);
1220                 if (pvec.nr == 0) {
1221                         ret = 0;
1222                         break;
1223                 }
1224
1225                 ret = shmem_unuse_swap_entries(inode, pvec, indices);
1226                 if (ret < 0)
1227                         break;
1228
1229                 if (frontswap_partial) {
1230                         *fs_pages_to_unuse -= ret;
1231                         if (*fs_pages_to_unuse == 0) {
1232                                 ret = FRONTSWAP_PAGES_UNUSED;
1233                                 break;
1234                         }
1235                 }
1236
1237                 start = indices[pvec.nr - 1];
1238         } while (true);
1239
1240         return ret;
1241 }
1242
1243 /*
1244  * Read all the shared memory data that resides in the swap
1245  * device 'type' back into memory, so the swap device can be
1246  * unused.
1247  */
1248 int shmem_unuse(unsigned int type, bool frontswap,
1249                 unsigned long *fs_pages_to_unuse)
1250 {
1251         struct shmem_inode_info *info, *next;
1252         int error = 0;
1253
1254         if (list_empty(&shmem_swaplist))
1255                 return 0;
1256
1257         mutex_lock(&shmem_swaplist_mutex);
1258         list_for_each_entry_safe(info, next, &shmem_swaplist, swaplist) {
1259                 if (!info->swapped) {
1260                         list_del_init(&info->swaplist);
1261                         continue;
1262                 }
1263                 /*
1264                  * Drop the swaplist mutex while searching the inode for swap;
1265                  * but before doing so, make sure shmem_evict_inode() will not
1266                  * remove placeholder inode from swaplist, nor let it be freed
1267                  * (igrab() would protect from unlink, but not from unmount).
1268                  */
1269                 atomic_inc(&info->stop_eviction);
1270                 mutex_unlock(&shmem_swaplist_mutex);
1271
1272                 error = shmem_unuse_inode(&info->vfs_inode, type, frontswap,
1273                                           fs_pages_to_unuse);
1274                 cond_resched();
1275
1276                 mutex_lock(&shmem_swaplist_mutex);
1277                 next = list_next_entry(info, swaplist);
1278                 if (!info->swapped)
1279                         list_del_init(&info->swaplist);
1280                 if (atomic_dec_and_test(&info->stop_eviction))
1281                         wake_up_var(&info->stop_eviction);
1282                 if (error)
1283                         break;
1284         }
1285         mutex_unlock(&shmem_swaplist_mutex);
1286
1287         return error;
1288 }
1289
1290 /*
1291  * Move the page from the page cache to the swap cache.
1292  */
1293 static int shmem_writepage(struct page *page, struct writeback_control *wbc)
1294 {
1295         struct shmem_inode_info *info;
1296         struct address_space *mapping;
1297         struct inode *inode;
1298         swp_entry_t swap;
1299         pgoff_t index;
1300
1301         VM_BUG_ON_PAGE(PageCompound(page), page);
1302         BUG_ON(!PageLocked(page));
1303         mapping = page->mapping;
1304         index = page->index;
1305         inode = mapping->host;
1306         info = SHMEM_I(inode);
1307         if (info->flags & VM_LOCKED)
1308                 goto redirty;
1309         if (!total_swap_pages)
1310                 goto redirty;
1311
1312         /*
1313          * Our capabilities prevent regular writeback or sync from ever calling
1314          * shmem_writepage; but a stacking filesystem might use ->writepage of
1315          * its underlying filesystem, in which case tmpfs should write out to
1316          * swap only in response to memory pressure, and not for the writeback
1317          * threads or sync.
1318          */
1319         if (!wbc->for_reclaim) {
1320                 WARN_ON_ONCE(1);        /* Still happens? Tell us about it! */
1321                 goto redirty;
1322         }
1323
1324         /*
1325          * This is somewhat ridiculous, but without plumbing a SWAP_MAP_FALLOC
1326          * value into swapfile.c, the only way we can correctly account for a
1327          * fallocated page arriving here is now to initialize it and write it.
1328          *
1329          * That's okay for a page already fallocated earlier, but if we have
1330          * not yet completed the fallocation, then (a) we want to keep track
1331          * of this page in case we have to undo it, and (b) it may not be a
1332          * good idea to continue anyway, once we're pushing into swap.  So
1333          * reactivate the page, and let shmem_fallocate() quit when too many.
1334          */
1335         if (!PageUptodate(page)) {
1336                 if (inode->i_private) {
1337                         struct shmem_falloc *shmem_falloc;
1338                         spin_lock(&inode->i_lock);
1339                         shmem_falloc = inode->i_private;
1340                         if (shmem_falloc &&
1341                             !shmem_falloc->waitq &&
1342                             index >= shmem_falloc->start &&
1343                             index < shmem_falloc->next)
1344                                 shmem_falloc->nr_unswapped++;
1345                         else
1346                                 shmem_falloc = NULL;
1347                         spin_unlock(&inode->i_lock);
1348                         if (shmem_falloc)
1349                                 goto redirty;
1350                 }
1351                 clear_highpage(page);
1352                 flush_dcache_page(page);
1353                 SetPageUptodate(page);
1354         }
1355
1356         swap = get_swap_page(page);
1357         if (!swap.val)
1358                 goto redirty;
1359
1360         /*
1361          * Add inode to shmem_unuse()'s list of swapped-out inodes,
1362          * if it's not already there.  Do it now before the page is
1363          * moved to swap cache, when its pagelock no longer protects
1364          * the inode from eviction.  But don't unlock the mutex until
1365          * we've incremented swapped, because shmem_unuse_inode() will
1366          * prune a !swapped inode from the swaplist under this mutex.
1367          */
1368         mutex_lock(&shmem_swaplist_mutex);
1369         if (list_empty(&info->swaplist))
1370                 list_add(&info->swaplist, &shmem_swaplist);
1371
1372         if (add_to_swap_cache(page, swap, GFP_ATOMIC) == 0) {
1373                 spin_lock_irq(&info->lock);
1374                 shmem_recalc_inode(inode);
1375                 info->swapped++;
1376                 spin_unlock_irq(&info->lock);
1377
1378                 swap_shmem_alloc(swap);
1379                 shmem_delete_from_page_cache(page, swp_to_radix_entry(swap));
1380
1381                 mutex_unlock(&shmem_swaplist_mutex);
1382                 BUG_ON(page_mapped(page));
1383                 swap_writepage(page, wbc);
1384                 return 0;
1385         }
1386
1387         mutex_unlock(&shmem_swaplist_mutex);
1388         put_swap_page(page, swap);
1389 redirty:
1390         set_page_dirty(page);
1391         if (wbc->for_reclaim)
1392                 return AOP_WRITEPAGE_ACTIVATE;  /* Return with page locked */
1393         unlock_page(page);
1394         return 0;
1395 }
1396
1397 #if defined(CONFIG_NUMA) && defined(CONFIG_TMPFS)
1398 static void shmem_show_mpol(struct seq_file *seq, struct mempolicy *mpol)
1399 {
1400         char buffer[64];
1401
1402         if (!mpol || mpol->mode == MPOL_DEFAULT)
1403                 return;         /* show nothing */
1404
1405         mpol_to_str(buffer, sizeof(buffer), mpol);
1406
1407         seq_printf(seq, ",mpol=%s", buffer);
1408 }
1409
1410 static struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo)
1411 {
1412         struct mempolicy *mpol = NULL;
1413         if (sbinfo->mpol) {
1414                 spin_lock(&sbinfo->stat_lock);  /* prevent replace/use races */
1415                 mpol = sbinfo->mpol;
1416                 mpol_get(mpol);
1417                 spin_unlock(&sbinfo->stat_lock);
1418         }
1419         return mpol;
1420 }
1421 #else /* !CONFIG_NUMA || !CONFIG_TMPFS */
1422 static inline void shmem_show_mpol(struct seq_file *seq, struct mempolicy *mpol)
1423 {
1424 }
1425 static inline struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo)
1426 {
1427         return NULL;
1428 }
1429 #endif /* CONFIG_NUMA && CONFIG_TMPFS */
1430 #ifndef CONFIG_NUMA
1431 #define vm_policy vm_private_data
1432 #endif
1433
1434 static void shmem_pseudo_vma_init(struct vm_area_struct *vma,
1435                 struct shmem_inode_info *info, pgoff_t index)
1436 {
1437         /* Create a pseudo vma that just contains the policy */
1438         vma_init(vma, NULL);
1439         /* Bias interleave by inode number to distribute better across nodes */
1440         vma->vm_pgoff = index + info->vfs_inode.i_ino;
1441         vma->vm_policy = mpol_shared_policy_lookup(&info->policy, index);
1442 }
1443
1444 static void shmem_pseudo_vma_destroy(struct vm_area_struct *vma)
1445 {
1446         /* Drop reference taken by mpol_shared_policy_lookup() */
1447         mpol_cond_put(vma->vm_policy);
1448 }
1449
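/*
 * Read the page for @swap back from swap, with readahead clustered around
 * @index, using a pseudo-vma so that the inode's shared mempolicy is
 * applied to the allocations.
 */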
1450 static struct page *shmem_swapin(swp_entry_t swap, gfp_t gfp,
1451                         struct shmem_inode_info *info, pgoff_t index)
1452 {
1453         struct vm_area_struct pvma;
1454         struct page *page;
1455         struct vm_fault vmf;
1456
1457         shmem_pseudo_vma_init(&pvma, info, index);
1458         vmf.vma = &pvma;
1459         vmf.address = 0;
1460         page = swap_cluster_readahead(swap, gfp, &vmf);
1461         shmem_pseudo_vma_destroy(&pvma);
1462
1463         return page;
1464 }
1465
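/*
 * Try to allocate a transparent huge page for the aligned range covering
 * @index, but only if no page is already present anywhere in that range.
 */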
1466 static struct page *shmem_alloc_hugepage(gfp_t gfp,
1467                 struct shmem_inode_info *info, pgoff_t index)
1468 {
1469         struct vm_area_struct pvma;
1470         struct address_space *mapping = info->vfs_inode.i_mapping;
1471         pgoff_t hindex;
1472         struct page *page;
1473
1474         if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGE_PAGECACHE))
1475                 return NULL;
1476
1477         hindex = round_down(index, HPAGE_PMD_NR);
1478         if (xa_find(&mapping->i_pages, &hindex, hindex + HPAGE_PMD_NR - 1,
1479                                                                 XA_PRESENT))
1480                 return NULL;
1481
1482         shmem_pseudo_vma_init(&pvma, info, hindex);
1483         page = alloc_pages_vma(gfp | __GFP_COMP | __GFP_NORETRY | __GFP_NOWARN,
1484                         HPAGE_PMD_ORDER, &pvma, 0, numa_node_id(), true);
1485         shmem_pseudo_vma_destroy(&pvma);
1486         if (page)
1487                 prep_transhuge_page(page);
1488         return page;
1489 }
1490
1491 static struct page *shmem_alloc_page(gfp_t gfp,
1492                         struct shmem_inode_info *info, pgoff_t index)
1493 {
1494         struct vm_area_struct pvma;
1495         struct page *page;
1496
1497         shmem_pseudo_vma_init(&pvma, info, index);
1498         page = alloc_page_vma(gfp, &pvma, 0);
1499         shmem_pseudo_vma_destroy(&pvma);
1500
1501         return page;
1502 }
1503
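/*
 * Account the blocks against the inode and superblock, then allocate a
 * huge or small page; on failure the accounting is undone and an ERR_PTR
 * (-ENOSPC or -ENOMEM) is returned.
 */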
1504 static struct page *shmem_alloc_and_acct_page(gfp_t gfp,
1505                 struct inode *inode,
1506                 pgoff_t index, bool huge)
1507 {
1508         struct shmem_inode_info *info = SHMEM_I(inode);
1509         struct page *page;
1510         int nr;
1511         int err = -ENOSPC;
1512
1513         if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGE_PAGECACHE))
1514                 huge = false;
1515         nr = huge ? HPAGE_PMD_NR : 1;
1516
1517         if (!shmem_inode_acct_block(inode, nr))
1518                 goto failed;
1519
1520         if (huge)
1521                 page = shmem_alloc_hugepage(gfp, info, index);
1522         else
1523                 page = shmem_alloc_page(gfp, info, index);
1524         if (page) {
1525                 __SetPageLocked(page);
1526                 __SetPageSwapBacked(page);
1527                 return page;
1528         }
1529
1530         err = -ENOMEM;
1531         shmem_inode_unacct_blocks(inode, nr);
1532 failed:
1533         return ERR_PTR(err);
1534 }
1535
1536 /*
1537  * When a page is moved from swapcache to shmem filecache (either by the
1538  * usual swapin of shmem_getpage_gfp(), or by the less common swapoff of
1539  * shmem_unuse_inode()), it may have been read in earlier from swap, in
1540  * ignorance of the mapping it belongs to.  If that mapping has special
1541  * constraints (like the gma500 GEM driver, which requires RAM below 4GB),
1542  * we may need to copy to a suitable page before moving to filecache.
1543  *
1544  * In a future release, this may well be extended to respect cpuset and
1545  * NUMA mempolicy, and applied also to anonymous pages in do_swap_page();
1546  * but for now it is a simple matter of zone.
1547  */
1548 static bool shmem_should_replace_page(struct page *page, gfp_t gfp)
1549 {
1550         return page_zonenum(page) > gfp_zone(gfp);
1551 }
1552
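/*
 * Allocate a replacement page in a zone acceptable to @gfp, copy the old
 * page into it, and substitute it for the old page in the swap cache.
 */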
1553 static int shmem_replace_page(struct page **pagep, gfp_t gfp,
1554                                 struct shmem_inode_info *info, pgoff_t index)
1555 {
1556         struct page *oldpage, *newpage;
1557         struct address_space *swap_mapping;
1558         swp_entry_t entry;
1559         pgoff_t swap_index;
1560         int error;
1561
1562         oldpage = *pagep;
1563         entry.val = page_private(oldpage);
1564         swap_index = swp_offset(entry);
1565         swap_mapping = page_mapping(oldpage);
1566
1567         /*
1568          * We have arrived here because our zones are constrained, so don't
1569          * limit chance of success by further cpuset and node constraints.
1570          */
1571         gfp &= ~GFP_CONSTRAINT_MASK;
1572         newpage = shmem_alloc_page(gfp, info, index);
1573         if (!newpage)
1574                 return -ENOMEM;
1575
1576         get_page(newpage);
1577         copy_highpage(newpage, oldpage);
1578         flush_dcache_page(newpage);
1579
1580         __SetPageLocked(newpage);
1581         __SetPageSwapBacked(newpage);
1582         SetPageUptodate(newpage);
1583         set_page_private(newpage, entry.val);
1584         SetPageSwapCache(newpage);
1585
1586         /*
1587          * Our caller will very soon move newpage out of swapcache, but it's
1588          * a nice clean interface for us to replace oldpage by newpage there.
1589          */
1590         xa_lock_irq(&swap_mapping->i_pages);
1591         error = shmem_replace_entry(swap_mapping, swap_index, oldpage, newpage);
1592         if (!error) {
1593                 __inc_node_page_state(newpage, NR_FILE_PAGES);
1594                 __dec_node_page_state(oldpage, NR_FILE_PAGES);
1595         }
1596         xa_unlock_irq(&swap_mapping->i_pages);
1597
1598         if (unlikely(error)) {
1599                 /*
1600                  * Is this possible?  I think not, now that our callers check
1601                  * both PageSwapCache and page_private after getting page lock;
1602                  * but be defensive.  Reverse old to newpage for clear and free.
1603                  */
1604                 oldpage = newpage;
1605         } else {
1606                 mem_cgroup_migrate(oldpage, newpage);
1607                 lru_cache_add_anon(newpage);
1608                 *pagep = newpage;
1609         }
1610
1611         ClearPageSwapCache(oldpage);
1612         set_page_private(oldpage, 0);
1613
1614         unlock_page(oldpage);
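        /* whichever page is being discarded here holds two references: drop both */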
1615         put_page(oldpage);
1616         put_page(oldpage);
1617         return error;
1618 }
1619
1620 /*
1621  * Swap in the page pointed to by *pagep.
1622  * Caller has to make sure that *pagep contains a valid swapped page.
1623  * Returns 0 and the page in *pagep on success. On failure, returns
1624  * the error code and NULL in *pagep.
1625  */
1626 static int shmem_swapin_page(struct inode *inode, pgoff_t index,
1627                              struct page **pagep, enum sgp_type sgp,
1628                              gfp_t gfp, struct vm_area_struct *vma,
1629                              vm_fault_t *fault_type)
1630 {
1631         struct address_space *mapping = inode->i_mapping;
1632         struct shmem_inode_info *info = SHMEM_I(inode);
1633         struct mm_struct *charge_mm = vma ? vma->vm_mm : current->mm;
1634         struct mem_cgroup *memcg;
1635         struct page *page;
1636         swp_entry_t swap;
1637         int error;
1638
1639         VM_BUG_ON(!*pagep || !xa_is_value(*pagep));
1640         swap = radix_to_swp_entry(*pagep);
1641         *pagep = NULL;
1642
1643         /* Look it up and read it in.. */
1644         page = lookup_swap_cache(swap, NULL, 0);
1645         if (!page) {
1646                 /* Or update major stats only when swapin succeeds?? */
1647                 if (fault_type) {
1648                         *fault_type |= VM_FAULT_MAJOR;
1649                         count_vm_event(PGMAJFAULT);
1650                         count_memcg_event_mm(charge_mm, PGMAJFAULT);
1651                 }
1652                 /* Here we actually start the io */
1653                 page = shmem_swapin(swap, gfp, info, index);
1654                 if (!page) {
1655                         error = -ENOMEM;
1656                         goto failed;
1657                 }
1658         }
1659
1660         /* We have to do this with page locked to prevent races */
1661         lock_page(page);
1662         if (!PageSwapCache(page) || page_private(page) != swap.val ||
1663             !shmem_confirm_swap(mapping, index, swap)) {
1664                 error = -EEXIST;
1665                 goto unlock;
1666         }
1667         if (!PageUptodate(page)) {
1668                 error = -EIO;
1669                 goto failed;
1670         }
1671         wait_on_page_writeback(page);
1672
1673         if (shmem_should_replace_page(page, gfp)) {
1674                 error = shmem_replace_page(&page, gfp, info, index);
1675                 if (error)
1676                         goto failed;
1677         }
1678
1679         error = mem_cgroup_try_charge_delay(page, charge_mm, gfp, &memcg,
1680                                             false);
1681         if (!error) {
1682                 error = shmem_add_to_page_cache(page, mapping, index,
1683                                                 swp_to_radix_entry(swap), gfp);
1684                 /*
1685                  * We already confirmed swap under page lock, and make
1686                  * no memory allocation here, so usually no possibility
1687                  * of error; but free_swap_and_cache() only trylocks a
1688                  * page, so it is just possible that the entry has been
1689                  * truncated or holepunched since swap was confirmed.
1690                  * shmem_undo_range() will have done some of the
1691                  * unaccounting, now delete_from_swap_cache() will do
1692                  * the rest.
1693                  */
1694                 if (error) {
1695                         mem_cgroup_cancel_charge(page, memcg, false);
1696                         delete_from_swap_cache(page);
1697                 }
1698         }
1699         if (error)
1700                 goto failed;
1701
1702         mem_cgroup_commit_charge(page, memcg, true, false);
1703
1704         spin_lock_irq(&info->lock);
1705         info->swapped--;
1706         shmem_recalc_inode(inode);
1707         spin_unlock_irq(&info->lock);
1708
1709         if (sgp == SGP_WRITE)
1710                 mark_page_accessed(page);
1711
1712         delete_from_swap_cache(page);
1713         set_page_dirty(page);
1714         swap_free(swap);
1715
1716         *pagep = page;
1717         return 0;
1718 failed:
1719         if (!shmem_confirm_swap(mapping, index, swap))
1720                 error = -EEXIST;
1721 unlock:
1722         if (page) {
1723                 unlock_page(page);
1724                 put_page(page);
1725         }
1726
1727         return error;
1728 }
1729
1730 /*
1731  * shmem_getpage_gfp - find page in cache, or get from swap, or allocate
1732  *
1733  * If we allocate a new one we do not mark it dirty. That's up to the
1734  * vm. If we swap it in we mark it dirty, since we also free the swap
1735  * entry: a page cannot live in both the swap cache and the page cache.
1736  *
1737  * vmf and fault_type are only supplied by shmem_fault:
1738  * otherwise they are NULL.
1739  */
1740 static int shmem_getpage_gfp(struct inode *inode, pgoff_t index,
1741         struct page **pagep, enum sgp_type sgp, gfp_t gfp,
1742         struct vm_area_struct *vma, struct vm_fault *vmf,
1743                         vm_fault_t *fault_type)
1744 {
1745         struct address_space *mapping = inode->i_mapping;
1746         struct shmem_inode_info *info = SHMEM_I(inode);
1747         struct shmem_sb_info *sbinfo;
1748         struct mm_struct *charge_mm;
1749         struct mem_cgroup *memcg;
1750         struct page *page;
1751         enum sgp_type sgp_huge = sgp;
1752         pgoff_t hindex = index;
1753         int error;
1754         int once = 0;
1755         int alloced = 0;
1756
1757         if (index > (MAX_LFS_FILESIZE >> PAGE_SHIFT))
1758                 return -EFBIG;
1759         if (sgp == SGP_NOHUGE || sgp == SGP_HUGE)
1760                 sgp = SGP_CACHE;
1761 repeat:
1762         if (sgp <= SGP_CACHE &&
1763             ((loff_t)index << PAGE_SHIFT) >= i_size_read(inode)) {
1764                 return -EINVAL;
1765         }
1766
1767         sbinfo = SHMEM_SB(inode->i_sb);
1768         charge_mm = vma ? vma->vm_mm : current->mm;
1769
1770         page = find_lock_entry(mapping, index);
1771         if (xa_is_value(page)) {
1772                 error = shmem_swapin_page(inode, index, &page,
1773                                           sgp, gfp, vma, fault_type);
1774                 if (error == -EEXIST)
1775                         goto repeat;
1776
1777                 *pagep = page;
1778                 return error;
1779         }
1780
1781         if (page && sgp == SGP_WRITE)
1782                 mark_page_accessed(page);
1783
1784         /* fallocated page? */
1785         if (page && !PageUptodate(page)) {
1786                 if (sgp != SGP_READ)
1787                         goto clear;
1788                 unlock_page(page);
1789                 put_page(page);
1790                 page = NULL;
1791         }
1792         if (page || sgp == SGP_READ) {
1793                 *pagep = page;
1794                 return 0;
1795         }
1796
1797         /*
1798          * Fast cache lookup did not find it:
1799          * bring it back from swap or allocate.
1800          */
1801
1802         if (vma && userfaultfd_missing(vma)) {
1803                 *fault_type = handle_userfault(vmf, VM_UFFD_MISSING);
1804                 return 0;
1805         }
1806
1807         /* shmem_symlink() */
1808         if (mapping->a_ops != &shmem_aops)
1809                 goto alloc_nohuge;
1810         if (shmem_huge == SHMEM_HUGE_DENY || sgp_huge == SGP_NOHUGE)
1811                 goto alloc_nohuge;
1812         if (shmem_huge == SHMEM_HUGE_FORCE)
1813                 goto alloc_huge;
1814         switch (sbinfo->huge) {
1815                 loff_t i_size;
1816                 pgoff_t off;
1817         case SHMEM_HUGE_NEVER:
1818                 goto alloc_nohuge;
1819         case SHMEM_HUGE_WITHIN_SIZE:
1820                 off = round_up(index, HPAGE_PMD_NR);
1821                 i_size = round_up(i_size_read(inode), PAGE_SIZE);
1822                 if (i_size >= HPAGE_PMD_SIZE &&
1823                     i_size >> PAGE_SHIFT >= off)
1824                         goto alloc_huge;
1825                 /* fallthrough */
1826         case SHMEM_HUGE_ADVISE:
1827                 if (sgp_huge == SGP_HUGE)
1828                         goto alloc_huge;
1829                 /* TODO: implement fadvise() hints */
1830                 goto alloc_nohuge;
1831         }
1832
1833 alloc_huge:
1834         page = shmem_alloc_and_acct_page(gfp, inode, index, true);
1835         if (IS_ERR(page)) {
1836 alloc_nohuge:
1837                 page = shmem_alloc_and_acct_page(gfp, inode,
1838                                                  index, false);
1839         }
1840         if (IS_ERR(page)) {
1841                 int retry = 5;
1842
1843                 error = PTR_ERR(page);
1844                 page = NULL;
1845                 if (error != -ENOSPC)
1846                         goto unlock;
1847                 /*
1848                  * Try to reclaim some space by splitting a huge page
1849                  * beyond i_size on the filesystem.
1850                  */
1851                 while (retry--) {
1852                         int ret;
1853
1854                         ret = shmem_unused_huge_shrink(sbinfo, NULL, 1);
1855                         if (ret == SHRINK_STOP)
1856                                 break;
1857                         if (ret)
1858                                 goto alloc_nohuge;
1859                 }
1860                 goto unlock;
1861         }
1862
1863         if (PageTransHuge(page))
1864                 hindex = round_down(index, HPAGE_PMD_NR);
1865         else
1866                 hindex = index;
1867
1868         if (sgp == SGP_WRITE)
1869                 __SetPageReferenced(page);
1870
1871         error = mem_cgroup_try_charge_delay(page, charge_mm, gfp, &memcg,
1872                                             PageTransHuge(page));
1873         if (error)
1874                 goto unacct;
1875         error = shmem_add_to_page_cache(page, mapping, hindex,
1876                                         NULL, gfp & GFP_RECLAIM_MASK);
1877         if (error) {
1878                 mem_cgroup_cancel_charge(page, memcg,
1879                                          PageTransHuge(page));
1880                 goto unacct;
1881         }
1882         mem_cgroup_commit_charge(page, memcg, false,
1883                                  PageTransHuge(page));
1884         lru_cache_add_anon(page);
1885
1886         spin_lock_irq(&info->lock);
1887         info->alloced += compound_nr(page);
1888         inode->i_blocks += BLOCKS_PER_PAGE << compound_order(page);
1889         shmem_recalc_inode(inode);
1890         spin_unlock_irq(&info->lock);
1891         alloced = true;
1892
1893         if (PageTransHuge(page) &&
1894             DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE) <
1895                         hindex + HPAGE_PMD_NR - 1) {
1896                 /*
1897                  * Part of the huge page is beyond i_size: subject
1898                  * to shrink under memory pressure.
1899                  */
1900                 spin_lock(&sbinfo->shrinklist_lock);
1901                 /*
1902                  * list_empty_careful() to defend against unlocked access to
1903                  * ->shrinklist in shmem_unused_huge_shrink()
1904                  */
1905                 if (list_empty_careful(&info->shrinklist)) {
1906                         list_add_tail(&info->shrinklist,
1907                                       &sbinfo->shrinklist);
1908                         sbinfo->shrinklist_len++;
1909                 }
1910                 spin_unlock(&sbinfo->shrinklist_lock);
1911         }
1912
1913         /*
1914          * Let SGP_FALLOC use the SGP_WRITE optimization on a new page.
1915          */
1916         if (sgp == SGP_FALLOC)
1917                 sgp = SGP_WRITE;
1918 clear:
1919         /*
1920          * Let SGP_WRITE caller clear ends if write does not fill page;
1921          * but SGP_FALLOC on a page fallocated earlier must initialize
1922          * it now, lest undo on failure cancel our earlier guarantee.
1923          */
1924         if (sgp != SGP_WRITE && !PageUptodate(page)) {
1925                 struct page *head = compound_head(page);
1926                 int i;
1927
1928                 for (i = 0; i < compound_nr(head); i++) {
1929                         clear_highpage(head + i);
1930                         flush_dcache_page(head + i);
1931                 }
1932                 SetPageUptodate(head);
1933         }
1934
1935         /* Perhaps the file has been truncated since we checked */
1936         if (sgp <= SGP_CACHE &&
1937             ((loff_t)index << PAGE_SHIFT) >= i_size_read(inode)) {
1938                 if (alloced) {
1939                         ClearPageDirty(page);
1940                         delete_from_page_cache(page);
1941                         spin_lock_irq(&info->lock);
1942                         shmem_recalc_inode(inode);
1943                         spin_unlock_irq(&info->lock);
1944                 }
1945                 error = -EINVAL;
1946                 goto unlock;
1947         }
1948         *pagep = page + index - hindex;
1949         return 0;
1950
1951         /*
1952          * Error recovery.
1953          */
1954 unacct:
1955         shmem_inode_unacct_blocks(inode, compound_nr(page));
1956
1957         if (PageTransHuge(page)) {
1958                 unlock_page(page);
1959                 put_page(page);
1960                 goto alloc_nohuge;
1961         }
1962 unlock:
1963         if (page) {
1964                 unlock_page(page);
1965                 put_page(page);
1966         }
1967         if (error == -ENOSPC && !once++) {
1968                 spin_lock_irq(&info->lock);
1969                 shmem_recalc_inode(inode);
1970                 spin_unlock_irq(&info->lock);
1971                 goto repeat;
1972         }
1973         if (error == -EEXIST)
1974                 goto repeat;
1975         return error;
1976 }
1977
1978 /*
1979  * This is like autoremove_wake_function, but it removes the wait queue
1980  * entry unconditionally - even if something else had already woken the
1981  * target.
1982  */
1983 static int synchronous_wake_function(wait_queue_entry_t *wait, unsigned mode, int sync, void *key)
1984 {
1985         int ret = default_wake_function(wait, mode, sync, key);
1986         list_del_init(&wait->entry);
1987         return ret;
1988 }
1989
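/*
 * Fault in a page of the tmpfs file backing this vma, deferring the real
 * work to shmem_getpage_gfp(); may return VM_FAULT_RETRY or VM_FAULT_NOPAGE
 * while a hole-punch on the same range is in progress (see below).
 */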
1990 static vm_fault_t shmem_fault(struct vm_fault *vmf)
1991 {
1992         struct vm_area_struct *vma = vmf->vma;
1993         struct inode *inode = file_inode(vma->vm_file);
1994         gfp_t gfp = mapping_gfp_mask(inode->i_mapping);
1995         enum sgp_type sgp;
1996         int err;
1997         vm_fault_t ret = VM_FAULT_LOCKED;
1998
1999         /*
2000          * Trinity finds that probing a hole which tmpfs is punching can
2001          * prevent the hole-punch from ever completing: which in turn
2002          * locks writers out with its hold on i_mutex.  So refrain from
2003          * faulting pages into the hole while it's being punched.  Although
2004          * shmem_undo_range() does remove the additions, it may be unable to
2005          * keep up, as each new page needs its own unmap_mapping_range() call,
2006          * and the i_mmap tree grows ever slower to scan if new vmas are added.
2007          *
2008          * It does not matter if we sometimes reach this check just before the
2009          * hole-punch begins, so that one fault then races with the punch:
2010          * we just need to make racing faults a rare case.
2011          *
2012          * The implementation below would be much simpler if we just used a
2013          * standard mutex or completion: but we cannot take i_mutex in fault,
2014          * and bloating every shmem inode for this unlikely case would be sad.
2015          */
2016         if (unlikely(inode->i_private)) {
2017                 struct shmem_falloc *shmem_falloc;
2018
2019                 spin_lock(&inode->i_lock);
2020                 shmem_falloc = inode->i_private;
2021                 if (shmem_falloc &&
2022                     shmem_falloc->waitq &&
2023                     vmf->pgoff >= shmem_falloc->start &&
2024                     vmf->pgoff < shmem_falloc->next) {
2025                         wait_queue_head_t *shmem_falloc_waitq;
2026                         DEFINE_WAIT_FUNC(shmem_fault_wait, synchronous_wake_function);
2027
2028                         ret = VM_FAULT_NOPAGE;
2029                         if ((vmf->flags & FAULT_FLAG_ALLOW_RETRY) &&
2030                            !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) {
2031                                 /* It's polite to up mmap_sem if we can */
2032                                 up_read(&vma->vm_mm->mmap_sem);
2033                                 ret = VM_FAULT_RETRY;
2034                         }
2035
2036                         shmem_falloc_waitq = shmem_falloc->waitq;
2037                         prepare_to_wait(shmem_falloc_waitq, &shmem_fault_wait,
2038                                         TASK_UNINTERRUPTIBLE);
2039                         spin_unlock(&inode->i_lock);
2040                         schedule();
2041
2042                         /*
2043                          * shmem_falloc_waitq points into the shmem_fallocate()
2044                          * stack of the hole-punching task: shmem_falloc_waitq
2045                          * is usually invalid by the time we reach here, but
2046                          * finish_wait() does not dereference it in that case;
2047                          * though i_lock needed lest racing with wake_up_all().
2048                          */
2049                         spin_lock(&inode->i_lock);
2050                         finish_wait(shmem_falloc_waitq, &shmem_fault_wait);
2051                         spin_unlock(&inode->i_lock);
2052                         return ret;
2053                 }
2054                 spin_unlock(&inode->i_lock);
2055         }
2056
2057         sgp = SGP_CACHE;
2058
2059         if ((vma->vm_flags & VM_NOHUGEPAGE) ||
2060             test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags))
2061                 sgp = SGP_NOHUGE;
2062         else if (vma->vm_flags & VM_HUGEPAGE)
2063                 sgp = SGP_HUGE;
2064
2065         err = shmem_getpage_gfp(inode, vmf->pgoff, &vmf->page, sgp,
2066                                   gfp, vma, vmf, &ret);
2067         if (err)
2068                 return vmf_error(err);
2069         return ret;
2070 }
2071
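/*
 * Pick an unmapped area for a tmpfs mapping: when transparent huge pages
 * may be used, prefer an address aligned so that the mapping can be backed
 * by PMD-sized pages; otherwise just return the mm's default choice.
 */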
2072 unsigned long shmem_get_unmapped_area(struct file *file,
2073                                       unsigned long uaddr, unsigned long len,
2074                                       unsigned long pgoff, unsigned long flags)
2075 {
2076         unsigned long (*get_area)(struct file *,
2077                 unsigned long, unsigned long, unsigned long, unsigned long);
2078         unsigned long addr;
2079         unsigned long offset;
2080         unsigned long inflated_len;
2081         unsigned long inflated_addr;
2082         unsigned long inflated_offset;
2083
2084         if (len > TASK_SIZE)
2085                 return -ENOMEM;
2086
2087         get_area = current->mm->get_unmapped_area;
2088         addr = get_area(file, uaddr, len, pgoff, flags);
2089
2090         if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGE_PAGECACHE))
2091                 return addr;
2092         if (IS_ERR_VALUE(addr))
2093                 return addr;
2094         if (addr & ~PAGE_MASK)
2095                 return addr;
2096         if (addr > TASK_SIZE - len)
2097                 return addr;
2098
2099         if (shmem_huge == SHMEM_HUGE_DENY)
2100                 return addr;
2101         if (len < HPAGE_PMD_SIZE)
2102                 return addr;
2103         if (flags & MAP_FIXED)
2104                 return addr;
2105         /*
2106          * Our priority is to support MAP_SHARED mapped hugely;
2107          * and support MAP_PRIVATE mapped hugely too, until it is COWed.
2108          * But if caller specified an address hint, respect that as before.
2109          */
2110         if (uaddr)
2111                 return addr;
2112
2113         if (shmem_huge != SHMEM_HUGE_FORCE) {
2114                 struct super_block *sb;
2115
2116                 if (file) {
2117                         VM_BUG_ON(file->f_op != &shmem_file_operations);
2118                         sb = file_inode(file)->i_sb;
2119                 } else {
2120                         /*
2121                          * Called directly from mm/mmap.c, or drivers/char/mem.c
2122                          * for "/dev/zero", to create a shared anonymous object.
2123                          */
2124                         if (IS_ERR(shm_mnt))
2125                                 return addr;
2126                         sb = shm_mnt->mnt_sb;
2127                 }
2128                 if (SHMEM_SB(sb)->huge == SHMEM_HUGE_NEVER)
2129                         return addr;
2130         }
2131
2132         offset = (pgoff << PAGE_SHIFT) & (HPAGE_PMD_SIZE-1);
2133         if (offset && offset + len < 2 * HPAGE_PMD_SIZE)
2134                 return addr;
2135         if ((addr & (HPAGE_PMD_SIZE-1)) == offset)
2136                 return addr;
2137
2138         inflated_len = len + HPAGE_PMD_SIZE - PAGE_SIZE;
2139         if (inflated_len > TASK_SIZE)
2140                 return addr;
2141         if (inflated_len < len)
2142                 return addr;
2143
2144         inflated_addr = get_area(NULL, 0, inflated_len, 0, flags);
2145         if (IS_ERR_VALUE(inflated_addr))
2146                 return addr;
2147         if (inflated_addr & ~PAGE_MASK)
2148                 return addr;
2149
2150         inflated_offset = inflated_addr & (HPAGE_PMD_SIZE-1);
2151         inflated_addr += offset - inflated_offset;
2152         if (inflated_offset > offset)
2153                 inflated_addr += HPAGE_PMD_SIZE;
2154
2155         if (inflated_addr > TASK_SIZE - len)
2156                 return addr;
2157         return inflated_addr;
2158 }
2159
2160 #ifdef CONFIG_NUMA
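/* vm_operations mempolicy hooks: set or look up the shared policy on the inode */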
2161 static int shmem_set_policy(struct vm_area_struct *vma, struct mempolicy *mpol)
2162 {
2163         struct inode *inode = file_inode(vma->vm_file);
2164         return mpol_set_shared_policy(&SHMEM_I(inode)->policy, vma, mpol);
2165 }
2166
2167 static struct mempolicy *shmem_get_policy(struct vm_area_struct *vma,
2168                                           unsigned long addr)
2169 {
2170         struct inode *inode = file_inode(vma->vm_file);
2171         pgoff_t index;
2172
2173         index = ((addr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
2174         return mpol_shared_policy_lookup(&SHMEM_I(inode)->policy, index);
2175 }
2176 #endif
2177
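/*
 * Lock or unlock this file's pages in memory, e.g. on behalf of SysV
 * shmctl(SHM_LOCK): charge or uncharge the user's locked-memory allowance
 * and mark the mapping unevictable or evictable to match.
 */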
2178 int shmem_lock(struct file *file, int lock, struct user_struct *user)
2179 {
2180         struct inode *inode = file_inode(file);
2181         struct shmem_inode_info *info = SHMEM_I(inode);
2182         int retval = -ENOMEM;
2183
2184         spin_lock_irq(&info->lock);
2185         if (lock && !(info->flags & VM_LOCKED)) {
2186                 if (!user_shm_lock(inode->i_size, user))
2187                         goto out_nomem;
2188                 info->flags |= VM_LOCKED;
2189                 mapping_set_unevictable(file->f_mapping);
2190         }
2191         if (!lock && (info->flags & VM_LOCKED) && user) {
2192                 user_shm_unlock(inode->i_size, user);
2193                 info->flags &= ~VM_LOCKED;
2194                 mapping_clear_unevictable(file->f_mapping);
2195         }
2196         retval = 0;
2197
2198 out_nomem:
2199         spin_unlock_irq(&info->lock);
2200         return retval;
2201 }
2202
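/*
 * mmap of a tmpfs file: refuse new shared writable mappings when the
 * F_SEAL_FUTURE_WRITE seal is set, install shmem_vm_ops, and let
 * khugepaged know if the range could usefully be collapsed to huge pages.
 */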
2203 static int shmem_mmap(struct file *file, struct vm_area_struct *vma)
2204 {
2205         struct shmem_inode_info *info = SHMEM_I(file_inode(file));
2206
2207         if (info->seals & F_SEAL_FUTURE_WRITE) {
2208                 /*
2209                  * New PROT_WRITE and MAP_SHARED mmaps are not allowed when
2210                  * "future write" seal active.
2211                  */
2212                 if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_WRITE))
2213                         return -EPERM;
2214
2215                 /*
2216                  * Since the F_SEAL_FUTURE_WRITE seals allow for a MAP_SHARED
2217                  * read-only mapping, take care to not allow mprotect to revert
2218                  * protections.
2219                  */
2220                 vma->vm_flags &= ~(VM_MAYWRITE);
2221         }
2222
2223         file_accessed(file);
2224         vma->vm_ops = &shmem_vm_ops;
2225         if (IS_ENABLED(CONFIG_TRANSPARENT_HUGE_PAGECACHE) &&
2226                         ((vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK) <
2227                         (vma->vm_end & HPAGE_PMD_MASK)) {
2228                 khugepaged_enter(vma, vma->vm_flags);
2229         }
2230         return 0;
2231 }
2232
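/*
 * Allocate and initialize a new shmem/tmpfs inode of the given type,
 * subject to the superblock's inode limit, inheriting its default
 * mempolicy for regular files.
 */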
2233 static struct inode *shmem_get_inode(struct super_block *sb, const struct inode *dir,
2234                                      umode_t mode, dev_t dev, unsigned long flags)
2235 {
2236         struct inode *inode;
2237         struct shmem_inode_info *info;
2238         struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
2239
2240         if (shmem_reserve_inode(sb))
2241                 return NULL;
2242
2243         inode = new_inode(sb);
2244         if (inode) {
2245                 inode->i_ino = get_next_ino();
2246                 inode_init_owner(inode, dir, mode);
2247                 inode->i_blocks = 0;
2248                 inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode);
2249                 inode->i_generation = prandom_u32();
2250                 info = SHMEM_I(inode);
2251                 memset(info, 0, (char *)inode - (char *)info);
2252                 spin_lock_init(&info->lock);
2253                 atomic_set(&info->stop_eviction, 0);
2254                 info->seals = F_SEAL_SEAL;
2255                 info->flags = flags & VM_NORESERVE;
2256                 INIT_LIST_HEAD(&info->shrinklist);
2257                 INIT_LIST_HEAD(&info->swaplist);
2258                 simple_xattrs_init(&info->xattrs);
2259                 cache_no_acl(inode);
2260
2261                 switch (mode & S_IFMT) {
2262                 default:
2263                         inode->i_op = &shmem_special_inode_operations;
2264                         init_special_inode(inode, mode, dev);
2265                         break;
2266                 case S_IFREG:
2267                         inode->i_mapping->a_ops = &shmem_aops;
2268                         inode->i_op = &shmem_inode_operations;
2269                         inode->i_fop = &shmem_file_operations;
2270                         mpol_shared_policy_init(&info->policy,
2271                                                  shmem_get_sbmpol(sbinfo));
2272                         break;
2273                 case S_IFDIR:
2274                         inc_nlink(inode);
2275                         /* Some things misbehave if size == 0 on a directory */
2276                         inode->i_size = 2 * BOGO_DIRENT_SIZE;
2277                         inode->i_op = &shmem_dir_inode_operations;
2278                         inode->i_fop = &simple_dir_operations;
2279                         break;
2280                 case S_IFLNK:
2281                         /*
2282                          * Must not load anything in the rbtree,
2283                          * mpol_free_shared_policy will not be called.
2284                          */
2285                         mpol_shared_policy_init(&info->policy, NULL);
2286                         break;
2287                 }
2288
2289                 lockdep_annotate_inode_mutex_key(inode);
2290         } else
2291                 shmem_free_inode(sb);
2292         return inode;
2293 }
2294
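/* True if this address_space belongs to a shmem/tmpfs inode */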
2295 bool shmem_mapping(struct address_space *mapping)
2296 {
2297         return mapping->a_ops == &shmem_aops;
2298 }
2299
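/*
 * Worker for UFFDIO_COPY and UFFDIO_ZEROPAGE on a shmem-backed vma:
 * allocate a page, fill it from user memory (or zero it), add it to the
 * file's page cache and map it at @dst_addr.  Returns -ENOENT when the
 * copy must be retried outside mmap_sem, with *pagep carrying the page.
 */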
2300 static int shmem_mfill_atomic_pte(struct mm_struct *dst_mm,
2301                                   pmd_t *dst_pmd,
2302                                   struct vm_area_struct *dst_vma,
2303                                   unsigned long dst_addr,
2304                                   unsigned long src_addr,
2305                                   bool zeropage,
2306                                   struct page **pagep)
2307 {
2308         struct inode *inode = file_inode(dst_vma->vm_file);
2309         struct shmem_inode_info *info = SHMEM_I(inode);
2310         struct address_space *mapping = inode->i_mapping;
2311         gfp_t gfp = mapping_gfp_mask(mapping);
2312         pgoff_t pgoff = linear_page_index(dst_vma, dst_addr);
2313         struct mem_cgroup *memcg;
2314         spinlock_t *ptl;
2315         void *page_kaddr;
2316         struct page *page;
2317         pte_t _dst_pte, *dst_pte;
2318         int ret;
2319         pgoff_t offset, max_off;
2320
2321         ret = -ENOMEM;
2322         if (!shmem_inode_acct_block(inode, 1))
2323                 goto out;
2324
2325         if (!*pagep) {
2326                 page = shmem_alloc_page(gfp, info, pgoff);
2327                 if (!page)
2328                         goto out_unacct_blocks;
2329
2330                 if (!zeropage) {        /* mcopy_atomic */
2331                         page_kaddr = kmap_atomic(page);
2332                         ret = copy_from_user(page_kaddr,
2333                                              (const void __user *)src_addr,
2334                                              PAGE_SIZE);
2335                         kunmap_atomic(page_kaddr);
2336
2337                         /* fallback to copy_from_user outside mmap_sem */
2338                         if (unlikely(ret)) {
2339                                 *pagep = page;
2340                                 shmem_inode_unacct_blocks(inode, 1);
2341                                 /* don't free the page */
2342                                 return -ENOENT;
2343                         }
2344                 } else {                /* mfill_zeropage_atomic */
2345                         clear_highpage(page);
2346                 }
2347         } else {
2348                 page = *pagep;
2349                 *pagep = NULL;
2350         }
2351
2352         VM_BUG_ON(PageLocked(page) || PageSwapBacked(page));
2353         __SetPageLocked(page);
2354         __SetPageSwapBacked(page);
2355         __SetPageUptodate(page);
2356
2357         ret = -EFAULT;
2358         offset = linear_page_index(dst_vma, dst_addr);
2359         max_off = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
2360         if (unlikely(offset >= max_off))
2361                 goto out_release;
2362
2363         ret = mem_cgroup_try_charge_delay(page, dst_mm, gfp, &memcg, false);
2364         if (ret)
2365                 goto out_release;
2366
2367         ret = shmem_add_to_page_cache(page, mapping, pgoff, NULL,
2368                                                 gfp & GFP_RECLAIM_MASK);
2369         if (ret)
2370                 goto out_release_uncharge;
2371
2372         mem_cgroup_commit_charge(page, memcg, false, false);
2373
2374         _dst_pte = mk_pte(page, dst_vma->vm_page_prot);
2375         if (dst_vma->vm_flags & VM_WRITE)
2376                 _dst_pte = pte_mkwrite(pte_mkdirty(_dst_pte));
2377         else {
2378                 /*
2379                  * We don't set the pte dirty if the vma has no
2380                  * VM_WRITE permission, so mark the page dirty or it
2381                  * could be freed from under us. We could do it
2382                  * unconditionally before unlock_page(), but doing it
2383                  * only if VM_WRITE is not set is faster.
2384                  */
2385                 set_page_dirty(page);
2386         }
2387
2388         dst_pte = pte_offset_map_lock(dst_mm, dst_pmd, dst_addr, &ptl);
2389
2390         ret = -EFAULT;
2391         max_off = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
2392         if (unlikely(offset >= max_off))
2393                 goto out_release_uncharge_unlock;
2394
2395         ret = -EEXIST;
2396         if (!pte_none(*dst_pte))
2397                 goto out_release_uncharge_unlock;
2398
2399         lru_cache_add_anon(page);
2400
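        /* match the irq-safe locking used for info->lock elsewhere in this file */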
2401         spin_lock_irq(&info->lock);
2402         info->alloced++;
2403         inode->i_blocks += BLOCKS_PER_PAGE;
2404         shmem_recalc_inode(inode);
2405         spin_unlock_irq(&info->lock);
2406
2407         inc_mm_counter(dst_mm, mm_counter_file(page));
2408         page_add_file_rmap(page, false);
2409         set_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte);
2410
2411         /* No need to invalidate - it was non-present before */
2412         update_mmu_cache(dst_vma, dst_addr, dst_pte);
2413         pte_unmap_unlock(dst_pte, ptl);
2414         unlock_page(page);
2415         ret = 0;
2416 out:
2417         return ret;
2418 out_release_uncharge_unlock:
2419         pte_unmap_unlock(dst_pte, ptl);
2420         ClearPageDirty(page);
2421         delete_from_page_cache(page);
2422 out_release_uncharge:
2423         mem_cgroup_cancel_charge(page, memcg, false);
2424 out_release:
2425         unlock_page(page);
2426         put_page(page);
2427 out_unacct_blocks:
2428         shmem_inode_unacct_blocks(inode, 1);
2429         goto out;
2430 }
2431
2432 int shmem_mcopy_atomic_pte(struct mm_struct *dst_mm,
2433                            pmd_t *dst_pmd,
2434                            struct vm_area_struct *dst_vma,
2435                            unsigned long dst_addr,
2436                            unsigned long src_addr,
2437                            struct page **pagep)
2438 {
2439         return shmem_mfill_atomic_pte(dst_mm, dst_pmd, dst_vma,
2440                                       dst_addr, src_addr, false, pagep);
2441 }
2442
2443 int shmem_mfill_zeropage_pte(struct mm_struct *dst_mm,
2444                              pmd_t *dst_pmd,
2445                              struct vm_area_struct *dst_vma,
2446                              unsigned long dst_addr)
2447 {
2448         struct page *page = NULL;
2449
2450         return shmem_mfill_atomic_pte(dst_mm, dst_pmd, dst_vma,
2451                                       dst_addr, 0, true, &page);
2452 }
2453
2454 #ifdef CONFIG_TMPFS
2455 static const struct inode_operations shmem_symlink_inode_operations;
2456 static const struct inode_operations shmem_short_symlink_operations;
2457
2458 #ifdef CONFIG_TMPFS_XATTR
2459 static int shmem_initxattrs(struct inode *, const struct xattr *, void *);
2460 #else
2461 #define shmem_initxattrs NULL
2462 #endif
2463
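/*
 * ->write_begin for tmpfs: enforce the write and grow seals, then find
 * or allocate the page to be written.
 */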
2464 static int
2465 shmem_write_begin(struct file *file, struct address_space *mapping,
2466                         loff_t pos, unsigned len, unsigned flags,
2467                         struct page **pagep, void **fsdata)
2468 {
2469         struct inode *inode = mapping->host;
2470         struct shmem_inode_info *info = SHMEM_I(inode);
2471         pgoff_t index = pos >> PAGE_SHIFT;
2472
2473         /* i_mutex is held by caller */
2474         if (unlikely(info->seals & (F_SEAL_GROW |
2475                                    F_SEAL_WRITE | F_SEAL_FUTURE_WRITE))) {
2476                 if (info->seals & (F_SEAL_WRITE | F_SEAL_FUTURE_WRITE))
2477                         return -EPERM;
2478                 if ((info->seals & F_SEAL_GROW) && pos + len > inode->i_size)
2479                         return -EPERM;
2480         }
2481
2482         return shmem_getpage(inode, index, pagep, SGP_WRITE);
2483 }
2484
2485 static int
2486 shmem_write_end(struct file *file, struct address_space *mapping,
2487                         loff_t pos, unsigned len, unsigned copied,
2488                         struct page *page, void *fsdata)
2489 {
2490         struct inode *inode = mapping->host;
2491
2492         if (pos + copied > inode->i_size)
2493                 i_size_write(inode, pos + copied);
2494
2495         if (!PageUptodate(page)) {
2496                 struct page *head = compound_head(page);
2497                 if (PageTransCompound(page)) {
2498                         int i;
2499
2500                         for (i = 0; i < HPAGE_PMD_NR; i++) {
2501                                 if (head + i == page)
2502                                         continue;
2503                                 clear_highpage(head + i);
2504                                 flush_dcache_page(head + i);
2505                         }
2506                 }
2507                 if (copied < PAGE_SIZE) {
2508                         unsigned from = pos & (PAGE_SIZE - 1);
2509                         zero_user_segments(page, 0, from,
2510                                         from + copied, PAGE_SIZE);
2511                 }
2512                 SetPageUptodate(head);
2513         }
2514         set_page_dirty(page);
2515         unlock_page(page);
2516         put_page(page);
2517
2518         return copied;
2519 }
2520
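/*
 * ->read_iter for tmpfs: copy data straight from the page cache to the
 * iov_iter, substituting the shared zero page for unwritten holes.
 */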
2521 static ssize_t shmem_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
2522 {
2523         struct file *file = iocb->ki_filp;
2524         struct inode *inode = file_inode(file);
2525         struct address_space *mapping = inode->i_mapping;
2526         pgoff_t index;
2527         unsigned long offset;
2528         enum sgp_type sgp = SGP_READ;
2529         int error = 0;
2530         ssize_t retval = 0;
2531         loff_t *ppos = &iocb->ki_pos;
2532
2533         /*
2534          * Might this read be for a stacking filesystem?  Then when reading
2535          * holes of a sparse file, we actually need to allocate those pages,
2536          * and even mark them dirty, so it cannot exceed the max_blocks limit.
2537          */
2538         if (!iter_is_iovec(to))
2539                 sgp = SGP_CACHE;
2540
2541         index = *ppos >> PAGE_SHIFT;
2542         offset = *ppos & ~PAGE_MASK;
2543
2544         for (;;) {
2545                 struct page *page = NULL;
2546                 pgoff_t end_index;
2547                 unsigned long nr, ret;
2548                 loff_t i_size = i_size_read(inode);
2549
2550                 end_index = i_size >> PAGE_SHIFT;
2551                 if (index > end_index)
2552                         break;
2553                 if (index == end_index) {
2554                         nr = i_size & ~PAGE_MASK;
2555                         if (nr <= offset)
2556                                 break;
2557                 }
2558
2559                 error = shmem_getpage(inode, index, &page, sgp);
2560                 if (error) {
2561                         if (error == -EINVAL)
2562                                 error = 0;
2563                         break;
2564                 }
2565                 if (page) {
2566                         if (sgp == SGP_CACHE)
2567                                 set_page_dirty(page);
2568                         unlock_page(page);
2569                 }
2570
2571                 /*
2572                  * We must re-check i_size after getting the page: reads (unlike writes)
2573                  * are called without i_mutex protection against truncate
2574                  */
2575                 nr = PAGE_SIZE;
2576                 i_size = i_size_read(inode);
2577                 end_index = i_size >> PAGE_SHIFT;
2578                 if (index == end_index) {
2579                         nr = i_size & ~PAGE_MASK;
2580                         if (nr <= offset) {
2581                                 if (page)
2582                                         put_page(page);
2583                                 break;
2584                         }
2585                 }
2586                 nr -= offset;
2587
2588                 if (page) {
2589                         /*
2590                          * If users can be writing to this page using arbitrary
2591                          * virtual addresses, take care about potential aliasing
2592                          * before reading the page on the kernel side.
2593                          */
2594                         if (mapping_writably_mapped(mapping))
2595                                 flush_dcache_page(page);
2596                         /*
2597                          * Mark the page accessed if we read the beginning.
2598                          */
2599                         if (!offset)
2600                                 mark_page_accessed(page);
2601                 } else {
2602                         page = ZERO_PAGE(0);
2603                         get_page(page);
2604                 }
2605
2606                 /*
2607                  * Ok, we have the page, and it's up-to-date, so
2608                  * now we can copy it to user space...
2609                  */
2610                 ret = copy_page_to_iter(page, offset, nr, to);
2611                 retval += ret;
2612                 offset += ret;
2613                 index += offset >> PAGE_SHIFT;
2614                 offset &= ~PAGE_MASK;
2615
2616                 put_page(page);
2617                 if (!iov_iter_count(to))
2618                         break;
2619                 if (ret < nr) {
2620                         error = -EFAULT;
2621                         break;
2622                 }
2623                 cond_resched();
2624         }
2625
2626         *ppos = ((loff_t) index << PAGE_SHIFT) + offset;
2627         file_accessed(file);
2628         return retval ? retval : error;
2629 }
2630
2631 /*
2632  * llseek SEEK_DATA or SEEK_HOLE through the page cache.
2633  */
2634 static pgoff_t shmem_seek_hole_data(struct address_space *mapping,
2635                                     pgoff_t index, pgoff_t end, int whence)
2636 {
2637         struct page *page;
2638         struct pagevec pvec;
2639         pgoff_t indices[PAGEVEC_SIZE];
2640         bool done = false;
2641         int i;
2642
2643         pagevec_init(&pvec);
2644         pvec.nr = 1;            /* start small: we may be there already */
2645         while (!done) {
2646                 pvec.nr = find_get_entries(mapping, index,
2647                                         pvec.nr, pvec.pages, indices);
2648                 if (!pvec.nr) {
2649                         if (whence == SEEK_DATA)
2650                                 index = end;
2651                         break;
2652                 }
2653                 for (i = 0; i < pvec.nr; i++, index++) {
2654                         if (index < indices[i]) {
2655                                 if (whence == SEEK_HOLE) {
2656                                         done = true;
2657                                         break;
2658                                 }
2659                                 index = indices[i];
2660                         }
2661                         page = pvec.pages[i];
2662                         if (page && !xa_is_value(page)) {
2663                                 if (!PageUptodate(page))
2664                                         page = NULL;
2665                         }
2666                         if (index >= end ||
2667                             (page && whence == SEEK_DATA) ||
2668                             (!page && whence == SEEK_HOLE)) {
2669                                 done = true;
2670                                 break;
2671                         }
2672                 }
2673                 pagevec_remove_exceptionals(&pvec);
2674                 pagevec_release(&pvec);
2675                 pvec.nr = PAGEVEC_SIZE;
2676                 cond_resched();
2677         }
2678         return index;
2679 }
2680
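/*
 * llseek: SEEK_DATA and SEEK_HOLE are resolved by scanning the page cache;
 * every other whence is handled by generic_file_llseek_size().
 */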
2681 static loff_t shmem_file_llseek(struct file *file, loff_t offset, int whence)
2682 {
2683         struct address_space *mapping = file->f_mapping;
2684         struct inode *inode = mapping->host;
2685         pgoff_t start, end;
2686         loff_t new_offset;
2687
2688         if (whence != SEEK_DATA && whence != SEEK_HOLE)
2689                 return generic_file_llseek_size(file, offset, whence,
2690                                         MAX_LFS_FILESIZE, i_size_read(inode));
2691         inode_lock(inode);
2692         /* We're holding i_mutex so we can access i_size directly */
2693
2694         if (offset < 0 || offset >= inode->i_size)
2695                 offset = -ENXIO;
2696         else {
2697                 start = offset >> PAGE_SHIFT;
2698                 end = (inode->i_size + PAGE_SIZE - 1) >> PAGE_SHIFT;
2699                 new_offset = shmem_seek_hole_data(mapping, start, end, whence);
2700                 new_offset <<= PAGE_SHIFT;
2701                 if (new_offset > offset) {
2702                         if (new_offset < inode->i_size)
2703                                 offset = new_offset;
2704                         else if (whence == SEEK_DATA)
2705                                 offset = -ENXIO;
2706                         else
2707                                 offset = inode->i_size;
2708                 }
2709         }
2710
2711         if (offset >= 0)
2712                 offset = vfs_setpos(file, offset, MAX_LFS_FILESIZE);
2713         inode_unlock(inode);
2714         return offset;
2715 }
2716
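/*
 * fallocate(2) on tmpfs: FALLOC_FL_PUNCH_HOLE truncates the range while
 * keeping racing faults out of the hole; plain preallocation instantiates
 * pages up front so that later writes cannot fail with ENOSPC.
 *
 * For example, from userspace:
 *	fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE, offset, len);
 * punches a hole, while
 *	fallocate(fd, 0, 0, size);
 * preallocates the whole file.
 */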
2717 static long shmem_fallocate(struct file *file, int mode, loff_t offset,
2718                                                          loff_t len)
2719 {
2720         struct inode *inode = file_inode(file);
2721         struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
2722         struct shmem_inode_info *info = SHMEM_I(inode);
2723         struct shmem_falloc shmem_falloc;
2724         pgoff_t start, index, end;
2725         int error;
2726
2727         if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
2728                 return -EOPNOTSUPP;
2729
2730         inode_lock(inode);
2731
2732         if (mode & FALLOC_FL_PUNCH_HOLE) {
2733                 struct address_space *mapping = file->f_mapping;
2734                 loff_t unmap_start = round_up(offset, PAGE_SIZE);
2735                 loff_t unmap_end = round_down(offset + len, PAGE_SIZE) - 1;
2736                 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(shmem_falloc_waitq);
2737
2738                 /* protected by i_mutex */
2739                 if (info->seals & (F_SEAL_WRITE | F_SEAL_FUTURE_WRITE)) {
2740                         error = -EPERM;
2741                         goto out;
2742                 }
2743
2744                 shmem_falloc.waitq = &shmem_falloc_waitq;
2745                 shmem_falloc.start = unmap_start >> PAGE_SHIFT;
2746                 shmem_falloc.next = (unmap_end + 1) >> PAGE_SHIFT;
2747                 spin_lock(&inode->i_lock);
2748                 inode->i_private = &shmem_falloc;
2749                 spin_unlock(&inode->i_lock);
2750
2751                 if ((u64)unmap_end > (u64)unmap_start)
2752                         unmap_mapping_range(mapping, unmap_start,
2753                                             1 + unmap_end - unmap_start, 0);
2754                 shmem_truncate_range(inode, offset, offset + len - 1);
2755                 /* No need to unmap again: hole-punching leaves COWed pages */
2756
2757                 spin_lock(&inode->i_lock);
2758                 inode->i_private = NULL;
2759                 wake_up_all(&shmem_falloc_waitq);
2760                 WARN_ON_ONCE(!list_empty(&shmem_falloc_waitq.head));
2761                 spin_unlock(&inode->i_lock);
2762                 error = 0;
2763                 goto out;
2764         }
2765
2766         /* We need to check rlimit even when FALLOC_FL_KEEP_SIZE */
2767         error = inode_newsize_ok(inode, offset + len);
2768         if (error)
2769                 goto out;
2770
2771         if ((info->seals & F_SEAL_GROW) && offset + len > inode->i_size) {
2772                 error = -EPERM;
2773                 goto out;
2774         }
2775
2776         start = offset >> PAGE_SHIFT;
2777         end = (offset + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
2778         /* Try to avoid a swapstorm if len is impossible to satisfy */
2779         if (sbinfo->max_blocks && end - start > sbinfo->max_blocks) {
2780                 error = -ENOSPC;
2781                 goto out;
2782         }
2783
2784         shmem_falloc.waitq = NULL;
2785         shmem_falloc.start = start;
2786         shmem_falloc.next  = start;
2787         shmem_falloc.nr_falloced = 0;
2788         shmem_falloc.nr_unswapped = 0;
2789         spin_lock(&inode->i_lock);
2790         inode->i_private = &shmem_falloc;
2791         spin_unlock(&inode->i_lock);
2792
2793         for (index = start; index < end; index++) {
2794                 struct page *page;
2795
2796                 /*
2797                  * Good, the fallocate(2) manpage permits EINTR: we may have
2798                  * been interrupted because we are using up too much memory.
2799                  */
2800                 if (signal_pending(current))
2801                         error = -EINTR;
2802                 else if (shmem_falloc.nr_unswapped > shmem_falloc.nr_falloced)
2803                         error = -ENOMEM;
2804                 else
2805                         error = shmem_getpage(inode, index, &page, SGP_FALLOC);
2806                 if (error) {
2807                         /* Remove the !PageUptodate pages we added */
2808                         if (index > start) {
2809                                 shmem_undo_range(inode,
2810                                     (loff_t)start << PAGE_SHIFT,
2811                                     ((loff_t)index << PAGE_SHIFT) - 1, true);
2812                         }
2813                         goto undone;
2814                 }
2815
2816                 /*
2817                  * Inform shmem_writepage() how far we have reached.
2818                  * No need for lock or barrier: we have the page lock.
2819                  */
2820                 shmem_falloc.next++;
2821                 if (!PageUptodate(page))
2822                         shmem_falloc.nr_falloced++;
2823
2824                 /*
2825                  * If !PageUptodate, leave it that way so that freeable pages
2826                  * can be recognized if we need to roll back on error later.
2827                  * But set_page_dirty so that memory pressure will swap rather
2828                  * than free the pages we are allocating (and SGP_CACHE pages
2829                  * might still be clean: we now need to mark those dirty too).
2830                  */
2831                 set_page_dirty(page);
2832                 unlock_page(page);
2833                 put_page(page);
2834                 cond_resched();
2835         }
2836
2837         if (!(mode & FALLOC_FL_KEEP_SIZE) && offset + len > inode->i_size)
2838                 i_size_write(inode, offset + len);
2839         inode->i_ctime = current_time(inode);
2840 undone:
2841         spin_lock(&inode->i_lock);
2842         inode->i_private = NULL;
2843         spin_unlock(&inode->i_lock);
2844 out:
2845         inode_unlock(inode);
2846         return error;
2847 }
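/*
 * Illustrative userspace view of the above (an assumption about typical
 * callers, not part of this file's interface): punching a hole releases the
 * pages backing a range, while a plain fallocate() preallocates them:
 *
 *	fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE, off, len);
 *	fallocate(fd, 0, off, len);
 */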
2848
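/*
 * Report tmpfs usage for statfs(2).  Block counts are in PAGE_SIZE units;
 * unlimited mounts leave f_blocks/f_files at 0, as simple_statfs() would.
 */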
2849 static int shmem_statfs(struct dentry *dentry, struct kstatfs *buf)
2850 {
2851         struct shmem_sb_info *sbinfo = SHMEM_SB(dentry->d_sb);
2852
2853         buf->f_type = TMPFS_MAGIC;
2854         buf->f_bsize = PAGE_SIZE;
2855         buf->f_namelen = NAME_MAX;
2856         if (sbinfo->max_blocks) {
2857                 buf->f_blocks = sbinfo->max_blocks;
2858                 buf->f_bavail =
2859                 buf->f_bfree  = sbinfo->max_blocks -
2860                                 percpu_counter_sum(&sbinfo->used_blocks);
2861         }
2862         if (sbinfo->max_inodes) {
2863                 buf->f_files = sbinfo->max_inodes;
2864                 buf->f_ffree = sbinfo->free_inodes;
2865         }
2866         /* else leave those fields 0 like simple_statfs */
2867         return 0;
2868 }
2869
2870 /*
2871  * File creation. Allocate an inode, and we're done.
2872  */
2873 static int
2874 shmem_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, dev_t dev)
2875 {
2876         struct inode *inode;
2877         int error = -ENOSPC;
2878
2879         inode = shmem_get_inode(dir->i_sb, dir, mode, dev, VM_NORESERVE);
2880         if (inode) {
2881                 error = simple_acl_create(dir, inode);
2882                 if (error)
2883                         goto out_iput;
2884                 error = security_inode_init_security(inode, dir,
2885                                                      &dentry->d_name,
2886                                                      shmem_initxattrs, NULL);
2887                 if (error && error != -EOPNOTSUPP)
2888                         goto out_iput;
2889
2890                 error = 0;
2891                 dir->i_size += BOGO_DIRENT_SIZE;
2892                 dir->i_ctime = dir->i_mtime = current_time(dir);
2893                 d_instantiate(dentry, inode);
2894                 dget(dentry); /* Extra count - pin the dentry in core */
2895         }
2896         return error;
2897 out_iput:
2898         iput(inode);
2899         return error;
2900 }
2901
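/*
 * O_TMPFILE support: like shmem_mknod(), but the inode is instantiated via
 * d_tmpfile() with no directory entry, so it stays unlinked from birth.
 */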
2902 static int
2903 shmem_tmpfile(struct inode *dir, struct dentry *dentry, umode_t mode)
2904 {
2905         struct inode *inode;
2906         int error = -ENOSPC;
2907
2908         inode = shmem_get_inode(dir->i_sb, dir, mode, 0, VM_NORESERVE);
2909         if (inode) {
2910                 error = security_inode_init_security(inode, dir,
2911                                                      NULL,
2912                                                      shmem_initxattrs, NULL);
2913                 if (error && error != -EOPNOTSUPP)
2914                         goto out_iput;
2915                 error = simple_acl_create(dir, inode);
2916                 if (error)
2917                         goto out_iput;
2918                 d_tmpfile(dentry, inode);
2919         }
2920         return error;
2921 out_iput:
2922         iput(inode);
2923         return error;
2924 }
2925
2926 static int shmem_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
2927 {
2928         int error;
2929
2930         if ((error = shmem_mknod(dir, dentry, mode | S_IFDIR, 0)))
2931                 return error;
2932         inc_nlink(dir);
2933         return 0;
2934 }
2935
2936 static int shmem_create(struct inode *dir, struct dentry *dentry, umode_t mode,
2937                 bool excl)
2938 {
2939         return shmem_mknod(dir, dentry, mode | S_IFREG, 0);
2940 }
2941
2942 /*
2943  * Link a file.
2944  */
2945 static int shmem_link(struct dentry *old_dentry, struct inode *dir, struct dentry *dentry)
2946 {
2947         struct inode *inode = d_inode(old_dentry);
2948         int ret = 0;
2949
2950         /*
2951          * No ordinary (disk based) filesystem counts links as inodes;
2952          * but each new link needs a new dentry, pinning lowmem, and
2953          * tmpfs dentries cannot be pruned until they are unlinked.
2954          * But if an O_TMPFILE file is linked into the tmpfs, the
2955          * first link must skip that, to get the accounting right.
2956          */
2957         if (inode->i_nlink) {
2958                 ret = shmem_reserve_inode(inode->i_sb);
2959                 if (ret)
2960                         goto out;
2961         }
2962
2963         dir->i_size += BOGO_DIRENT_SIZE;
2964         inode->i_ctime = dir->i_ctime = dir->i_mtime = current_time(inode);
2965         inc_nlink(inode);
2966         ihold(inode);   /* New dentry reference */
2967         dget(dentry);           /* Extra pinning count for the created dentry */
2968         d_instantiate(dentry, inode);
2969 out:
2970         return ret;
2971 }
2972
2973 static int shmem_unlink(struct inode *dir, struct dentry *dentry)
2974 {
2975         struct inode *inode = d_inode(dentry);
2976
2977         if (inode->i_nlink > 1 && !S_ISDIR(inode->i_mode))
2978                 shmem_free_inode(inode->i_sb);
2979
2980         dir->i_size -= BOGO_DIRENT_SIZE;
2981         inode->i_ctime = dir->i_ctime = dir->i_mtime = current_time(inode);
2982         drop_nlink(inode);
2983         dput(dentry);   /* Undo the count from "create" - this does all the work */
2984         return 0;
2985 }
2986
2987 static int shmem_rmdir(struct inode *dir, struct dentry *dentry)
2988 {
2989         if (!simple_empty(dentry))
2990                 return -ENOTEMPTY;
2991
2992         drop_nlink(d_inode(dentry));
2993         drop_nlink(dir);
2994         return shmem_unlink(dir, dentry);
2995 }
2996
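/*
 * RENAME_EXCHANGE: both names survive; only the parents' link counts and
 * timestamps need fixing when a directory and a non-directory swap places.
 */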
2997 static int shmem_exchange(struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, struct dentry *new_dentry)
2998 {
2999         bool old_is_dir = d_is_dir(old_dentry);
3000         bool new_is_dir = d_is_dir(new_dentry);
3001
3002         if (old_dir != new_dir && old_is_dir != new_is_dir) {
3003                 if (old_is_dir) {
3004                         drop_nlink(old_dir);
3005                         inc_nlink(new_dir);
3006                 } else {
3007                         drop_nlink(new_dir);
3008                         inc_nlink(old_dir);
3009                 }
3010         }
3011         old_dir->i_ctime = old_dir->i_mtime =
3012         new_dir->i_ctime = new_dir->i_mtime =
3013         d_inode(old_dentry)->i_ctime =
3014         d_inode(new_dentry)->i_ctime = current_time(old_dir);
3015
3016         return 0;
3017 }
3018
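/*
 * RENAME_WHITEOUT support (used e.g. by overlayfs): replace the old name
 * with a whiteout, i.e. a char device node with device number 0.
 */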
3019 static int shmem_whiteout(struct inode *old_dir, struct dentry *old_dentry)
3020 {
3021         struct dentry *whiteout;
3022         int error;
3023
3024         whiteout = d_alloc(old_dentry->d_parent, &old_dentry->d_name);
3025         if (!whiteout)
3026                 return -ENOMEM;
3027
3028         error = shmem_mknod(old_dir, whiteout,
3029                             S_IFCHR | WHITEOUT_MODE, WHITEOUT_DEV);
3030         dput(whiteout);
3031         if (error)
3032                 return error;
3033
3034         /*
3035          * Cheat and hash the whiteout while the old dentry is still in
3036          * place, instead of playing games with FS_RENAME_DOES_D_MOVE.
3037          *
3038          * d_lookup() will consistently find one of them at this point,
3039          * not sure which one, but that isn't even important.
3040          */
3041         d_rehash(whiteout);
3042         return 0;
3043 }
3044
3045 /*
3046  * The VFS layer already does all the dentry stuff for rename;
3047  * we just have to decrement the usage count for the target if
3048  * it exists, so that the VFS layer correctly frees it when it
3049  * gets overwritten.
3050  */
3051 static int shmem_rename2(struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, struct dentry *new_dentry, unsigned int flags)
3052 {
3053         struct inode *inode = d_inode(old_dentry);
3054         int they_are_dirs = S_ISDIR(inode->i_mode);
3055
3056         if (flags & ~(RENAME_NOREPLACE | RENAME_EXCHANGE | RENAME_WHITEOUT))
3057                 return -EINVAL;
3058
3059         if (flags & RENAME_EXCHANGE)
3060                 return shmem_exchange(old_dir, old_dentry, new_dir, new_dentry);
3061
3062         if (!simple_empty(new_dentry))
3063                 return -ENOTEMPTY;
3064
3065         if (flags & RENAME_WHITEOUT) {
3066                 int error;
3067
3068                 error = shmem_whiteout(old_dir, old_dentry);
3069                 if (error)
3070                         return error;
3071         }
3072
3073         if (d_really_is_positive(new_dentry)) {
3074                 (void) shmem_unlink(new_dir, new_dentry);
3075                 if (they_are_dirs) {
3076                         drop_nlink(d_inode(new_dentry));
3077                         drop_nlink(old_dir);
3078                 }
3079         } else if (they_are_dirs) {
3080                 drop_nlink(old_dir);
3081                 inc_nlink(new_dir);
3082         }
3083
3084         old_dir->i_size -= BOGO_DIRENT_SIZE;
3085         new_dir->i_size += BOGO_DIRENT_SIZE;
3086         old_dir->i_ctime = old_dir->i_mtime =
3087         new_dir->i_ctime = new_dir->i_mtime =
3088         inode->i_ctime = current_time(old_dir);
3089         return 0;
3090 }
3091
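/*
 * Create a symlink: short targets are copied into ->i_link, longer ones
 * are written into page 0 of the inode's mapping (and so may be swapped).
 */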
3092 static int shmem_symlink(struct inode *dir, struct dentry *dentry, const char *symname)
3093 {
3094         int error;
3095         int len;
3096         struct inode *inode;
3097         struct page *page;
3098
3099         len = strlen(symname) + 1;
3100         if (len > PAGE_SIZE)
3101                 return -ENAMETOOLONG;
3102
3103         inode = shmem_get_inode(dir->i_sb, dir, S_IFLNK | 0777, 0,
3104                                 VM_NORESERVE);
3105         if (!inode)
3106                 return -ENOSPC;
3107
3108         error = security_inode_init_security(inode, dir, &dentry->d_name,
3109                                              shmem_initxattrs, NULL);
3110         if (error) {
3111                 if (error != -EOPNOTSUPP) {
3112                         iput(inode);
3113                         return error;
3114                 }
3115                 error = 0;
3116         }
3117
3118         inode->i_size = len-1;
3119         if (len <= SHORT_SYMLINK_LEN) {
3120                 inode->i_link = kmemdup(symname, len, GFP_KERNEL);
3121                 if (!inode->i_link) {
3122                         iput(inode);
3123                         return -ENOMEM;
3124                 }
3125                 inode->i_op = &shmem_short_symlink_operations;
3126         } else {
3127                 inode_nohighmem(inode);
3128                 error = shmem_getpage(inode, 0, &page, SGP_WRITE);
3129                 if (error) {
3130                         iput(inode);
3131                         return error;
3132                 }
3133                 inode->i_mapping->a_ops = &shmem_aops;
3134                 inode->i_op = &shmem_symlink_inode_operations;
3135                 memcpy(page_address(page), symname, len);
3136                 SetPageUptodate(page);
3137                 set_page_dirty(page);
3138                 unlock_page(page);
3139                 put_page(page);
3140         }
3141         dir->i_size += BOGO_DIRENT_SIZE;
3142         dir->i_ctime = dir->i_mtime = current_time(dir);
3143         d_instantiate(dentry, inode);
3144         dget(dentry);
3145         return 0;
3146 }
3147
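/*
 * ->get_link: during RCU-walk (dentry == NULL) we must not sleep, so only
 * an already-cached, uptodate page is used; otherwise return -ECHILD to
 * make the VFS retry in ref-walk mode.
 */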
3148 static void shmem_put_link(void *arg)
3149 {
3150         mark_page_accessed(arg);
3151         put_page(arg);
3152 }
3153
3154 static const char *shmem_get_link(struct dentry *dentry,
3155                                   struct inode *inode,
3156                                   struct delayed_call *done)
3157 {
3158         struct page *page = NULL;
3159         int error;
3160         if (!dentry) {
3161                 page = find_get_page(inode->i_mapping, 0);
3162                 if (!page)
3163                         return ERR_PTR(-ECHILD);
3164                 if (!PageUptodate(page)) {
3165                         put_page(page);
3166                         return ERR_PTR(-ECHILD);
3167                 }
3168         } else {
3169                 error = shmem_getpage(inode, 0, &page, SGP_READ);
3170                 if (error)
3171                         return ERR_PTR(error);
3172                 unlock_page(page);
3173         }
3174         set_delayed_call(done, shmem_put_link, page);
3175         return page_address(page);
3176 }
3177
3178 #ifdef CONFIG_TMPFS_XATTR
3179 /*
3180  * Superblocks without xattr inode operations may get some security.* xattr
3181  * support from the LSM "for free". As soon as we have any other xattrs
3182  * like ACLs, we also need to implement the security.* handlers at
3183  * filesystem level, though.
3184  */
3185
3186 /*
3187  * Callback for security_inode_init_security() for acquiring xattrs.
3188  */
3189 static int shmem_initxattrs(struct inode *inode,
3190                             const struct xattr *xattr_array,
3191                             void *fs_info)
3192 {
3193         struct shmem_inode_info *info = SHMEM_I(inode);
3194         const struct xattr *xattr;
3195         struct simple_xattr *new_xattr;
3196         size_t len;
3197
3198         for (xattr = xattr_array; xattr->name != NULL; xattr++) {
3199                 new_xattr = simple_xattr_alloc(xattr->value, xattr->value_len);
3200                 if (!new_xattr)
3201                         return -ENOMEM;
3202
3203                 len = strlen(xattr->name) + 1;
3204                 new_xattr->name = kmalloc(XATTR_SECURITY_PREFIX_LEN + len,
3205                                           GFP_KERNEL);
3206                 if (!new_xattr->name) {
3207                         kfree(new_xattr);
3208                         return -ENOMEM;
3209                 }
3210
3211                 memcpy(new_xattr->name, XATTR_SECURITY_PREFIX,
3212                        XATTR_SECURITY_PREFIX_LEN);
3213                 memcpy(new_xattr->name + XATTR_SECURITY_PREFIX_LEN,
3214                        xattr->name, len);
3215
3216                 simple_xattr_list_add(&info->xattrs, new_xattr);
3217         }
3218
3219         return 0;
3220 }
3221
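/*
 * Generic xattr get/set handlers: attributes are kept in the in-memory
 * simple_xattr list hanging off shmem_inode_info; there is no backing store.
 */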
3222 static int shmem_xattr_handler_get(const struct xattr_handler *handler,
3223                                    struct dentry *unused, struct inode *inode,
3224                                    const char *name, void *buffer, size_t size)
3225 {
3226         struct shmem_inode_info *info = SHMEM_I(inode);
3227
3228         name = xattr_full_name(handler, name);
3229         return simple_xattr_get(&info->xattrs, name, buffer, size);
3230 }
3231
3232 static int shmem_xattr_handler_set(const struct xattr_handler *handler,
3233                                    struct dentry *unused, struct inode *inode,
3234                                    const char *name, const void *value,
3235                                    size_t size, int flags)
3236 {
3237         struct shmem_inode_info *info = SHMEM_I(inode);
3238
3239         name = xattr_full_name(handler, name);
3240         return simple_xattr_set(&info->xattrs, name, value, size, flags);
3241 }
3242
3243 static const struct xattr_handler shmem_security_xattr_handler = {
3244         .prefix = XATTR_SECURITY_PREFIX,
3245         .get = shmem_xattr_handler_get,
3246         .set = shmem_xattr_handler_set,
3247 };
3248
3249 static const struct xattr_handler shmem_trusted_xattr_handler = {
3250         .prefix = XATTR_TRUSTED_PREFIX,
3251         .get = shmem_xattr_handler_get,
3252         .set = shmem_xattr_handler_set,
3253 };
3254
3255 static const struct xattr_handler *shmem_xattr_handlers[] = {
3256 #ifdef CONFIG_TMPFS_POSIX_ACL
3257         &posix_acl_access_xattr_handler,
3258         &posix_acl_default_xattr_handler,
3259 #endif
3260         &shmem_security_xattr_handler,
3261         &shmem_trusted_xattr_handler,
3262         NULL
3263 };
3264
3265 static ssize_t shmem_listxattr(struct dentry *dentry, char *buffer, size_t size)
3266 {
3267         struct shmem_inode_info *info = SHMEM_I(d_inode(dentry));
3268         return simple_xattr_list(d_inode(dentry), &info->xattrs, buffer, size);
3269 }
3270 #endif /* CONFIG_TMPFS_XATTR */
3271
3272 static const struct inode_operations shmem_short_symlink_operations = {
3273         .get_link       = simple_get_link,
3274 #ifdef CONFIG_TMPFS_XATTR
3275         .listxattr      = shmem_listxattr,
3276 #endif
3277 };
3278
3279 static const struct inode_operations shmem_symlink_inode_operations = {
3280         .get_link       = shmem_get_link,
3281 #ifdef CONFIG_TMPFS_XATTR
3282         .listxattr      = shmem_listxattr,
3283 #endif
3284 };
3285
3286 static struct dentry *shmem_get_parent(struct dentry *child)
3287 {
3288         return ERR_PTR(-ESTALE);
3289 }
3290
3291 static int shmem_match(struct inode *ino, void *vfh)
3292 {
3293         __u32 *fh = vfh;
3294         __u64 inum = fh[2];
3295         inum = (inum << 32) | fh[1];
3296         return ino->i_ino == inum && fh[0] == ino->i_generation;
3297 }
3298
3299 /* Find any alias of inode, but prefer a hashed alias */
3300 static struct dentry *shmem_find_alias(struct inode *inode)
3301 {
3302         struct dentry *alias = d_find_alias(inode);
3303
3304         return alias ?: d_find_any_alias(inode);
3305 }
3306
3307
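/*
 * File handle layout (see shmem_encode_fh() below):
 * fh[0] = i_generation, fh[1] = low 32 bits of i_ino, fh[2] = high 32 bits.
 * Inodes are hashed on i_ino + i_generation for the ilookup5() here.
 */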
3308 static struct dentry *shmem_fh_to_dentry(struct super_block *sb,
3309                 struct fid *fid, int fh_len, int fh_type)
3310 {
3311         struct inode *inode;
3312         struct dentry *dentry = NULL;
3313         u64 inum;
3314
3315         if (fh_len < 3)
3316                 return NULL;
3317
3318         inum = fid->raw[2];
3319         inum = (inum << 32) | fid->raw[1];
3320
3321         inode = ilookup5(sb, (unsigned long)(inum + fid->raw[0]),
3322                         shmem_match, fid->raw);
3323         if (inode) {
3324                 dentry = shmem_find_alias(inode);
3325                 iput(inode);
3326         }
3327
3328         return dentry;
3329 }
3330
3331 static int shmem_encode_fh(struct inode *inode, __u32 *fh, int *len,
3332                                 struct inode *parent)
3333 {
3334         if (*len < 3) {
3335                 *len = 3;
3336                 return FILEID_INVALID;
3337         }
3338
3339         if (inode_unhashed(inode)) {
3340                 /* Unfortunately insert_inode_hash is not idempotent,
3341                  * so as we hash inodes here rather than at creation
3342                  * time, we need a lock to ensure we only try
3343                  * to do it once
3344                  */
3345                 static DEFINE_SPINLOCK(lock);
3346                 spin_lock(&lock);
3347                 if (inode_unhashed(inode))
3348                         __insert_inode_hash(inode,
3349                                             inode->i_ino + inode->i_generation);
3350                 spin_unlock(&lock);
3351         }
3352
3353         fh[0] = inode->i_generation;
3354         fh[1] = inode->i_ino;
3355         fh[2] = ((__u64)inode->i_ino) >> 32;
3356
3357         *len = 3;
3358         return 1;
3359 }
3360
3361 static const struct export_operations shmem_export_ops = {
3362         .get_parent     = shmem_get_parent,
3363         .encode_fh      = shmem_encode_fh,
3364         .fh_to_dentry   = shmem_fh_to_dentry,
3365 };
3366
3367 enum shmem_param {
3368         Opt_gid,
3369         Opt_huge,
3370         Opt_mode,
3371         Opt_mpol,
3372         Opt_nr_blocks,
3373         Opt_nr_inodes,
3374         Opt_size,
3375         Opt_uid,
3376 };
3377
3378 static const struct fs_parameter_spec shmem_param_specs[] = {
3379         fsparam_u32   ("gid",           Opt_gid),
3380         fsparam_enum  ("huge",          Opt_huge),
3381         fsparam_u32oct("mode",          Opt_mode),
3382         fsparam_string("mpol",          Opt_mpol),
3383         fsparam_string("nr_blocks",     Opt_nr_blocks),
3384         fsparam_string("nr_inodes",     Opt_nr_inodes),
3385         fsparam_string("size",          Opt_size),
3386         fsparam_u32   ("uid",           Opt_uid),
3387         {}
3388 };
3389
3390 static const struct fs_parameter_enum shmem_param_enums[] = {
3391         { Opt_huge,     "never",        SHMEM_HUGE_NEVER },
3392         { Opt_huge,     "always",       SHMEM_HUGE_ALWAYS },
3393         { Opt_huge,     "within_size",  SHMEM_HUGE_WITHIN_SIZE },
3394         { Opt_huge,     "advise",       SHMEM_HUGE_ADVISE },
3395         {}
3396 };
3397
3398 const struct fs_parameter_description shmem_fs_parameters = {
3399         .name           = "tmpfs",
3400         .specs          = shmem_param_specs,
3401         .enums          = shmem_param_enums,
3402 };
3403
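/*
 * Parse one mount parameter under the new mount API.  Illustrative usage
 * (an assumed typical invocation, not defined in this file):
 *
 *	mount -t tmpfs -o size=50%,nr_inodes=10k,mode=1777 tmpfs /mnt
 *
 * "size" and "nr_inodes" go through memparse(), so k/m/g suffixes work,
 * and a trailing '%' on "size" means a percentage of total RAM.
 */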
3404 static int shmem_parse_one(struct fs_context *fc, struct fs_parameter *param)
3405 {
3406         struct shmem_options *ctx = fc->fs_private;
3407         struct fs_parse_result result;
3408         unsigned long long size;
3409         char *rest;
3410         int opt;
3411
3412         opt = fs_parse(fc, &shmem_fs_parameters, param, &result);
3413         if (opt < 0)
3414                 return opt;
3415
3416         switch (opt) {
3417         case Opt_size:
3418                 size = memparse(param->string, &rest);
3419                 if (*rest == '%') {
3420                         size <<= PAGE_SHIFT;
3421                         size *= totalram_pages();
3422                         do_div(size, 100);
3423                         rest++;
3424                 }
3425                 if (*rest)
3426                         goto bad_value;
3427                 ctx->blocks = DIV_ROUND_UP(size, PAGE_SIZE);
3428                 ctx->seen |= SHMEM_SEEN_BLOCKS;
3429                 break;
3430         case Opt_nr_blocks:
3431                 ctx->blocks = memparse(param->string, &rest);
3432                 if (*rest)
3433                         goto bad_value;
3434                 ctx->seen |= SHMEM_SEEN_BLOCKS;
3435                 break;
3436         case Opt_nr_inodes:
3437                 ctx->inodes = memparse(param->string, &rest);
3438                 if (*rest)
3439                         goto bad_value;
3440                 ctx->seen |= SHMEM_SEEN_INODES;
3441                 break;
3442         case Opt_mode:
3443                 ctx->mode = result.uint_32 & 07777;
3444                 break;
3445         case Opt_uid:
3446                 ctx->uid = make_kuid(current_user_ns(), result.uint_32);
3447                 if (!uid_valid(ctx->uid))
3448                         goto bad_value;
3449                 break;
3450         case Opt_gid:
3451                 ctx->gid = make_kgid(current_user_ns(), result.uint_32);
3452                 if (!gid_valid(ctx->gid))
3453                         goto bad_value;
3454                 break;
3455         case Opt_huge:
3456                 ctx->huge = result.uint_32;
3457                 if (ctx->huge != SHMEM_HUGE_NEVER &&
3458                     !(IS_ENABLED(CONFIG_TRANSPARENT_HUGE_PAGECACHE) &&
3459                       has_transparent_hugepage()))
3460                         goto unsupported_parameter;
3461                 ctx->seen |= SHMEM_SEEN_HUGE;
3462                 break;
3463         case Opt_mpol:
3464                 if (IS_ENABLED(CONFIG_NUMA)) {
3465                         mpol_put(ctx->mpol);
3466                         ctx->mpol = NULL;
3467                         if (mpol_parse_str(param->string, &ctx->mpol))
3468                                 goto bad_value;
3469                         break;
3470                 }
3471                 goto unsupported_parameter;
3472         }
3473         return 0;
3474
3475 unsupported_parameter:
3476         return invalf(fc, "tmpfs: Unsupported parameter '%s'", param->key);
3477 bad_value:
3478         return invalf(fc, "tmpfs: Bad value for '%s'", param->key);
3479 }
3480
3481 static int shmem_parse_options(struct fs_context *fc, void *data)
3482 {
3483         char *options = data;
3484
3485         while (options != NULL) {
3486                 char *this_char = options;
3487                 for (;;) {
3488                         /*
3489                          * NUL-terminate this option: unfortunately,
3490                          * mount options form a comma-separated list,
3491                          * but mpol's nodelist may also contain commas.
3492                          */
3493                         options = strchr(options, ',');
3494                         if (options == NULL)
3495                                 break;
3496                         options++;
3497                         if (!isdigit(*options)) {
3498                                 options[-1] = '\0';
3499                                 break;
3500                         }
3501                 }
3502                 if (*this_char) {
3503                         char *value = strchr(this_char,'=');
3504                         size_t len = 0;
3505                         int err;
3506
3507                         if (value) {
3508                                 *value++ = '\0';
3509                                 len = strlen(value);
3510                         }
3511                         err = vfs_parse_fs_string(fc, this_char, value, len);
3512                         if (err < 0)
3513                                 return err;
3514                 }
3515         }
3516         return 0;
3517 }
3518
3519 /*
3520  * Reconfigure a shmem filesystem.
3521  *
3522  * Note that we disallow change from limited->unlimited blocks/inodes while any
3523  * are in use; but we must separately disallow unlimited->limited, because in
3524  * that case we have no record of how much is already in use.
3525  */
3526 static int shmem_reconfigure(struct fs_context *fc)
3527 {
3528         struct shmem_options *ctx = fc->fs_private;
3529         struct shmem_sb_info *sbinfo = SHMEM_SB(fc->root->d_sb);
3530         unsigned long inodes;
3531         const char *err;
3532
3533         spin_lock(&sbinfo->stat_lock);
3534         inodes = sbinfo->max_inodes - sbinfo->free_inodes;
3535         if ((ctx->seen & SHMEM_SEEN_BLOCKS) && ctx->blocks) {
3536                 if (!sbinfo->max_blocks) {
3537                         err = "Cannot retroactively limit size";
3538                         goto out;
3539                 }
3540                 if (percpu_counter_compare(&sbinfo->used_blocks,
3541                                            ctx->blocks) > 0) {
3542                         err = "Too small a size for current use";
3543                         goto out;
3544                 }
3545         }
3546         if ((ctx->seen & SHMEM_SEEN_INODES) && ctx->inodes) {
3547                 if (!sbinfo->max_inodes) {
3548                         err = "Cannot retroactively limit inodes";
3549                         goto out;
3550                 }
3551                 if (ctx->inodes < inodes) {
3552                         err = "Too few inodes for current use";
3553                         goto out;
3554                 }
3555         }
3556
3557         if (ctx->seen & SHMEM_SEEN_HUGE)
3558                 sbinfo->huge = ctx->huge;
3559         if (ctx->seen & SHMEM_SEEN_BLOCKS)
3560                 sbinfo->max_blocks  = ctx->blocks;
3561         if (ctx->seen & SHMEM_SEEN_INODES) {
3562                 sbinfo->max_inodes  = ctx->inodes;
3563                 sbinfo->free_inodes = ctx->inodes - inodes;
3564         }
3565
3566         /*
3567          * Preserve previous mempolicy unless mpol remount option was specified.
3568          */
3569         if (ctx->mpol) {
3570                 mpol_put(sbinfo->mpol);
3571                 sbinfo->mpol = ctx->mpol;       /* transfers initial ref */
3572                 ctx->mpol = NULL;
3573         }
3574         spin_unlock(&sbinfo->stat_lock);
3575         return 0;
3576 out:
3577         spin_unlock(&sbinfo->stat_lock);
3578         return invalf(fc, "tmpfs: %s", err);
3579 }
3580
3581 static int shmem_show_options(struct seq_file *seq, struct dentry *root)
3582 {
3583         struct shmem_sb_info *sbinfo = SHMEM_SB(root->d_sb);
3584
3585         if (sbinfo->max_blocks != shmem_default_max_blocks())
3586                 seq_printf(seq, ",size=%luk",
3587                         sbinfo->max_blocks << (PAGE_SHIFT - 10));
3588         if (sbinfo->max_inodes != shmem_default_max_inodes())
3589                 seq_printf(seq, ",nr_inodes=%lu", sbinfo->max_inodes);
3590         if (sbinfo->mode != (0777 | S_ISVTX))
3591                 seq_printf(seq, ",mode=%03ho", sbinfo->mode);
3592         if (!uid_eq(sbinfo->uid, GLOBAL_ROOT_UID))
3593                 seq_printf(seq, ",uid=%u",
3594                                 from_kuid_munged(&init_user_ns, sbinfo->uid));
3595         if (!gid_eq(sbinfo->gid, GLOBAL_ROOT_GID))
3596                 seq_printf(seq, ",gid=%u",
3597                                 from_kgid_munged(&init_user_ns, sbinfo->gid));
3598 #ifdef CONFIG_TRANSPARENT_HUGE_PAGECACHE
3599         /* Rightly or wrongly, show huge mount option unmasked by shmem_huge */
3600         if (sbinfo->huge)
3601                 seq_printf(seq, ",huge=%s", shmem_format_huge(sbinfo->huge));
3602 #endif
3603         shmem_show_mpol(seq, sbinfo->mpol);
3604         return 0;
3605 }
3606
3607 #endif /* CONFIG_TMPFS */
3608
3609 static void shmem_put_super(struct super_block *sb)
3610 {
3611         struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
3612
3613         percpu_counter_destroy(&sbinfo->used_blocks);
3614         mpol_put(sbinfo->mpol);
3615         kfree(sbinfo);
3616         sb->s_fs_info = NULL;
3617 }
3618
3619 static int shmem_fill_super(struct super_block *sb, struct fs_context *fc)
3620 {
3621         struct shmem_options *ctx = fc->fs_private;
3622         struct inode *inode;
3623         struct shmem_sb_info *sbinfo;
3624         int err = -ENOMEM;
3625
3626         /* Round up to L1_CACHE_BYTES to resist false sharing */
3627         sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info),
3628                                 L1_CACHE_BYTES), GFP_KERNEL);
3629         if (!sbinfo)
3630                 return -ENOMEM;
3631
3632         sb->s_fs_info = sbinfo;
3633
3634 #ifdef CONFIG_TMPFS
3635         /*
3636          * By default we only allow half of the physical RAM per
3637          * tmpfs instance, limiting inodes to one per page of lowmem;
3638          * but the internal instance is left unlimited.
3639          */
3640         if (!(sb->s_flags & SB_KERNMOUNT)) {
3641                 if (!(ctx->seen & SHMEM_SEEN_BLOCKS))
3642                         ctx->blocks = shmem_default_max_blocks();
3643                 if (!(ctx->seen & SHMEM_SEEN_INODES))
3644                         ctx->inodes = shmem_default_max_inodes();
3645         } else {
3646                 sb->s_flags |= SB_NOUSER;
3647         }
3648         sb->s_export_op = &shmem_export_ops;
3649         sb->s_flags |= SB_NOSEC;
3650 #else
3651         sb->s_flags |= SB_NOUSER;
3652 #endif
3653         sbinfo->max_blocks = ctx->blocks;
3654         sbinfo->free_inodes = sbinfo->max_inodes = ctx->inodes;
3655         sbinfo->uid = ctx->uid;
3656         sbinfo->gid = ctx->gid;
3657         sbinfo->mode = ctx->mode;
3658         sbinfo->huge = ctx->huge;
3659         sbinfo->mpol = ctx->mpol;
3660         ctx->mpol = NULL;
3661
3662         spin_lock_init(&sbinfo->stat_lock);
3663         if (percpu_counter_init(&sbinfo->used_blocks, 0, GFP_KERNEL))
3664                 goto failed;
3665         spin_lock_init(&sbinfo->shrinklist_lock);
3666         INIT_LIST_HEAD(&sbinfo->shrinklist);
3667
3668         sb->s_maxbytes = MAX_LFS_FILESIZE;
3669         sb->s_blocksize = PAGE_SIZE;
3670         sb->s_blocksize_bits = PAGE_SHIFT;
3671         sb->s_magic = TMPFS_MAGIC;
3672         sb->s_op = &shmem_ops;
3673         sb->s_time_gran = 1;
3674 #ifdef CONFIG_TMPFS_XATTR
3675         sb->s_xattr = shmem_xattr_handlers;
3676 #endif
3677 #ifdef CONFIG_TMPFS_POSIX_ACL
3678         sb->s_flags |= SB_POSIXACL;
3679 #endif
3680         uuid_gen(&sb->s_uuid);
3681
3682         inode = shmem_get_inode(sb, NULL, S_IFDIR | sbinfo->mode, 0, VM_NORESERVE);
3683         if (!inode)
3684                 goto failed;
3685         inode->i_uid = sbinfo->uid;
3686         inode->i_gid = sbinfo->gid;
3687         sb->s_root = d_make_root(inode);
3688         if (!sb->s_root)
3689                 goto failed;
3690         return 0;
3691
3692 failed:
3693         shmem_put_super(sb);
3694         return err;
3695 }
3696
3697 static int shmem_get_tree(struct fs_context *fc)
3698 {
3699         return get_tree_nodev(fc, shmem_fill_super);
3700 }
3701
3702 static void shmem_free_fc(struct fs_context *fc)
3703 {
3704         struct shmem_options *ctx = fc->fs_private;
3705
3706         if (ctx) {
3707                 mpol_put(ctx->mpol);
3708                 kfree(ctx);
3709         }
3710 }
3711
3712 static const struct fs_context_operations shmem_fs_context_ops = {
3713         .free                   = shmem_free_fc,
3714         .get_tree               = shmem_get_tree,
3715 #ifdef CONFIG_TMPFS
3716         .parse_monolithic       = shmem_parse_options,
3717         .parse_param            = shmem_parse_one,
3718         .reconfigure            = shmem_reconfigure,
3719 #endif
3720 };
3721
3722 static struct kmem_cache *shmem_inode_cachep;
3723
3724 static struct inode *shmem_alloc_inode(struct super_block *sb)
3725 {
3726         struct shmem_inode_info *info;
3727         info = kmem_cache_alloc(shmem_inode_cachep, GFP_KERNEL);
3728         if (!info)
3729                 return NULL;
3730         return &info->vfs_inode;
3731 }
3732
3733 static void shmem_free_in_core_inode(struct inode *inode)
3734 {
3735         if (S_ISLNK(inode->i_mode))
3736                 kfree(inode->i_link);
3737         kmem_cache_free(shmem_inode_cachep, SHMEM_I(inode));
3738 }
3739
3740 static void shmem_destroy_inode(struct inode *inode)
3741 {
3742         if (S_ISREG(inode->i_mode))
3743                 mpol_free_shared_policy(&SHMEM_I(inode)->policy);
3744 }
3745
3746 static void shmem_init_inode(void *foo)
3747 {
3748         struct shmem_inode_info *info = foo;
3749         inode_init_once(&info->vfs_inode);
3750 }
3751
3752 static void shmem_init_inodecache(void)
3753 {
3754         shmem_inode_cachep = kmem_cache_create("shmem_inode_cache",
3755                                 sizeof(struct shmem_inode_info),
3756                                 0, SLAB_PANIC|SLAB_ACCOUNT, shmem_init_inode);
3757 }
3758
3759 static void shmem_destroy_inodecache(void)
3760 {
3761         kmem_cache_destroy(shmem_inode_cachep);
3762 }
3763
3764 static const struct address_space_operations shmem_aops = {
3765         .writepage      = shmem_writepage,
3766         .set_page_dirty = __set_page_dirty_no_writeback,
3767 #ifdef CONFIG_TMPFS
3768         .write_begin    = shmem_write_begin,
3769         .write_end      = shmem_write_end,
3770 #endif
3771 #ifdef CONFIG_MIGRATION
3772         .migratepage    = migrate_page,
3773 #endif
3774         .error_remove_page = generic_error_remove_page,
3775 };
3776
3777 static const struct file_operations shmem_file_operations = {
3778         .mmap           = shmem_mmap,
3779         .get_unmapped_area = shmem_get_unmapped_area,
3780 #ifdef CONFIG_TMPFS
3781         .llseek         = shmem_file_llseek,
3782         .read_iter      = shmem_file_read_iter,
3783         .write_iter     = generic_file_write_iter,
3784         .fsync          = noop_fsync,
3785         .splice_read    = generic_file_splice_read,
3786         .splice_write   = iter_file_splice_write,
3787         .fallocate      = shmem_fallocate,
3788 #endif
3789 };
3790
3791 static const struct inode_operations shmem_inode_operations = {
3792         .getattr        = shmem_getattr,
3793         .setattr        = shmem_setattr,
3794 #ifdef CONFIG_TMPFS_XATTR
3795         .listxattr      = shmem_listxattr,
3796         .set_acl        = simple_set_acl,
3797 #endif
3798 };
3799
3800 static const struct inode_operations shmem_dir_inode_operations = {
3801 #ifdef CONFIG_TMPFS
3802         .create         = shmem_create,
3803         .lookup         = simple_lookup,
3804         .link           = shmem_link,
3805         .unlink         = shmem_unlink,
3806         .symlink        = shmem_symlink,
3807         .mkdir          = shmem_mkdir,
3808         .rmdir          = shmem_rmdir,
3809         .mknod          = shmem_mknod,
3810         .rename         = shmem_rename2,
3811         .tmpfile        = shmem_tmpfile,
3812 #endif
3813 #ifdef CONFIG_TMPFS_XATTR
3814         .listxattr      = shmem_listxattr,
3815 #endif
3816 #ifdef CONFIG_TMPFS_POSIX_ACL
3817         .setattr        = shmem_setattr,
3818         .set_acl        = simple_set_acl,
3819 #endif
3820 };
3821
3822 static const struct inode_operations shmem_special_inode_operations = {
3823 #ifdef CONFIG_TMPFS_XATTR
3824         .listxattr      = shmem_listxattr,
3825 #endif
3826 #ifdef CONFIG_TMPFS_POSIX_ACL
3827         .setattr        = shmem_setattr,
3828         .set_acl        = simple_set_acl,
3829 #endif
3830 };
3831
3832 static const struct super_operations shmem_ops = {
3833         .alloc_inode    = shmem_alloc_inode,
3834         .free_inode     = shmem_free_in_core_inode,
3835         .destroy_inode  = shmem_destroy_inode,
3836 #ifdef CONFIG_TMPFS
3837         .statfs         = shmem_statfs,
3838         .show_options   = shmem_show_options,
3839 #endif
3840         .evict_inode    = shmem_evict_inode,
3841         .drop_inode     = generic_delete_inode,
3842         .put_super      = shmem_put_super,
3843 #ifdef CONFIG_TRANSPARENT_HUGE_PAGECACHE
3844         .nr_cached_objects      = shmem_unused_huge_count,
3845         .free_cached_objects    = shmem_unused_huge_scan,
3846 #endif
3847 };
3848
3849 static const struct vm_operations_struct shmem_vm_ops = {
3850         .fault          = shmem_fault,
3851         .map_pages      = filemap_map_pages,
3852 #ifdef CONFIG_NUMA
3853         .set_policy     = shmem_set_policy,
3854         .get_policy     = shmem_get_policy,
3855 #endif
3856 };
3857
3858 int shmem_init_fs_context(struct fs_context *fc)
3859 {
3860         struct shmem_options *ctx;
3861
3862         ctx = kzalloc(sizeof(struct shmem_options), GFP_KERNEL);
3863         if (!ctx)
3864                 return -ENOMEM;
3865
3866         ctx->mode = 0777 | S_ISVTX;
3867         ctx->uid = current_fsuid();
3868         ctx->gid = current_fsgid();
3869
3870         fc->fs_private = ctx;
3871         fc->ops = &shmem_fs_context_ops;
3872         return 0;
3873 }
3874
3875 static struct file_system_type shmem_fs_type = {
3876         .owner          = THIS_MODULE,
3877         .name           = "tmpfs",
3878         .init_fs_context = shmem_init_fs_context,
3879 #ifdef CONFIG_TMPFS
3880         .parameters     = &shmem_fs_parameters,
3881 #endif
3882         .kill_sb        = kill_litter_super,
3883         .fs_flags       = FS_USERNS_MOUNT,
3884 };
3885
3886 int __init shmem_init(void)
3887 {
3888         int error;
3889
3890         shmem_init_inodecache();
3891
3892         error = register_filesystem(&shmem_fs_type);
3893         if (error) {
3894                 pr_err("Could not register tmpfs\n");
3895                 goto out2;
3896         }
3897
3898         shm_mnt = kern_mount(&shmem_fs_type);
3899         if (IS_ERR(shm_mnt)) {
3900                 error = PTR_ERR(shm_mnt);
3901                 pr_err("Could not kern_mount tmpfs\n");
3902                 goto out1;
3903         }
3904
3905 #ifdef CONFIG_TRANSPARENT_HUGE_PAGECACHE
3906         if (has_transparent_hugepage() && shmem_huge > SHMEM_HUGE_DENY)
3907                 SHMEM_SB(shm_mnt->mnt_sb)->huge = shmem_huge;
3908         else
3909                 shmem_huge = 0; /* just in case it was patched */
3910 #endif
3911         return 0;
3912
3913 out1:
3914         unregister_filesystem(&shmem_fs_type);
3915 out2:
3916         shmem_destroy_inodecache();
3917         shm_mnt = ERR_PTR(error);
3918         return error;
3919 }
3920
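/*
 * sysfs knob for the global tmpfs huge-page policy.  The attribute is
 * defined here; it is typically exposed as
 * /sys/kernel/mm/transparent_hugepage/shmem_enabled (registration happens
 * outside this file).  Illustrative usage:
 *
 *	echo within_size > /sys/kernel/mm/transparent_hugepage/shmem_enabled
 */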
3921 #if defined(CONFIG_TRANSPARENT_HUGE_PAGECACHE) && defined(CONFIG_SYSFS)
3922 static ssize_t shmem_enabled_show(struct kobject *kobj,
3923                 struct kobj_attribute *attr, char *buf)
3924 {
3925         int values[] = {
3926                 SHMEM_HUGE_ALWAYS,
3927                 SHMEM_HUGE_WITHIN_SIZE,
3928                 SHMEM_HUGE_ADVISE,
3929                 SHMEM_HUGE_NEVER,
3930                 SHMEM_HUGE_DENY,
3931                 SHMEM_HUGE_FORCE,
3932         };
3933         int i, count;
3934
3935         for (i = 0, count = 0; i < ARRAY_SIZE(values); i++) {
3936                 const char *fmt = shmem_huge == values[i] ? "[%s] " : "%s ";
3937
3938                 count += sprintf(buf + count, fmt,
3939                                 shmem_format_huge(values[i]));
3940         }
3941         buf[count - 1] = '\n';
3942         return count;
3943 }
3944
3945 static ssize_t shmem_enabled_store(struct kobject *kobj,
3946                 struct kobj_attribute *attr, const char *buf, size_t count)
3947 {
3948         char tmp[16];
3949         int huge;
3950
3951         if (count + 1 > sizeof(tmp))
3952                 return -EINVAL;
3953         memcpy(tmp, buf, count);
3954         tmp[count] = '\0';
3955         if (count && tmp[count - 1] == '\n')
3956                 tmp[count - 1] = '\0';
3957
3958         huge = shmem_parse_huge(tmp);
3959         if (huge == -EINVAL)
3960                 return -EINVAL;
3961         if (!has_transparent_hugepage() &&
3962                         huge != SHMEM_HUGE_NEVER && huge != SHMEM_HUGE_DENY)
3963                 return -EINVAL;
3964
3965         shmem_huge = huge;
3966         if (shmem_huge > SHMEM_HUGE_DENY)
3967                 SHMEM_SB(shm_mnt->mnt_sb)->huge = shmem_huge;
3968         return count;
3969 }
3970
3971 struct kobj_attribute shmem_enabled_attr =
3972         __ATTR(shmem_enabled, 0644, shmem_enabled_show, shmem_enabled_store);
3973 #endif /* CONFIG_TRANSPARENT_HUGE_PAGECACHE && CONFIG_SYSFS */
3974
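/*
 * Decide whether this tmpfs VMA may be backed by huge pages, combining the
 * VMA flags, the global shmem_huge override and the per-superblock "huge="
 * mount policy.
 */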
3975 #ifdef CONFIG_TRANSPARENT_HUGE_PAGECACHE
3976 bool shmem_huge_enabled(struct vm_area_struct *vma)
3977 {
3978         struct inode *inode = file_inode(vma->vm_file);
3979         struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
3980         loff_t i_size;
3981         pgoff_t off;
3982
3983         if ((vma->vm_flags & VM_NOHUGEPAGE) ||
3984             test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags))
3985                 return false;
3986         if (shmem_huge == SHMEM_HUGE_FORCE)
3987                 return true;
3988         if (shmem_huge == SHMEM_HUGE_DENY)
3989                 return false;
3990         switch (sbinfo->huge) {
3991                 case SHMEM_HUGE_NEVER:
3992                         return false;
3993                 case SHMEM_HUGE_ALWAYS:
3994                         return true;
3995                 case SHMEM_HUGE_WITHIN_SIZE:
3996                         off = round_up(vma->vm_pgoff, HPAGE_PMD_NR);
3997                         i_size = round_up(i_size_read(inode), PAGE_SIZE);
3998                         if (i_size >= HPAGE_PMD_SIZE &&
3999                                         i_size >> PAGE_SHIFT >= off)
4000                                 return true;
4001                         /* fall through */
4002                 case SHMEM_HUGE_ADVISE:
4003                         /* TODO: implement fadvise() hints */
4004                         return (vma->vm_flags & VM_HUGEPAGE);
4005                 default:
4006                         VM_BUG_ON(1);
4007                         return false;
4008         }
4009 }
4010 #endif /* CONFIG_TRANSPARENT_HUGE_PAGECACHE */
4011
4012 #else /* !CONFIG_SHMEM */
4013
4014 /*
4015  * tiny-shmem: simple shmemfs and tmpfs using ramfs code
4016  *
4017  * This is intended for small systems where the benefits of the full
4018  * shmem code (swap-backed and resource-limited) are outweighed by
4019  * their complexity. On systems without swap this code should be
4020  * effectively equivalent, but much lighter weight.
4021  */
4022
4023 static struct file_system_type shmem_fs_type = {
4024         .name           = "tmpfs",
4025         .init_fs_context = ramfs_init_fs_context,
4026         .parameters     = &ramfs_fs_parameters,
4027         .kill_sb        = kill_litter_super,
4028         .fs_flags       = FS_USERNS_MOUNT,
4029 };
4030
4031 int __init shmem_init(void)
4032 {
4033         BUG_ON(register_filesystem(&shmem_fs_type) != 0);
4034
4035         shm_mnt = kern_mount(&shmem_fs_type);
4036         BUG_ON(IS_ERR(shm_mnt));
4037
4038         return 0;
4039 }
4040
4041 int shmem_unuse(unsigned int type, bool frontswap,
4042                 unsigned long *fs_pages_to_unuse)
4043 {
4044         return 0;
4045 }
4046
4047 int shmem_lock(struct file *file, int lock, struct user_struct *user)
4048 {
4049         return 0;
4050 }
4051
4052 void shmem_unlock_mapping(struct address_space *mapping)
4053 {
4054 }
4055
4056 #ifdef CONFIG_MMU
4057 unsigned long shmem_get_unmapped_area(struct file *file,
4058                                       unsigned long addr, unsigned long len,
4059                                       unsigned long pgoff, unsigned long flags)
4060 {
4061         return current->mm->get_unmapped_area(file, addr, len, pgoff, flags);
4062 }
4063 #endif
4064
4065 void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
4066 {
4067         truncate_inode_pages_range(inode->i_mapping, lstart, lend);
4068 }
4069 EXPORT_SYMBOL_GPL(shmem_truncate_range);
4070
4071 #define shmem_vm_ops                            generic_file_vm_ops
4072 #define shmem_file_operations                   ramfs_file_operations
4073 #define shmem_get_inode(sb, dir, mode, dev, flags)      ramfs_get_inode(sb, dir, mode, dev)
4074 #define shmem_acct_size(flags, size)            0
4075 #define shmem_unacct_size(flags, size)          do {} while (0)
4076
4077 #endif /* CONFIG_SHMEM */
4078
4079 /* common code */
4080
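/*
 * Common helper behind shmem_file_setup(), shmem_kernel_file_setup() and
 * shmem_file_setup_with_mnt(): create an unlinked file of @size on @mnt,
 * optionally marked S_PRIVATE so LSM checks on the inode are skipped.
 */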
4081 static struct file *__shmem_file_setup(struct vfsmount *mnt, const char *name, loff_t size,
4082                                        unsigned long flags, unsigned int i_flags)
4083 {
4084         struct inode *inode;
4085         struct file *res;
4086
4087         if (IS_ERR(mnt))
4088                 return ERR_CAST(mnt);
4089
4090         if (size < 0 || size > MAX_LFS_FILESIZE)
4091                 return ERR_PTR(-EINVAL);
4092
4093         if (shmem_acct_size(flags, size))
4094                 return ERR_PTR(-ENOMEM);
4095
4096         inode = shmem_get_inode(mnt->mnt_sb, NULL, S_IFREG | S_IRWXUGO, 0,
4097                                 flags);
4098         if (unlikely(!inode)) {
4099                 shmem_unacct_size(flags, size);
4100                 return ERR_PTR(-ENOSPC);
4101         }
4102         inode->i_flags |= i_flags;
4103         inode->i_size = size;
4104         clear_nlink(inode);     /* It is unlinked */
4105         res = ERR_PTR(ramfs_nommu_expand_for_mapping(inode, size));
4106         if (!IS_ERR(res))
4107                 res = alloc_file_pseudo(inode, mnt, name, O_RDWR,
4108                                 &shmem_file_operations);
4109         if (IS_ERR(res))
4110                 iput(inode);
4111         return res;
4112 }
4113
4114 /**
4115  * shmem_kernel_file_setup - get an unlinked file living in tmpfs which must be
4116  *      kernel internal.  There will be NO LSM permission checks against the
4117  *      underlying inode.  So users of this interface must do LSM checks at a
4118  *      higher layer.  The users are the big_key and shm implementations.  LSM
4119  *      checks are provided at the key or shm level rather than the inode.
4120  * @name: name for dentry (to be seen in /proc/<pid>/maps)
4121  * @size: size to be set for the file
4122  * @flags: VM_NORESERVE suppresses pre-accounting of the entire object size
4123  */
4124 struct file *shmem_kernel_file_setup(const char *name, loff_t size, unsigned long flags)
4125 {
4126         return __shmem_file_setup(shm_mnt, name, size, flags, S_PRIVATE);
4127 }
4128
4129 /**
4130  * shmem_file_setup - get an unlinked file living in tmpfs
4131  * @name: name for dentry (to be seen in /proc/<pid>/maps)
4132  * @size: size to be set for the file
4133  * @flags: VM_NORESERVE suppresses pre-accounting of the entire object size
4134  */
4135 struct file *shmem_file_setup(const char *name, loff_t size, unsigned long flags)
4136 {
4137         return __shmem_file_setup(shm_mnt, name, size, flags, 0);
4138 }
4139 EXPORT_SYMBOL_GPL(shmem_file_setup);
4140
4141 /**
4142  * shmem_file_setup_with_mnt - get an unlinked file living in tmpfs
4143  * @mnt: the tmpfs mount where the file will be created
4144  * @name: name for dentry (to be seen in /proc/<pid>/maps)
4145  * @size: size to be set for the file
4146  * @flags: VM_NORESERVE suppresses pre-accounting of the entire object size
4147  */
4148 struct file *shmem_file_setup_with_mnt(struct vfsmount *mnt, const char *name,
4149                                        loff_t size, unsigned long flags)
4150 {
4151         return __shmem_file_setup(mnt, name, size, flags, 0);
4152 }
4153 EXPORT_SYMBOL_GPL(shmem_file_setup_with_mnt);
4154
4155 /**
4156  * shmem_zero_setup - setup a shared anonymous mapping
4157  * @vma: the vma to be mmapped is prepared by do_mmap_pgoff
4158  */
4159 int shmem_zero_setup(struct vm_area_struct *vma)
4160 {
4161         struct file *file;
4162         loff_t size = vma->vm_end - vma->vm_start;
4163
4164         /*
4165          * Cloning a new file under mmap_sem leads to a lock ordering conflict
4166          * between XFS directory reading and selinux: since this file is only
4167          * accessible to the user through its mapping, use S_PRIVATE flag to
4168          * bypass file security, in the same way as shmem_kernel_file_setup().
4169          */
4170         file = shmem_kernel_file_setup("dev/zero", size, vma->vm_flags);
4171         if (IS_ERR(file))
4172                 return PTR_ERR(file);
4173
4174         if (vma->vm_file)
4175                 fput(vma->vm_file);
4176         vma->vm_file = file;
4177         vma->vm_ops = &shmem_vm_ops;
4178
4179         if (IS_ENABLED(CONFIG_TRANSPARENT_HUGE_PAGECACHE) &&
4180                         ((vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK) <
4181                         (vma->vm_end & HPAGE_PMD_MASK)) {
4182                 khugepaged_enter(vma, vma->vm_flags);
4183         }
4184
4185         return 0;
4186 }
4187
4188 /**
4189  * shmem_read_mapping_page_gfp - read into page cache, using specified page allocation flags.
4190  * @mapping:    the page's address_space
4191  * @index:      the page index
4192  * @gfp:        the page allocator flags to use if allocating
4193  *
4194  * This behaves as a tmpfs "read_cache_page_gfp(mapping, index, gfp)",
4195  * with any new page allocations done using the specified allocation flags.
4196  * But read_cache_page_gfp() uses the ->readpage() method: which does not
4197  * suit tmpfs, since it may have pages in swapcache, and needs to find those
4198  * for itself; although drivers/gpu/drm i915 and ttm rely upon this support.
4199  *
4200  * i915_gem_object_get_pages_gtt() mixes __GFP_NORETRY | __GFP_NOWARN in
4201  * with the mapping_gfp_mask(), to avoid OOMing the machine unnecessarily.
4202  */
4203 struct page *shmem_read_mapping_page_gfp(struct address_space *mapping,
4204                                          pgoff_t index, gfp_t gfp)
4205 {
4206 #ifdef CONFIG_SHMEM
4207         struct inode *inode = mapping->host;
4208         struct page *page;
4209         int error;
4210
4211         BUG_ON(mapping->a_ops != &shmem_aops);
4212         error = shmem_getpage_gfp(inode, index, &page, SGP_CACHE,
4213                                   gfp, NULL, NULL, NULL);
4214         if (error)
4215                 page = ERR_PTR(error);
4216         else
4217                 unlock_page(page);
4218         return page;
4219 #else
4220         /*
4221          * The tiny !SHMEM case uses ramfs without swap
4222          */
4223         return read_cache_page_gfp(mapping, index, gfp);
4224 #endif
4225 }
4226 EXPORT_SYMBOL_GPL(shmem_read_mapping_page_gfp);