/*
 * Resizable virtual memory filesystem for Linux.
 *
 * Copyright (C) 2000 Linus Torvalds.
 *		 2000 Transmeta Corp.
 *		 2000-2001 Christoph Rohland
 *		 2000-2001 SAP AG
 *		 2002 Red Hat Inc.
 * Copyright (C) 2002-2011 Hugh Dickins.
 * Copyright (C) 2011 Google Inc.
 * Copyright (C) 2002-2005 VERITAS Software Corporation.
 * Copyright (C) 2004 Andi Kleen, SuSE Labs
 *
 * Extended attribute support for tmpfs:
 * Copyright (c) 2004, Luke Kenneth Casson Leighton <lkcl@lkcl.net>
 * Copyright (c) 2004 Red Hat, Inc., James Morris <jmorris@redhat.com>
 *
 * tiny-shmem:
 * Copyright (c) 2004, 2008 Matt Mackall <mpm@selenic.com>
 *
 * This file is released under the GPL.
 */

#include <linux/fs.h>
#include <linux/init.h>
#include <linux/vfs.h>
#include <linux/mount.h>
#include <linux/ramfs.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/fileattr.h>
#include <linux/mm.h>
#include <linux/random.h>
#include <linux/sched/signal.h>
#include <linux/export.h>
#include <linux/swap.h>
#include <linux/uio.h>
#include <linux/hugetlb.h>
#include <linux/fs_parser.h>
#include <linux/swapfile.h>
#include <linux/iversion.h>
#include "swap.h"

static struct vfsmount *shm_mnt;

#ifdef CONFIG_SHMEM
/*
 * This virtual memory filesystem is heavily based on the ramfs. It
 * extends ramfs by the ability to use swap and honor resource limits
 * which makes it a completely usable filesystem.
 */

#include <linux/xattr.h>
#include <linux/exportfs.h>
#include <linux/posix_acl.h>
#include <linux/posix_acl_xattr.h>
#include <linux/mman.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/backing-dev.h>
#include <linux/shmem_fs.h>
#include <linux/writeback.h>
#include <linux/pagevec.h>
#include <linux/percpu_counter.h>
#include <linux/falloc.h>
#include <linux/splice.h>
#include <linux/security.h>
#include <linux/swapops.h>
#include <linux/mempolicy.h>
#include <linux/namei.h>
#include <linux/ctype.h>
#include <linux/migrate.h>
#include <linux/highmem.h>
#include <linux/seq_file.h>
#include <linux/magic.h>
#include <linux/syscalls.h>
#include <linux/fcntl.h>
#include <uapi/linux/memfd.h>
#include <linux/userfaultfd_k.h>
#include <linux/rmap.h>
#include <linux/uuid.h>

#include <linux/uaccess.h>

#include "internal.h"

#define BLOCKS_PER_PAGE  (PAGE_SIZE/512)
#define VM_ACCT(size)    (PAGE_ALIGN(size) >> PAGE_SHIFT)

/* Pretend that each entry is of this size in directory's i_size */
#define BOGO_DIRENT_SIZE 20

/* Symlink up to this size is kmalloc'ed instead of using a swappable page */
#define SHORT_SYMLINK_LEN 128

/*
 * shmem_fallocate communicates with shmem_fault or shmem_writepage via
 * inode->i_private (with i_rwsem making sure that it has only one user at
 * a time): we would prefer not to enlarge the shmem inode just for that.
 */
struct shmem_falloc {
	wait_queue_head_t *waitq; /* faults into hole wait for punch to end */
	pgoff_t start;		/* start of range currently being fallocated */
	pgoff_t next;		/* the next page offset to be fallocated */
	pgoff_t nr_falloced;	/* how many new pages have been fallocated */
	pgoff_t nr_unswapped;	/* how often writepage refused to swap out */
};

struct shmem_options {
	unsigned long long blocks;
	unsigned long long inodes;
	struct mempolicy *mpol;
	kuid_t uid;
	kgid_t gid;
	umode_t mode;
	bool full_inums;
	int huge;
	int seen;
#define SHMEM_SEEN_BLOCKS 1
#define SHMEM_SEEN_INODES 2
#define SHMEM_SEEN_HUGE 4
#define SHMEM_SEEN_INUMS 8
};

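/*
 * Default limits applied when tmpfs is mounted without explicit size=,
 * nr_blocks= or nr_inodes= options: at most half of RAM for data pages,
 * and an inode count additionally bounded by lowmem.
 */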
#ifdef CONFIG_TMPFS
static unsigned long shmem_default_max_blocks(void)
{
	return totalram_pages() / 2;
}

static unsigned long shmem_default_max_inodes(void)
{
	unsigned long nr_pages = totalram_pages();

	return min(nr_pages - totalhigh_pages(), nr_pages / 2);
}
#endif

static int shmem_swapin_folio(struct inode *inode, pgoff_t index,
			     struct folio **foliop, enum sgp_type sgp,
			     gfp_t gfp, struct vm_area_struct *vma,
			     vm_fault_t *fault_type);

static inline struct shmem_sb_info *SHMEM_SB(struct super_block *sb)
{
	return sb->s_fs_info;
}

/*
 * shmem_file_setup pre-accounts the whole fixed size of a VM object,
 * for shared memory and for shared anonymous (/dev/zero) mappings
 * (unless MAP_NORESERVE and sysctl_overcommit_memory <= 1),
 * consistent with the pre-accounting of private mappings ...
 */
static inline int shmem_acct_size(unsigned long flags, loff_t size)
{
	return (flags & VM_NORESERVE) ?
		0 : security_vm_enough_memory_mm(current->mm, VM_ACCT(size));
}

static inline void shmem_unacct_size(unsigned long flags, loff_t size)
{
	if (!(flags & VM_NORESERVE))
		vm_unacct_memory(VM_ACCT(size));
}

static inline int shmem_reacct_size(unsigned long flags,
		loff_t oldsize, loff_t newsize)
{
	if (!(flags & VM_NORESERVE)) {
		if (VM_ACCT(newsize) > VM_ACCT(oldsize))
			return security_vm_enough_memory_mm(current->mm,
					VM_ACCT(newsize) - VM_ACCT(oldsize));
		else if (VM_ACCT(newsize) < VM_ACCT(oldsize))
			vm_unacct_memory(VM_ACCT(oldsize) - VM_ACCT(newsize));
	}
	return 0;
}

/*
 * ... whereas tmpfs objects are accounted incrementally as
 * pages are allocated, in order to allow large sparse files.
 * shmem_get_folio reports shmem_acct_block failure as -ENOSPC not -ENOMEM,
 * so that a failure on a sparse tmpfs mapping will give SIGBUS not OOM.
 */
static inline int shmem_acct_block(unsigned long flags, long pages)
{
	if (!(flags & VM_NORESERVE))
		return 0;

	return security_vm_enough_memory_mm(current->mm,
			pages * VM_ACCT(PAGE_SIZE));
}

static inline void shmem_unacct_blocks(unsigned long flags, long pages)
{
	if (flags & VM_NORESERVE)
		vm_unacct_memory(pages * VM_ACCT(PAGE_SIZE));
}

static inline bool shmem_inode_acct_block(struct inode *inode, long pages)
{
	struct shmem_inode_info *info = SHMEM_I(inode);
	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);

	if (shmem_acct_block(info->flags, pages))
		return false;

	if (sbinfo->max_blocks) {
		if (percpu_counter_compare(&sbinfo->used_blocks,
					   sbinfo->max_blocks - pages) > 0)
			goto unacct;
		percpu_counter_add(&sbinfo->used_blocks, pages);
	}

	return true;

unacct:
	shmem_unacct_blocks(info->flags, pages);
	return false;
}

static inline void shmem_inode_unacct_blocks(struct inode *inode, long pages)
{
	struct shmem_inode_info *info = SHMEM_I(inode);
	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);

	if (sbinfo->max_blocks)
		percpu_counter_sub(&sbinfo->used_blocks, pages);
	shmem_unacct_blocks(info->flags, pages);
}

static const struct super_operations shmem_ops;
const struct address_space_operations shmem_aops;
static const struct file_operations shmem_file_operations;
static const struct inode_operations shmem_inode_operations;
static const struct inode_operations shmem_dir_inode_operations;
static const struct inode_operations shmem_special_inode_operations;
static const struct vm_operations_struct shmem_vm_ops;
static const struct vm_operations_struct shmem_anon_vm_ops;
static struct file_system_type shmem_fs_type;

bool vma_is_anon_shmem(struct vm_area_struct *vma)
{
	return vma->vm_ops == &shmem_anon_vm_ops;
}

bool vma_is_shmem(struct vm_area_struct *vma)
{
	return vma_is_anon_shmem(vma) || vma->vm_ops == &shmem_vm_ops;
}

static LIST_HEAD(shmem_swaplist);
static DEFINE_MUTEX(shmem_swaplist_mutex);

/*
 * shmem_reserve_inode() performs bookkeeping to reserve a shmem inode, and
 * produces a novel ino for the newly allocated inode.
 *
 * It may also be called when making a hard link to permit the space needed by
 * each dentry. However, in that case, no new inode number is needed since that
 * internally draws from another pool of inode numbers (currently global
 * get_next_ino()). This case is indicated by passing NULL as inop.
 */
#define SHMEM_INO_BATCH 1024
static int shmem_reserve_inode(struct super_block *sb, ino_t *inop)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
	ino_t ino;

	if (!(sb->s_flags & SB_KERNMOUNT)) {
		raw_spin_lock(&sbinfo->stat_lock);
		if (sbinfo->max_inodes) {
			if (!sbinfo->free_inodes) {
				raw_spin_unlock(&sbinfo->stat_lock);
				return -ENOSPC;
			}
			sbinfo->free_inodes--;
		}
		if (inop) {
			ino = sbinfo->next_ino++;
			if (unlikely(is_zero_ino(ino)))
				ino = sbinfo->next_ino++;
			if (unlikely(!sbinfo->full_inums &&
				     ino > UINT_MAX)) {
				/*
				 * Emulate get_next_ino uint wraparound for
				 * compatibility
				 */
				if (IS_ENABLED(CONFIG_64BIT))
					pr_warn("%s: inode number overflow on device %d, consider using inode64 mount option\n",
						__func__, MINOR(sb->s_dev));
				sbinfo->next_ino = 1;
				ino = sbinfo->next_ino++;
			}
			*inop = ino;
		}
		raw_spin_unlock(&sbinfo->stat_lock);
	} else if (inop) {
		/*
		 * __shmem_file_setup, one of our callers, is lock-free: it
		 * doesn't hold stat_lock in shmem_reserve_inode since
		 * max_inodes is always 0, and is called from potentially
		 * unknown contexts. As such, use a per-cpu batched allocator
		 * which doesn't require the per-sb stat_lock unless we are at
		 * the batch boundary.
		 *
		 * We don't need to worry about inode{32,64} since SB_KERNMOUNT
		 * shmem mounts are not exposed to userspace, so we don't need
		 * to worry about things like glibc compatibility.
		 */
		ino_t *next_ino;

		next_ino = per_cpu_ptr(sbinfo->ino_batch, get_cpu());
		ino = *next_ino;
		if (unlikely(ino % SHMEM_INO_BATCH == 0)) {
			raw_spin_lock(&sbinfo->stat_lock);
			ino = sbinfo->next_ino;
			sbinfo->next_ino += SHMEM_INO_BATCH;
			raw_spin_unlock(&sbinfo->stat_lock);
			if (unlikely(is_zero_ino(ino)))
				ino++;
		}
		*inop = ino;
		*next_ino = ++ino;
		put_cpu();
	}

	return 0;
}

static void shmem_free_inode(struct super_block *sb)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
	if (sbinfo->max_inodes) {
		raw_spin_lock(&sbinfo->stat_lock);
		sbinfo->free_inodes++;
		raw_spin_unlock(&sbinfo->stat_lock);
	}
}

/**
 * shmem_recalc_inode - recalculate the block usage of an inode
 * @inode: inode to recalc
 *
 * We have to calculate the free blocks since the mm can drop
 * undirtied hole pages behind our back.
 *
 * But normally   info->alloced == inode->i_mapping->nrpages + info->swapped
 * So mm freed is info->alloced - (inode->i_mapping->nrpages + info->swapped)
 *
 * It has to be called with the spinlock held.
 */
static void shmem_recalc_inode(struct inode *inode)
{
	struct shmem_inode_info *info = SHMEM_I(inode);
	long freed;

	freed = info->alloced - info->swapped - inode->i_mapping->nrpages;
	if (freed > 0) {
		info->alloced -= freed;
		inode->i_blocks -= freed * BLOCKS_PER_PAGE;
		shmem_inode_unacct_blocks(inode, freed);
	}
}

bool shmem_charge(struct inode *inode, long pages)
{
	struct shmem_inode_info *info = SHMEM_I(inode);
	unsigned long flags;

	if (!shmem_inode_acct_block(inode, pages))
		return false;

	/* nrpages adjustment first, then shmem_recalc_inode() when balanced */
	inode->i_mapping->nrpages += pages;

	spin_lock_irqsave(&info->lock, flags);
	info->alloced += pages;
	inode->i_blocks += pages * BLOCKS_PER_PAGE;
	shmem_recalc_inode(inode);
	spin_unlock_irqrestore(&info->lock, flags);

	return true;
}

void shmem_uncharge(struct inode *inode, long pages)
{
	struct shmem_inode_info *info = SHMEM_I(inode);
	unsigned long flags;

	/* nrpages adjustment done by __filemap_remove_folio() or caller */

	spin_lock_irqsave(&info->lock, flags);
	info->alloced -= pages;
	inode->i_blocks -= pages * BLOCKS_PER_PAGE;
	shmem_recalc_inode(inode);
	spin_unlock_irqrestore(&info->lock, flags);

	shmem_inode_unacct_blocks(inode, pages);
}

/*
 * Replace item expected in xarray by a new item, while holding xa_lock.
 */
static int shmem_replace_entry(struct address_space *mapping,
			pgoff_t index, void *expected, void *replacement)
{
	XA_STATE(xas, &mapping->i_pages, index);
	void *item;

	VM_BUG_ON(!expected);
	VM_BUG_ON(!replacement);
	item = xas_load(&xas);
	if (item != expected)
		return -ENOENT;
	xas_store(&xas, replacement);
	return 0;
}

/*
 * Sometimes, before we decide whether to proceed or to fail, we must check
 * that an entry was not already brought back from swap by a racing thread.
 *
 * Checking page is not enough: by the time a SwapCache page is locked, it
 * might be reused, and again be SwapCache, using the same swap as before.
 */
static bool shmem_confirm_swap(struct address_space *mapping,
			       pgoff_t index, swp_entry_t swap)
{
	return xa_load(&mapping->i_pages, index) == swp_to_radix_entry(swap);
}

/*
 * Definitions for "huge tmpfs": tmpfs mounted with the huge= option
 *
 * SHMEM_HUGE_NEVER:
 *	disables huge pages for the mount;
 * SHMEM_HUGE_ALWAYS:
 *	enables huge pages for the mount;
 * SHMEM_HUGE_WITHIN_SIZE:
 *	only allocate huge pages if the page will be fully within i_size,
 *	also respect fadvise()/madvise() hints;
 * SHMEM_HUGE_ADVISE:
 *	only allocate huge pages if requested with fadvise()/madvise();
 */

#define SHMEM_HUGE_NEVER	0
#define SHMEM_HUGE_ALWAYS	1
#define SHMEM_HUGE_WITHIN_SIZE	2
#define SHMEM_HUGE_ADVISE	3

/*
 * Special values.
 * Only can be set via /sys/kernel/mm/transparent_hugepage/shmem_enabled:
 *
 * SHMEM_HUGE_DENY:
 *	disables huge on shm_mnt and all mounts, for emergency use;
 * SHMEM_HUGE_FORCE:
 *	enables huge on shm_mnt and all mounts, w/o needing option, for testing;
 *
 */
#define SHMEM_HUGE_DENY		(-1)
#define SHMEM_HUGE_FORCE	(-2)

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/* ifdef here to avoid bloating shmem.o when not necessary */

static int shmem_huge __read_mostly = SHMEM_HUGE_NEVER;

bool shmem_is_huge(struct vm_area_struct *vma, struct inode *inode,
		   pgoff_t index, bool shmem_huge_force)
{
	loff_t i_size;

	if (!S_ISREG(inode->i_mode))
		return false;
	if (vma && ((vma->vm_flags & VM_NOHUGEPAGE) ||
	    test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags)))
		return false;
	if (shmem_huge == SHMEM_HUGE_DENY)
		return false;
	if (shmem_huge_force || shmem_huge == SHMEM_HUGE_FORCE)
		return true;

	switch (SHMEM_SB(inode->i_sb)->huge) {
	case SHMEM_HUGE_ALWAYS:
		return true;
	case SHMEM_HUGE_WITHIN_SIZE:
		index = round_up(index + 1, HPAGE_PMD_NR);
		i_size = round_up(i_size_read(inode), PAGE_SIZE);
		if (i_size >> PAGE_SHIFT >= index)
			return true;
		fallthrough;
	case SHMEM_HUGE_ADVISE:
		if (vma && (vma->vm_flags & VM_HUGEPAGE))
			return true;
		fallthrough;
	default:
		return false;
	}
}

#if defined(CONFIG_SYSFS)
static int shmem_parse_huge(const char *str)
{
	if (!strcmp(str, "never"))
		return SHMEM_HUGE_NEVER;
	if (!strcmp(str, "always"))
		return SHMEM_HUGE_ALWAYS;
	if (!strcmp(str, "within_size"))
		return SHMEM_HUGE_WITHIN_SIZE;
	if (!strcmp(str, "advise"))
		return SHMEM_HUGE_ADVISE;
	if (!strcmp(str, "deny"))
		return SHMEM_HUGE_DENY;
	if (!strcmp(str, "force"))
		return SHMEM_HUGE_FORCE;
	return -EINVAL;
}
#endif

#if defined(CONFIG_SYSFS) || defined(CONFIG_TMPFS)
static const char *shmem_format_huge(int huge)
{
	switch (huge) {
	case SHMEM_HUGE_NEVER:
		return "never";
	case SHMEM_HUGE_ALWAYS:
		return "always";
	case SHMEM_HUGE_WITHIN_SIZE:
		return "within_size";
	case SHMEM_HUGE_ADVISE:
		return "advise";
	case SHMEM_HUGE_DENY:
		return "deny";
	case SHMEM_HUGE_FORCE:
		return "force";
	default:
		VM_BUG_ON(1);
		return "bad_val";
	}
}
#endif

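/*
 * Walk sbinfo->shrinklist and try to split huge folios that extend beyond
 * i_size, so that the memory past EOF can be reclaimed; inodes that cannot
 * be handled right now are moved back onto the shrinklist for a later pass.
 */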
static unsigned long shmem_unused_huge_shrink(struct shmem_sb_info *sbinfo,
		struct shrink_control *sc, unsigned long nr_to_split)
{
	LIST_HEAD(list), *pos, *next;
	LIST_HEAD(to_remove);
	struct inode *inode;
	struct shmem_inode_info *info;
	struct folio *folio;
	unsigned long batch = sc ? sc->nr_to_scan : 128;
	int split = 0;

	if (list_empty(&sbinfo->shrinklist))
		return SHRINK_STOP;

	spin_lock(&sbinfo->shrinklist_lock);
	list_for_each_safe(pos, next, &sbinfo->shrinklist) {
		info = list_entry(pos, struct shmem_inode_info, shrinklist);

		/* pin the inode */
		inode = igrab(&info->vfs_inode);

		/* inode is about to be evicted */
		if (!inode) {
			list_del_init(&info->shrinklist);
			goto next;
		}

		/* Check if there's anything to gain */
		if (round_up(inode->i_size, PAGE_SIZE) ==
				round_up(inode->i_size, HPAGE_PMD_SIZE)) {
			list_move(&info->shrinklist, &to_remove);
			goto next;
		}

		list_move(&info->shrinklist, &list);
next:
		sbinfo->shrinklist_len--;
		if (!--batch)
			break;
	}
	spin_unlock(&sbinfo->shrinklist_lock);

	list_for_each_safe(pos, next, &to_remove) {
		info = list_entry(pos, struct shmem_inode_info, shrinklist);
		inode = &info->vfs_inode;
		list_del_init(&info->shrinklist);
		iput(inode);
	}

	list_for_each_safe(pos, next, &list) {
		int ret;
		pgoff_t index;

		info = list_entry(pos, struct shmem_inode_info, shrinklist);
		inode = &info->vfs_inode;

		if (nr_to_split && split >= nr_to_split)
			goto move_back;

		index = (inode->i_size & HPAGE_PMD_MASK) >> PAGE_SHIFT;
		folio = filemap_get_folio(inode->i_mapping, index);
		if (!folio)
			goto drop;

		/* No huge page at the end of the file: nothing to split */
		if (!folio_test_large(folio)) {
			folio_put(folio);
			goto drop;
		}

		/*
		 * Move the inode on the list back to shrinklist if we failed
		 * to lock the page at this time.
		 *
		 * Waiting for the lock may lead to deadlock in the
		 * reclaim path.
		 */
		if (!folio_trylock(folio)) {
			folio_put(folio);
			goto move_back;
		}

		ret = split_folio(folio);
		folio_unlock(folio);
		folio_put(folio);

		/* If split failed move the inode on the list back to shrinklist */
		if (ret)
			goto move_back;

		split++;
drop:
		list_del_init(&info->shrinklist);
		goto put;
move_back:
		/*
		 * Make sure the inode is either on the global list or deleted
		 * from any local list before iput() since it could be deleted
		 * in another thread once we put the inode (then the local list
		 * is corrupted).
		 */
		spin_lock(&sbinfo->shrinklist_lock);
		list_move(&info->shrinklist, &sbinfo->shrinklist);
		sbinfo->shrinklist_len++;
		spin_unlock(&sbinfo->shrinklist_lock);
put:
		iput(inode);
	}

	return split;
}

static long shmem_unused_huge_scan(struct super_block *sb,
		struct shrink_control *sc)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);

	if (!READ_ONCE(sbinfo->shrinklist_len))
		return SHRINK_STOP;

	return shmem_unused_huge_shrink(sbinfo, sc, 0);
}

static long shmem_unused_huge_count(struct super_block *sb,
		struct shrink_control *sc)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
	return READ_ONCE(sbinfo->shrinklist_len);
}
#else /* !CONFIG_TRANSPARENT_HUGEPAGE */

#define shmem_huge SHMEM_HUGE_DENY

bool shmem_is_huge(struct vm_area_struct *vma, struct inode *inode,
		   pgoff_t index, bool shmem_huge_force)
{
	return false;
}

static unsigned long shmem_unused_huge_shrink(struct shmem_sb_info *sbinfo,
		struct shrink_control *sc, unsigned long nr_to_split)
{
	return 0;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

/*
 * Like filemap_add_folio, but error if expected item has gone.
 */
static int shmem_add_to_page_cache(struct folio *folio,
				   struct address_space *mapping,
				   pgoff_t index, void *expected, gfp_t gfp,
				   struct mm_struct *charge_mm)
{
	XA_STATE_ORDER(xas, &mapping->i_pages, index, folio_order(folio));
	long nr = folio_nr_pages(folio);
	int error;

	VM_BUG_ON_FOLIO(index != round_down(index, nr), folio);
	VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
	VM_BUG_ON_FOLIO(!folio_test_swapbacked(folio), folio);
	VM_BUG_ON(expected && folio_test_large(folio));

	folio_ref_add(folio, nr);
	folio->mapping = mapping;
	folio->index = index;

	if (!folio_test_swapcache(folio)) {
		error = mem_cgroup_charge(folio, charge_mm, gfp);
		if (error) {
			if (folio_test_pmd_mappable(folio)) {
				count_vm_event(THP_FILE_FALLBACK);
				count_vm_event(THP_FILE_FALLBACK_CHARGE);
			}
			goto error;
		}
	}
	folio_throttle_swaprate(folio, gfp);

	do {
		xas_lock_irq(&xas);
		if (expected != xas_find_conflict(&xas)) {
			xas_set_err(&xas, -EEXIST);
			goto unlock;
		}
		if (expected && xas_find_conflict(&xas)) {
			xas_set_err(&xas, -EEXIST);
			goto unlock;
		}
		xas_store(&xas, folio);
		if (xas_error(&xas))
			goto unlock;
		if (folio_test_pmd_mappable(folio)) {
			count_vm_event(THP_FILE_ALLOC);
			__lruvec_stat_mod_folio(folio, NR_SHMEM_THPS, nr);
		}
		mapping->nrpages += nr;
		__lruvec_stat_mod_folio(folio, NR_FILE_PAGES, nr);
		__lruvec_stat_mod_folio(folio, NR_SHMEM, nr);
unlock:
		xas_unlock_irq(&xas);
	} while (xas_nomem(&xas, gfp));

	if (xas_error(&xas)) {
		error = xas_error(&xas);
		goto error;
	}

	return 0;
error:
	folio->mapping = NULL;
	folio_ref_sub(folio, nr);
	return error;
}

/*
 * Like delete_from_page_cache, but substitutes swap for @folio.
 */
static void shmem_delete_from_page_cache(struct folio *folio, void *radswap)
{
	struct address_space *mapping = folio->mapping;
	long nr = folio_nr_pages(folio);
	int error;

	xa_lock_irq(&mapping->i_pages);
	error = shmem_replace_entry(mapping, folio->index, folio, radswap);
	folio->mapping = NULL;
	mapping->nrpages -= nr;
	__lruvec_stat_mod_folio(folio, NR_FILE_PAGES, -nr);
	__lruvec_stat_mod_folio(folio, NR_SHMEM, -nr);
	xa_unlock_irq(&mapping->i_pages);
	folio_put(folio);
	BUG_ON(error);
}

/*
 * Remove swap entry from page cache, free the swap and its page cache.
 */
static int shmem_free_swap(struct address_space *mapping,
			   pgoff_t index, void *radswap)
{
	void *old;

	old = xa_cmpxchg_irq(&mapping->i_pages, index, radswap, NULL, 0);
	if (old != radswap)
		return -ENOENT;
	free_swap_and_cache(radix_to_swp_entry(radswap));
	return 0;
}

/*
 * Determine (in bytes) how many of the shmem object's pages mapped by the
 * given offsets are swapped out.
 *
 * This is safe to call without i_rwsem or the i_pages lock thanks to RCU,
 * as long as the inode doesn't go away and racy results are not a problem.
 */
unsigned long shmem_partial_swap_usage(struct address_space *mapping,
						pgoff_t start, pgoff_t end)
{
	XA_STATE(xas, &mapping->i_pages, start);
	struct page *page;
	unsigned long swapped = 0;

	rcu_read_lock();
	xas_for_each(&xas, page, end - 1) {
		if (xas_retry(&xas, page))
			continue;
		if (xa_is_value(page))
			swapped++;

		if (need_resched()) {
			xas_pause(&xas);
			cond_resched_rcu();
		}
	}

	rcu_read_unlock();

	return swapped << PAGE_SHIFT;
}

/*
 * Determine (in bytes) how many of the shmem object's pages mapped by the
 * given vma is swapped out.
 *
 * This is safe to call without i_rwsem or the i_pages lock thanks to RCU,
 * as long as the inode doesn't go away and racy results are not a problem.
 */
unsigned long shmem_swap_usage(struct vm_area_struct *vma)
{
	struct inode *inode = file_inode(vma->vm_file);
	struct shmem_inode_info *info = SHMEM_I(inode);
	struct address_space *mapping = inode->i_mapping;
	unsigned long swapped;

	/* Be careful as we don't hold info->lock */
	swapped = READ_ONCE(info->swapped);

	/*
	 * The easier cases are when the shmem object has nothing in swap, or
	 * the vma maps it whole. Then we can simply use the stats that we
	 * already track.
	 */
	if (!swapped)
		return 0;

	if (!vma->vm_pgoff && vma->vm_end - vma->vm_start >= inode->i_size)
		return swapped << PAGE_SHIFT;

	/* Here comes the more involved part */
	return shmem_partial_swap_usage(mapping, vma->vm_pgoff,
					vma->vm_pgoff + vma_pages(vma));
}

/*
 * SysV IPC SHM_UNLOCK restore Unevictable pages to their evictable lists.
 */
void shmem_unlock_mapping(struct address_space *mapping)
{
	struct folio_batch fbatch;
	pgoff_t index = 0;

	folio_batch_init(&fbatch);
	/*
	 * Minor point, but we might as well stop if someone else SHM_LOCKs it.
	 */
	while (!mapping_unevictable(mapping) &&
	       filemap_get_folios(mapping, &index, ~0UL, &fbatch)) {
		check_move_unevictable_folios(&fbatch);
		folio_batch_release(&fbatch);
		cond_resched();
	}
}

static struct folio *shmem_get_partial_folio(struct inode *inode, pgoff_t index)
{
	struct folio *folio;

	/*
	 * At first avoid shmem_get_folio(,,,SGP_READ): that fails
	 * beyond i_size, and reports fallocated pages as holes.
	 */
	folio = __filemap_get_folio(inode->i_mapping, index,
					FGP_ENTRY | FGP_LOCK, 0);
	if (!xa_is_value(folio))
		return folio;
	/*
	 * But read a page back from swap if any of it is within i_size
	 * (although in some cases this is just a waste of time).
	 */
	folio = NULL;
	shmem_get_folio(inode, index, &folio, SGP_READ);
	return folio;
}

/*
 * Remove range of pages and swap entries from page cache, and free them.
 * If !unfalloc, truncate or punch hole; if unfalloc, undo failed fallocate.
 */
static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
								 bool unfalloc)
{
	struct address_space *mapping = inode->i_mapping;
	struct shmem_inode_info *info = SHMEM_I(inode);
	pgoff_t start = (lstart + PAGE_SIZE - 1) >> PAGE_SHIFT;
	pgoff_t end = (lend + 1) >> PAGE_SHIFT;
	struct folio_batch fbatch;
	pgoff_t indices[PAGEVEC_SIZE];
	struct folio *folio;
	bool same_folio;
	long nr_swaps_freed = 0;
	pgoff_t index;
	int i;

	if (lend == -1)
		end = -1;	/* unsigned, so actually very big */

	if (info->fallocend > start && info->fallocend <= end && !unfalloc)
		info->fallocend = start;

	folio_batch_init(&fbatch);
	index = start;
	while (index < end && find_lock_entries(mapping, &index, end - 1,
			&fbatch, indices)) {
		for (i = 0; i < folio_batch_count(&fbatch); i++) {
			folio = fbatch.folios[i];

			if (xa_is_value(folio)) {
				if (unfalloc)
					continue;
				nr_swaps_freed += !shmem_free_swap(mapping,
							indices[i], folio);
				continue;
			}

			if (!unfalloc || !folio_test_uptodate(folio))
				truncate_inode_folio(mapping, folio);
			folio_unlock(folio);
		}
		folio_batch_remove_exceptionals(&fbatch);
		folio_batch_release(&fbatch);
		cond_resched();
	}

	/*
	 * When undoing a failed fallocate, we want none of the partial folio
	 * zeroing and splitting below, but shall want to truncate the whole
	 * folio when !uptodate indicates that it was added by this fallocate,
	 * even when [lstart, lend] covers only a part of the folio.
	 */
	if (unfalloc)
		goto whole_folios;

	same_folio = (lstart >> PAGE_SHIFT) == (lend >> PAGE_SHIFT);
	folio = shmem_get_partial_folio(inode, lstart >> PAGE_SHIFT);
	if (folio) {
		same_folio = lend < folio_pos(folio) + folio_size(folio);
		folio_mark_dirty(folio);
		if (!truncate_inode_partial_folio(folio, lstart, lend)) {
			start = folio->index + folio_nr_pages(folio);
			if (same_folio)
				end = folio->index;
		}
		folio_unlock(folio);
		folio_put(folio);
		folio = NULL;
	}

	if (!same_folio)
		folio = shmem_get_partial_folio(inode, lend >> PAGE_SHIFT);
	if (folio) {
		folio_mark_dirty(folio);
		if (!truncate_inode_partial_folio(folio, lstart, lend))
			end = folio->index;
		folio_unlock(folio);
		folio_put(folio);
	}

whole_folios:

	index = start;
	while (index < end) {
		cond_resched();

		if (!find_get_entries(mapping, &index, end - 1, &fbatch,
				indices)) {
			/* If all gone or hole-punch or unfalloc, we're done */
			if (index == start || end != -1)
				break;
			/* But if truncating, restart to make sure all gone */
			index = start;
			continue;
		}
		for (i = 0; i < folio_batch_count(&fbatch); i++) {
			folio = fbatch.folios[i];

			if (xa_is_value(folio)) {
				if (unfalloc)
					continue;
				if (shmem_free_swap(mapping, indices[i], folio)) {
					/* Swap was replaced by page: retry */
					index = indices[i];
					break;
				}
				nr_swaps_freed++;
				continue;
			}

			folio_lock(folio);

			if (!unfalloc || !folio_test_uptodate(folio)) {
				if (folio_mapping(folio) != mapping) {
					/* Page was replaced by swap: retry */
					folio_unlock(folio);
					index = indices[i];
					break;
				}
				VM_BUG_ON_FOLIO(folio_test_writeback(folio),
						folio);
				truncate_inode_folio(mapping, folio);
			}
			folio_unlock(folio);
		}
		folio_batch_remove_exceptionals(&fbatch);
		folio_batch_release(&fbatch);
	}

	spin_lock_irq(&info->lock);
	info->swapped -= nr_swaps_freed;
	shmem_recalc_inode(inode);
	spin_unlock_irq(&info->lock);
}

void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
{
	shmem_undo_range(inode, lstart, lend, false);
	inode->i_ctime = inode->i_mtime = current_time(inode);
	inode_inc_iversion(inode);
}
EXPORT_SYMBOL_GPL(shmem_truncate_range);

static int shmem_getattr(struct user_namespace *mnt_userns,
			 const struct path *path, struct kstat *stat,
			 u32 request_mask, unsigned int query_flags)
{
	struct inode *inode = path->dentry->d_inode;
	struct shmem_inode_info *info = SHMEM_I(inode);

	if (info->alloced - info->swapped != inode->i_mapping->nrpages) {
		spin_lock_irq(&info->lock);
		shmem_recalc_inode(inode);
		spin_unlock_irq(&info->lock);
	}
	if (info->fsflags & FS_APPEND_FL)
		stat->attributes |= STATX_ATTR_APPEND;
	if (info->fsflags & FS_IMMUTABLE_FL)
		stat->attributes |= STATX_ATTR_IMMUTABLE;
	if (info->fsflags & FS_NODUMP_FL)
		stat->attributes |= STATX_ATTR_NODUMP;
	stat->attributes_mask |= (STATX_ATTR_APPEND |
			STATX_ATTR_IMMUTABLE |
			STATX_ATTR_NODUMP);
	generic_fillattr(&init_user_ns, inode, stat);

	if (shmem_is_huge(NULL, inode, 0, false))
		stat->blksize = HPAGE_PMD_SIZE;

	if (request_mask & STATX_BTIME) {
		stat->result_mask |= STATX_BTIME;
		stat->btime.tv_sec = info->i_crtime.tv_sec;
		stat->btime.tv_nsec = info->i_crtime.tv_nsec;
	}

	return 0;
}

static int shmem_setattr(struct user_namespace *mnt_userns,
			 struct dentry *dentry, struct iattr *attr)
{
	struct inode *inode = d_inode(dentry);
	struct shmem_inode_info *info = SHMEM_I(inode);
	int error;
	bool update_mtime = false;
	bool update_ctime = true;

	error = setattr_prepare(&init_user_ns, dentry, attr);
	if (error)
		return error;

	if (S_ISREG(inode->i_mode) && (attr->ia_valid & ATTR_SIZE)) {
		loff_t oldsize = inode->i_size;
		loff_t newsize = attr->ia_size;

		/* protected by i_rwsem */
		if ((newsize < oldsize && (info->seals & F_SEAL_SHRINK)) ||
		    (newsize > oldsize && (info->seals & F_SEAL_GROW)))
			return -EPERM;

		if (newsize != oldsize) {
			error = shmem_reacct_size(SHMEM_I(inode)->flags,
					oldsize, newsize);
			if (error)
				return error;
			i_size_write(inode, newsize);
			update_mtime = true;
		} else {
			update_ctime = false;
		}
		if (newsize <= oldsize) {
			loff_t holebegin = round_up(newsize, PAGE_SIZE);
			if (oldsize > holebegin)
				unmap_mapping_range(inode->i_mapping,
							holebegin, 0, 1);
			if (info->alloced)
				shmem_truncate_range(inode,
							newsize, (loff_t)-1);
			/* unmap again to remove racily COWed private pages */
			if (oldsize > holebegin)
				unmap_mapping_range(inode->i_mapping,
							holebegin, 0, 1);
		}
	}

	setattr_copy(&init_user_ns, inode, attr);
	if (attr->ia_valid & ATTR_MODE)
		error = posix_acl_chmod(&init_user_ns, dentry, inode->i_mode);
	if (!error && update_ctime) {
		inode->i_ctime = current_time(inode);
		if (update_mtime)
			inode->i_mtime = inode->i_ctime;
		inode_inc_iversion(inode);
	}
	return error;
}

static void shmem_evict_inode(struct inode *inode)
{
	struct shmem_inode_info *info = SHMEM_I(inode);
	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);

	if (shmem_mapping(inode->i_mapping)) {
		shmem_unacct_size(info->flags, inode->i_size);
		inode->i_size = 0;
		mapping_set_exiting(inode->i_mapping);
		shmem_truncate_range(inode, 0, (loff_t)-1);
		if (!list_empty(&info->shrinklist)) {
			spin_lock(&sbinfo->shrinklist_lock);
			if (!list_empty(&info->shrinklist)) {
				list_del_init(&info->shrinklist);
				sbinfo->shrinklist_len--;
			}
			spin_unlock(&sbinfo->shrinklist_lock);
		}
		while (!list_empty(&info->swaplist)) {
			/* Wait while shmem_unuse() is scanning this inode... */
			wait_var_event(&info->stop_eviction,
				       !atomic_read(&info->stop_eviction));
			mutex_lock(&shmem_swaplist_mutex);
			/* ...but beware of the race if we peeked too early */
			if (!atomic_read(&info->stop_eviction))
				list_del_init(&info->swaplist);
			mutex_unlock(&shmem_swaplist_mutex);
		}
	}

	simple_xattrs_free(&info->xattrs);
	WARN_ON(inode->i_blocks);
	shmem_free_inode(inode->i_sb);
	clear_inode(inode);
}

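/*
 * Scan the mapping for swap entries that belong to swap device 'type',
 * collecting up to one batch of them (and their indices) for
 * shmem_unuse_inode() to bring back into the page cache.
 */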
static int shmem_find_swap_entries(struct address_space *mapping,
				   pgoff_t start, struct folio_batch *fbatch,
				   pgoff_t *indices, unsigned int type)
{
	XA_STATE(xas, &mapping->i_pages, start);
	struct folio *folio;
	swp_entry_t entry;

	rcu_read_lock();
	xas_for_each(&xas, folio, ULONG_MAX) {
		if (xas_retry(&xas, folio))
			continue;

		if (!xa_is_value(folio))
			continue;

		entry = radix_to_swp_entry(folio);
		/*
		 * swapin error entries can be found in the mapping. But they're
		 * deliberately ignored here as we've done everything we can do.
		 */
		if (swp_type(entry) != type)
			continue;

		indices[folio_batch_count(fbatch)] = xas.xa_index;
		if (!folio_batch_add(fbatch, folio))
			break;

		if (need_resched()) {
			xas_pause(&xas);
			cond_resched_rcu();
		}
	}
	rcu_read_unlock();

	return xas.xa_index;
}

/*
 * Move the swapped pages for an inode to page cache. Returns the count
 * of pages swapped in, or the error in case of failure.
 */
static int shmem_unuse_swap_entries(struct inode *inode,
		struct folio_batch *fbatch, pgoff_t *indices)
{
	int i = 0;
	int ret = 0;
	int error = 0;
	struct address_space *mapping = inode->i_mapping;

	for (i = 0; i < folio_batch_count(fbatch); i++) {
		struct folio *folio = fbatch->folios[i];

		if (!xa_is_value(folio))
			continue;
		error = shmem_swapin_folio(inode, indices[i],
					  &folio, SGP_CACHE,
					  mapping_gfp_mask(mapping),
					  NULL, NULL);
		if (error == 0) {
			folio_unlock(folio);
			folio_put(folio);
			ret++;
		}
		if (error == -ENOMEM)
			break;
		error = 0;
	}
	return error ? error : ret;
}

/*
 * If swap found in inode, free it and move page from swapcache to filecache.
 */
static int shmem_unuse_inode(struct inode *inode, unsigned int type)
{
	struct address_space *mapping = inode->i_mapping;
	pgoff_t start = 0;
	struct folio_batch fbatch;
	pgoff_t indices[PAGEVEC_SIZE];
	int ret = 0;

	do {
		folio_batch_init(&fbatch);
		shmem_find_swap_entries(mapping, start, &fbatch, indices, type);
		if (folio_batch_count(&fbatch) == 0) {
			ret = 0;
			break;
		}

		ret = shmem_unuse_swap_entries(inode, &fbatch, indices);
		if (ret < 0)
			break;

		start = indices[folio_batch_count(&fbatch) - 1];
	} while (true);

	return ret;
}

/*
 * Read all the shared memory data that resides in the swap
 * device 'type' back into memory, so the swap device can be
 * unused.
 */
int shmem_unuse(unsigned int type)
{
	struct shmem_inode_info *info, *next;
	int error = 0;

	if (list_empty(&shmem_swaplist))
		return 0;

	mutex_lock(&shmem_swaplist_mutex);
	list_for_each_entry_safe(info, next, &shmem_swaplist, swaplist) {
		if (!info->swapped) {
			list_del_init(&info->swaplist);
			continue;
		}
		/*
		 * Drop the swaplist mutex while searching the inode for swap;
		 * but before doing so, make sure shmem_evict_inode() will not
		 * remove placeholder inode from swaplist, nor let it be freed
		 * (igrab() would protect from unlink, but not from unmount).
		 */
		atomic_inc(&info->stop_eviction);
		mutex_unlock(&shmem_swaplist_mutex);

		error = shmem_unuse_inode(&info->vfs_inode, type);
		cond_resched();

		mutex_lock(&shmem_swaplist_mutex);
		next = list_next_entry(info, swaplist);
		if (!info->swapped)
			list_del_init(&info->swaplist);
		if (atomic_dec_and_test(&info->stop_eviction))
			wake_up_var(&info->stop_eviction);
		if (error)
			break;
	}
	mutex_unlock(&shmem_swaplist_mutex);

	return error;
}

/*
 * Move the page from the page cache to the swap cache.
 */
static int shmem_writepage(struct page *page, struct writeback_control *wbc)
{
	struct folio *folio = page_folio(page);
	struct shmem_inode_info *info;
	struct address_space *mapping;
	struct inode *inode;
	swp_entry_t swap;
	pgoff_t index;

	/*
	 * If /sys/kernel/mm/transparent_hugepage/shmem_enabled is "always" or
	 * "force", drivers/gpu/drm/i915/gem/i915_gem_shmem.c gets huge pages,
	 * and its shmem_writeback() needs them to be split when swapping.
	 */
	if (folio_test_large(folio)) {
		/* Ensure the subpages are still dirty */
		folio_test_set_dirty(folio);
		if (split_huge_page(page) < 0)
			goto redirty;
		folio = page_folio(page);
		folio_clear_dirty(folio);
	}

	BUG_ON(!folio_test_locked(folio));
	mapping = folio->mapping;
	index = folio->index;
	inode = mapping->host;
	info = SHMEM_I(inode);
	if (info->flags & VM_LOCKED)
		goto redirty;
	if (!total_swap_pages)
		goto redirty;

	/*
	 * Our capabilities prevent regular writeback or sync from ever calling
	 * shmem_writepage; but a stacking filesystem might use ->writepage of
	 * its underlying filesystem, in which case tmpfs should write out to
	 * swap only in response to memory pressure, and not for the writeback
	 * threads or sync.
	 */
	if (!wbc->for_reclaim) {
		WARN_ON_ONCE(1);	/* Still happens? Tell us about it! */
		goto redirty;
	}

	/*
	 * This is somewhat ridiculous, but without plumbing a SWAP_MAP_FALLOC
	 * value into swapfile.c, the only way we can correctly account for a
	 * fallocated folio arriving here is now to initialize it and write it.
	 *
	 * That's okay for a folio already fallocated earlier, but if we have
	 * not yet completed the fallocation, then (a) we want to keep track
	 * of this folio in case we have to undo it, and (b) it may not be a
	 * good idea to continue anyway, once we're pushing into swap.  So
	 * reactivate the folio, and let shmem_fallocate() quit when too many.
	 */
	if (!folio_test_uptodate(folio)) {
		if (inode->i_private) {
			struct shmem_falloc *shmem_falloc;
			spin_lock(&inode->i_lock);
			shmem_falloc = inode->i_private;
			if (shmem_falloc &&
			    !shmem_falloc->waitq &&
			    index >= shmem_falloc->start &&
			    index < shmem_falloc->next)
				shmem_falloc->nr_unswapped++;
			else
				shmem_falloc = NULL;
			spin_unlock(&inode->i_lock);
			if (shmem_falloc)
				goto redirty;
		}
		folio_zero_range(folio, 0, folio_size(folio));
		flush_dcache_folio(folio);
		folio_mark_uptodate(folio);
	}

	swap = folio_alloc_swap(folio);
	if (!swap.val)
		goto redirty;

	/*
	 * Add inode to shmem_unuse()'s list of swapped-out inodes,
	 * if it's not already there.  Do it now before the folio is
	 * moved to swap cache, when its pagelock no longer protects
	 * the inode from eviction.  But don't unlock the mutex until
	 * we've incremented swapped, because shmem_unuse_inode() will
	 * prune a !swapped inode from the swaplist under this mutex.
	 */
	mutex_lock(&shmem_swaplist_mutex);
	if (list_empty(&info->swaplist))
		list_add(&info->swaplist, &shmem_swaplist);

	if (add_to_swap_cache(folio, swap,
			__GFP_HIGH | __GFP_NOMEMALLOC | __GFP_NOWARN,
			NULL) == 0) {
		spin_lock_irq(&info->lock);
		shmem_recalc_inode(inode);
		info->swapped++;
		spin_unlock_irq(&info->lock);

		swap_shmem_alloc(swap);
		shmem_delete_from_page_cache(folio, swp_to_radix_entry(swap));

		mutex_unlock(&shmem_swaplist_mutex);
		BUG_ON(folio_mapped(folio));
		swap_writepage(&folio->page, wbc);
		return 0;
	}

	mutex_unlock(&shmem_swaplist_mutex);
	put_swap_folio(folio, swap);
redirty:
	folio_mark_dirty(folio);
	if (wbc->for_reclaim)
		return AOP_WRITEPAGE_ACTIVATE;	/* Return with folio locked */
	folio_unlock(folio);
	return 0;
}

#if defined(CONFIG_NUMA) && defined(CONFIG_TMPFS)
static void shmem_show_mpol(struct seq_file *seq, struct mempolicy *mpol)
{
	char buffer[64];

	if (!mpol || mpol->mode == MPOL_DEFAULT)
		return;		/* show nothing */

	mpol_to_str(buffer, sizeof(buffer), mpol);

	seq_printf(seq, ",mpol=%s", buffer);
}

static struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo)
{
	struct mempolicy *mpol = NULL;
	if (sbinfo->mpol) {
		raw_spin_lock(&sbinfo->stat_lock);	/* prevent replace/use races */
		mpol = sbinfo->mpol;
		mpol_get(mpol);
		raw_spin_unlock(&sbinfo->stat_lock);
	}
	return mpol;
}
#else /* !CONFIG_NUMA || !CONFIG_TMPFS */
static inline void shmem_show_mpol(struct seq_file *seq, struct mempolicy *mpol)
{
}
static inline struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo)
{
	return NULL;
}
#endif /* CONFIG_NUMA && CONFIG_TMPFS */
#ifndef CONFIG_NUMA
#define vm_policy vm_private_data
#endif

static void shmem_pseudo_vma_init(struct vm_area_struct *vma,
		struct shmem_inode_info *info, pgoff_t index)
{
	/* Create a pseudo vma that just contains the policy */
	vma_init(vma, NULL);
	/* Bias interleave by inode number to distribute better across nodes */
	vma->vm_pgoff = index + info->vfs_inode.i_ino;
	vma->vm_policy = mpol_shared_policy_lookup(&info->policy, index);
}

static void shmem_pseudo_vma_destroy(struct vm_area_struct *vma)
{
	/* Drop reference taken by mpol_shared_policy_lookup() */
	mpol_cond_put(vma->vm_policy);
}

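/*
 * Read a swap entry back into the swap cache, using cluster readahead and a
 * pseudo-vma that carries this inode's NUMA memory policy.
 */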
static struct folio *shmem_swapin(swp_entry_t swap, gfp_t gfp,
			struct shmem_inode_info *info, pgoff_t index)
{
	struct vm_area_struct pvma;
	struct page *page;
	struct vm_fault vmf = {
		.vma = &pvma,
	};

	shmem_pseudo_vma_init(&pvma, info, index);
	page = swap_cluster_readahead(swap, gfp, &vmf);
	shmem_pseudo_vma_destroy(&pvma);

	if (!page)
		return NULL;
	return page_folio(page);
}

/*
 * Make sure huge_gfp is always more limited than limit_gfp.
 * Some of the flags set permissions, while others set limitations.
 */
static gfp_t limit_gfp_mask(gfp_t huge_gfp, gfp_t limit_gfp)
{
	gfp_t allowflags = __GFP_IO | __GFP_FS | __GFP_RECLAIM;
	gfp_t denyflags = __GFP_NOWARN | __GFP_NORETRY;
	gfp_t zoneflags = limit_gfp & GFP_ZONEMASK;
	gfp_t result = huge_gfp & ~(allowflags | GFP_ZONEMASK);

	/* Allow allocations only from the originally specified zones. */
	result |= zoneflags;

	/*
	 * Minimize the result gfp by taking the union with the deny flags,
	 * and the intersection of the allow flags.
	 */
	result |= (limit_gfp & denyflags);
	result |= (huge_gfp & limit_gfp) & allowflags;

	return result;
}

static struct folio *shmem_alloc_hugefolio(gfp_t gfp,
		struct shmem_inode_info *info, pgoff_t index)
{
	struct vm_area_struct pvma;
	struct address_space *mapping = info->vfs_inode.i_mapping;
	pgoff_t hindex;
	struct folio *folio;

	hindex = round_down(index, HPAGE_PMD_NR);
	if (xa_find(&mapping->i_pages, &hindex, hindex + HPAGE_PMD_NR - 1,
								XA_PRESENT))
		return NULL;

	shmem_pseudo_vma_init(&pvma, info, hindex);
	folio = vma_alloc_folio(gfp, HPAGE_PMD_ORDER, &pvma, 0, true);
	shmem_pseudo_vma_destroy(&pvma);
	if (!folio)
		count_vm_event(THP_FILE_FALLBACK);
	return folio;
}

static struct folio *shmem_alloc_folio(gfp_t gfp,
			struct shmem_inode_info *info, pgoff_t index)
{
	struct vm_area_struct pvma;
	struct folio *folio;

	shmem_pseudo_vma_init(&pvma, info, index);
	folio = vma_alloc_folio(gfp, 0, &pvma, 0, false);
	shmem_pseudo_vma_destroy(&pvma);

	return folio;
}

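/*
 * Allocate a new folio (huge if requested and supported) and charge it
 * against the inode's block accounting; returns ERR_PTR(-ENOSPC) if the
 * tmpfs limits are exceeded, or ERR_PTR(-ENOMEM) if the allocation fails.
 */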
static struct folio *shmem_alloc_and_acct_folio(gfp_t gfp, struct inode *inode,
		pgoff_t index, bool huge)
{
	struct shmem_inode_info *info = SHMEM_I(inode);
	struct folio *folio;
	int nr;
	int err = -ENOSPC;

	if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE))
		huge = false;
	nr = huge ? HPAGE_PMD_NR : 1;

	if (!shmem_inode_acct_block(inode, nr))
		goto failed;

	if (huge)
		folio = shmem_alloc_hugefolio(gfp, info, index);
	else
		folio = shmem_alloc_folio(gfp, info, index);
	if (folio) {
		__folio_set_locked(folio);
		__folio_set_swapbacked(folio);
		return folio;
	}

	err = -ENOMEM;
	shmem_inode_unacct_blocks(inode, nr);
failed:
	return ERR_PTR(err);
}

bde05d1c
HD
1606/*
1607 * When a page is moved from swapcache to shmem filecache (either by the
fc26babb 1608 * usual swapin of shmem_get_folio_gfp(), or by the less common swapoff of
bde05d1c
HD
1609 * shmem_unuse_inode()), it may have been read in earlier from swap, in
1610 * ignorance of the mapping it belongs to. If that mapping has special
1611 * constraints (like the gma500 GEM driver, which requires RAM below 4GB),
1612 * we may need to copy to a suitable page before moving to filecache.
1613 *
1614 * In a future release, this may well be extended to respect cpuset and
1615 * NUMA mempolicy, and applied also to anonymous pages in do_swap_page();
1616 * but for now it is a simple matter of zone.
1617 */
069d849c 1618static bool shmem_should_replace_folio(struct folio *folio, gfp_t gfp)
bde05d1c 1619{
069d849c 1620 return folio_zonenum(folio) > gfp_zone(gfp);
bde05d1c
HD
1621}
1622
0d698e25 1623static int shmem_replace_folio(struct folio **foliop, gfp_t gfp,
bde05d1c
HD
1624 struct shmem_inode_info *info, pgoff_t index)
1625{
d21bba2b 1626 struct folio *old, *new;
bde05d1c 1627 struct address_space *swap_mapping;
c1cb20d4 1628 swp_entry_t entry;
bde05d1c
HD
1629 pgoff_t swap_index;
1630 int error;
1631
0d698e25 1632 old = *foliop;
907ea17e 1633 entry = folio_swap_entry(old);
c1cb20d4 1634 swap_index = swp_offset(entry);
907ea17e 1635 swap_mapping = swap_address_space(entry);
bde05d1c
HD
1636
1637 /*
1638 * We have arrived here because our zones are constrained, so don't
1639 * limit chance of success by further cpuset and node constraints.
1640 */
1641 gfp &= ~GFP_CONSTRAINT_MASK;
907ea17e
MWO
1642 VM_BUG_ON_FOLIO(folio_test_large(old), old);
1643 new = shmem_alloc_folio(gfp, info, index);
1644 if (!new)
bde05d1c 1645 return -ENOMEM;
bde05d1c 1646
907ea17e
MWO
1647 folio_get(new);
1648 folio_copy(new, old);
1649 flush_dcache_folio(new);
bde05d1c 1650
907ea17e
MWO
1651 __folio_set_locked(new);
1652 __folio_set_swapbacked(new);
1653 folio_mark_uptodate(new);
1654 folio_set_swap_entry(new, entry);
1655 folio_set_swapcache(new);
bde05d1c
HD
1656
1657 /*
 1658 * Our caller will very soon move the new folio out of swapcache, but it's
 1659 * a nice clean interface for us to replace the old folio by the new one there.
1660 */
b93b0163 1661 xa_lock_irq(&swap_mapping->i_pages);
907ea17e 1662 error = shmem_replace_entry(swap_mapping, swap_index, old, new);
0142ef6c 1663 if (!error) {
d21bba2b 1664 mem_cgroup_migrate(old, new);
907ea17e
MWO
1665 __lruvec_stat_mod_folio(new, NR_FILE_PAGES, 1);
1666 __lruvec_stat_mod_folio(new, NR_SHMEM, 1);
1667 __lruvec_stat_mod_folio(old, NR_FILE_PAGES, -1);
1668 __lruvec_stat_mod_folio(old, NR_SHMEM, -1);
0142ef6c 1669 }
b93b0163 1670 xa_unlock_irq(&swap_mapping->i_pages);
bde05d1c 1671
0142ef6c
HD
1672 if (unlikely(error)) {
1673 /*
1674 * Is this possible? I think not, now that our callers check
 1675 * both folio_test_swapcache and the swap entry after getting the folio lock;
 1676 * but be defensive. Reverse old to new for clear and free.
1677 */
907ea17e 1678 old = new;
0142ef6c 1679 } else {
907ea17e 1680 folio_add_lru(new);
0d698e25 1681 *foliop = new;
0142ef6c 1682 }
bde05d1c 1683
907ea17e
MWO
1684 folio_clear_swapcache(old);
1685 old->private = NULL;
bde05d1c 1686
907ea17e
MWO
1687 folio_unlock(old);
1688 folio_put_refs(old, 2);
0142ef6c 1689 return error;
bde05d1c
HD
1690}
1691
6cec2b95
ML
1692static void shmem_set_folio_swapin_error(struct inode *inode, pgoff_t index,
1693 struct folio *folio, swp_entry_t swap)
1694{
1695 struct address_space *mapping = inode->i_mapping;
1696 struct shmem_inode_info *info = SHMEM_I(inode);
1697 swp_entry_t swapin_error;
1698 void *old;
1699
15520a3f 1700 swapin_error = make_swapin_error_entry();
6cec2b95
ML
1701 old = xa_cmpxchg_irq(&mapping->i_pages, index,
1702 swp_to_radix_entry(swap),
1703 swp_to_radix_entry(swapin_error), 0);
1704 if (old != swp_to_radix_entry(swap))
1705 return;
1706
1707 folio_wait_writeback(folio);
75fa68a5 1708 delete_from_swap_cache(folio);
6cec2b95
ML
1709 spin_lock_irq(&info->lock);
1710 /*
 1711 * Don't treat the swapin error folio as alloced. Otherwise inode->i_blocks won't
 1712 * be 0 when the inode is released, which would trigger WARN_ON(inode->i_blocks)
 1713 * in shmem_evict_inode.
1714 */
1715 info->alloced--;
1716 info->swapped--;
1717 shmem_recalc_inode(inode);
1718 spin_unlock_irq(&info->lock);
1719 swap_free(swap);
1720}
1721
c5bf121e 1722/*
833de10f
ML
1723 * Swap in the folio pointed to by *foliop.
1724 * Caller has to make sure that *foliop contains a valid swapped folio.
 1725 * Returns 0 with the folio in *foliop on success. On failure, returns the
 1726 * error code and sets *foliop to NULL.
c5bf121e 1727 */
da08e9b7
MWO
1728static int shmem_swapin_folio(struct inode *inode, pgoff_t index,
1729 struct folio **foliop, enum sgp_type sgp,
c5bf121e
VRP
1730 gfp_t gfp, struct vm_area_struct *vma,
1731 vm_fault_t *fault_type)
1732{
1733 struct address_space *mapping = inode->i_mapping;
1734 struct shmem_inode_info *info = SHMEM_I(inode);
04f94e3f 1735 struct mm_struct *charge_mm = vma ? vma->vm_mm : NULL;
da08e9b7 1736 struct folio *folio = NULL;
c5bf121e
VRP
1737 swp_entry_t swap;
1738 int error;
1739
da08e9b7
MWO
1740 VM_BUG_ON(!*foliop || !xa_is_value(*foliop));
1741 swap = radix_to_swp_entry(*foliop);
1742 *foliop = NULL;
c5bf121e 1743
6cec2b95
ML
1744 if (is_swapin_error_entry(swap))
1745 return -EIO;
1746
c5bf121e 1747 /* Look it up and read it in.. */
5739a81c
MWO
1748 folio = swap_cache_get_folio(swap, NULL, 0);
1749 if (!folio) {
c5bf121e
VRP
1750 /* Or update major stats only when swapin succeeds?? */
1751 if (fault_type) {
1752 *fault_type |= VM_FAULT_MAJOR;
1753 count_vm_event(PGMAJFAULT);
1754 count_memcg_event_mm(charge_mm, PGMAJFAULT);
1755 }
 1756 /* Here we actually start the I/O */
5739a81c
MWO
1757 folio = shmem_swapin(swap, gfp, info, index);
1758 if (!folio) {
c5bf121e
VRP
1759 error = -ENOMEM;
1760 goto failed;
1761 }
1762 }
1763
833de10f 1764 /* We have to do this with folio locked to prevent races */
da08e9b7
MWO
1765 folio_lock(folio);
1766 if (!folio_test_swapcache(folio) ||
1767 folio_swap_entry(folio).val != swap.val ||
c5bf121e
VRP
1768 !shmem_confirm_swap(mapping, index, swap)) {
1769 error = -EEXIST;
1770 goto unlock;
1771 }
da08e9b7 1772 if (!folio_test_uptodate(folio)) {
c5bf121e
VRP
1773 error = -EIO;
1774 goto failed;
1775 }
da08e9b7 1776 folio_wait_writeback(folio);
c5bf121e 1777
8a84802e
SP
1778 /*
1779 * Some architectures may have to restore extra metadata to the
da08e9b7 1780 * folio after reading from swap.
8a84802e 1781 */
da08e9b7 1782 arch_swap_restore(swap, folio);
8a84802e 1783
069d849c 1784 if (shmem_should_replace_folio(folio, gfp)) {
0d698e25 1785 error = shmem_replace_folio(&folio, gfp, info, index);
c5bf121e
VRP
1786 if (error)
1787 goto failed;
1788 }
1789
b7dd44a1 1790 error = shmem_add_to_page_cache(folio, mapping, index,
3fea5a49
JW
1791 swp_to_radix_entry(swap), gfp,
1792 charge_mm);
1793 if (error)
14235ab3 1794 goto failed;
c5bf121e
VRP
1795
1796 spin_lock_irq(&info->lock);
1797 info->swapped--;
1798 shmem_recalc_inode(inode);
1799 spin_unlock_irq(&info->lock);
1800
1801 if (sgp == SGP_WRITE)
da08e9b7 1802 folio_mark_accessed(folio);
c5bf121e 1803
75fa68a5 1804 delete_from_swap_cache(folio);
da08e9b7 1805 folio_mark_dirty(folio);
c5bf121e
VRP
1806 swap_free(swap);
1807
da08e9b7 1808 *foliop = folio;
c5bf121e
VRP
1809 return 0;
1810failed:
1811 if (!shmem_confirm_swap(mapping, index, swap))
1812 error = -EEXIST;
6cec2b95
ML
1813 if (error == -EIO)
1814 shmem_set_folio_swapin_error(inode, index, folio, swap);
c5bf121e 1815unlock:
da08e9b7
MWO
1816 if (folio) {
1817 folio_unlock(folio);
1818 folio_put(folio);
c5bf121e
VRP
1819 }
1820
1821 return error;
1822}
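/*
 * Editorial note (illustrative sketch, not kernel source): its caller,
 * shmem_get_folio_gfp() below, reaches this function when the page cache
 * slot holds a swap entry rather than a folio, i.e. an xa_is_value() entry:
 *
 *	if (xa_is_value(folio)) {
 *		error = shmem_swapin_folio(inode, index, &folio,
 *					   sgp, gfp, vma, fault_type);
 *		if (error == -EEXIST)
 *			goto repeat;	(lost a race: look the slot up again)
 *	}
 */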
1823
1da177e4 1824/*
fc26babb 1825 * shmem_get_folio_gfp - find page in cache, or get from swap, or allocate
1da177e4
LT
1826 *
1827 * If we allocate a new one we do not mark it dirty. That's up to the
 1828 * vm. If we swap it in we mark it dirty, since we also free the swap
9e18eb29
ALC
 1829 * entry: a page cannot live in both the swap and page cache.
1830 *
c949b097 1831 * vma, vmf, and fault_type are only supplied by shmem_fault:
9e18eb29 1832 * otherwise they are NULL.
1da177e4 1833 */
fc26babb
MWO
1834static int shmem_get_folio_gfp(struct inode *inode, pgoff_t index,
1835 struct folio **foliop, enum sgp_type sgp, gfp_t gfp,
1836 struct vm_area_struct *vma, struct vm_fault *vmf,
1837 vm_fault_t *fault_type)
1da177e4
LT
1838{
1839 struct address_space *mapping = inode->i_mapping;
23f919d4 1840 struct shmem_inode_info *info = SHMEM_I(inode);
1da177e4 1841 struct shmem_sb_info *sbinfo;
9e18eb29 1842 struct mm_struct *charge_mm;
b7dd44a1 1843 struct folio *folio;
6fe7d712 1844 pgoff_t hindex;
164cc4fe 1845 gfp_t huge_gfp;
1da177e4 1846 int error;
54af6042 1847 int once = 0;
1635f6a7 1848 int alloced = 0;
1da177e4 1849
09cbfeaf 1850 if (index > (MAX_LFS_FILESIZE >> PAGE_SHIFT))
1da177e4 1851 return -EFBIG;
1da177e4 1852repeat:
c5bf121e
VRP
1853 if (sgp <= SGP_CACHE &&
1854 ((loff_t)index << PAGE_SHIFT) >= i_size_read(inode)) {
1855 return -EINVAL;
1856 }
1857
1858 sbinfo = SHMEM_SB(inode->i_sb);
04f94e3f 1859 charge_mm = vma ? vma->vm_mm : NULL;
c5bf121e 1860
b1d0ec3a
MWO
1861 folio = __filemap_get_folio(mapping, index, FGP_ENTRY | FGP_LOCK, 0);
1862 if (folio && vma && userfaultfd_minor(vma)) {
1863 if (!xa_is_value(folio)) {
1864 folio_unlock(folio);
1865 folio_put(folio);
c949b097
AR
1866 }
1867 *fault_type = handle_userfault(vmf, VM_UFFD_MINOR);
1868 return 0;
1869 }
1870
b1d0ec3a 1871 if (xa_is_value(folio)) {
da08e9b7 1872 error = shmem_swapin_folio(inode, index, &folio,
c5bf121e
VRP
1873 sgp, gfp, vma, fault_type);
1874 if (error == -EEXIST)
1875 goto repeat;
54af6042 1876
fc26babb 1877 *foliop = folio;
c5bf121e 1878 return error;
54af6042
HD
1879 }
1880
b1d0ec3a 1881 if (folio) {
acdd9f8e 1882 if (sgp == SGP_WRITE)
b1d0ec3a
MWO
1883 folio_mark_accessed(folio);
1884 if (folio_test_uptodate(folio))
acdd9f8e 1885 goto out;
fc26babb 1886 /* fallocated folio */
1635f6a7
HD
1887 if (sgp != SGP_READ)
1888 goto clear;
b1d0ec3a
MWO
1889 folio_unlock(folio);
1890 folio_put(folio);
1635f6a7 1891 }
27ab7006
HD
1892
1893 /*
fc26babb
MWO
1894 * SGP_READ: succeed on hole, with NULL folio, letting caller zero.
1895 * SGP_NOALLOC: fail on hole, with NULL folio, letting caller fail.
acdd9f8e 1896 */
fc26babb 1897 *foliop = NULL;
acdd9f8e
HD
1898 if (sgp == SGP_READ)
1899 return 0;
1900 if (sgp == SGP_NOALLOC)
1901 return -ENOENT;
1902
1903 /*
1904 * Fast cache lookup and swap lookup did not find it: allocate.
27ab7006 1905 */
54af6042 1906
c5bf121e
VRP
1907 if (vma && userfaultfd_missing(vma)) {
1908 *fault_type = handle_userfault(vmf, VM_UFFD_MISSING);
1909 return 0;
1910 }
cfda0526 1911
7c6c6cc4 1912 if (!shmem_is_huge(vma, inode, index, false))
c5bf121e 1913 goto alloc_nohuge;
1da177e4 1914
164cc4fe 1915 huge_gfp = vma_thp_gfp_mask(vma);
78cc8cdc 1916 huge_gfp = limit_gfp_mask(huge_gfp, gfp);
b1d0ec3a
MWO
1917 folio = shmem_alloc_and_acct_folio(huge_gfp, inode, index, true);
1918 if (IS_ERR(folio)) {
c5bf121e 1919alloc_nohuge:
b1d0ec3a 1920 folio = shmem_alloc_and_acct_folio(gfp, inode, index, false);
c5bf121e 1921 }
b1d0ec3a 1922 if (IS_ERR(folio)) {
c5bf121e 1923 int retry = 5;
800d8c63 1924
b1d0ec3a
MWO
1925 error = PTR_ERR(folio);
1926 folio = NULL;
c5bf121e
VRP
1927 if (error != -ENOSPC)
1928 goto unlock;
1929 /*
fc26babb 1930 * Try to reclaim some space by splitting a large folio
c5bf121e
VRP
1931 * beyond i_size on the filesystem.
1932 */
1933 while (retry--) {
1934 int ret;
66d2f4d2 1935
c5bf121e
VRP
1936 ret = shmem_unused_huge_shrink(sbinfo, NULL, 1);
1937 if (ret == SHRINK_STOP)
1938 break;
1939 if (ret)
1940 goto alloc_nohuge;
b065b432 1941 }
c5bf121e
VRP
1942 goto unlock;
1943 }
54af6042 1944
b1d0ec3a 1945 hindex = round_down(index, folio_nr_pages(folio));
54af6042 1946
c5bf121e 1947 if (sgp == SGP_WRITE)
b1d0ec3a 1948 __folio_set_referenced(folio);
c5bf121e 1949
b7dd44a1 1950 error = shmem_add_to_page_cache(folio, mapping, hindex,
3fea5a49
JW
1951 NULL, gfp & GFP_RECLAIM_MASK,
1952 charge_mm);
1953 if (error)
c5bf121e 1954 goto unacct;
b1d0ec3a 1955 folio_add_lru(folio);
779750d2 1956
c5bf121e 1957 spin_lock_irq(&info->lock);
b1d0ec3a 1958 info->alloced += folio_nr_pages(folio);
fa020a2b 1959 inode->i_blocks += (blkcnt_t)BLOCKS_PER_PAGE << folio_order(folio);
c5bf121e
VRP
1960 shmem_recalc_inode(inode);
1961 spin_unlock_irq(&info->lock);
1962 alloced = true;
1963
b1d0ec3a 1964 if (folio_test_pmd_mappable(folio) &&
c5bf121e 1965 DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE) <
fc26babb 1966 folio_next_index(folio) - 1) {
ec9516fb 1967 /*
fc26babb 1968 * Part of the large folio is beyond i_size: subject
c5bf121e 1969 * to shrink under memory pressure.
1635f6a7 1970 */
c5bf121e 1971 spin_lock(&sbinfo->shrinklist_lock);
1635f6a7 1972 /*
c5bf121e
VRP
1973 * _careful to defend against unlocked access to
1974 * ->shrink_list in shmem_unused_huge_shrink()
ec9516fb 1975 */
c5bf121e
VRP
1976 if (list_empty_careful(&info->shrinklist)) {
1977 list_add_tail(&info->shrinklist,
1978 &sbinfo->shrinklist);
1979 sbinfo->shrinklist_len++;
1980 }
1981 spin_unlock(&sbinfo->shrinklist_lock);
1982 }
800d8c63 1983
c5bf121e 1984 /*
fc26babb 1985 * Let SGP_FALLOC use the SGP_WRITE optimization on a new folio.
c5bf121e
VRP
1986 */
1987 if (sgp == SGP_FALLOC)
1988 sgp = SGP_WRITE;
1989clear:
1990 /*
fc26babb
MWO
1991 * Let SGP_WRITE caller clear ends if write does not fill folio;
1992 * but SGP_FALLOC on a folio fallocated earlier must initialize
c5bf121e
VRP
1993 * it now, lest undo on failure cancel our earlier guarantee.
1994 */
b1d0ec3a
MWO
1995 if (sgp != SGP_WRITE && !folio_test_uptodate(folio)) {
1996 long i, n = folio_nr_pages(folio);
c5bf121e 1997
b1d0ec3a
MWO
1998 for (i = 0; i < n; i++)
1999 clear_highpage(folio_page(folio, i));
2000 flush_dcache_folio(folio);
2001 folio_mark_uptodate(folio);
1da177e4 2002 }
bde05d1c 2003
54af6042 2004 /* Perhaps the file has been truncated since we checked */
75edd345 2005 if (sgp <= SGP_CACHE &&
09cbfeaf 2006 ((loff_t)index << PAGE_SHIFT) >= i_size_read(inode)) {
267a4c76 2007 if (alloced) {
b1d0ec3a
MWO
2008 folio_clear_dirty(folio);
2009 filemap_remove_folio(folio);
4595ef88 2010 spin_lock_irq(&info->lock);
267a4c76 2011 shmem_recalc_inode(inode);
4595ef88 2012 spin_unlock_irq(&info->lock);
267a4c76 2013 }
54af6042 2014 error = -EINVAL;
267a4c76 2015 goto unlock;
e83c32e8 2016 }
63ec1973 2017out:
fc26babb 2018 *foliop = folio;
54af6042 2019 return 0;
1da177e4 2020
59a16ead 2021 /*
54af6042 2022 * Error recovery.
59a16ead 2023 */
54af6042 2024unacct:
b1d0ec3a 2025 shmem_inode_unacct_blocks(inode, folio_nr_pages(folio));
800d8c63 2026
b1d0ec3a
MWO
2027 if (folio_test_large(folio)) {
2028 folio_unlock(folio);
2029 folio_put(folio);
800d8c63
KS
2030 goto alloc_nohuge;
2031 }
d1899228 2032unlock:
b1d0ec3a
MWO
2033 if (folio) {
2034 folio_unlock(folio);
2035 folio_put(folio);
54af6042
HD
2036 }
2037 if (error == -ENOSPC && !once++) {
4595ef88 2038 spin_lock_irq(&info->lock);
54af6042 2039 shmem_recalc_inode(inode);
4595ef88 2040 spin_unlock_irq(&info->lock);
27ab7006 2041 goto repeat;
ff36b801 2042 }
7f4446ee 2043 if (error == -EEXIST)
54af6042
HD
2044 goto repeat;
2045 return error;
1da177e4
LT
2046}
2047
4e1fc793
MWO
2048int shmem_get_folio(struct inode *inode, pgoff_t index, struct folio **foliop,
2049 enum sgp_type sgp)
2050{
2051 return shmem_get_folio_gfp(inode, index, foliop, sgp,
2052 mapping_gfp_mask(inode->i_mapping), NULL, NULL, NULL);
2053}
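/*
 * Editorial note (illustrative usage sketch, not kernel source): in-tree
 * callers such as shmem_write_begin() and shmem_symlink() below follow this
 * pattern, typically with inode->i_rwsem held:
 *
 *	struct folio *folio;
 *	int err = shmem_get_folio(inode, index, &folio, SGP_WRITE);
 *	if (err)
 *		return err;
 *	... fill or modify the folio contents ...
 *	folio_mark_dirty(folio);
 *	folio_unlock(folio);
 *	folio_put(folio);
 */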
2054
10d20bd2
LT
2055/*
2056 * This is like autoremove_wake_function, but it removes the wait queue
2057 * entry unconditionally - even if something else had already woken the
2058 * target.
2059 */
ac6424b9 2060static int synchronous_wake_function(wait_queue_entry_t *wait, unsigned mode, int sync, void *key)
10d20bd2
LT
2061{
2062 int ret = default_wake_function(wait, mode, sync, key);
2055da97 2063 list_del_init(&wait->entry);
10d20bd2
LT
2064 return ret;
2065}
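/*
 * Editorial note: this wake function is used below in shmem_fault() via
 * DEFINE_WAIT_FUNC(shmem_fault_wait, synchronous_wake_function). There the
 * wait queue head lives on the hole-punching task's stack, so removing the
 * entry unconditionally on wake-up ensures the woken waiter is never left
 * linked into a queue that is about to disappear.
 */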
2066
20acce67 2067static vm_fault_t shmem_fault(struct vm_fault *vmf)
1da177e4 2068{
11bac800 2069 struct vm_area_struct *vma = vmf->vma;
496ad9aa 2070 struct inode *inode = file_inode(vma->vm_file);
9e18eb29 2071 gfp_t gfp = mapping_gfp_mask(inode->i_mapping);
68a54100 2072 struct folio *folio = NULL;
20acce67
SJ
2073 int err;
2074 vm_fault_t ret = VM_FAULT_LOCKED;
1da177e4 2075
f00cdc6d
HD
2076 /*
2077 * Trinity finds that probing a hole which tmpfs is punching can
2078 * prevent the hole-punch from ever completing: which in turn
9608703e 2079 * locks writers out with its hold on i_rwsem. So refrain from
8e205f77
HD
2080 * faulting pages into the hole while it's being punched. Although
2081 * shmem_undo_range() does remove the additions, it may be unable to
2082 * keep up, as each new page needs its own unmap_mapping_range() call,
2083 * and the i_mmap tree grows ever slower to scan if new vmas are added.
2084 *
2085 * It does not matter if we sometimes reach this check just before the
2086 * hole-punch begins, so that one fault then races with the punch:
2087 * we just need to make racing faults a rare case.
2088 *
2089 * The implementation below would be much simpler if we just used a
9608703e 2090 * standard mutex or completion: but we cannot take i_rwsem in fault,
8e205f77 2091 * and bloating every shmem inode for this unlikely case would be sad.
f00cdc6d
HD
2092 */
2093 if (unlikely(inode->i_private)) {
2094 struct shmem_falloc *shmem_falloc;
2095
2096 spin_lock(&inode->i_lock);
2097 shmem_falloc = inode->i_private;
8e205f77
HD
2098 if (shmem_falloc &&
2099 shmem_falloc->waitq &&
2100 vmf->pgoff >= shmem_falloc->start &&
2101 vmf->pgoff < shmem_falloc->next) {
8897c1b1 2102 struct file *fpin;
8e205f77 2103 wait_queue_head_t *shmem_falloc_waitq;
10d20bd2 2104 DEFINE_WAIT_FUNC(shmem_fault_wait, synchronous_wake_function);
8e205f77
HD
2105
2106 ret = VM_FAULT_NOPAGE;
8897c1b1
KS
2107 fpin = maybe_unlock_mmap_for_io(vmf, NULL);
2108 if (fpin)
8e205f77 2109 ret = VM_FAULT_RETRY;
8e205f77
HD
2110
2111 shmem_falloc_waitq = shmem_falloc->waitq;
2112 prepare_to_wait(shmem_falloc_waitq, &shmem_fault_wait,
2113 TASK_UNINTERRUPTIBLE);
2114 spin_unlock(&inode->i_lock);
2115 schedule();
2116
2117 /*
2118 * shmem_falloc_waitq points into the shmem_fallocate()
2119 * stack of the hole-punching task: shmem_falloc_waitq
2120 * is usually invalid by the time we reach here, but
2121 * finish_wait() does not dereference it in that case;
2122 * though i_lock needed lest racing with wake_up_all().
2123 */
2124 spin_lock(&inode->i_lock);
2125 finish_wait(shmem_falloc_waitq, &shmem_fault_wait);
2126 spin_unlock(&inode->i_lock);
8897c1b1
KS
2127
2128 if (fpin)
2129 fput(fpin);
8e205f77 2130 return ret;
f00cdc6d 2131 }
8e205f77 2132 spin_unlock(&inode->i_lock);
f00cdc6d
HD
2133 }
2134
68a54100 2135 err = shmem_get_folio_gfp(inode, vmf->pgoff, &folio, SGP_CACHE,
cfda0526 2136 gfp, vma, vmf, &ret);
20acce67
SJ
2137 if (err)
2138 return vmf_error(err);
68a54100
MWO
2139 if (folio)
2140 vmf->page = folio_file_page(folio, vmf->pgoff);
68da9f05 2141 return ret;
1da177e4
LT
2142}
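/*
 * Editorial note: the maybe_unlock_mmap_for_io()/fput() pairing above follows
 * the usual fault convention: if mmap_lock was dropped (fpin is non-NULL),
 * VM_FAULT_RETRY is returned so the caller retries the fault, while the
 * pinned struct file reference keeps the mapping's file alive until fput()
 * after finish_wait().
 */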
2143
c01d5b30
HD
2144unsigned long shmem_get_unmapped_area(struct file *file,
2145 unsigned long uaddr, unsigned long len,
2146 unsigned long pgoff, unsigned long flags)
2147{
2148 unsigned long (*get_area)(struct file *,
2149 unsigned long, unsigned long, unsigned long, unsigned long);
2150 unsigned long addr;
2151 unsigned long offset;
2152 unsigned long inflated_len;
2153 unsigned long inflated_addr;
2154 unsigned long inflated_offset;
2155
2156 if (len > TASK_SIZE)
2157 return -ENOMEM;
2158
2159 get_area = current->mm->get_unmapped_area;
2160 addr = get_area(file, uaddr, len, pgoff, flags);
2161
396bcc52 2162 if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE))
c01d5b30
HD
2163 return addr;
2164 if (IS_ERR_VALUE(addr))
2165 return addr;
2166 if (addr & ~PAGE_MASK)
2167 return addr;
2168 if (addr > TASK_SIZE - len)
2169 return addr;
2170
2171 if (shmem_huge == SHMEM_HUGE_DENY)
2172 return addr;
2173 if (len < HPAGE_PMD_SIZE)
2174 return addr;
2175 if (flags & MAP_FIXED)
2176 return addr;
2177 /*
2178 * Our priority is to support MAP_SHARED mapped hugely;
2179 * and support MAP_PRIVATE mapped hugely too, until it is COWed.
99158997
KS
2180 * But if caller specified an address hint and we allocated area there
2181 * successfully, respect that as before.
c01d5b30 2182 */
99158997 2183 if (uaddr == addr)
c01d5b30
HD
2184 return addr;
2185
2186 if (shmem_huge != SHMEM_HUGE_FORCE) {
2187 struct super_block *sb;
2188
2189 if (file) {
2190 VM_BUG_ON(file->f_op != &shmem_file_operations);
2191 sb = file_inode(file)->i_sb;
2192 } else {
2193 /*
2194 * Called directly from mm/mmap.c, or drivers/char/mem.c
2195 * for "/dev/zero", to create a shared anonymous object.
2196 */
2197 if (IS_ERR(shm_mnt))
2198 return addr;
2199 sb = shm_mnt->mnt_sb;
2200 }
3089bf61 2201 if (SHMEM_SB(sb)->huge == SHMEM_HUGE_NEVER)
c01d5b30
HD
2202 return addr;
2203 }
2204
2205 offset = (pgoff << PAGE_SHIFT) & (HPAGE_PMD_SIZE-1);
2206 if (offset && offset + len < 2 * HPAGE_PMD_SIZE)
2207 return addr;
2208 if ((addr & (HPAGE_PMD_SIZE-1)) == offset)
2209 return addr;
2210
2211 inflated_len = len + HPAGE_PMD_SIZE - PAGE_SIZE;
2212 if (inflated_len > TASK_SIZE)
2213 return addr;
2214 if (inflated_len < len)
2215 return addr;
2216
99158997 2217 inflated_addr = get_area(NULL, uaddr, inflated_len, 0, flags);
c01d5b30
HD
2218 if (IS_ERR_VALUE(inflated_addr))
2219 return addr;
2220 if (inflated_addr & ~PAGE_MASK)
2221 return addr;
2222
2223 inflated_offset = inflated_addr & (HPAGE_PMD_SIZE-1);
2224 inflated_addr += offset - inflated_offset;
2225 if (inflated_offset > offset)
2226 inflated_addr += HPAGE_PMD_SIZE;
2227
2228 if (inflated_addr > TASK_SIZE - len)
2229 return addr;
2230 return inflated_addr;
2231}
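/*
 * Editorial note (worked example, illustrative only): with 4KiB pages and a
 * 2MiB HPAGE_PMD_SIZE, a MAP_SHARED request of len = 4MiB at pgoff = 0 has
 * offset = 0 and inflated_len = 4MiB + 2MiB - 4KiB. If the first get_area()
 * result is not already 2MiB-aligned, the second search over inflated_len
 * leaves enough slack for "inflated_addr += offset - inflated_offset" (plus
 * the HPAGE_PMD_SIZE correction) to round the start up to the next 2MiB
 * boundary, so the mapping can be served by PMD-sized pages.
 */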
2232
1da177e4 2233#ifdef CONFIG_NUMA
41ffe5d5 2234static int shmem_set_policy(struct vm_area_struct *vma, struct mempolicy *mpol)
1da177e4 2235{
496ad9aa 2236 struct inode *inode = file_inode(vma->vm_file);
41ffe5d5 2237 return mpol_set_shared_policy(&SHMEM_I(inode)->policy, vma, mpol);
1da177e4
LT
2238}
2239
d8dc74f2
AB
2240static struct mempolicy *shmem_get_policy(struct vm_area_struct *vma,
2241 unsigned long addr)
1da177e4 2242{
496ad9aa 2243 struct inode *inode = file_inode(vma->vm_file);
41ffe5d5 2244 pgoff_t index;
1da177e4 2245
41ffe5d5
HD
2246 index = ((addr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
2247 return mpol_shared_policy_lookup(&SHMEM_I(inode)->policy, index);
1da177e4
LT
2248}
2249#endif
2250
d7c9e99a 2251int shmem_lock(struct file *file, int lock, struct ucounts *ucounts)
1da177e4 2252{
496ad9aa 2253 struct inode *inode = file_inode(file);
1da177e4
LT
2254 struct shmem_inode_info *info = SHMEM_I(inode);
2255 int retval = -ENOMEM;
2256
ea0dfeb4
HD
2257 /*
2258 * What serializes the accesses to info->flags?
2259 * ipc_lock_object() when called from shmctl_do_lock(),
2260 * no serialization needed when called from shm_destroy().
2261 */
1da177e4 2262 if (lock && !(info->flags & VM_LOCKED)) {
d7c9e99a 2263 if (!user_shm_lock(inode->i_size, ucounts))
1da177e4
LT
2264 goto out_nomem;
2265 info->flags |= VM_LOCKED;
89e004ea 2266 mapping_set_unevictable(file->f_mapping);
1da177e4 2267 }
d7c9e99a
AG
2268 if (!lock && (info->flags & VM_LOCKED) && ucounts) {
2269 user_shm_unlock(inode->i_size, ucounts);
1da177e4 2270 info->flags &= ~VM_LOCKED;
89e004ea 2271 mapping_clear_unevictable(file->f_mapping);
1da177e4
LT
2272 }
2273 retval = 0;
89e004ea 2274
1da177e4 2275out_nomem:
1da177e4
LT
2276 return retval;
2277}
2278
9b83a6a8 2279static int shmem_mmap(struct file *file, struct vm_area_struct *vma)
1da177e4 2280{
d09e8ca6
PT
2281 struct inode *inode = file_inode(file);
2282 struct shmem_inode_info *info = SHMEM_I(inode);
22247efd 2283 int ret;
ab3948f5 2284
22247efd
PX
2285 ret = seal_check_future_write(info->seals, vma);
2286 if (ret)
2287 return ret;
ab3948f5 2288
51b0bff2
CM
2289 /* arm64 - allow memory tagging on RAM-based files */
2290 vma->vm_flags |= VM_MTE_ALLOWED;
2291
1da177e4 2292 file_accessed(file);
d09e8ca6
PT
2293 /* This is anonymous shared memory if it is unlinked at the time of mmap */
2294 if (inode->i_nlink)
2295 vma->vm_ops = &shmem_vm_ops;
2296 else
2297 vma->vm_ops = &shmem_anon_vm_ops;
1da177e4
LT
2298 return 0;
2299}
2300
cb241339
HD
2301#ifdef CONFIG_TMPFS_XATTR
2302static int shmem_initxattrs(struct inode *, const struct xattr *, void *);
2303
2304/*
2305 * chattr's fsflags are unrelated to extended attributes,
2306 * but tmpfs has chosen to enable them under the same config option.
2307 */
2308static void shmem_set_inode_flags(struct inode *inode, unsigned int fsflags)
2309{
2310 unsigned int i_flags = 0;
2311
2312 if (fsflags & FS_NOATIME_FL)
2313 i_flags |= S_NOATIME;
2314 if (fsflags & FS_APPEND_FL)
2315 i_flags |= S_APPEND;
2316 if (fsflags & FS_IMMUTABLE_FL)
2317 i_flags |= S_IMMUTABLE;
2318 /*
2319 * But FS_NODUMP_FL does not require any action in i_flags.
2320 */
2321 inode_set_flags(inode, i_flags, S_NOATIME | S_APPEND | S_IMMUTABLE);
2322}
2323#else
2324static void shmem_set_inode_flags(struct inode *inode, unsigned int fsflags)
e408e695 2325{
e408e695 2326}
cb241339
HD
2327#define shmem_initxattrs NULL
2328#endif
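/*
 * Editorial note (illustrative only): shmem_set_inode_flags() is called from
 * shmem_get_inode() for flags inherited from the parent directory, and from
 * shmem_fileattr_set() below when userspace changes flags; e.g. chattr +i on
 * a tmpfs file passes FS_IMMUTABLE_FL down and results in S_IMMUTABLE being
 * set on the inode.
 */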
e408e695
TT
2329
2330static struct inode *shmem_get_inode(struct super_block *sb, struct inode *dir,
09208d15 2331 umode_t mode, dev_t dev, unsigned long flags)
1da177e4
LT
2332{
2333 struct inode *inode;
2334 struct shmem_inode_info *info;
2335 struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
e809d5f0 2336 ino_t ino;
1da177e4 2337
e809d5f0 2338 if (shmem_reserve_inode(sb, &ino))
5b04c689 2339 return NULL;
1da177e4
LT
2340
2341 inode = new_inode(sb);
2342 if (inode) {
e809d5f0 2343 inode->i_ino = ino;
21cb47be 2344 inode_init_owner(&init_user_ns, inode, dir, mode);
1da177e4 2345 inode->i_blocks = 0;
078cd827 2346 inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode);
a251c17a 2347 inode->i_generation = get_random_u32();
1da177e4
LT
2348 info = SHMEM_I(inode);
2349 memset(info, 0, (char *)inode - (char *)info);
2350 spin_lock_init(&info->lock);
af53d3e9 2351 atomic_set(&info->stop_eviction, 0);
40e041a2 2352 info->seals = F_SEAL_SEAL;
0b0a0806 2353 info->flags = flags & VM_NORESERVE;
f7cd16a5 2354 info->i_crtime = inode->i_mtime;
e408e695
TT
2355 info->fsflags = (dir == NULL) ? 0 :
2356 SHMEM_I(dir)->fsflags & SHMEM_FL_INHERITED;
cb241339
HD
2357 if (info->fsflags)
2358 shmem_set_inode_flags(inode, info->fsflags);
779750d2 2359 INIT_LIST_HEAD(&info->shrinklist);
1da177e4 2360 INIT_LIST_HEAD(&info->swaplist);
38f38657 2361 simple_xattrs_init(&info->xattrs);
72c04902 2362 cache_no_acl(inode);
ff36da69 2363 mapping_set_large_folios(inode->i_mapping);
1da177e4
LT
2364
2365 switch (mode & S_IFMT) {
2366 default:
39f0247d 2367 inode->i_op = &shmem_special_inode_operations;
1da177e4
LT
2368 init_special_inode(inode, mode, dev);
2369 break;
2370 case S_IFREG:
14fcc23f 2371 inode->i_mapping->a_ops = &shmem_aops;
1da177e4
LT
2372 inode->i_op = &shmem_inode_operations;
2373 inode->i_fop = &shmem_file_operations;
71fe804b
LS
2374 mpol_shared_policy_init(&info->policy,
2375 shmem_get_sbmpol(sbinfo));
1da177e4
LT
2376 break;
2377 case S_IFDIR:
d8c76e6f 2378 inc_nlink(inode);
1da177e4
LT
2379 /* Some things misbehave if size == 0 on a directory */
2380 inode->i_size = 2 * BOGO_DIRENT_SIZE;
2381 inode->i_op = &shmem_dir_inode_operations;
2382 inode->i_fop = &simple_dir_operations;
2383 break;
2384 case S_IFLNK:
2385 /*
2386 * Must not load anything in the rbtree,
2387 * mpol_free_shared_policy will not be called.
2388 */
71fe804b 2389 mpol_shared_policy_init(&info->policy, NULL);
1da177e4
LT
2390 break;
2391 }
b45d71fb
JFG
2392
2393 lockdep_annotate_inode_mutex_key(inode);
5b04c689
PE
2394 } else
2395 shmem_free_inode(sb);
1da177e4
LT
2396 return inode;
2397}
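/*
 * Editorial note (illustrative sketch, not kernel source): the directory
 * operations below (shmem_mknod(), shmem_tmpfile(), shmem_symlink()) all use
 * this constructor in the same way:
 *
 *	inode = shmem_get_inode(dir->i_sb, dir, mode, dev, VM_NORESERVE);
 *	if (!inode)
 *		return -ENOSPC;
 *	... security/ACL initialization, then d_instantiate() ...
 */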
2398
3460f6e5
AR
2399#ifdef CONFIG_USERFAULTFD
2400int shmem_mfill_atomic_pte(struct mm_struct *dst_mm,
2401 pmd_t *dst_pmd,
2402 struct vm_area_struct *dst_vma,
2403 unsigned long dst_addr,
2404 unsigned long src_addr,
8ee79edf 2405 bool zeropage, bool wp_copy,
3460f6e5 2406 struct page **pagep)
4c27fe4c
MR
2407{
2408 struct inode *inode = file_inode(dst_vma->vm_file);
2409 struct shmem_inode_info *info = SHMEM_I(inode);
4c27fe4c
MR
2410 struct address_space *mapping = inode->i_mapping;
2411 gfp_t gfp = mapping_gfp_mask(mapping);
2412 pgoff_t pgoff = linear_page_index(dst_vma, dst_addr);
4c27fe4c 2413 void *page_kaddr;
b7dd44a1 2414 struct folio *folio;
4c27fe4c 2415 int ret;
3460f6e5 2416 pgoff_t max_off;
4c27fe4c 2417
7ed9d238
AR
2418 if (!shmem_inode_acct_block(inode, 1)) {
2419 /*
2420 * We may have got a page, returned -ENOENT triggering a retry,
2421 * and now we find ourselves with -ENOMEM. Release the page, to
2422 * avoid a BUG_ON in our caller.
2423 */
2424 if (unlikely(*pagep)) {
2425 put_page(*pagep);
2426 *pagep = NULL;
2427 }
7d64ae3a 2428 return -ENOMEM;
7ed9d238 2429 }
4c27fe4c 2430
cb658a45 2431 if (!*pagep) {
7d64ae3a 2432 ret = -ENOMEM;
7a7256d5
MWO
2433 folio = shmem_alloc_folio(gfp, info, pgoff);
2434 if (!folio)
0f079694 2435 goto out_unacct_blocks;
4c27fe4c 2436
3460f6e5 2437 if (!zeropage) { /* COPY */
7a7256d5 2438 page_kaddr = kmap_local_folio(folio, 0);
5dc21f0c
IW
2439 /*
2440 * The read mmap_lock is held here. Despite the
 2441 * mmap_lock being read-recursive, a deadlock is still
2442 * possible if a writer has taken a lock. For example:
2443 *
2444 * process A thread 1 takes read lock on own mmap_lock
2445 * process A thread 2 calls mmap, blocks taking write lock
2446 * process B thread 1 takes page fault, read lock on own mmap lock
2447 * process B thread 2 calls mmap, blocks taking write lock
2448 * process A thread 1 blocks taking read lock on process B
2449 * process B thread 1 blocks taking read lock on process A
2450 *
2451 * Disable page faults to prevent potential deadlock
2452 * and retry the copy outside the mmap_lock.
2453 */
2454 pagefault_disable();
8d103963
MR
2455 ret = copy_from_user(page_kaddr,
2456 (const void __user *)src_addr,
2457 PAGE_SIZE);
5dc21f0c 2458 pagefault_enable();
7a7256d5 2459 kunmap_local(page_kaddr);
8d103963 2460
c1e8d7c6 2461 /* fallback to copy_from_user outside mmap_lock */
8d103963 2462 if (unlikely(ret)) {
7a7256d5 2463 *pagep = &folio->page;
7d64ae3a 2464 ret = -ENOENT;
8d103963 2465 /* don't free the page */
7d64ae3a 2466 goto out_unacct_blocks;
8d103963 2467 }
19b482c2 2468
7a7256d5 2469 flush_dcache_folio(folio);
3460f6e5 2470 } else { /* ZEROPAGE */
7a7256d5 2471 clear_user_highpage(&folio->page, dst_addr);
4c27fe4c
MR
2472 }
2473 } else {
7a7256d5
MWO
2474 folio = page_folio(*pagep);
2475 VM_BUG_ON_FOLIO(folio_test_large(folio), folio);
4c27fe4c
MR
2476 *pagep = NULL;
2477 }
2478
7a7256d5
MWO
2479 VM_BUG_ON(folio_test_locked(folio));
2480 VM_BUG_ON(folio_test_swapbacked(folio));
2481 __folio_set_locked(folio);
2482 __folio_set_swapbacked(folio);
2483 __folio_mark_uptodate(folio);
9cc90c66 2484
e2a50c1f 2485 ret = -EFAULT;
e2a50c1f 2486 max_off = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
3460f6e5 2487 if (unlikely(pgoff >= max_off))
e2a50c1f
AA
2488 goto out_release;
2489
b7dd44a1 2490 ret = shmem_add_to_page_cache(folio, mapping, pgoff, NULL,
3fea5a49 2491 gfp & GFP_RECLAIM_MASK, dst_mm);
4c27fe4c 2492 if (ret)
3fea5a49 2493 goto out_release;
4c27fe4c 2494
7d64ae3a 2495 ret = mfill_atomic_install_pte(dst_mm, dst_pmd, dst_vma, dst_addr,
7a7256d5 2496 &folio->page, true, wp_copy);
7d64ae3a
AR
2497 if (ret)
2498 goto out_delete_from_cache;
4c27fe4c 2499
94b7cc01 2500 spin_lock_irq(&info->lock);
4c27fe4c
MR
2501 info->alloced++;
2502 inode->i_blocks += BLOCKS_PER_PAGE;
2503 shmem_recalc_inode(inode);
94b7cc01 2504 spin_unlock_irq(&info->lock);
4c27fe4c 2505
7a7256d5 2506 folio_unlock(folio);
7d64ae3a
AR
2507 return 0;
2508out_delete_from_cache:
7a7256d5 2509 filemap_remove_folio(folio);
4c27fe4c 2510out_release:
7a7256d5
MWO
2511 folio_unlock(folio);
2512 folio_put(folio);
4c27fe4c 2513out_unacct_blocks:
0f079694 2514 shmem_inode_unacct_blocks(inode, 1);
7d64ae3a 2515 return ret;
8d103963 2516}
3460f6e5 2517#endif /* CONFIG_USERFAULTFD */
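/*
 * Editorial note (illustrative summary): the -ENOENT return in
 * shmem_mfill_atomic_pte() is part of a retry protocol with the userfaultfd
 * caller: when copy_from_user() faults with page faults disabled, the
 * half-initialized folio is handed back through *pagep, the caller drops
 * mmap_lock, repeats the copy from user space, and calls back in with *pagep
 * still set so the folio is reused instead of reallocated.
 */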
8d103963 2518
1da177e4 2519#ifdef CONFIG_TMPFS
92e1d5be 2520static const struct inode_operations shmem_symlink_inode_operations;
69f07ec9 2521static const struct inode_operations shmem_short_symlink_operations;
1da177e4 2522
1da177e4 2523static int
800d15a5 2524shmem_write_begin(struct file *file, struct address_space *mapping,
9d6b0cd7 2525 loff_t pos, unsigned len,
800d15a5 2526 struct page **pagep, void **fsdata)
1da177e4 2527{
800d15a5 2528 struct inode *inode = mapping->host;
40e041a2 2529 struct shmem_inode_info *info = SHMEM_I(inode);
09cbfeaf 2530 pgoff_t index = pos >> PAGE_SHIFT;
eff1f906 2531 struct folio *folio;
a7605426 2532 int ret = 0;
40e041a2 2533
9608703e 2534 /* i_rwsem is held by caller */
ab3948f5
JFG
2535 if (unlikely(info->seals & (F_SEAL_GROW |
2536 F_SEAL_WRITE | F_SEAL_FUTURE_WRITE))) {
2537 if (info->seals & (F_SEAL_WRITE | F_SEAL_FUTURE_WRITE))
40e041a2
DH
2538 return -EPERM;
2539 if ((info->seals & F_SEAL_GROW) && pos + len > inode->i_size)
2540 return -EPERM;
2541 }
2542
eff1f906 2543 ret = shmem_get_folio(inode, index, &folio, SGP_WRITE);
a7605426
YS
2544
2545 if (ret)
2546 return ret;
2547
eff1f906 2548 *pagep = folio_file_page(folio, index);
a7605426 2549 if (PageHWPoison(*pagep)) {
eff1f906
MWO
2550 folio_unlock(folio);
2551 folio_put(folio);
a7605426
YS
2552 *pagep = NULL;
2553 return -EIO;
2554 }
2555
2556 return 0;
800d15a5
NP
2557}
2558
2559static int
2560shmem_write_end(struct file *file, struct address_space *mapping,
2561 loff_t pos, unsigned len, unsigned copied,
2562 struct page *page, void *fsdata)
2563{
2564 struct inode *inode = mapping->host;
2565
d3602444
HD
2566 if (pos + copied > inode->i_size)
2567 i_size_write(inode, pos + copied);
2568
ec9516fb 2569 if (!PageUptodate(page)) {
800d8c63
KS
2570 struct page *head = compound_head(page);
2571 if (PageTransCompound(page)) {
2572 int i;
2573
2574 for (i = 0; i < HPAGE_PMD_NR; i++) {
2575 if (head + i == page)
2576 continue;
2577 clear_highpage(head + i);
2578 flush_dcache_page(head + i);
2579 }
2580 }
09cbfeaf
KS
2581 if (copied < PAGE_SIZE) {
2582 unsigned from = pos & (PAGE_SIZE - 1);
ec9516fb 2583 zero_user_segments(page, 0, from,
09cbfeaf 2584 from + copied, PAGE_SIZE);
ec9516fb 2585 }
800d8c63 2586 SetPageUptodate(head);
ec9516fb 2587 }
800d15a5 2588 set_page_dirty(page);
6746aff7 2589 unlock_page(page);
09cbfeaf 2590 put_page(page);
800d15a5 2591
800d15a5 2592 return copied;
1da177e4
LT
2593}
2594
2ba5bbed 2595static ssize_t shmem_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
1da177e4 2596{
6e58e79d
AV
2597 struct file *file = iocb->ki_filp;
2598 struct inode *inode = file_inode(file);
1da177e4 2599 struct address_space *mapping = inode->i_mapping;
41ffe5d5
HD
2600 pgoff_t index;
2601 unsigned long offset;
f7c1d074 2602 int error = 0;
cb66a7a1 2603 ssize_t retval = 0;
6e58e79d 2604 loff_t *ppos = &iocb->ki_pos;
a0ee5ec5 2605
09cbfeaf
KS
2606 index = *ppos >> PAGE_SHIFT;
2607 offset = *ppos & ~PAGE_MASK;
1da177e4
LT
2608
2609 for (;;) {
4601e2fc 2610 struct folio *folio = NULL;
1da177e4 2611 struct page *page = NULL;
41ffe5d5
HD
2612 pgoff_t end_index;
2613 unsigned long nr, ret;
1da177e4
LT
2614 loff_t i_size = i_size_read(inode);
2615
09cbfeaf 2616 end_index = i_size >> PAGE_SHIFT;
1da177e4
LT
2617 if (index > end_index)
2618 break;
2619 if (index == end_index) {
09cbfeaf 2620 nr = i_size & ~PAGE_MASK;
1da177e4
LT
2621 if (nr <= offset)
2622 break;
2623 }
2624
4601e2fc 2625 error = shmem_get_folio(inode, index, &folio, SGP_READ);
6e58e79d
AV
2626 if (error) {
2627 if (error == -EINVAL)
2628 error = 0;
1da177e4
LT
2629 break;
2630 }
4601e2fc
MWO
2631 if (folio) {
2632 folio_unlock(folio);
a7605426 2633
4601e2fc 2634 page = folio_file_page(folio, index);
a7605426 2635 if (PageHWPoison(page)) {
4601e2fc 2636 folio_put(folio);
a7605426
YS
2637 error = -EIO;
2638 break;
2639 }
75edd345 2640 }
1da177e4
LT
2641
2642 /*
2643 * We must evaluate after, since reads (unlike writes)
9608703e 2644 * are called without i_rwsem protection against truncate
1da177e4 2645 */
09cbfeaf 2646 nr = PAGE_SIZE;
1da177e4 2647 i_size = i_size_read(inode);
09cbfeaf 2648 end_index = i_size >> PAGE_SHIFT;
1da177e4 2649 if (index == end_index) {
09cbfeaf 2650 nr = i_size & ~PAGE_MASK;
1da177e4 2651 if (nr <= offset) {
4601e2fc
MWO
2652 if (folio)
2653 folio_put(folio);
1da177e4
LT
2654 break;
2655 }
2656 }
2657 nr -= offset;
2658
4601e2fc 2659 if (folio) {
1da177e4
LT
2660 /*
2661 * If users can be writing to this page using arbitrary
2662 * virtual addresses, take care about potential aliasing
2663 * before reading the page on the kernel side.
2664 */
2665 if (mapping_writably_mapped(mapping))
2666 flush_dcache_page(page);
2667 /*
2668 * Mark the page accessed if we read the beginning.
2669 */
2670 if (!offset)
4601e2fc 2671 folio_mark_accessed(folio);
1bdec44b
HD
2672 /*
2673 * Ok, we have the page, and it's up-to-date, so
2674 * now we can copy it to user space...
2675 */
2676 ret = copy_page_to_iter(page, offset, nr, to);
4601e2fc 2677 folio_put(folio);
1bdec44b 2678
fcb14cb1 2679 } else if (user_backed_iter(to)) {
1bdec44b
HD
2680 /*
2681 * Copy to user tends to be so well optimized, but
2682 * clear_user() not so much, that it is noticeably
2683 * faster to copy the zero page instead of clearing.
2684 */
2685 ret = copy_page_to_iter(ZERO_PAGE(0), offset, nr, to);
b5810039 2686 } else {
1bdec44b
HD
2687 /*
2688 * But submitting the same page twice in a row to
2689 * splice() - or others? - can result in confusion:
2690 * so don't attempt that optimization on pipes etc.
2691 */
2692 ret = iov_iter_zero(nr, to);
b5810039 2693 }
1da177e4 2694
6e58e79d 2695 retval += ret;
1da177e4 2696 offset += ret;
09cbfeaf
KS
2697 index += offset >> PAGE_SHIFT;
2698 offset &= ~PAGE_MASK;
1da177e4 2699
2ba5bbed 2700 if (!iov_iter_count(to))
1da177e4 2701 break;
6e58e79d
AV
2702 if (ret < nr) {
2703 error = -EFAULT;
2704 break;
2705 }
1da177e4
LT
2706 cond_resched();
2707 }
2708
09cbfeaf 2709 *ppos = ((loff_t) index << PAGE_SHIFT) + offset;
6e58e79d
AV
2710 file_accessed(file);
2711 return retval ? retval : error;
1da177e4
LT
2712}
2713
965c8e59 2714static loff_t shmem_file_llseek(struct file *file, loff_t offset, int whence)
220f2ac9
HD
2715{
2716 struct address_space *mapping = file->f_mapping;
2717 struct inode *inode = mapping->host;
220f2ac9 2718
965c8e59
AM
2719 if (whence != SEEK_DATA && whence != SEEK_HOLE)
2720 return generic_file_llseek_size(file, offset, whence,
220f2ac9 2721 MAX_LFS_FILESIZE, i_size_read(inode));
41139aa4
MWO
2722 if (offset < 0)
2723 return -ENXIO;
2724
5955102c 2725 inode_lock(inode);
9608703e 2726 /* We're holding i_rwsem so we can access i_size directly */
41139aa4 2727 offset = mapping_seek_hole_data(mapping, offset, inode->i_size, whence);
387aae6f
HD
2728 if (offset >= 0)
2729 offset = vfs_setpos(file, offset, MAX_LFS_FILESIZE);
5955102c 2730 inode_unlock(inode);
220f2ac9
HD
2731 return offset;
2732}
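/*
 * Editorial note (illustrative userspace example, not kernel source): since
 * tmpfs keeps holes as empty page cache slots, mapping_seek_hole_data() lets
 * SEEK_DATA/SEEK_HOLE behave as expected, e.g.:
 *
 *	off_t data = lseek(fd, 0, SEEK_DATA);
 *	off_t hole = lseek(fd, data, SEEK_HOLE);
 */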
2733
83e4fa9c
HD
2734static long shmem_fallocate(struct file *file, int mode, loff_t offset,
2735 loff_t len)
2736{
496ad9aa 2737 struct inode *inode = file_inode(file);
e2d12e22 2738 struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
40e041a2 2739 struct shmem_inode_info *info = SHMEM_I(inode);
1aac1400 2740 struct shmem_falloc shmem_falloc;
d144bf62 2741 pgoff_t start, index, end, undo_fallocend;
e2d12e22 2742 int error;
83e4fa9c 2743
13ace4d0
HD
2744 if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
2745 return -EOPNOTSUPP;
2746
5955102c 2747 inode_lock(inode);
83e4fa9c
HD
2748
2749 if (mode & FALLOC_FL_PUNCH_HOLE) {
2750 struct address_space *mapping = file->f_mapping;
2751 loff_t unmap_start = round_up(offset, PAGE_SIZE);
2752 loff_t unmap_end = round_down(offset + len, PAGE_SIZE) - 1;
8e205f77 2753 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(shmem_falloc_waitq);
83e4fa9c 2754
9608703e 2755 /* protected by i_rwsem */
ab3948f5 2756 if (info->seals & (F_SEAL_WRITE | F_SEAL_FUTURE_WRITE)) {
40e041a2
DH
2757 error = -EPERM;
2758 goto out;
2759 }
2760
8e205f77 2761 shmem_falloc.waitq = &shmem_falloc_waitq;
aa71ecd8 2762 shmem_falloc.start = (u64)unmap_start >> PAGE_SHIFT;
f00cdc6d
HD
2763 shmem_falloc.next = (unmap_end + 1) >> PAGE_SHIFT;
2764 spin_lock(&inode->i_lock);
2765 inode->i_private = &shmem_falloc;
2766 spin_unlock(&inode->i_lock);
2767
83e4fa9c
HD
2768 if ((u64)unmap_end > (u64)unmap_start)
2769 unmap_mapping_range(mapping, unmap_start,
2770 1 + unmap_end - unmap_start, 0);
2771 shmem_truncate_range(inode, offset, offset + len - 1);
2772 /* No need to unmap again: hole-punching leaves COWed pages */
8e205f77
HD
2773
2774 spin_lock(&inode->i_lock);
2775 inode->i_private = NULL;
2776 wake_up_all(&shmem_falloc_waitq);
2055da97 2777 WARN_ON_ONCE(!list_empty(&shmem_falloc_waitq.head));
8e205f77 2778 spin_unlock(&inode->i_lock);
83e4fa9c 2779 error = 0;
8e205f77 2780 goto out;
e2d12e22
HD
2781 }
2782
2783 /* We need to check rlimit even when FALLOC_FL_KEEP_SIZE */
2784 error = inode_newsize_ok(inode, offset + len);
2785 if (error)
2786 goto out;
2787
40e041a2
DH
2788 if ((info->seals & F_SEAL_GROW) && offset + len > inode->i_size) {
2789 error = -EPERM;
2790 goto out;
2791 }
2792
09cbfeaf
KS
2793 start = offset >> PAGE_SHIFT;
2794 end = (offset + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
e2d12e22
HD
2795 /* Try to avoid a swapstorm if len is impossible to satisfy */
2796 if (sbinfo->max_blocks && end - start > sbinfo->max_blocks) {
2797 error = -ENOSPC;
2798 goto out;
83e4fa9c
HD
2799 }
2800
8e205f77 2801 shmem_falloc.waitq = NULL;
1aac1400
HD
2802 shmem_falloc.start = start;
2803 shmem_falloc.next = start;
2804 shmem_falloc.nr_falloced = 0;
2805 shmem_falloc.nr_unswapped = 0;
2806 spin_lock(&inode->i_lock);
2807 inode->i_private = &shmem_falloc;
2808 spin_unlock(&inode->i_lock);
2809
d144bf62
HD
2810 /*
2811 * info->fallocend is only relevant when huge pages might be
2812 * involved: to prevent split_huge_page() freeing fallocated
2813 * pages when FALLOC_FL_KEEP_SIZE committed beyond i_size.
2814 */
2815 undo_fallocend = info->fallocend;
2816 if (info->fallocend < end)
2817 info->fallocend = end;
2818
050dcb5c 2819 for (index = start; index < end; ) {
b0802b22 2820 struct folio *folio;
e2d12e22
HD
2821
2822 /*
2823 * Good, the fallocate(2) manpage permits EINTR: we may have
2824 * been interrupted because we are using up too much memory.
2825 */
2826 if (signal_pending(current))
2827 error = -EINTR;
1aac1400
HD
2828 else if (shmem_falloc.nr_unswapped > shmem_falloc.nr_falloced)
2829 error = -ENOMEM;
e2d12e22 2830 else
b0802b22
MWO
2831 error = shmem_get_folio(inode, index, &folio,
2832 SGP_FALLOC);
e2d12e22 2833 if (error) {
d144bf62 2834 info->fallocend = undo_fallocend;
b0802b22 2835 /* Remove the !uptodate folios we added */
7f556567
HD
2836 if (index > start) {
2837 shmem_undo_range(inode,
2838 (loff_t)start << PAGE_SHIFT,
2839 ((loff_t)index << PAGE_SHIFT) - 1, true);
2840 }
1aac1400 2841 goto undone;
e2d12e22
HD
2842 }
2843
050dcb5c
HD
2844 /*
2845 * Here is a more important optimization than it appears:
b0802b22
MWO
2846 * a second SGP_FALLOC on the same large folio will clear it,
2847 * making it uptodate and un-undoable if we fail later.
050dcb5c 2848 */
b0802b22
MWO
2849 index = folio_next_index(folio);
2850 /* Beware 32-bit wraparound */
2851 if (!index)
2852 index--;
050dcb5c 2853
1aac1400
HD
2854 /*
2855 * Inform shmem_writepage() how far we have reached.
2856 * No need for lock or barrier: we have the page lock.
2857 */
b0802b22 2858 if (!folio_test_uptodate(folio))
050dcb5c
HD
2859 shmem_falloc.nr_falloced += index - shmem_falloc.next;
2860 shmem_falloc.next = index;
1aac1400 2861
e2d12e22 2862 /*
b0802b22 2863 * If !uptodate, leave it that way so that freeable folios
1635f6a7 2864 * can be recognized if we need to rollback on error later.
b0802b22
MWO
2865 * But mark it dirty so that memory pressure will swap rather
2866 * than free the folios we are allocating (and SGP_CACHE folios
e2d12e22
HD
2867 * might still be clean: we now need to mark those dirty too).
2868 */
b0802b22
MWO
2869 folio_mark_dirty(folio);
2870 folio_unlock(folio);
2871 folio_put(folio);
e2d12e22
HD
2872 cond_resched();
2873 }
2874
2875 if (!(mode & FALLOC_FL_KEEP_SIZE) && offset + len > inode->i_size)
2876 i_size_write(inode, offset + len);
1aac1400
HD
2877undone:
2878 spin_lock(&inode->i_lock);
2879 inode->i_private = NULL;
2880 spin_unlock(&inode->i_lock);
e2d12e22 2881out:
15f242bb
HD
2882 if (!error)
2883 file_modified(file);
5955102c 2884 inode_unlock(inode);
83e4fa9c
HD
2885 return error;
2886}
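/*
 * Editorial note (illustrative userspace example, not kernel source): the
 * two supported modes above correspond to:
 *
 *	fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE, offset, len);
 *
 * which takes the hole-punch branch, and a plain
 *
 *	fallocate(fd, 0, offset, len);
 *
 * which runs the preallocation loop and extends i_size if needed.
 */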
2887
726c3342 2888static int shmem_statfs(struct dentry *dentry, struct kstatfs *buf)
1da177e4 2889{
726c3342 2890 struct shmem_sb_info *sbinfo = SHMEM_SB(dentry->d_sb);
1da177e4
LT
2891
2892 buf->f_type = TMPFS_MAGIC;
09cbfeaf 2893 buf->f_bsize = PAGE_SIZE;
1da177e4 2894 buf->f_namelen = NAME_MAX;
0edd73b3 2895 if (sbinfo->max_blocks) {
1da177e4 2896 buf->f_blocks = sbinfo->max_blocks;
41ffe5d5
HD
2897 buf->f_bavail =
2898 buf->f_bfree = sbinfo->max_blocks -
2899 percpu_counter_sum(&sbinfo->used_blocks);
0edd73b3
HD
2900 }
2901 if (sbinfo->max_inodes) {
1da177e4
LT
2902 buf->f_files = sbinfo->max_inodes;
2903 buf->f_ffree = sbinfo->free_inodes;
1da177e4
LT
2904 }
2905 /* else leave those fields 0 like simple_statfs */
59cda49e
AG
2906
2907 buf->f_fsid = uuid_to_fsid(dentry->d_sb->s_uuid.b);
2908
1da177e4
LT
2909 return 0;
2910}
2911
2912/*
2913 * File creation. Allocate an inode, and we're done..
2914 */
2915static int
549c7297
CB
2916shmem_mknod(struct user_namespace *mnt_userns, struct inode *dir,
2917 struct dentry *dentry, umode_t mode, dev_t dev)
1da177e4 2918{
0b0a0806 2919 struct inode *inode;
1da177e4
LT
2920 int error = -ENOSPC;
2921
454abafe 2922 inode = shmem_get_inode(dir->i_sb, dir, mode, dev, VM_NORESERVE);
1da177e4 2923 if (inode) {
feda821e
CH
2924 error = simple_acl_create(dir, inode);
2925 if (error)
2926 goto out_iput;
2a7dba39 2927 error = security_inode_init_security(inode, dir,
9d8f13ba 2928 &dentry->d_name,
6d9d88d0 2929 shmem_initxattrs, NULL);
feda821e
CH
2930 if (error && error != -EOPNOTSUPP)
2931 goto out_iput;
37ec43cd 2932
718deb6b 2933 error = 0;
1da177e4 2934 dir->i_size += BOGO_DIRENT_SIZE;
078cd827 2935 dir->i_ctime = dir->i_mtime = current_time(dir);
36f05cab 2936 inode_inc_iversion(dir);
1da177e4
LT
2937 d_instantiate(dentry, inode);
2938 dget(dentry); /* Extra count - pin the dentry in core */
1da177e4
LT
2939 }
2940 return error;
feda821e
CH
2941out_iput:
2942 iput(inode);
2943 return error;
1da177e4
LT
2944}
2945
60545d0d 2946static int
549c7297 2947shmem_tmpfile(struct user_namespace *mnt_userns, struct inode *dir,
863f144f 2948 struct file *file, umode_t mode)
60545d0d
AV
2949{
2950 struct inode *inode;
2951 int error = -ENOSPC;
2952
2953 inode = shmem_get_inode(dir->i_sb, dir, mode, 0, VM_NORESERVE);
2954 if (inode) {
2955 error = security_inode_init_security(inode, dir,
2956 NULL,
2957 shmem_initxattrs, NULL);
feda821e
CH
2958 if (error && error != -EOPNOTSUPP)
2959 goto out_iput;
2960 error = simple_acl_create(dir, inode);
2961 if (error)
2962 goto out_iput;
863f144f 2963 d_tmpfile(file, inode);
60545d0d 2964 }
863f144f 2965 return finish_open_simple(file, error);
feda821e
CH
2966out_iput:
2967 iput(inode);
2968 return error;
60545d0d
AV
2969}
2970
549c7297
CB
2971static int shmem_mkdir(struct user_namespace *mnt_userns, struct inode *dir,
2972 struct dentry *dentry, umode_t mode)
1da177e4
LT
2973{
2974 int error;
2975
549c7297
CB
2976 if ((error = shmem_mknod(&init_user_ns, dir, dentry,
2977 mode | S_IFDIR, 0)))
1da177e4 2978 return error;
d8c76e6f 2979 inc_nlink(dir);
1da177e4
LT
2980 return 0;
2981}
2982
549c7297
CB
2983static int shmem_create(struct user_namespace *mnt_userns, struct inode *dir,
2984 struct dentry *dentry, umode_t mode, bool excl)
1da177e4 2985{
549c7297 2986 return shmem_mknod(&init_user_ns, dir, dentry, mode | S_IFREG, 0);
1da177e4
LT
2987}
2988
2989/*
2990 * Link a file..
2991 */
2992static int shmem_link(struct dentry *old_dentry, struct inode *dir, struct dentry *dentry)
2993{
75c3cfa8 2994 struct inode *inode = d_inode(old_dentry);
29b00e60 2995 int ret = 0;
1da177e4
LT
2996
2997 /*
2998 * No ordinary (disk based) filesystem counts links as inodes;
2999 * but each new link needs a new dentry, pinning lowmem, and
3000 * tmpfs dentries cannot be pruned until they are unlinked.
1062af92
DW
3001 * But if an O_TMPFILE file is linked into the tmpfs, the
3002 * first link must skip that, to get the accounting right.
1da177e4 3003 */
1062af92 3004 if (inode->i_nlink) {
e809d5f0 3005 ret = shmem_reserve_inode(inode->i_sb, NULL);
1062af92
DW
3006 if (ret)
3007 goto out;
3008 }
1da177e4
LT
3009
3010 dir->i_size += BOGO_DIRENT_SIZE;
078cd827 3011 inode->i_ctime = dir->i_ctime = dir->i_mtime = current_time(inode);
36f05cab 3012 inode_inc_iversion(dir);
d8c76e6f 3013 inc_nlink(inode);
7de9c6ee 3014 ihold(inode); /* New dentry reference */
1da177e4
LT
3015 dget(dentry); /* Extra pinning count for the created dentry */
3016 d_instantiate(dentry, inode);
5b04c689
PE
3017out:
3018 return ret;
1da177e4
LT
3019}
3020
3021static int shmem_unlink(struct inode *dir, struct dentry *dentry)
3022{
75c3cfa8 3023 struct inode *inode = d_inode(dentry);
1da177e4 3024
5b04c689
PE
3025 if (inode->i_nlink > 1 && !S_ISDIR(inode->i_mode))
3026 shmem_free_inode(inode->i_sb);
1da177e4
LT
3027
3028 dir->i_size -= BOGO_DIRENT_SIZE;
078cd827 3029 inode->i_ctime = dir->i_ctime = dir->i_mtime = current_time(inode);
36f05cab 3030 inode_inc_iversion(dir);
9a53c3a7 3031 drop_nlink(inode);
1da177e4
LT
3032 dput(dentry); /* Undo the count from "create" - this does all the work */
3033 return 0;
3034}
3035
3036static int shmem_rmdir(struct inode *dir, struct dentry *dentry)
3037{
3038 if (!simple_empty(dentry))
3039 return -ENOTEMPTY;
3040
75c3cfa8 3041 drop_nlink(d_inode(dentry));
9a53c3a7 3042 drop_nlink(dir);
1da177e4
LT
3043 return shmem_unlink(dir, dentry);
3044}
3045
549c7297
CB
3046static int shmem_whiteout(struct user_namespace *mnt_userns,
3047 struct inode *old_dir, struct dentry *old_dentry)
46fdb794
MS
3048{
3049 struct dentry *whiteout;
3050 int error;
3051
3052 whiteout = d_alloc(old_dentry->d_parent, &old_dentry->d_name);
3053 if (!whiteout)
3054 return -ENOMEM;
3055
549c7297 3056 error = shmem_mknod(&init_user_ns, old_dir, whiteout,
46fdb794
MS
3057 S_IFCHR | WHITEOUT_MODE, WHITEOUT_DEV);
3058 dput(whiteout);
3059 if (error)
3060 return error;
3061
3062 /*
3063 * Cheat and hash the whiteout while the old dentry is still in
3064 * place, instead of playing games with FS_RENAME_DOES_D_MOVE.
3065 *
3066 * d_lookup() will consistently find one of them at this point,
3067 * not sure which one, but that isn't even important.
3068 */
3069 d_rehash(whiteout);
3070 return 0;
3071}
3072
1da177e4
LT
3073/*
3074 * The VFS layer already does all the dentry stuff for rename,
3075 * we just have to decrement the usage count for the target if
3076 * it exists so that the VFS layer correctly free's it when it
3077 * gets overwritten.
3078 */
549c7297
CB
3079static int shmem_rename2(struct user_namespace *mnt_userns,
3080 struct inode *old_dir, struct dentry *old_dentry,
3081 struct inode *new_dir, struct dentry *new_dentry,
3082 unsigned int flags)
1da177e4 3083{
75c3cfa8 3084 struct inode *inode = d_inode(old_dentry);
1da177e4
LT
3085 int they_are_dirs = S_ISDIR(inode->i_mode);
3086
46fdb794 3087 if (flags & ~(RENAME_NOREPLACE | RENAME_EXCHANGE | RENAME_WHITEOUT))
3b69ff51
MS
3088 return -EINVAL;
3089
37456771 3090 if (flags & RENAME_EXCHANGE)
6429e463 3091 return simple_rename_exchange(old_dir, old_dentry, new_dir, new_dentry);
37456771 3092
1da177e4
LT
3093 if (!simple_empty(new_dentry))
3094 return -ENOTEMPTY;
3095
46fdb794
MS
3096 if (flags & RENAME_WHITEOUT) {
3097 int error;
3098
549c7297 3099 error = shmem_whiteout(&init_user_ns, old_dir, old_dentry);
46fdb794
MS
3100 if (error)
3101 return error;
3102 }
3103
75c3cfa8 3104 if (d_really_is_positive(new_dentry)) {
1da177e4 3105 (void) shmem_unlink(new_dir, new_dentry);
b928095b 3106 if (they_are_dirs) {
75c3cfa8 3107 drop_nlink(d_inode(new_dentry));
9a53c3a7 3108 drop_nlink(old_dir);
b928095b 3109 }
1da177e4 3110 } else if (they_are_dirs) {
9a53c3a7 3111 drop_nlink(old_dir);
d8c76e6f 3112 inc_nlink(new_dir);
1da177e4
LT
3113 }
3114
3115 old_dir->i_size -= BOGO_DIRENT_SIZE;
3116 new_dir->i_size += BOGO_DIRENT_SIZE;
3117 old_dir->i_ctime = old_dir->i_mtime =
3118 new_dir->i_ctime = new_dir->i_mtime =
078cd827 3119 inode->i_ctime = current_time(old_dir);
36f05cab
JL
3120 inode_inc_iversion(old_dir);
3121 inode_inc_iversion(new_dir);
1da177e4
LT
3122 return 0;
3123}
3124
549c7297
CB
3125static int shmem_symlink(struct user_namespace *mnt_userns, struct inode *dir,
3126 struct dentry *dentry, const char *symname)
1da177e4
LT
3127{
3128 int error;
3129 int len;
3130 struct inode *inode;
7ad0414b 3131 struct folio *folio;
1da177e4
LT
3132
3133 len = strlen(symname) + 1;
09cbfeaf 3134 if (len > PAGE_SIZE)
1da177e4
LT
3135 return -ENAMETOOLONG;
3136
0825a6f9
JP
3137 inode = shmem_get_inode(dir->i_sb, dir, S_IFLNK | 0777, 0,
3138 VM_NORESERVE);
1da177e4
LT
3139 if (!inode)
3140 return -ENOSPC;
3141
9d8f13ba 3142 error = security_inode_init_security(inode, dir, &dentry->d_name,
6d9d88d0 3143 shmem_initxattrs, NULL);
343c3d7f
MN
3144 if (error && error != -EOPNOTSUPP) {
3145 iput(inode);
3146 return error;
570bc1c2
SS
3147 }
3148
1da177e4 3149 inode->i_size = len-1;
69f07ec9 3150 if (len <= SHORT_SYMLINK_LEN) {
3ed47db3
AV
3151 inode->i_link = kmemdup(symname, len, GFP_KERNEL);
3152 if (!inode->i_link) {
69f07ec9
HD
3153 iput(inode);
3154 return -ENOMEM;
3155 }
3156 inode->i_op = &shmem_short_symlink_operations;
1da177e4 3157 } else {
e8ecde25 3158 inode_nohighmem(inode);
7ad0414b 3159 error = shmem_get_folio(inode, 0, &folio, SGP_WRITE);
1da177e4
LT
3160 if (error) {
3161 iput(inode);
3162 return error;
3163 }
14fcc23f 3164 inode->i_mapping->a_ops = &shmem_aops;
1da177e4 3165 inode->i_op = &shmem_symlink_inode_operations;
7ad0414b
MWO
3166 memcpy(folio_address(folio), symname, len);
3167 folio_mark_uptodate(folio);
3168 folio_mark_dirty(folio);
3169 folio_unlock(folio);
3170 folio_put(folio);
1da177e4 3171 }
1da177e4 3172 dir->i_size += BOGO_DIRENT_SIZE;
078cd827 3173 dir->i_ctime = dir->i_mtime = current_time(dir);
36f05cab 3174 inode_inc_iversion(dir);
1da177e4
LT
3175 d_instantiate(dentry, inode);
3176 dget(dentry);
3177 return 0;
3178}
3179
fceef393 3180static void shmem_put_link(void *arg)
1da177e4 3181{
e4b57722
MWO
3182 folio_mark_accessed(arg);
3183 folio_put(arg);
1da177e4
LT
3184}
3185
6b255391 3186static const char *shmem_get_link(struct dentry *dentry,
fceef393
AV
3187 struct inode *inode,
3188 struct delayed_call *done)
1da177e4 3189{
e4b57722 3190 struct folio *folio = NULL;
6b255391 3191 int error;
e4b57722 3192
6a6c9904 3193 if (!dentry) {
e4b57722
MWO
3194 folio = filemap_get_folio(inode->i_mapping, 0);
3195 if (!folio)
6a6c9904 3196 return ERR_PTR(-ECHILD);
7459c149 3197 if (PageHWPoison(folio_page(folio, 0)) ||
e4b57722
MWO
3198 !folio_test_uptodate(folio)) {
3199 folio_put(folio);
6a6c9904
AV
3200 return ERR_PTR(-ECHILD);
3201 }
3202 } else {
e4b57722 3203 error = shmem_get_folio(inode, 0, &folio, SGP_READ);
6a6c9904
AV
3204 if (error)
3205 return ERR_PTR(error);
e4b57722 3206 if (!folio)
a7605426 3207 return ERR_PTR(-ECHILD);
7459c149 3208 if (PageHWPoison(folio_page(folio, 0))) {
e4b57722
MWO
3209 folio_unlock(folio);
3210 folio_put(folio);
a7605426
YS
3211 return ERR_PTR(-ECHILD);
3212 }
e4b57722 3213 folio_unlock(folio);
6a6c9904 3214 }
e4b57722
MWO
3215 set_delayed_call(done, shmem_put_link, folio);
3216 return folio_address(folio);
1da177e4
LT
3217}
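/*
 * Editorial note: the !dentry branch above is the RCU-walk case, where
 * sleeping is not allowed; returning ERR_PTR(-ECHILD) makes the VFS retry
 * the lookup in ref-walk mode, where shmem_get_folio() may block.
 */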
3218
b09e0fa4 3219#ifdef CONFIG_TMPFS_XATTR
e408e695
TT
3220
3221static int shmem_fileattr_get(struct dentry *dentry, struct fileattr *fa)
3222{
3223 struct shmem_inode_info *info = SHMEM_I(d_inode(dentry));
3224
3225 fileattr_fill_flags(fa, info->fsflags & SHMEM_FL_USER_VISIBLE);
3226
3227 return 0;
3228}
3229
3230static int shmem_fileattr_set(struct user_namespace *mnt_userns,
3231 struct dentry *dentry, struct fileattr *fa)
3232{
3233 struct inode *inode = d_inode(dentry);
3234 struct shmem_inode_info *info = SHMEM_I(inode);
3235
3236 if (fileattr_has_fsx(fa))
3237 return -EOPNOTSUPP;
cb241339
HD
3238 if (fa->flags & ~SHMEM_FL_USER_MODIFIABLE)
3239 return -EOPNOTSUPP;
e408e695
TT
3240
3241 info->fsflags = (info->fsflags & ~SHMEM_FL_USER_MODIFIABLE) |
3242 (fa->flags & SHMEM_FL_USER_MODIFIABLE);
3243
cb241339 3244 shmem_set_inode_flags(inode, info->fsflags);
e408e695 3245 inode->i_ctime = current_time(inode);
36f05cab 3246 inode_inc_iversion(inode);
e408e695
TT
3247 return 0;
3248}
3249
46711810 3250/*
b09e0fa4
EP
3251 * Superblocks without xattr inode operations may get some security.* xattr
3252 * support from the LSM "for free". As soon as we have any other xattrs
39f0247d
AG
3253 * like ACLs, we also need to implement the security.* handlers at
3254 * filesystem level, though.
3255 */
3256
6d9d88d0
JS
3257/*
3258 * Callback for security_inode_init_security() for acquiring xattrs.
3259 */
3260static int shmem_initxattrs(struct inode *inode,
3261 const struct xattr *xattr_array,
3262 void *fs_info)
3263{
3264 struct shmem_inode_info *info = SHMEM_I(inode);
3265 const struct xattr *xattr;
38f38657 3266 struct simple_xattr *new_xattr;
6d9d88d0
JS
3267 size_t len;
3268
3269 for (xattr = xattr_array; xattr->name != NULL; xattr++) {
38f38657 3270 new_xattr = simple_xattr_alloc(xattr->value, xattr->value_len);
6d9d88d0
JS
3271 if (!new_xattr)
3272 return -ENOMEM;
3273
3274 len = strlen(xattr->name) + 1;
3275 new_xattr->name = kmalloc(XATTR_SECURITY_PREFIX_LEN + len,
3276 GFP_KERNEL);
3277 if (!new_xattr->name) {
3bef735a 3278 kvfree(new_xattr);
6d9d88d0
JS
3279 return -ENOMEM;
3280 }
3281
3282 memcpy(new_xattr->name, XATTR_SECURITY_PREFIX,
3283 XATTR_SECURITY_PREFIX_LEN);
3284 memcpy(new_xattr->name + XATTR_SECURITY_PREFIX_LEN,
3285 xattr->name, len);
3286
3b4c7bc0 3287 simple_xattr_add(&info->xattrs, new_xattr);
6d9d88d0
JS
3288 }
3289
3290 return 0;
3291}
3292
aa7c5241 3293static int shmem_xattr_handler_get(const struct xattr_handler *handler,
b296821a
AV
3294 struct dentry *unused, struct inode *inode,
3295 const char *name, void *buffer, size_t size)
b09e0fa4 3296{
b296821a 3297 struct shmem_inode_info *info = SHMEM_I(inode);
b09e0fa4 3298
aa7c5241 3299 name = xattr_full_name(handler, name);
38f38657 3300 return simple_xattr_get(&info->xattrs, name, buffer, size);
b09e0fa4
EP
3301}
3302
aa7c5241 3303static int shmem_xattr_handler_set(const struct xattr_handler *handler,
e65ce2a5 3304 struct user_namespace *mnt_userns,
59301226
AV
3305 struct dentry *unused, struct inode *inode,
3306 const char *name, const void *value,
3307 size_t size, int flags)
b09e0fa4 3308{
59301226 3309 struct shmem_inode_info *info = SHMEM_I(inode);
36f05cab 3310 int err;
b09e0fa4 3311
aa7c5241 3312 name = xattr_full_name(handler, name);
36f05cab
JL
3313 err = simple_xattr_set(&info->xattrs, name, value, size, flags, NULL);
3314 if (!err) {
3315 inode->i_ctime = current_time(inode);
3316 inode_inc_iversion(inode);
3317 }
3318 return err;
b09e0fa4
EP
3319}
3320
aa7c5241
AG
3321static const struct xattr_handler shmem_security_xattr_handler = {
3322 .prefix = XATTR_SECURITY_PREFIX,
3323 .get = shmem_xattr_handler_get,
3324 .set = shmem_xattr_handler_set,
3325};
b09e0fa4 3326
aa7c5241
AG
3327static const struct xattr_handler shmem_trusted_xattr_handler = {
3328 .prefix = XATTR_TRUSTED_PREFIX,
3329 .get = shmem_xattr_handler_get,
3330 .set = shmem_xattr_handler_set,
3331};
b09e0fa4 3332
aa7c5241
AG
3333static const struct xattr_handler *shmem_xattr_handlers[] = {
3334#ifdef CONFIG_TMPFS_POSIX_ACL
3335 &posix_acl_access_xattr_handler,
3336 &posix_acl_default_xattr_handler,
3337#endif
3338 &shmem_security_xattr_handler,
3339 &shmem_trusted_xattr_handler,
3340 NULL
3341};
b09e0fa4
EP
3342
3343static ssize_t shmem_listxattr(struct dentry *dentry, char *buffer, size_t size)
3344{
75c3cfa8 3345 struct shmem_inode_info *info = SHMEM_I(d_inode(dentry));
786534b9 3346 return simple_xattr_list(d_inode(dentry), &info->xattrs, buffer, size);
b09e0fa4
EP
3347}
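/*
 * Userspace sketch (separate program, not kernel code): the handlers
 * registered above make trusted.* (and, via the LSM, security.*)
 * xattrs reachable through the normal xattr syscalls on tmpfs files.
 * Path and attribute name are illustrative; trusted.* requires
 * CAP_SYS_ADMIN.
 */
#include <sys/xattr.h>
#include <stdio.h>

int example_tag_file(const char *path)
{
	char value[64];
	ssize_t len;

	if (setxattr(path, "trusted.example", "hello", 5, 0) != 0)
		return -1;
	len = getxattr(path, "trusted.example", value, sizeof(value));
	if (len > 0)
		printf("read back %zd bytes\n", len);
	return 0;
}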
3348#endif /* CONFIG_TMPFS_XATTR */
3349
69f07ec9 3350static const struct inode_operations shmem_short_symlink_operations = {
f7cd16a5 3351 .getattr = shmem_getattr,
6b255391 3352 .get_link = simple_get_link,
b09e0fa4 3353#ifdef CONFIG_TMPFS_XATTR
b09e0fa4 3354 .listxattr = shmem_listxattr,
b09e0fa4
EP
3355#endif
3356};
3357
3358static const struct inode_operations shmem_symlink_inode_operations = {
f7cd16a5 3359 .getattr = shmem_getattr,
6b255391 3360 .get_link = shmem_get_link,
b09e0fa4 3361#ifdef CONFIG_TMPFS_XATTR
b09e0fa4 3362 .listxattr = shmem_listxattr,
39f0247d 3363#endif
b09e0fa4 3364};
39f0247d 3365
91828a40
DG
3366static struct dentry *shmem_get_parent(struct dentry *child)
3367{
3368 return ERR_PTR(-ESTALE);
3369}
3370
3371static int shmem_match(struct inode *ino, void *vfh)
3372{
3373 __u32 *fh = vfh;
3374 __u64 inum = fh[2];
3375 inum = (inum << 32) | fh[1];
3376 return ino->i_ino == inum && fh[0] == ino->i_generation;
3377}
3378
12ba780d
AG
3379/* Find any alias of inode, but prefer a hashed alias */
3380static struct dentry *shmem_find_alias(struct inode *inode)
3381{
3382 struct dentry *alias = d_find_alias(inode);
3383
3384 return alias ?: d_find_any_alias(inode);
3385}
3386
3387
480b116c
CH
3388static struct dentry *shmem_fh_to_dentry(struct super_block *sb,
3389 struct fid *fid, int fh_len, int fh_type)
91828a40 3390{
91828a40 3391 struct inode *inode;
480b116c 3392 struct dentry *dentry = NULL;
35c2a7f4 3393 u64 inum;
480b116c
CH
3394
3395 if (fh_len < 3)
3396 return NULL;
91828a40 3397
35c2a7f4
HD
3398 inum = fid->raw[2];
3399 inum = (inum << 32) | fid->raw[1];
3400
480b116c
CH
3401 inode = ilookup5(sb, (unsigned long)(inum + fid->raw[0]),
3402 shmem_match, fid->raw);
91828a40 3403 if (inode) {
12ba780d 3404 dentry = shmem_find_alias(inode);
91828a40
DG
3405 iput(inode);
3406 }
3407
480b116c 3408 return dentry;
91828a40
DG
3409}
3410
b0b0382b
AV
3411static int shmem_encode_fh(struct inode *inode, __u32 *fh, int *len,
3412 struct inode *parent)
91828a40 3413{
5fe0c237
AK
3414 if (*len < 3) {
3415 *len = 3;
94e07a75 3416 return FILEID_INVALID;
5fe0c237 3417 }
91828a40 3418
1d3382cb 3419 if (inode_unhashed(inode)) {
91828a40
DG
3420 /* Unfortunately insert_inode_hash is not idempotent,
3421 * so as we hash inodes here rather than at creation
3422 * time, we need a lock to ensure we only try
3423 * to do it once
3424 */
3425 static DEFINE_SPINLOCK(lock);
3426 spin_lock(&lock);
1d3382cb 3427 if (inode_unhashed(inode))
91828a40
DG
3428 __insert_inode_hash(inode,
3429 inode->i_ino + inode->i_generation);
3430 spin_unlock(&lock);
3431 }
3432
3433 fh[0] = inode->i_generation;
3434 fh[1] = inode->i_ino;
3435 fh[2] = ((__u64)inode->i_ino) >> 32;
3436
3437 *len = 3;
3438 return 1;
3439}
3440
39655164 3441static const struct export_operations shmem_export_ops = {
91828a40 3442 .get_parent = shmem_get_parent,
91828a40 3443 .encode_fh = shmem_encode_fh,
480b116c 3444 .fh_to_dentry = shmem_fh_to_dentry,
91828a40
DG
3445};
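/*
 * Hedged illustration of the 3-word file handle layout produced by
 * shmem_encode_fh() above (and matched by shmem_match()): generation
 * in word 0, inode number split across words 1 and 2.  Nothing in
 * shmem needs such a helper; it only spells out the packing.
 */
static void example_decode_fh(const __u32 *fh, u64 *ino, u32 *gen)
{
	*gen = fh[0];
	*ino = ((u64)fh[2] << 32) | fh[1];
}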
3446
626c3920
AV
3447enum shmem_param {
3448 Opt_gid,
3449 Opt_huge,
3450 Opt_mode,
3451 Opt_mpol,
3452 Opt_nr_blocks,
3453 Opt_nr_inodes,
3454 Opt_size,
3455 Opt_uid,
ea3271f7
CD
3456 Opt_inode32,
3457 Opt_inode64,
626c3920
AV
3458};
3459
5eede625 3460static const struct constant_table shmem_param_enums_huge[] = {
2710c957
AV
3461 {"never", SHMEM_HUGE_NEVER },
3462 {"always", SHMEM_HUGE_ALWAYS },
3463 {"within_size", SHMEM_HUGE_WITHIN_SIZE },
3464 {"advise", SHMEM_HUGE_ADVISE },
2710c957
AV
3465 {}
3466};
3467
d7167b14 3468const struct fs_parameter_spec shmem_fs_parameters[] = {
626c3920 3469 fsparam_u32 ("gid", Opt_gid),
2710c957 3470 fsparam_enum ("huge", Opt_huge, shmem_param_enums_huge),
626c3920
AV
3471 fsparam_u32oct("mode", Opt_mode),
3472 fsparam_string("mpol", Opt_mpol),
3473 fsparam_string("nr_blocks", Opt_nr_blocks),
3474 fsparam_string("nr_inodes", Opt_nr_inodes),
3475 fsparam_string("size", Opt_size),
3476 fsparam_u32 ("uid", Opt_uid),
ea3271f7
CD
3477 fsparam_flag ("inode32", Opt_inode32),
3478 fsparam_flag ("inode64", Opt_inode64),
626c3920
AV
3479 {}
3480};
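/*
 * Userspace sketch (separate program, not kernel code): exercising the
 * parameter table above through mount(2).  Mount point and values are
 * illustrative; "size=50%" is interpreted by shmem_parse_one() below
 * as a percentage of total RAM.
 */
#include <sys/mount.h>

int example_mount_tmpfs(void)
{
	return mount("tmpfs", "/mnt/scratch", "tmpfs", 0,
		     "size=50%,nr_inodes=100000,mode=1777,huge=within_size");
}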
3481
f3235626 3482static int shmem_parse_one(struct fs_context *fc, struct fs_parameter *param)
1da177e4 3483{
f3235626 3484 struct shmem_options *ctx = fc->fs_private;
626c3920
AV
3485 struct fs_parse_result result;
3486 unsigned long long size;
e04dc423 3487 char *rest;
626c3920
AV
3488 int opt;
3489
d7167b14 3490 opt = fs_parse(fc, shmem_fs_parameters, param, &result);
f3235626 3491 if (opt < 0)
626c3920 3492 return opt;
1da177e4 3493
626c3920
AV
3494 switch (opt) {
3495 case Opt_size:
3496 size = memparse(param->string, &rest);
e04dc423
AV
3497 if (*rest == '%') {
3498 size <<= PAGE_SHIFT;
3499 size *= totalram_pages();
3500 do_div(size, 100);
3501 rest++;
3502 }
3503 if (*rest)
626c3920 3504 goto bad_value;
e04dc423
AV
3505 ctx->blocks = DIV_ROUND_UP(size, PAGE_SIZE);
3506 ctx->seen |= SHMEM_SEEN_BLOCKS;
626c3920
AV
3507 break;
3508 case Opt_nr_blocks:
3509 ctx->blocks = memparse(param->string, &rest);
0c98c8e1 3510 if (*rest || ctx->blocks > S64_MAX)
626c3920 3511 goto bad_value;
e04dc423 3512 ctx->seen |= SHMEM_SEEN_BLOCKS;
626c3920
AV
3513 break;
3514 case Opt_nr_inodes:
3515 ctx->inodes = memparse(param->string, &rest);
e04dc423 3516 if (*rest)
626c3920 3517 goto bad_value;
e04dc423 3518 ctx->seen |= SHMEM_SEEN_INODES;
626c3920
AV
3519 break;
3520 case Opt_mode:
3521 ctx->mode = result.uint_32 & 07777;
3522 break;
3523 case Opt_uid:
3524 ctx->uid = make_kuid(current_user_ns(), result.uint_32);
e04dc423 3525 if (!uid_valid(ctx->uid))
626c3920
AV
3526 goto bad_value;
3527 break;
3528 case Opt_gid:
3529 ctx->gid = make_kgid(current_user_ns(), result.uint_32);
e04dc423 3530 if (!gid_valid(ctx->gid))
626c3920
AV
3531 goto bad_value;
3532 break;
3533 case Opt_huge:
3534 ctx->huge = result.uint_32;
3535 if (ctx->huge != SHMEM_HUGE_NEVER &&
396bcc52 3536 !(IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) &&
626c3920
AV
3537 has_transparent_hugepage()))
3538 goto unsupported_parameter;
e04dc423 3539 ctx->seen |= SHMEM_SEEN_HUGE;
626c3920
AV
3540 break;
3541 case Opt_mpol:
3542 if (IS_ENABLED(CONFIG_NUMA)) {
3543 mpol_put(ctx->mpol);
3544 ctx->mpol = NULL;
3545 if (mpol_parse_str(param->string, &ctx->mpol))
3546 goto bad_value;
3547 break;
3548 }
3549 goto unsupported_parameter;
ea3271f7
CD
3550 case Opt_inode32:
3551 ctx->full_inums = false;
3552 ctx->seen |= SHMEM_SEEN_INUMS;
3553 break;
3554 case Opt_inode64:
3555 if (sizeof(ino_t) < 8) {
3556 return invalfc(fc,
3557 "Cannot use inode64 with <64bit inums in kernel\n");
3558 }
3559 ctx->full_inums = true;
3560 ctx->seen |= SHMEM_SEEN_INUMS;
3561 break;
e04dc423
AV
3562 }
3563 return 0;
3564
626c3920 3565unsupported_parameter:
f35aa2bc 3566 return invalfc(fc, "Unsupported parameter '%s'", param->key);
626c3920 3567bad_value:
f35aa2bc 3568 return invalfc(fc, "Bad value for '%s'", param->key);
e04dc423
AV
3569}
3570
f3235626 3571static int shmem_parse_options(struct fs_context *fc, void *data)
e04dc423 3572{
f3235626
DH
3573 char *options = data;
3574
33f37c64
AV
3575 if (options) {
3576 int err = security_sb_eat_lsm_opts(options, &fc->security);
3577 if (err)
3578 return err;
3579 }
3580
b00dc3ad 3581 while (options != NULL) {
626c3920 3582 char *this_char = options;
b00dc3ad
HD
3583 for (;;) {
3584 /*
3585 * NUL-terminate this option: unfortunately,
3586 * mount options form a comma-separated list,
3587 * but mpol's nodelist may also contain commas.
3588 */
3589 options = strchr(options, ',');
3590 if (options == NULL)
3591 break;
3592 options++;
3593 if (!isdigit(*options)) {
3594 options[-1] = '\0';
3595 break;
3596 }
3597 }
626c3920 3598 if (*this_char) {
68d68ff6 3599 char *value = strchr(this_char, '=');
f3235626 3600 size_t len = 0;
626c3920
AV
3601 int err;
3602
3603 if (value) {
3604 *value++ = '\0';
f3235626 3605 len = strlen(value);
626c3920 3606 }
f3235626
DH
3607 err = vfs_parse_fs_string(fc, this_char, value, len);
3608 if (err < 0)
3609 return err;
1da177e4 3610 }
1da177e4
LT
3611 }
3612 return 0;
1da177e4
LT
3613}
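/*
 * Hedged worked example of the comma handling above: given the
 * monolithic string "size=1g,mpol=bind:0,2,uid=1000", the loop only
 * splits at a comma whose next character is not a digit, so
 * "mpol=bind:0,2" survives intact as one parameter while ",uid=1000"
 * (comma followed by a letter) starts the next one.
 */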
3614
f3235626
DH
3615/*
3616 * Reconfigure a shmem filesystem.
3617 *
3618 * Note that we disallow change from limited->unlimited blocks/inodes while any
3619 * are in use; but we must separately disallow unlimited->limited, because in
3620 * that case we have no record of how much is already in use.
3621 */
3622static int shmem_reconfigure(struct fs_context *fc)
1da177e4 3623{
f3235626
DH
3624 struct shmem_options *ctx = fc->fs_private;
3625 struct shmem_sb_info *sbinfo = SHMEM_SB(fc->root->d_sb);
0edd73b3 3626 unsigned long inodes;
bf11b9a8 3627 struct mempolicy *mpol = NULL;
f3235626 3628 const char *err;
1da177e4 3629
bf11b9a8 3630 raw_spin_lock(&sbinfo->stat_lock);
0edd73b3 3631 inodes = sbinfo->max_inodes - sbinfo->free_inodes;
0c98c8e1 3632
f3235626
DH
3633 if ((ctx->seen & SHMEM_SEEN_BLOCKS) && ctx->blocks) {
3634 if (!sbinfo->max_blocks) {
3635 err = "Cannot retroactively limit size";
0b5071dd 3636 goto out;
f3235626 3637 }
0b5071dd 3638 if (percpu_counter_compare(&sbinfo->used_blocks,
f3235626
DH
3639 ctx->blocks) > 0) {
3640 err = "Too small a size for current use";
0b5071dd 3641 goto out;
f3235626 3642 }
0b5071dd 3643 }
f3235626
DH
3644 if ((ctx->seen & SHMEM_SEEN_INODES) && ctx->inodes) {
3645 if (!sbinfo->max_inodes) {
3646 err = "Cannot retroactively limit inodes";
0b5071dd 3647 goto out;
f3235626
DH
3648 }
3649 if (ctx->inodes < inodes) {
3650 err = "Too few inodes for current use";
0b5071dd 3651 goto out;
f3235626 3652 }
0b5071dd 3653 }
0edd73b3 3654
ea3271f7
CD
3655 if ((ctx->seen & SHMEM_SEEN_INUMS) && !ctx->full_inums &&
3656 sbinfo->next_ino > UINT_MAX) {
3657 err = "Current inum too high to switch to 32-bit inums";
3658 goto out;
3659 }
3660
f3235626
DH
3661 if (ctx->seen & SHMEM_SEEN_HUGE)
3662 sbinfo->huge = ctx->huge;
ea3271f7
CD
3663 if (ctx->seen & SHMEM_SEEN_INUMS)
3664 sbinfo->full_inums = ctx->full_inums;
f3235626
DH
3665 if (ctx->seen & SHMEM_SEEN_BLOCKS)
3666 sbinfo->max_blocks = ctx->blocks;
3667 if (ctx->seen & SHMEM_SEEN_INODES) {
3668 sbinfo->max_inodes = ctx->inodes;
3669 sbinfo->free_inodes = ctx->inodes - inodes;
0b5071dd 3670 }
71fe804b 3671
5f00110f
GT
3672 /*
3673 * Preserve previous mempolicy unless mpol remount option was specified.
3674 */
f3235626 3675 if (ctx->mpol) {
bf11b9a8 3676 mpol = sbinfo->mpol;
f3235626
DH
3677 sbinfo->mpol = ctx->mpol; /* transfers initial ref */
3678 ctx->mpol = NULL;
5f00110f 3679 }
bf11b9a8
SAS
3680 raw_spin_unlock(&sbinfo->stat_lock);
3681 mpol_put(mpol);
f3235626 3682 return 0;
0edd73b3 3683out:
bf11b9a8 3684 raw_spin_unlock(&sbinfo->stat_lock);
f35aa2bc 3685 return invalfc(fc, "%s", err);
1da177e4 3686}
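/*
 * Userspace sketch (separate program, not kernel code): a remount of a
 * live tmpfs ends up in shmem_reconfigure() above.  Growing a limited
 * instance is fine; asking for a limit on an instance mounted without
 * one fails with "Cannot retroactively limit size", as explained in
 * the comment above.  Path and size are illustrative.
 */
#include <sys/mount.h>

int example_grow_tmpfs(void)
{
	return mount(NULL, "/mnt/scratch", NULL, MS_REMOUNT, "size=2g");
}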
680d794b 3687
34c80b1d 3688static int shmem_show_options(struct seq_file *seq, struct dentry *root)
680d794b 3689{
34c80b1d 3690 struct shmem_sb_info *sbinfo = SHMEM_SB(root->d_sb);
680d794b 3691
3692 if (sbinfo->max_blocks != shmem_default_max_blocks())
3693 seq_printf(seq, ",size=%luk",
09cbfeaf 3694 sbinfo->max_blocks << (PAGE_SHIFT - 10));
680d794b 3695 if (sbinfo->max_inodes != shmem_default_max_inodes())
3696 seq_printf(seq, ",nr_inodes=%lu", sbinfo->max_inodes);
0825a6f9 3697 if (sbinfo->mode != (0777 | S_ISVTX))
09208d15 3698 seq_printf(seq, ",mode=%03ho", sbinfo->mode);
8751e039
EB
3699 if (!uid_eq(sbinfo->uid, GLOBAL_ROOT_UID))
3700 seq_printf(seq, ",uid=%u",
3701 from_kuid_munged(&init_user_ns, sbinfo->uid));
3702 if (!gid_eq(sbinfo->gid, GLOBAL_ROOT_GID))
3703 seq_printf(seq, ",gid=%u",
3704 from_kgid_munged(&init_user_ns, sbinfo->gid));
ea3271f7
CD
3705
3706 /*
3707 * Showing inode{64,32} might be useful even if it's the system default,
3708 * since then people don't have to resort to checking both here and
3709 * /proc/config.gz to confirm 64-bit inums were successfully applied
3710 * (which may not even exist if IKCONFIG_PROC isn't enabled).
3711 *
3712 * We hide it when inode64 isn't the default and we are using 32-bit
3713 * inodes, since that probably just means the feature isn't even under
3714 * consideration.
3715 *
3716 * As such:
3717 *
3718  *                    +-----------------+-----------------+
3719  *                    | TMPFS_INODE64=y | TMPFS_INODE64=n |
3720  * +------------------+-----------------+-----------------+
3721  * | full_inums=true  | show            | show            |
3722  * | full_inums=false | show            | hide            |
3723  * +------------------+-----------------+-----------------+
3724 *
3725 */
3726 if (IS_ENABLED(CONFIG_TMPFS_INODE64) || sbinfo->full_inums)
3727 seq_printf(seq, ",inode%d", (sbinfo->full_inums ? 64 : 32));
396bcc52 3728#ifdef CONFIG_TRANSPARENT_HUGEPAGE
5a6e75f8
KS
3729 /* Rightly or wrongly, show huge mount option unmasked by shmem_huge */
3730 if (sbinfo->huge)
3731 seq_printf(seq, ",huge=%s", shmem_format_huge(sbinfo->huge));
3732#endif
71fe804b 3733 shmem_show_mpol(seq, sbinfo->mpol);
680d794b 3734 return 0;
3735}
9183df25 3736
680d794b 3737#endif /* CONFIG_TMPFS */
1da177e4
LT
3738
3739static void shmem_put_super(struct super_block *sb)
3740{
602586a8
HD
3741 struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
3742
e809d5f0 3743 free_percpu(sbinfo->ino_batch);
602586a8 3744 percpu_counter_destroy(&sbinfo->used_blocks);
49cd0a5c 3745 mpol_put(sbinfo->mpol);
602586a8 3746 kfree(sbinfo);
1da177e4
LT
3747 sb->s_fs_info = NULL;
3748}
3749
f3235626 3750static int shmem_fill_super(struct super_block *sb, struct fs_context *fc)
1da177e4 3751{
f3235626 3752 struct shmem_options *ctx = fc->fs_private;
1da177e4 3753 struct inode *inode;
0edd73b3 3754 struct shmem_sb_info *sbinfo;
680d794b 3755
3756 /* Round up to L1_CACHE_BYTES to resist false sharing */
425fbf04 3757 sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info),
680d794b 3758 L1_CACHE_BYTES), GFP_KERNEL);
3759 if (!sbinfo)
3760 return -ENOMEM;
3761
680d794b 3762 sb->s_fs_info = sbinfo;
1da177e4 3763
0edd73b3 3764#ifdef CONFIG_TMPFS
1da177e4
LT
3765 /*
3766 * Per default we only allow half of the physical ram per
3767 * tmpfs instance, limiting inodes to one per page of lowmem;
3768 * but the internal instance is left unlimited.
3769 */
1751e8a6 3770 if (!(sb->s_flags & SB_KERNMOUNT)) {
f3235626
DH
3771 if (!(ctx->seen & SHMEM_SEEN_BLOCKS))
3772 ctx->blocks = shmem_default_max_blocks();
3773 if (!(ctx->seen & SHMEM_SEEN_INODES))
3774 ctx->inodes = shmem_default_max_inodes();
ea3271f7
CD
3775 if (!(ctx->seen & SHMEM_SEEN_INUMS))
3776 ctx->full_inums = IS_ENABLED(CONFIG_TMPFS_INODE64);
ca4e0519 3777 } else {
1751e8a6 3778 sb->s_flags |= SB_NOUSER;
1da177e4 3779 }
91828a40 3780 sb->s_export_op = &shmem_export_ops;
36f05cab 3781 sb->s_flags |= SB_NOSEC | SB_I_VERSION;
1da177e4 3782#else
1751e8a6 3783 sb->s_flags |= SB_NOUSER;
1da177e4 3784#endif
f3235626
DH
3785 sbinfo->max_blocks = ctx->blocks;
3786 sbinfo->free_inodes = sbinfo->max_inodes = ctx->inodes;
e809d5f0
CD
3787 if (sb->s_flags & SB_KERNMOUNT) {
3788 sbinfo->ino_batch = alloc_percpu(ino_t);
3789 if (!sbinfo->ino_batch)
3790 goto failed;
3791 }
f3235626
DH
3792 sbinfo->uid = ctx->uid;
3793 sbinfo->gid = ctx->gid;
ea3271f7 3794 sbinfo->full_inums = ctx->full_inums;
f3235626
DH
3795 sbinfo->mode = ctx->mode;
3796 sbinfo->huge = ctx->huge;
3797 sbinfo->mpol = ctx->mpol;
3798 ctx->mpol = NULL;
1da177e4 3799
bf11b9a8 3800 raw_spin_lock_init(&sbinfo->stat_lock);
908c7f19 3801 if (percpu_counter_init(&sbinfo->used_blocks, 0, GFP_KERNEL))
602586a8 3802 goto failed;
779750d2
KS
3803 spin_lock_init(&sbinfo->shrinklist_lock);
3804 INIT_LIST_HEAD(&sbinfo->shrinklist);
0edd73b3 3805
285b2c4f 3806 sb->s_maxbytes = MAX_LFS_FILESIZE;
09cbfeaf
KS
3807 sb->s_blocksize = PAGE_SIZE;
3808 sb->s_blocksize_bits = PAGE_SHIFT;
1da177e4
LT
3809 sb->s_magic = TMPFS_MAGIC;
3810 sb->s_op = &shmem_ops;
cfd95a9c 3811 sb->s_time_gran = 1;
b09e0fa4 3812#ifdef CONFIG_TMPFS_XATTR
39f0247d 3813 sb->s_xattr = shmem_xattr_handlers;
b09e0fa4
EP
3814#endif
3815#ifdef CONFIG_TMPFS_POSIX_ACL
1751e8a6 3816 sb->s_flags |= SB_POSIXACL;
39f0247d 3817#endif
2b4db796 3818 uuid_gen(&sb->s_uuid);
0edd73b3 3819
454abafe 3820 inode = shmem_get_inode(sb, NULL, S_IFDIR | sbinfo->mode, 0, VM_NORESERVE);
1da177e4
LT
3821 if (!inode)
3822 goto failed;
680d794b 3823 inode->i_uid = sbinfo->uid;
3824 inode->i_gid = sbinfo->gid;
318ceed0
AV
3825 sb->s_root = d_make_root(inode);
3826 if (!sb->s_root)
48fde701 3827 goto failed;
1da177e4
LT
3828 return 0;
3829
1da177e4
LT
3830failed:
3831 shmem_put_super(sb);
f2b346e4 3832 return -ENOMEM;
1da177e4
LT
3833}
3834
f3235626
DH
3835static int shmem_get_tree(struct fs_context *fc)
3836{
3837 return get_tree_nodev(fc, shmem_fill_super);
3838}
3839
3840static void shmem_free_fc(struct fs_context *fc)
3841{
3842 struct shmem_options *ctx = fc->fs_private;
3843
3844 if (ctx) {
3845 mpol_put(ctx->mpol);
3846 kfree(ctx);
3847 }
3848}
3849
3850static const struct fs_context_operations shmem_fs_context_ops = {
3851 .free = shmem_free_fc,
3852 .get_tree = shmem_get_tree,
3853#ifdef CONFIG_TMPFS
3854 .parse_monolithic = shmem_parse_options,
3855 .parse_param = shmem_parse_one,
3856 .reconfigure = shmem_reconfigure,
3857#endif
3858};
3859
fcc234f8 3860static struct kmem_cache *shmem_inode_cachep;
1da177e4
LT
3861
3862static struct inode *shmem_alloc_inode(struct super_block *sb)
3863{
41ffe5d5 3864 struct shmem_inode_info *info;
fd60b288 3865 info = alloc_inode_sb(sb, shmem_inode_cachep, GFP_KERNEL);
41ffe5d5 3866 if (!info)
1da177e4 3867 return NULL;
41ffe5d5 3868 return &info->vfs_inode;
1da177e4
LT
3869}
3870
74b1da56 3871static void shmem_free_in_core_inode(struct inode *inode)
fa0d7e3d 3872{
84e710da
AV
3873 if (S_ISLNK(inode->i_mode))
3874 kfree(inode->i_link);
fa0d7e3d
NP
3875 kmem_cache_free(shmem_inode_cachep, SHMEM_I(inode));
3876}
3877
1da177e4
LT
3878static void shmem_destroy_inode(struct inode *inode)
3879{
09208d15 3880 if (S_ISREG(inode->i_mode))
1da177e4 3881 mpol_free_shared_policy(&SHMEM_I(inode)->policy);
1da177e4
LT
3882}
3883
41ffe5d5 3884static void shmem_init_inode(void *foo)
1da177e4 3885{
41ffe5d5
HD
3886 struct shmem_inode_info *info = foo;
3887 inode_init_once(&info->vfs_inode);
1da177e4
LT
3888}
3889
9a8ec03e 3890static void shmem_init_inodecache(void)
1da177e4
LT
3891{
3892 shmem_inode_cachep = kmem_cache_create("shmem_inode_cache",
3893 sizeof(struct shmem_inode_info),
5d097056 3894 0, SLAB_PANIC|SLAB_ACCOUNT, shmem_init_inode);
1da177e4
LT
3895}
3896
41ffe5d5 3897static void shmem_destroy_inodecache(void)
1da177e4 3898{
1a1d92c1 3899 kmem_cache_destroy(shmem_inode_cachep);
1da177e4
LT
3900}
3901
a7605426
YS
3902/* Keep the page in page cache instead of truncating it */
3903static int shmem_error_remove_page(struct address_space *mapping,
3904 struct page *page)
3905{
3906 return 0;
3907}
3908
30e6a51d 3909const struct address_space_operations shmem_aops = {
1da177e4 3910 .writepage = shmem_writepage,
46de8b97 3911 .dirty_folio = noop_dirty_folio,
1da177e4 3912#ifdef CONFIG_TMPFS
800d15a5
NP
3913 .write_begin = shmem_write_begin,
3914 .write_end = shmem_write_end,
1da177e4 3915#endif
1c93923c 3916#ifdef CONFIG_MIGRATION
54184650 3917 .migrate_folio = migrate_folio,
1c93923c 3918#endif
a7605426 3919 .error_remove_page = shmem_error_remove_page,
1da177e4 3920};
30e6a51d 3921EXPORT_SYMBOL(shmem_aops);
1da177e4 3922
15ad7cdc 3923static const struct file_operations shmem_file_operations = {
1da177e4 3924 .mmap = shmem_mmap,
a5454f95 3925 .open = generic_file_open,
c01d5b30 3926 .get_unmapped_area = shmem_get_unmapped_area,
1da177e4 3927#ifdef CONFIG_TMPFS
220f2ac9 3928 .llseek = shmem_file_llseek,
2ba5bbed 3929 .read_iter = shmem_file_read_iter,
8174202b 3930 .write_iter = generic_file_write_iter,
1b061d92 3931 .fsync = noop_fsync,
82c156f8 3932 .splice_read = generic_file_splice_read,
f6cb85d0 3933 .splice_write = iter_file_splice_write,
83e4fa9c 3934 .fallocate = shmem_fallocate,
1da177e4
LT
3935#endif
3936};
3937
92e1d5be 3938static const struct inode_operations shmem_inode_operations = {
44a30220 3939 .getattr = shmem_getattr,
94c1e62d 3940 .setattr = shmem_setattr,
b09e0fa4 3941#ifdef CONFIG_TMPFS_XATTR
b09e0fa4 3942 .listxattr = shmem_listxattr,
feda821e 3943 .set_acl = simple_set_acl,
e408e695
TT
3944 .fileattr_get = shmem_fileattr_get,
3945 .fileattr_set = shmem_fileattr_set,
b09e0fa4 3946#endif
1da177e4
LT
3947};
3948
92e1d5be 3949static const struct inode_operations shmem_dir_inode_operations = {
1da177e4 3950#ifdef CONFIG_TMPFS
f7cd16a5 3951 .getattr = shmem_getattr,
1da177e4
LT
3952 .create = shmem_create,
3953 .lookup = simple_lookup,
3954 .link = shmem_link,
3955 .unlink = shmem_unlink,
3956 .symlink = shmem_symlink,
3957 .mkdir = shmem_mkdir,
3958 .rmdir = shmem_rmdir,
3959 .mknod = shmem_mknod,
2773bf00 3960 .rename = shmem_rename2,
60545d0d 3961 .tmpfile = shmem_tmpfile,
1da177e4 3962#endif
b09e0fa4 3963#ifdef CONFIG_TMPFS_XATTR
b09e0fa4 3964 .listxattr = shmem_listxattr,
e408e695
TT
3965 .fileattr_get = shmem_fileattr_get,
3966 .fileattr_set = shmem_fileattr_set,
b09e0fa4 3967#endif
39f0247d 3968#ifdef CONFIG_TMPFS_POSIX_ACL
94c1e62d 3969 .setattr = shmem_setattr,
feda821e 3970 .set_acl = simple_set_acl,
39f0247d
AG
3971#endif
3972};
3973
92e1d5be 3974static const struct inode_operations shmem_special_inode_operations = {
f7cd16a5 3975 .getattr = shmem_getattr,
b09e0fa4 3976#ifdef CONFIG_TMPFS_XATTR
b09e0fa4 3977 .listxattr = shmem_listxattr,
b09e0fa4 3978#endif
39f0247d 3979#ifdef CONFIG_TMPFS_POSIX_ACL
94c1e62d 3980 .setattr = shmem_setattr,
feda821e 3981 .set_acl = simple_set_acl,
39f0247d 3982#endif
1da177e4
LT
3983};
3984
759b9775 3985static const struct super_operations shmem_ops = {
1da177e4 3986 .alloc_inode = shmem_alloc_inode,
74b1da56 3987 .free_inode = shmem_free_in_core_inode,
1da177e4
LT
3988 .destroy_inode = shmem_destroy_inode,
3989#ifdef CONFIG_TMPFS
3990 .statfs = shmem_statfs,
680d794b 3991 .show_options = shmem_show_options,
1da177e4 3992#endif
1f895f75 3993 .evict_inode = shmem_evict_inode,
1da177e4
LT
3994 .drop_inode = generic_delete_inode,
3995 .put_super = shmem_put_super,
396bcc52 3996#ifdef CONFIG_TRANSPARENT_HUGEPAGE
779750d2
KS
3997 .nr_cached_objects = shmem_unused_huge_count,
3998 .free_cached_objects = shmem_unused_huge_scan,
3999#endif
1da177e4
LT
4000};
4001
f0f37e2f 4002static const struct vm_operations_struct shmem_vm_ops = {
54cb8821 4003 .fault = shmem_fault,
d7c17551 4004 .map_pages = filemap_map_pages,
1da177e4
LT
4005#ifdef CONFIG_NUMA
4006 .set_policy = shmem_set_policy,
4007 .get_policy = shmem_get_policy,
4008#endif
4009};
4010
d09e8ca6
PT
4011static const struct vm_operations_struct shmem_anon_vm_ops = {
4012 .fault = shmem_fault,
4013 .map_pages = filemap_map_pages,
4014#ifdef CONFIG_NUMA
4015 .set_policy = shmem_set_policy,
4016 .get_policy = shmem_get_policy,
4017#endif
4018};
4019
f3235626 4020int shmem_init_fs_context(struct fs_context *fc)
1da177e4 4021{
f3235626
DH
4022 struct shmem_options *ctx;
4023
4024 ctx = kzalloc(sizeof(struct shmem_options), GFP_KERNEL);
4025 if (!ctx)
4026 return -ENOMEM;
4027
4028 ctx->mode = 0777 | S_ISVTX;
4029 ctx->uid = current_fsuid();
4030 ctx->gid = current_fsgid();
4031
4032 fc->fs_private = ctx;
4033 fc->ops = &shmem_fs_context_ops;
4034 return 0;
1da177e4
LT
4035}
4036
41ffe5d5 4037static struct file_system_type shmem_fs_type = {
1da177e4
LT
4038 .owner = THIS_MODULE,
4039 .name = "tmpfs",
f3235626
DH
4040 .init_fs_context = shmem_init_fs_context,
4041#ifdef CONFIG_TMPFS
d7167b14 4042 .parameters = shmem_fs_parameters,
f3235626 4043#endif
1da177e4 4044 .kill_sb = kill_litter_super,
ff36da69 4045 .fs_flags = FS_USERNS_MOUNT,
1da177e4 4046};
1da177e4 4047
9096bbe9 4048void __init shmem_init(void)
1da177e4
LT
4049{
4050 int error;
4051
9a8ec03e 4052 shmem_init_inodecache();
1da177e4 4053
41ffe5d5 4054 error = register_filesystem(&shmem_fs_type);
1da177e4 4055 if (error) {
1170532b 4056 pr_err("Could not register tmpfs\n");
1da177e4
LT
4057 goto out2;
4058 }
95dc112a 4059
ca4e0519 4060 shm_mnt = kern_mount(&shmem_fs_type);
1da177e4
LT
4061 if (IS_ERR(shm_mnt)) {
4062 error = PTR_ERR(shm_mnt);
1170532b 4063 pr_err("Could not kern_mount tmpfs\n");
1da177e4
LT
4064 goto out1;
4065 }
5a6e75f8 4066
396bcc52 4067#ifdef CONFIG_TRANSPARENT_HUGEPAGE
435c0b87 4068 if (has_transparent_hugepage() && shmem_huge > SHMEM_HUGE_DENY)
5a6e75f8
KS
4069 SHMEM_SB(shm_mnt->mnt_sb)->huge = shmem_huge;
4070 else
5e6e5a12 4071 shmem_huge = SHMEM_HUGE_NEVER; /* just in case it was patched */
5a6e75f8 4072#endif
9096bbe9 4073 return;
1da177e4
LT
4074
4075out1:
41ffe5d5 4076 unregister_filesystem(&shmem_fs_type);
1da177e4 4077out2:
41ffe5d5 4078 shmem_destroy_inodecache();
1da177e4 4079 shm_mnt = ERR_PTR(error);
1da177e4 4080}
853ac43a 4081
396bcc52 4082#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && defined(CONFIG_SYSFS)
5a6e75f8 4083static ssize_t shmem_enabled_show(struct kobject *kobj,
79d4d38a 4084 struct kobj_attribute *attr, char *buf)
5a6e75f8 4085{
26083eb6 4086 static const int values[] = {
5a6e75f8
KS
4087 SHMEM_HUGE_ALWAYS,
4088 SHMEM_HUGE_WITHIN_SIZE,
4089 SHMEM_HUGE_ADVISE,
4090 SHMEM_HUGE_NEVER,
4091 SHMEM_HUGE_DENY,
4092 SHMEM_HUGE_FORCE,
4093 };
79d4d38a
JP
4094 int len = 0;
4095 int i;
5a6e75f8 4096
79d4d38a
JP
4097 for (i = 0; i < ARRAY_SIZE(values); i++) {
4098 len += sysfs_emit_at(buf, len,
4099 shmem_huge == values[i] ? "%s[%s]" : "%s%s",
4100 i ? " " : "",
4101 shmem_format_huge(values[i]));
5a6e75f8 4102 }
79d4d38a
JP
4103
4104 len += sysfs_emit_at(buf, len, "\n");
4105
4106 return len;
5a6e75f8
KS
4107}
4108
4109static ssize_t shmem_enabled_store(struct kobject *kobj,
4110 struct kobj_attribute *attr, const char *buf, size_t count)
4111{
4112 char tmp[16];
4113 int huge;
4114
4115 if (count + 1 > sizeof(tmp))
4116 return -EINVAL;
4117 memcpy(tmp, buf, count);
4118 tmp[count] = '\0';
4119 if (count && tmp[count - 1] == '\n')
4120 tmp[count - 1] = '\0';
4121
4122 huge = shmem_parse_huge(tmp);
4123 if (huge == -EINVAL)
4124 return -EINVAL;
4125 if (!has_transparent_hugepage() &&
4126 huge != SHMEM_HUGE_NEVER && huge != SHMEM_HUGE_DENY)
4127 return -EINVAL;
4128
4129 shmem_huge = huge;
435c0b87 4130 if (shmem_huge > SHMEM_HUGE_DENY)
5a6e75f8
KS
4131 SHMEM_SB(shm_mnt->mnt_sb)->huge = shmem_huge;
4132 return count;
4133}
4134
4bfa8ada 4135struct kobj_attribute shmem_enabled_attr = __ATTR_RW(shmem_enabled);
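/*
 * Userspace sketch (separate program, not kernel code): the attribute
 * above appears as /sys/kernel/mm/transparent_hugepage/shmem_enabled,
 * and the store handler accepts the same keywords the show handler
 * prints ("always", "within_size", "advise", "never", "deny", "force").
 */
#include <stdio.h>

int example_set_shmem_enabled(const char *mode)
{
	FILE *f = fopen("/sys/kernel/mm/transparent_hugepage/shmem_enabled", "w");
	int ret;

	if (!f)
		return -1;
	ret = (fputs(mode, f) >= 0) ? 0 : -1;
	if (fclose(f) != 0)
		ret = -1;
	return ret;
}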
396bcc52 4136#endif /* CONFIG_TRANSPARENT_HUGEPAGE && CONFIG_SYSFS */
f3f0e1d2 4137
853ac43a
MM
4138#else /* !CONFIG_SHMEM */
4139
4140/*
4141 * tiny-shmem: simple shmemfs and tmpfs using ramfs code
4142 *
4143 * This is intended for small systems where the benefits of the full
4144 * shmem code (swap-backed and resource-limited) are outweighed by
4145 * their complexity. On systems without swap this code should be
4146 * effectively equivalent, but much lighter weight.
4147 */
4148
41ffe5d5 4149static struct file_system_type shmem_fs_type = {
853ac43a 4150 .name = "tmpfs",
f3235626 4151 .init_fs_context = ramfs_init_fs_context,
d7167b14 4152 .parameters = ramfs_fs_parameters,
853ac43a 4153 .kill_sb = kill_litter_super,
2b8576cb 4154 .fs_flags = FS_USERNS_MOUNT,
853ac43a
MM
4155};
4156
9096bbe9 4157void __init shmem_init(void)
853ac43a 4158{
41ffe5d5 4159 BUG_ON(register_filesystem(&shmem_fs_type) != 0);
853ac43a 4160
41ffe5d5 4161 shm_mnt = kern_mount(&shmem_fs_type);
853ac43a 4162 BUG_ON(IS_ERR(shm_mnt));
853ac43a
MM
4163}
4164
10a9c496 4165int shmem_unuse(unsigned int type)
853ac43a
MM
4166{
4167 return 0;
4168}
4169
d7c9e99a 4170int shmem_lock(struct file *file, int lock, struct ucounts *ucounts)
3f96b79a
HD
4171{
4172 return 0;
4173}
4174
24513264
HD
4175void shmem_unlock_mapping(struct address_space *mapping)
4176{
4177}
4178
c01d5b30
HD
4179#ifdef CONFIG_MMU
4180unsigned long shmem_get_unmapped_area(struct file *file,
4181 unsigned long addr, unsigned long len,
4182 unsigned long pgoff, unsigned long flags)
4183{
4184 return current->mm->get_unmapped_area(file, addr, len, pgoff, flags);
4185}
4186#endif
4187
41ffe5d5 4188void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
94c1e62d 4189{
41ffe5d5 4190 truncate_inode_pages_range(inode->i_mapping, lstart, lend);
94c1e62d
HD
4191}
4192EXPORT_SYMBOL_GPL(shmem_truncate_range);
4193
0b0a0806 4194#define shmem_vm_ops generic_file_vm_ops
d09e8ca6 4195#define shmem_anon_vm_ops generic_file_vm_ops
0b0a0806 4196#define shmem_file_operations ramfs_file_operations
454abafe 4197#define shmem_get_inode(sb, dir, mode, dev, flags) ramfs_get_inode(sb, dir, mode, dev)
0b0a0806
HD
4198#define shmem_acct_size(flags, size) 0
4199#define shmem_unacct_size(flags, size) do {} while (0)
853ac43a
MM
4200
4201#endif /* CONFIG_SHMEM */
4202
4203/* common code */
1da177e4 4204
703321b6 4205static struct file *__shmem_file_setup(struct vfsmount *mnt, const char *name, loff_t size,
c7277090 4206 unsigned long flags, unsigned int i_flags)
1da177e4 4207{
1da177e4 4208 struct inode *inode;
93dec2da 4209 struct file *res;
1da177e4 4210
703321b6
MA
4211 if (IS_ERR(mnt))
4212 return ERR_CAST(mnt);
1da177e4 4213
285b2c4f 4214 if (size < 0 || size > MAX_LFS_FILESIZE)
1da177e4
LT
4215 return ERR_PTR(-EINVAL);
4216
4217 if (shmem_acct_size(flags, size))
4218 return ERR_PTR(-ENOMEM);
4219
93dec2da
AV
4220 inode = shmem_get_inode(mnt->mnt_sb, NULL, S_IFREG | S_IRWXUGO, 0,
4221 flags);
dac2d1f6
AV
4222 if (unlikely(!inode)) {
4223 shmem_unacct_size(flags, size);
4224 return ERR_PTR(-ENOSPC);
4225 }
c7277090 4226 inode->i_flags |= i_flags;
1da177e4 4227 inode->i_size = size;
6d6b77f1 4228 clear_nlink(inode); /* It is unlinked */
26567cdb 4229 res = ERR_PTR(ramfs_nommu_expand_for_mapping(inode, size));
93dec2da
AV
4230 if (!IS_ERR(res))
4231 res = alloc_file_pseudo(inode, mnt, name, O_RDWR,
4232 &shmem_file_operations);
26567cdb 4233 if (IS_ERR(res))
93dec2da 4234 iput(inode);
6b4d0b27 4235 return res;
1da177e4 4236}
c7277090
EP
4237
4238/**
4239 * shmem_kernel_file_setup - get an unlinked file living in tmpfs which must be
4240 * kernel internal. There will be NO LSM permission checks against the
4241 * underlying inode. So users of this interface must do LSM checks at a
e1832f29
SS
4242 * higher layer. The users are the big_key and shm implementations. LSM
4243 * checks are provided at the key or shm level rather than the inode.
c7277090
EP
4244 * @name: name for dentry (to be seen in /proc/<pid>/maps)
4245 * @size: size to be set for the file
4246 * @flags: VM_NORESERVE suppresses pre-accounting of the entire object size
4247 */
4248struct file *shmem_kernel_file_setup(const char *name, loff_t size, unsigned long flags)
4249{
703321b6 4250 return __shmem_file_setup(shm_mnt, name, size, flags, S_PRIVATE);
c7277090
EP
4251}
4252
4253/**
4254 * shmem_file_setup - get an unlinked file living in tmpfs
4255 * @name: name for dentry (to be seen in /proc/<pid>/maps)
4256 * @size: size to be set for the file
4257 * @flags: VM_NORESERVE suppresses pre-accounting of the entire object size
4258 */
4259struct file *shmem_file_setup(const char *name, loff_t size, unsigned long flags)
4260{
703321b6 4261 return __shmem_file_setup(shm_mnt, name, size, flags, 0);
c7277090 4262}
395e0ddc 4263EXPORT_SYMBOL_GPL(shmem_file_setup);
1da177e4 4264
703321b6
MA
4265/**
4266 * shmem_file_setup_with_mnt - get an unlinked file living in tmpfs
4267 * @mnt: the tmpfs mount where the file will be created
4268 * @name: name for dentry (to be seen in /proc/<pid>/maps)
4269 * @size: size to be set for the file
4270 * @flags: VM_NORESERVE suppresses pre-accounting of the entire object size
4271 */
4272struct file *shmem_file_setup_with_mnt(struct vfsmount *mnt, const char *name,
4273 loff_t size, unsigned long flags)
4274{
4275 return __shmem_file_setup(mnt, name, size, flags, 0);
4276}
4277EXPORT_SYMBOL_GPL(shmem_file_setup_with_mnt);
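/*
 * Hedged kernel-side sketch of how the helpers above are typically
 * consumed (drm/i915 and the SysV shm code are real in-tree users).
 * Name and size below are illustrative.
 */
static struct file *example_make_shmem_buffer(void)
{
	struct file *filp;

	filp = shmem_file_setup("example-buffer", 1024 * 1024, VM_NORESERVE);
	if (IS_ERR(filp))
		return filp;

	/* ... use filp->f_mapping or mmap it ..., then fput(filp) when done */
	return filp;
}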
4278
46711810 4279/**
1da177e4 4280 * shmem_zero_setup - setup a shared anonymous mapping
45e55300 4281 * @vma: the vma to be mmapped is prepared by do_mmap
1da177e4
LT
4282 */
4283int shmem_zero_setup(struct vm_area_struct *vma)
4284{
4285 struct file *file;
4286 loff_t size = vma->vm_end - vma->vm_start;
4287
66fc1303 4288 /*
c1e8d7c6 4289 * Cloning a new file under mmap_lock leads to a lock ordering conflict
66fc1303
HD
4290 * between XFS directory reading and selinux: since this file is only
4291 * accessible to the user through its mapping, use S_PRIVATE flag to
4292 * bypass file security, in the same way as shmem_kernel_file_setup().
4293 */
703321b6 4294 file = shmem_kernel_file_setup("dev/zero", size, vma->vm_flags);
1da177e4
LT
4295 if (IS_ERR(file))
4296 return PTR_ERR(file);
4297
4298 if (vma->vm_file)
4299 fput(vma->vm_file);
4300 vma->vm_file = file;
d09e8ca6 4301 vma->vm_ops = &shmem_anon_vm_ops;
f3f0e1d2 4302
1da177e4
LT
4303 return 0;
4304}
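/*
 * Userspace sketch (separate program, not kernel code): a shared
 * anonymous mapping is what reaches shmem_zero_setup() above; the
 * kernel backs it with the unlinked "dev/zero" tmpfs file created
 * there.
 */
#include <sys/mman.h>

void *example_shared_zero(size_t len)
{
	void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
		       MAP_SHARED | MAP_ANONYMOUS, -1, 0);

	return p == MAP_FAILED ? NULL : p;
}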
d9d90e5e
HD
4305
4306/**
4307 * shmem_read_mapping_page_gfp - read into page cache, using specified page allocation flags.
4308 * @mapping: the page's address_space
4309 * @index: the page index
4310 * @gfp: the page allocator flags to use if allocating
4311 *
4312 * This behaves as a tmpfs "read_cache_page_gfp(mapping, index, gfp)",
4313 * with any new page allocations done using the specified allocation flags.
7e0a1265 4314 * But read_cache_page_gfp() uses the ->read_folio() method: which does not
d9d90e5e
HD
4315 * suit tmpfs, since it may have pages in swapcache, and needs to find those
4316 * for itself; although drivers/gpu/drm i915 and ttm rely upon this support.
4317 *
68da9f05
HD
4318 * i915_gem_object_get_pages_gtt() mixes __GFP_NORETRY | __GFP_NOWARN in
4319 * with the mapping_gfp_mask(), to avoid OOMing the machine unnecessarily.
d9d90e5e
HD
4320 */
4321struct page *shmem_read_mapping_page_gfp(struct address_space *mapping,
4322 pgoff_t index, gfp_t gfp)
4323{
68da9f05
HD
4324#ifdef CONFIG_SHMEM
4325 struct inode *inode = mapping->host;
a3a9c397 4326 struct folio *folio;
9276aad6 4327 struct page *page;
68da9f05
HD
4328 int error;
4329
30e6a51d 4330 BUG_ON(!shmem_mapping(mapping));
a3a9c397 4331 error = shmem_get_folio_gfp(inode, index, &folio, SGP_CACHE,
cfda0526 4332 gfp, NULL, NULL, NULL);
68da9f05 4333 if (error)
a7605426
YS
4334 return ERR_PTR(error);
4335
a3a9c397
MWO
4336 folio_unlock(folio);
4337 page = folio_file_page(folio, index);
a7605426 4338 if (PageHWPoison(page)) {
a3a9c397 4339 folio_put(folio);
a7605426
YS
4340 return ERR_PTR(-EIO);
4341 }
4342
68da9f05
HD
4343 return page;
4344#else
4345 /*
4346 * The tiny !SHMEM case uses ramfs without swap
4347 */
d9d90e5e 4348 return read_cache_page_gfp(mapping, index, gfp);
68da9f05 4349#endif
d9d90e5e
HD
4350}
4351EXPORT_SYMBOL_GPL(shmem_read_mapping_page_gfp);
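/*
 * Hedged sketch of the driver-side pattern the comment above alludes
 * to: read object pages with __GFP_NORETRY | __GFP_NOWARN mixed into
 * the mapping's gfp mask, so transient memory pressure fails the
 * lookup instead of triggering the OOM killer.
 */
static struct page *example_get_object_page(struct address_space *mapping,
					    pgoff_t index)
{
	gfp_t gfp = mapping_gfp_mask(mapping) | __GFP_NORETRY | __GFP_NOWARN;

	return shmem_read_mapping_page_gfp(mapping, index, gfp);
}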