// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_acl.h"
#include "xfs_quota.h"
#include "xfs_da_format.h"
#include "xfs_da_btree.h"
#include "xfs_attr.h"
#include "xfs_trans.h"
#include "xfs_trace.h"
#include "xfs_icache.h"
#include "xfs_symlink.h"
#include "xfs_dir2.h"
#include "xfs_iomap.h"
#include "xfs_error.h"
#include "xfs_ioctl.h"
#include "xfs_xattr.h"

#include <linux/posix_acl.h>
#include <linux/security.h>
#include <linux/iversion.h>
#include <linux/fiemap.h>

/*
 * Directories have different lock order w.r.t. mmap_lock compared to regular
 * files. This is due to readdir potentially triggering page faults on a user
 * buffer inside filldir(), and this happens with the ilock on the directory
 * held. For regular files, the lock order is the other way around - the
 * mmap_lock is taken during the page fault, and then we lock the ilock to do
 * block mapping. Hence we need a different class for the directory ilock so
 * that lockdep can tell them apart.
 */
static struct lock_class_key xfs_nondir_ilock_class;
static struct lock_class_key xfs_dir_ilock_class;

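/*
 * Callback handed to security_inode_init_security(): install the security
 * xattrs supplied by the active LSM(s) on a newly created inode.
 */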
static int
xfs_initxattrs(
	struct inode		*inode,
	const struct xattr	*xattr_array,
	void			*fs_info)
{
	const struct xattr	*xattr;
	struct xfs_inode	*ip = XFS_I(inode);
	int			error = 0;

	for (xattr = xattr_array; xattr->name != NULL; xattr++) {
		struct xfs_da_args	args = {
			.dp		= ip,
			.attr_filter	= XFS_ATTR_SECURE,
			.name		= xattr->name,
			.namelen	= strlen(xattr->name),
			.value		= xattr->value,
			.valuelen	= xattr->value_len,
		};
		error = xfs_attr_change(&args);
		if (error < 0)
			break;
	}
	return error;
}

/*
 * Hook in SELinux. This is not quite correct yet; what we really need
 * here (as we do for default ACLs) is a mechanism by which creation of
 * these attrs can be journalled at inode creation time (along with the
 * inode, of course, such that log replay can't cause these to be lost).
 */
int
xfs_inode_init_security(
	struct inode		*inode,
	struct inode		*dir,
	const struct qstr	*qstr)
{
	return security_inode_init_security(inode, dir, qstr,
					    &xfs_initxattrs, NULL);
}

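/*
 * Translate a VFS dentry into the xfs_name used by the XFS directory code.
 * The mode-aware variant also records the on-disk file type and rejects
 * modes that do not map to a valid XFS_DIR3_FT_* value.
 */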
static void
xfs_dentry_to_name(
	struct xfs_name	*namep,
	struct dentry	*dentry)
{
	namep->name = dentry->d_name.name;
	namep->len = dentry->d_name.len;
	namep->type = XFS_DIR3_FT_UNKNOWN;
}

static int
xfs_dentry_mode_to_name(
	struct xfs_name	*namep,
	struct dentry	*dentry,
	int		mode)
{
	namep->name = dentry->d_name.name;
	namep->len = dentry->d_name.len;
	namep->type = xfs_mode_to_ftype(mode);

	if (unlikely(namep->type == XFS_DIR3_FT_UNKNOWN))
		return -EFSCORRUPTED;

	return 0;
}

STATIC void
xfs_cleanup_inode(
	struct inode	*dir,
	struct inode	*inode,
	struct dentry	*dentry)
{
	struct xfs_name	teardown;

	/* Oh, the horror.
	 * If we can't add the ACL or we fail in
	 * xfs_inode_init_security we must back out.
	 * ENOSPC can hit here, among other things.
	 */
	xfs_dentry_to_name(&teardown, dentry);

	xfs_remove(XFS_I(dir), &teardown, XFS_I(inode));
}

/*
 * Check to see if we are likely to need an extended attribute to be added to
 * the inode we are about to allocate. This allows the attribute fork to be
 * created during the inode allocation, reducing the number of transactions we
 * need to do in this fast path.
 *
 * The security checks are optimistic, but not guaranteed. The two LSMs that
 * require xattrs to be added here (selinux and smack) are also the only two
 * LSMs that add a sb->s_security structure to the superblock. Hence if security
 * is enabled and sb->s_security is set, we have a pretty good idea that we are
 * going to be asked to add a security xattr immediately after allocating the
 * xfs inode and instantiating the VFS inode.
 */
static inline bool
xfs_create_need_xattr(
	struct inode	*dir,
	struct posix_acl *default_acl,
	struct posix_acl *acl)
{
	if (acl)
		return true;
	if (default_acl)
		return true;
#if IS_ENABLED(CONFIG_SECURITY)
	if (dir->i_sb->s_security)
		return true;
#endif
	return false;
}


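/*
 * Common backend for mknod, create, mkdir and tmpfile: allocate the inode,
 * apply security attributes and ACLs, and connect it to the dentry (or to
 * the open file in the tmpfile case).
 */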
STATIC int
xfs_generic_create(
	struct user_namespace	*mnt_userns,
	struct inode		*dir,
	struct dentry		*dentry,
	umode_t			mode,
	dev_t			rdev,
	struct file		*tmpfile)	/* unnamed file */
{
	struct inode	*inode;
	struct xfs_inode *ip = NULL;
	struct posix_acl *default_acl, *acl;
	struct xfs_name	name;
	int		error;

	/*
	 * Irix uses Missed'em'V split, but doesn't want to see
	 * the upper 5 bits of (14bit) major.
	 */
	if (S_ISCHR(mode) || S_ISBLK(mode)) {
		if (unlikely(!sysv_valid_dev(rdev) || MAJOR(rdev) & ~0x1ff))
			return -EINVAL;
	} else {
		rdev = 0;
	}

	error = posix_acl_create(dir, &mode, &default_acl, &acl);
	if (error)
		return error;

	/* Verify mode is valid also for tmpfile case */
	error = xfs_dentry_mode_to_name(&name, dentry, mode);
	if (unlikely(error))
		goto out_free_acl;

	if (!tmpfile) {
		error = xfs_create(mnt_userns, XFS_I(dir), &name, mode, rdev,
				xfs_create_need_xattr(dir, default_acl, acl),
				&ip);
	} else {
		error = xfs_create_tmpfile(mnt_userns, XFS_I(dir), mode, &ip);
	}
	if (unlikely(error))
		goto out_free_acl;

	inode = VFS_I(ip);

	error = xfs_inode_init_security(inode, dir, &dentry->d_name);
	if (unlikely(error))
		goto out_cleanup_inode;

	if (default_acl) {
		error = __xfs_set_acl(inode, default_acl, ACL_TYPE_DEFAULT);
		if (error)
			goto out_cleanup_inode;
	}
	if (acl) {
		error = __xfs_set_acl(inode, acl, ACL_TYPE_ACCESS);
		if (error)
			goto out_cleanup_inode;
	}

	xfs_setup_iops(ip);

	if (tmpfile) {
		/*
		 * The VFS requires that any inode fed to d_tmpfile must have
		 * nlink == 1 so that it can decrement the nlink in d_tmpfile.
		 * However, we created the temp file with nlink == 0 because
		 * we're not allowed to put an inode with nlink > 0 on the
		 * unlinked list. Therefore we have to set nlink to 1 so that
		 * d_tmpfile can immediately set it back to zero.
		 */
		set_nlink(inode, 1);
		d_tmpfile(tmpfile, inode);
	} else
		d_instantiate(dentry, inode);

	xfs_finish_inode_setup(ip);

 out_free_acl:
	posix_acl_release(default_acl);
	posix_acl_release(acl);
	return error;

 out_cleanup_inode:
	xfs_finish_inode_setup(ip);
	if (!tmpfile)
		xfs_cleanup_inode(dir, inode, dentry);
	xfs_irele(ip);
	goto out_free_acl;
}

STATIC int
xfs_vn_mknod(
	struct user_namespace	*mnt_userns,
	struct inode		*dir,
	struct dentry		*dentry,
	umode_t			mode,
	dev_t			rdev)
{
	return xfs_generic_create(mnt_userns, dir, dentry, mode, rdev, NULL);
}

STATIC int
xfs_vn_create(
	struct user_namespace	*mnt_userns,
	struct inode		*dir,
	struct dentry		*dentry,
	umode_t			mode,
	bool			flags)
{
	return xfs_generic_create(mnt_userns, dir, dentry, mode, 0, NULL);
}

STATIC int
xfs_vn_mkdir(
	struct user_namespace	*mnt_userns,
	struct inode		*dir,
	struct dentry		*dentry,
	umode_t			mode)
{
	return xfs_generic_create(mnt_userns, dir, dentry, mode | S_IFDIR, 0,
				  NULL);
}

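/*
 * Look up an entry in a directory and splice the resulting inode, if any,
 * into the dcache.
 */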
STATIC struct dentry *
xfs_vn_lookup(
	struct inode	*dir,
	struct dentry	*dentry,
	unsigned int	flags)
{
	struct inode	*inode;
	struct xfs_inode *cip;
	struct xfs_name	name;
	int		error;

	if (dentry->d_name.len >= MAXNAMELEN)
		return ERR_PTR(-ENAMETOOLONG);

	xfs_dentry_to_name(&name, dentry);
	error = xfs_lookup(XFS_I(dir), &name, &cip, NULL);
	if (likely(!error))
		inode = VFS_I(cip);
	else if (likely(error == -ENOENT))
		inode = NULL;
	else
		inode = ERR_PTR(error);
	return d_splice_alias(inode, dentry);
}

STATIC struct dentry *
xfs_vn_ci_lookup(
	struct inode	*dir,
	struct dentry	*dentry,
	unsigned int	flags)
{
	struct xfs_inode *ip;
	struct xfs_name	xname;
	struct xfs_name	ci_name;
	struct qstr	dname;
	int		error;

	if (dentry->d_name.len >= MAXNAMELEN)
		return ERR_PTR(-ENAMETOOLONG);

	xfs_dentry_to_name(&xname, dentry);
	error = xfs_lookup(XFS_I(dir), &xname, &ip, &ci_name);
	if (unlikely(error)) {
		if (unlikely(error != -ENOENT))
			return ERR_PTR(error);
		/*
		 * call d_add(dentry, NULL) here when d_drop_negative_children
		 * is called in xfs_vn_mknod (ie. allow negative dentries
		 * with CI filesystems).
		 */
		return NULL;
	}

	/* if exact match, just splice and exit */
	if (!ci_name.name)
		return d_splice_alias(VFS_I(ip), dentry);

	/* else case-insensitive match... */
	dname.name = ci_name.name;
	dname.len = ci_name.len;
	dentry = d_add_ci(dentry, VFS_I(ip), &dname);
	kmem_free(ci_name.name);
	return dentry;
}

STATIC int
xfs_vn_link(
	struct dentry	*old_dentry,
	struct inode	*dir,
	struct dentry	*dentry)
{
	struct inode	*inode = d_inode(old_dentry);
	struct xfs_name	name;
	int		error;

	error = xfs_dentry_mode_to_name(&name, dentry, inode->i_mode);
	if (unlikely(error))
		return error;

	error = xfs_link(XFS_I(dir), XFS_I(inode), &name);
	if (unlikely(error))
		return error;

	ihold(inode);
	d_instantiate(dentry, inode);
	return 0;
}

STATIC int
xfs_vn_unlink(
	struct inode	*dir,
	struct dentry	*dentry)
{
	struct xfs_name	name;
	int		error;

	xfs_dentry_to_name(&name, dentry);

	error = xfs_remove(XFS_I(dir), &name, XFS_I(d_inode(dentry)));
	if (error)
		return error;

	/*
	 * With unlink, the VFS makes the dentry "negative": no inode,
	 * but still hashed. This is incompatible with case-insensitive
	 * mode, so invalidate (unhash) the dentry in CI-mode.
	 */
	if (xfs_has_asciici(XFS_M(dir->i_sb)))
		d_invalidate(dentry);
	return 0;
}

STATIC int
xfs_vn_symlink(
	struct user_namespace	*mnt_userns,
	struct inode		*dir,
	struct dentry		*dentry,
	const char		*symname)
{
	struct inode	*inode;
	struct xfs_inode *cip = NULL;
	struct xfs_name	name;
	int		error;
	umode_t		mode;

	mode = S_IFLNK |
		(irix_symlink_mode ? 0777 & ~current_umask() : S_IRWXUGO);
	error = xfs_dentry_mode_to_name(&name, dentry, mode);
	if (unlikely(error))
		goto out;

	error = xfs_symlink(mnt_userns, XFS_I(dir), &name, symname, mode, &cip);
	if (unlikely(error))
		goto out;

	inode = VFS_I(cip);

	error = xfs_inode_init_security(inode, dir, &dentry->d_name);
	if (unlikely(error))
		goto out_cleanup_inode;

	xfs_setup_iops(cip);

	d_instantiate(dentry, inode);
	xfs_finish_inode_setup(cip);
	return 0;

 out_cleanup_inode:
	xfs_finish_inode_setup(cip);
	xfs_cleanup_inode(dir, inode, dentry);
	xfs_irele(cip);
 out:
	return error;
}

STATIC int
xfs_vn_rename(
	struct user_namespace	*mnt_userns,
	struct inode		*odir,
	struct dentry		*odentry,
	struct inode		*ndir,
	struct dentry		*ndentry,
	unsigned int		flags)
{
	struct inode	*new_inode = d_inode(ndentry);
	int		omode = 0;
	int		error;
	struct xfs_name	oname;
	struct xfs_name	nname;

	if (flags & ~(RENAME_NOREPLACE | RENAME_EXCHANGE | RENAME_WHITEOUT))
		return -EINVAL;

	/* if we are exchanging files, we need to set i_mode of both files */
	if (flags & RENAME_EXCHANGE)
		omode = d_inode(ndentry)->i_mode;

	error = xfs_dentry_mode_to_name(&oname, odentry, omode);
	if (omode && unlikely(error))
		return error;

	error = xfs_dentry_mode_to_name(&nname, ndentry,
					d_inode(odentry)->i_mode);
	if (unlikely(error))
		return error;

	return xfs_rename(mnt_userns, XFS_I(odir), &oname,
			  XFS_I(d_inode(odentry)), XFS_I(ndir), &nname,
			  new_inode ? XFS_I(new_inode) : NULL, flags);
}

/*
 * careful here - this function can get called recursively, so
 * we need to be very careful about how much stack we use.
 * uio is kmalloced for this reason...
 */
STATIC const char *
xfs_vn_get_link(
	struct dentry		*dentry,
	struct inode		*inode,
	struct delayed_call	*done)
{
	char			*link;
	int			error = -ENOMEM;

	if (!dentry)
		return ERR_PTR(-ECHILD);

	link = kmalloc(XFS_SYMLINK_MAXLEN+1, GFP_KERNEL);
	if (!link)
		goto out_err;

	error = xfs_readlink(XFS_I(d_inode(dentry)), link);
	if (unlikely(error))
		goto out_kfree;

	set_delayed_call(done, kfree_link, link);
	return link;

 out_kfree:
	kfree(link);
 out_err:
	return ERR_PTR(error);
}

static uint32_t
xfs_stat_blksize(
	struct xfs_inode	*ip)
{
	struct xfs_mount	*mp = ip->i_mount;

	/*
	 * If the file blocks are being allocated from a realtime volume, then
	 * always return the realtime extent size.
	 */
	if (XFS_IS_REALTIME_INODE(ip))
		return XFS_FSB_TO_B(mp, xfs_get_extsz_hint(ip));

	/*
	 * Allow large block sizes to be reported to userspace programs if the
	 * "largeio" mount option is used.
	 *
	 * If compatibility mode is specified, simply return the basic unit of
	 * caching so that we don't get inefficient read/modify/write I/O from
	 * user apps. Otherwise....
	 *
	 * If the underlying volume is a stripe, then return the stripe width in
	 * bytes as the recommended I/O size. If it is not a stripe and we've
	 * set a default buffered I/O size, return that; otherwise return the
	 * compat default.
	 */
	if (xfs_has_large_iosize(mp)) {
		if (mp->m_swidth)
			return XFS_FSB_TO_B(mp, mp->m_swidth);
		if (xfs_has_allocsize(mp))
			return 1U << mp->m_allocsize_log;
	}

	return PAGE_SIZE;
}

STATIC int
xfs_vn_getattr(
	struct user_namespace	*mnt_userns,
	const struct path	*path,
	struct kstat		*stat,
	u32			request_mask,
	unsigned int		query_flags)
{
	struct inode		*inode = d_inode(path->dentry);
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	vfsuid_t		vfsuid = i_uid_into_vfsuid(mnt_userns, inode);
	vfsgid_t		vfsgid = i_gid_into_vfsgid(mnt_userns, inode);

	trace_xfs_getattr(ip);

	if (xfs_is_shutdown(mp))
		return -EIO;

	stat->size = XFS_ISIZE(ip);
	stat->dev = inode->i_sb->s_dev;
	stat->mode = inode->i_mode;
	stat->nlink = inode->i_nlink;
	stat->uid = vfsuid_into_kuid(vfsuid);
	stat->gid = vfsgid_into_kgid(vfsgid);
	stat->ino = ip->i_ino;
	stat->atime = inode->i_atime;
	stat->mtime = inode->i_mtime;
	stat->ctime = inode->i_ctime;
	stat->blocks = XFS_FSB_TO_BB(mp, ip->i_nblocks + ip->i_delayed_blks);

	if (xfs_has_v3inodes(mp)) {
		if (request_mask & STATX_BTIME) {
			stat->result_mask |= STATX_BTIME;
			stat->btime = ip->i_crtime;
		}
	}

	/*
	 * Note: If you add another clause to set an attribute flag, please
	 * update attributes_mask below.
	 */
	if (ip->i_diflags & XFS_DIFLAG_IMMUTABLE)
		stat->attributes |= STATX_ATTR_IMMUTABLE;
	if (ip->i_diflags & XFS_DIFLAG_APPEND)
		stat->attributes |= STATX_ATTR_APPEND;
	if (ip->i_diflags & XFS_DIFLAG_NODUMP)
		stat->attributes |= STATX_ATTR_NODUMP;

	stat->attributes_mask |= (STATX_ATTR_IMMUTABLE |
				  STATX_ATTR_APPEND |
				  STATX_ATTR_NODUMP);

	switch (inode->i_mode & S_IFMT) {
	case S_IFBLK:
	case S_IFCHR:
		stat->blksize = BLKDEV_IOSIZE;
		stat->rdev = inode->i_rdev;
		break;
	case S_IFREG:
		if (request_mask & STATX_DIOALIGN) {
			struct xfs_buftarg	*target = xfs_inode_buftarg(ip);
			struct block_device	*bdev = target->bt_bdev;

			stat->result_mask |= STATX_DIOALIGN;
			stat->dio_mem_align = bdev_dma_alignment(bdev) + 1;
			stat->dio_offset_align = bdev_logical_block_size(bdev);
		}
		fallthrough;
	default:
		stat->blksize = xfs_stat_blksize(ip);
		stat->rdev = 0;
		break;
	}

	return 0;
}

static int
xfs_vn_change_ok(
	struct user_namespace	*mnt_userns,
	struct dentry		*dentry,
	struct iattr		*iattr)
{
	struct xfs_mount	*mp = XFS_I(d_inode(dentry))->i_mount;

	if (xfs_is_readonly(mp))
		return -EROFS;

	if (xfs_is_shutdown(mp))
		return -EIO;

	return setattr_prepare(mnt_userns, dentry, iattr);
}

/*
 * Set non-size attributes of an inode.
 *
 * Caution: The caller of this function is responsible for calling
 * setattr_prepare() or otherwise verifying the change is fine.
 */
static int
xfs_setattr_nonsize(
	struct user_namespace	*mnt_userns,
	struct dentry		*dentry,
	struct xfs_inode	*ip,
	struct iattr		*iattr)
{
	xfs_mount_t		*mp = ip->i_mount;
	struct inode		*inode = VFS_I(ip);
	int			mask = iattr->ia_valid;
	xfs_trans_t		*tp;
	int			error;
	kuid_t			uid = GLOBAL_ROOT_UID;
	kgid_t			gid = GLOBAL_ROOT_GID;
	struct xfs_dquot	*udqp = NULL, *gdqp = NULL;
	struct xfs_dquot	*old_udqp = NULL, *old_gdqp = NULL;

	ASSERT((mask & ATTR_SIZE) == 0);

	/*
	 * If disk quotas are on, we make sure that the dquots do exist on disk
	 * before we start any other transactions. Trying to do this later
	 * is messy. We don't care to take a readlock to look at the ids
	 * in inode here, because we can't hold it across the trans_reserve.
	 * If the IDs do change before we take the ilock, we're covered
	 * because the i_*dquot fields will get updated anyway.
	 */
	if (XFS_IS_QUOTA_ON(mp) && (mask & (ATTR_UID|ATTR_GID))) {
		uint	qflags = 0;

		if ((mask & ATTR_UID) && XFS_IS_UQUOTA_ON(mp)) {
			uid = from_vfsuid(mnt_userns, i_user_ns(inode),
					  iattr->ia_vfsuid);
			qflags |= XFS_QMOPT_UQUOTA;
		} else {
			uid = inode->i_uid;
		}
		if ((mask & ATTR_GID) && XFS_IS_GQUOTA_ON(mp)) {
			gid = from_vfsgid(mnt_userns, i_user_ns(inode),
					  iattr->ia_vfsgid);
			qflags |= XFS_QMOPT_GQUOTA;
		} else {
			gid = inode->i_gid;
		}

		/*
		 * We take a reference when we initialize udqp and gdqp,
		 * so it is important that we never blindly double trip on
		 * the same variable. See xfs_create() for an example.
		 */
		ASSERT(udqp == NULL);
		ASSERT(gdqp == NULL);
		error = xfs_qm_vop_dqalloc(ip, uid, gid, ip->i_projid,
					   qflags, &udqp, &gdqp, NULL);
		if (error)
			return error;
	}

	error = xfs_trans_alloc_ichange(ip, udqp, gdqp, NULL,
			has_capability_noaudit(current, CAP_FOWNER), &tp);
	if (error)
		goto out_dqrele;

	/*
	 * Register quota modifications in the transaction. Must be the owner
	 * or privileged. These IDs could have changed since we last looked at
	 * them. But, we're assured that if the ownership did change while we
	 * didn't have the inode locked, inode's dquot(s) would have changed
	 * also.
	 */
	if (XFS_IS_UQUOTA_ON(mp) &&
	    i_uid_needs_update(mnt_userns, iattr, inode)) {
		ASSERT(udqp);
		old_udqp = xfs_qm_vop_chown(tp, ip, &ip->i_udquot, udqp);
	}
	if (XFS_IS_GQUOTA_ON(mp) &&
	    i_gid_needs_update(mnt_userns, iattr, inode)) {
		ASSERT(xfs_has_pquotino(mp) || !XFS_IS_PQUOTA_ON(mp));
		ASSERT(gdqp);
		old_gdqp = xfs_qm_vop_chown(tp, ip, &ip->i_gdquot, gdqp);
	}

	setattr_copy(mnt_userns, inode, iattr);
	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);

	XFS_STATS_INC(mp, xs_ig_attrchg);

	if (xfs_has_wsync(mp))
		xfs_trans_set_sync(tp);
	error = xfs_trans_commit(tp);

	/*
	 * Release any dquot(s) the inode had kept before chown.
	 */
	xfs_qm_dqrele(old_udqp);
	xfs_qm_dqrele(old_gdqp);
	xfs_qm_dqrele(udqp);
	xfs_qm_dqrele(gdqp);

	if (error)
		return error;

	/*
	 * XXX(hch): Updating the ACL entries is not atomic vs the i_mode
	 *	     update. We could avoid this with linked transactions
	 *	     and passing down the transaction pointer all the way
	 *	     to attr_set. No previous user of the generic
	 *	     Posix ACL code seems to care about this issue either.
	 */
	if (mask & ATTR_MODE) {
		error = posix_acl_chmod(mnt_userns, dentry, inode->i_mode);
		if (error)
			return error;
	}

	return 0;

out_dqrele:
	xfs_qm_dqrele(udqp);
	xfs_qm_dqrele(gdqp);
	return error;
}

/*
 * Truncate file.  Must have write permission and not be a directory.
 *
 * Caution: The caller of this function is responsible for calling
 * setattr_prepare() or otherwise verifying the change is fine.
 */
STATIC int
xfs_setattr_size(
	struct user_namespace	*mnt_userns,
	struct dentry		*dentry,
	struct xfs_inode	*ip,
	struct iattr		*iattr)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct inode		*inode = VFS_I(ip);
	xfs_off_t		oldsize, newsize;
	struct xfs_trans	*tp;
	int			error;
	uint			lock_flags = 0;
	bool			did_zeroing = false;

	ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL));
	ASSERT(xfs_isilocked(ip, XFS_MMAPLOCK_EXCL));
	ASSERT(S_ISREG(inode->i_mode));
	ASSERT((iattr->ia_valid & (ATTR_UID|ATTR_GID|ATTR_ATIME|ATTR_ATIME_SET|
		ATTR_MTIME_SET|ATTR_TIMES_SET)) == 0);

	oldsize = inode->i_size;
	newsize = iattr->ia_size;

	/*
	 * Short circuit the truncate case for zero length files.
	 */
	if (newsize == 0 && oldsize == 0 && ip->i_df.if_nextents == 0) {
		if (!(iattr->ia_valid & (ATTR_CTIME|ATTR_MTIME)))
			return 0;

		/*
		 * Use the regular setattr path to update the timestamps.
		 */
		iattr->ia_valid &= ~ATTR_SIZE;
		return xfs_setattr_nonsize(mnt_userns, dentry, ip, iattr);
	}

	/*
	 * Make sure that the dquots are attached to the inode.
	 */
	error = xfs_qm_dqattach(ip);
	if (error)
		return error;

	/*
	 * Wait for all direct I/O to complete.
	 */
	inode_dio_wait(inode);

	/*
	 * File data changes must be complete before we start the transaction to
	 * modify the inode. This needs to be done before joining the inode to
	 * the transaction because the inode cannot be unlocked once it is a
	 * part of the transaction.
	 *
	 * Start with zeroing any data beyond EOF that we may expose on file
	 * extension, or zeroing out the rest of the block on a downward
	 * truncate.
	 */
	if (newsize > oldsize) {
		trace_xfs_zero_eof(ip, oldsize, newsize - oldsize);
		error = xfs_zero_range(ip, oldsize, newsize - oldsize,
				&did_zeroing);
	} else {
		/*
		 * iomap won't detect a dirty page over an unwritten block (or a
		 * cow block over a hole) and subsequently skips zeroing the
		 * newly post-EOF portion of the page. Flush the new EOF to
		 * convert the block before the pagecache truncate.
		 */
		error = filemap_write_and_wait_range(inode->i_mapping, newsize,
						     newsize);
		if (error)
			return error;
		error = xfs_truncate_page(ip, newsize, &did_zeroing);
	}

	if (error)
		return error;

	/*
	 * We've already locked out new page faults, so now we can safely remove
	 * pages from the page cache knowing they won't get refaulted until we
	 * drop the XFS_MMAP_EXCL lock after the extent manipulations are
	 * complete. The truncate_setsize() call also cleans partial EOF page
	 * PTEs on extending truncates and hence ensures sub-page block size
	 * filesystems are correctly handled, too.
	 *
	 * We have to do all the page cache truncate work outside the
	 * transaction context as the "lock" order is page lock->log space
	 * reservation as defined by extent allocation in the writeback path.
	 * Hence a truncate can fail with ENOMEM from xfs_trans_alloc(), but
	 * having already truncated the in-memory version of the file (i.e. made
	 * user visible changes). There's not much we can do about this, except
	 * to hope that the caller sees ENOMEM and retries the truncate
	 * operation.
	 *
	 * And we update in-core i_size and truncate page cache beyond newsize
	 * before writing back the [i_disk_size, newsize] range, so we're
	 * guaranteed not to write stale data past the new EOF on truncate down.
	 */
	truncate_setsize(inode, newsize);

	/*
	 * We are going to log the inode size change in this transaction so
	 * any previous writes that are beyond the on disk EOF and the new
	 * EOF that have not been written out need to be written here.  If we
	 * do not write the data out, we expose ourselves to the null files
	 * problem. Note that this includes any block zeroing we did above;
	 * otherwise those blocks may not be zeroed after a crash.
	 */
	if (did_zeroing ||
	    (newsize > ip->i_disk_size && oldsize != ip->i_disk_size)) {
		error = filemap_write_and_wait_range(VFS_I(ip)->i_mapping,
						ip->i_disk_size, newsize - 1);
		if (error)
			return error;
	}

	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_itruncate, 0, 0, 0, &tp);
	if (error)
		return error;

	lock_flags |= XFS_ILOCK_EXCL;
	xfs_ilock(ip, XFS_ILOCK_EXCL);
	xfs_trans_ijoin(tp, ip, 0);

	/*
	 * Only change the c/mtime if we are changing the size or we are
	 * explicitly asked to change it.  This handles the semantic difference
	 * between truncate() and ftruncate() as implemented in the VFS.
	 *
	 * The regular truncate() case without ATTR_CTIME and ATTR_MTIME is a
	 * special case where we need to update the times despite not having
	 * these flags set.  For all other operations the VFS set these flags
	 * explicitly if it wants a timestamp update.
	 */
	if (newsize != oldsize &&
	    !(iattr->ia_valid & (ATTR_CTIME | ATTR_MTIME))) {
		iattr->ia_ctime = iattr->ia_mtime =
			current_time(inode);
		iattr->ia_valid |= ATTR_CTIME | ATTR_MTIME;
	}

	/*
	 * The first thing we do is set the size to new_size permanently on
	 * disk.  This way we don't have to worry about anyone ever being able
	 * to look at the data being freed even in the face of a crash.
	 * What we're getting around here is the case where we free a block, it
	 * is allocated to another file, it is written to, and then we crash.
	 * If the new data gets written to the file but the log buffers
	 * containing the free and reallocation don't, then we'd end up with
	 * garbage in the blocks being freed.  As long as we make the new size
	 * permanent before actually freeing any blocks it doesn't matter if
	 * they get written to.
	 */
	ip->i_disk_size = newsize;
	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);

	if (newsize <= oldsize) {
		error = xfs_itruncate_extents(&tp, ip, XFS_DATA_FORK, newsize);
		if (error)
			goto out_trans_cancel;

		/*
		 * Truncated "down", so we're removing references to old data
		 * here - if we delay flushing for a long time, we expose
		 * ourselves unduly to the notorious NULL files problem.  So,
		 * we mark this inode and flush it when the file is closed,
		 * and do not wait the usual (long) time for writeout.
		 */
		xfs_iflags_set(ip, XFS_ITRUNCATED);

		/* A truncate down always removes post-EOF blocks. */
		xfs_inode_clear_eofblocks_tag(ip);
	}

	ASSERT(!(iattr->ia_valid & (ATTR_UID | ATTR_GID)));
	setattr_copy(mnt_userns, inode, iattr);
	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);

	XFS_STATS_INC(mp, xs_ig_attrchg);

	if (xfs_has_wsync(mp))
		xfs_trans_set_sync(tp);

	error = xfs_trans_commit(tp);
out_unlock:
	if (lock_flags)
		xfs_iunlock(ip, lock_flags);
	return error;

out_trans_cancel:
	xfs_trans_cancel(tp);
	goto out_unlock;
}

int
xfs_vn_setattr_size(
	struct user_namespace	*mnt_userns,
	struct dentry		*dentry,
	struct iattr		*iattr)
{
	struct xfs_inode	*ip = XFS_I(d_inode(dentry));
	int			error;

	trace_xfs_setattr(ip);

	error = xfs_vn_change_ok(mnt_userns, dentry, iattr);
	if (error)
		return error;
	return xfs_setattr_size(mnt_userns, dentry, ip, iattr);
}

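/*
 * Top-level ->setattr: size changes must break layouts and take the
 * MMAPLOCK before truncating; everything else goes straight through
 * xfs_setattr_nonsize().
 */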
STATIC int
xfs_vn_setattr(
	struct user_namespace	*mnt_userns,
	struct dentry		*dentry,
	struct iattr		*iattr)
{
	struct inode		*inode = d_inode(dentry);
	struct xfs_inode	*ip = XFS_I(inode);
	int			error;

	if (iattr->ia_valid & ATTR_SIZE) {
		uint			iolock;

		xfs_ilock(ip, XFS_MMAPLOCK_EXCL);
		iolock = XFS_IOLOCK_EXCL | XFS_MMAPLOCK_EXCL;

		error = xfs_break_layouts(inode, &iolock, BREAK_UNMAP);
		if (error) {
			xfs_iunlock(ip, XFS_MMAPLOCK_EXCL);
			return error;
		}

		error = xfs_vn_setattr_size(mnt_userns, dentry, iattr);
		xfs_iunlock(ip, XFS_MMAPLOCK_EXCL);
	} else {
		trace_xfs_setattr(ip);

		error = xfs_vn_change_ok(mnt_userns, dentry, iattr);
		if (!error)
			error = xfs_setattr_nonsize(mnt_userns, dentry, ip, iattr);
	}

	return error;
}

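/*
 * Timestamp updates: with lazytime, pure timestamp changes are left in core
 * via generic_update_time(); otherwise (or when the inode version needs
 * bumping) the update is logged in a transaction.
 */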
STATIC int
xfs_vn_update_time(
	struct inode		*inode,
	struct timespec64	*now,
	int			flags)
{
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	int			log_flags = XFS_ILOG_TIMESTAMP;
	struct xfs_trans	*tp;
	int			error;

	trace_xfs_update_time(ip);

	if (inode->i_sb->s_flags & SB_LAZYTIME) {
		if (!((flags & S_VERSION) &&
		      inode_maybe_inc_iversion(inode, false)))
			return generic_update_time(inode, now, flags);

		/* Capture the iversion update that just occurred */
		log_flags |= XFS_ILOG_CORE;
	}

	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_fsyncts, 0, 0, 0, &tp);
	if (error)
		return error;

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	if (flags & S_CTIME)
		inode->i_ctime = *now;
	if (flags & S_MTIME)
		inode->i_mtime = *now;
	if (flags & S_ATIME)
		inode->i_atime = *now;

	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
	xfs_trans_log_inode(tp, ip, log_flags);
	return xfs_trans_commit(tp);
}

STATIC int
xfs_vn_fiemap(
	struct inode		*inode,
	struct fiemap_extent_info *fieinfo,
	u64			start,
	u64			length)
{
	int			error;

	xfs_ilock(XFS_I(inode), XFS_IOLOCK_SHARED);
	if (fieinfo->fi_flags & FIEMAP_FLAG_XATTR) {
		fieinfo->fi_flags &= ~FIEMAP_FLAG_XATTR;
		error = iomap_fiemap(inode, fieinfo, start, length,
				&xfs_xattr_iomap_ops);
	} else {
		error = iomap_fiemap(inode, fieinfo, start, length,
				&xfs_read_iomap_ops);
	}
	xfs_iunlock(XFS_I(inode), XFS_IOLOCK_SHARED);

	return error;
}

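/*
 * ->tmpfile: create an unnamed file and attach it directly to the open file
 * via finish_open_simple().
 */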
STATIC int
xfs_vn_tmpfile(
	struct user_namespace	*mnt_userns,
	struct inode		*dir,
	struct file		*file,
	umode_t			mode)
{
	int err = xfs_generic_create(mnt_userns, dir, file->f_path.dentry, mode, 0, file);

	return finish_open_simple(file, err);
}

static const struct inode_operations xfs_inode_operations = {
	.get_inode_acl		= xfs_get_acl,
	.set_acl		= xfs_set_acl,
	.getattr		= xfs_vn_getattr,
	.setattr		= xfs_vn_setattr,
	.listxattr		= xfs_vn_listxattr,
	.fiemap			= xfs_vn_fiemap,
	.update_time		= xfs_vn_update_time,
	.fileattr_get		= xfs_fileattr_get,
	.fileattr_set		= xfs_fileattr_set,
};

static const struct inode_operations xfs_dir_inode_operations = {
	.create			= xfs_vn_create,
	.lookup			= xfs_vn_lookup,
	.link			= xfs_vn_link,
	.unlink			= xfs_vn_unlink,
	.symlink		= xfs_vn_symlink,
	.mkdir			= xfs_vn_mkdir,
	/*
	 * Yes, XFS uses the same method for rmdir and unlink.
	 *
	 * There are some subtle differences deeper in the code,
	 * but we use S_ISDIR to check for those.
	 */
	.rmdir			= xfs_vn_unlink,
	.mknod			= xfs_vn_mknod,
	.rename			= xfs_vn_rename,
	.get_inode_acl		= xfs_get_acl,
	.set_acl		= xfs_set_acl,
	.getattr		= xfs_vn_getattr,
	.setattr		= xfs_vn_setattr,
	.listxattr		= xfs_vn_listxattr,
	.update_time		= xfs_vn_update_time,
	.tmpfile		= xfs_vn_tmpfile,
	.fileattr_get		= xfs_fileattr_get,
	.fileattr_set		= xfs_fileattr_set,
};

static const struct inode_operations xfs_dir_ci_inode_operations = {
	.create			= xfs_vn_create,
	.lookup			= xfs_vn_ci_lookup,
	.link			= xfs_vn_link,
	.unlink			= xfs_vn_unlink,
	.symlink		= xfs_vn_symlink,
	.mkdir			= xfs_vn_mkdir,
	/*
	 * Yes, XFS uses the same method for rmdir and unlink.
	 *
	 * There are some subtle differences deeper in the code,
	 * but we use S_ISDIR to check for those.
	 */
	.rmdir			= xfs_vn_unlink,
	.mknod			= xfs_vn_mknod,
	.rename			= xfs_vn_rename,
	.get_inode_acl		= xfs_get_acl,
	.set_acl		= xfs_set_acl,
	.getattr		= xfs_vn_getattr,
	.setattr		= xfs_vn_setattr,
	.listxattr		= xfs_vn_listxattr,
	.update_time		= xfs_vn_update_time,
	.tmpfile		= xfs_vn_tmpfile,
	.fileattr_get		= xfs_fileattr_get,
	.fileattr_set		= xfs_fileattr_set,
};

static const struct inode_operations xfs_symlink_inode_operations = {
	.get_link		= xfs_vn_get_link,
	.getattr		= xfs_vn_getattr,
	.setattr		= xfs_vn_setattr,
	.listxattr		= xfs_vn_listxattr,
	.update_time		= xfs_vn_update_time,
};

/* Figure out if this file actually supports DAX. */
static bool
xfs_inode_supports_dax(
	struct xfs_inode	*ip)
{
	struct xfs_mount	*mp = ip->i_mount;

	/* Only supported on regular files. */
	if (!S_ISREG(VFS_I(ip)->i_mode))
		return false;

	/* Block size must match page size */
	if (mp->m_sb.sb_blocksize != PAGE_SIZE)
		return false;

	/* Device has to support DAX too. */
	return xfs_inode_buftarg(ip)->bt_daxdev != NULL;
}

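/*
 * DAX is enabled for an inode that supports it when either the "dax=always"
 * mount option or the per-inode XFS_DIFLAG2_DAX flag is set; "dax=never"
 * overrides both.
 */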
static bool
xfs_inode_should_enable_dax(
	struct xfs_inode *ip)
{
	if (!IS_ENABLED(CONFIG_FS_DAX))
		return false;
	if (xfs_has_dax_never(ip->i_mount))
		return false;
	if (!xfs_inode_supports_dax(ip))
		return false;
	if (xfs_has_dax_always(ip->i_mount))
		return true;
	if (ip->i_diflags2 & XFS_DIFLAG2_DAX)
		return true;
	return false;
}

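/*
 * Propagate the XFS on-disk inode flags into the VFS inode flags. S_DAX is
 * only evaluated at inode initialization time and is never cleared here.
 */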
void
xfs_diflags_to_iflags(
	struct xfs_inode	*ip,
	bool init)
{
	struct inode		*inode = VFS_I(ip);
	unsigned int		xflags = xfs_ip2xflags(ip);
	unsigned int		flags = 0;

	ASSERT(!(IS_DAX(inode) && init));

	if (xflags & FS_XFLAG_IMMUTABLE)
		flags |= S_IMMUTABLE;
	if (xflags & FS_XFLAG_APPEND)
		flags |= S_APPEND;
	if (xflags & FS_XFLAG_SYNC)
		flags |= S_SYNC;
	if (xflags & FS_XFLAG_NOATIME)
		flags |= S_NOATIME;
	if (init && xfs_inode_should_enable_dax(ip))
		flags |= S_DAX;

	/*
	 * S_DAX can only be set during inode initialization and is never set by
	 * the VFS, so we cannot mask off S_DAX in i_flags.
	 */
	inode->i_flags &= ~(S_IMMUTABLE | S_APPEND | S_SYNC | S_NOATIME);
	inode->i_flags |= flags;
}

/*
 * Initialize the Linux inode.
 *
 * When reading existing inodes from disk this is called directly from xfs_iget;
 * when creating a new inode it is called from xfs_init_new_inode after setting
 * up the inode. These callers have different criteria for clearing XFS_INEW, so
 * leave it up to the caller to deal with unlocking the inode appropriately.
 */
void
xfs_setup_inode(
	struct xfs_inode	*ip)
{
	struct inode		*inode = &ip->i_vnode;
	gfp_t			gfp_mask;

	inode->i_ino = ip->i_ino;
	inode->i_state |= I_NEW;

	inode_sb_list_add(inode);
	/* make the inode look hashed for the writeback code */
	inode_fake_hash(inode);

	i_size_write(inode, ip->i_disk_size);
	xfs_diflags_to_iflags(ip, true);

	if (S_ISDIR(inode->i_mode)) {
		/*
		 * We set the i_rwsem class here to avoid potential races with
		 * lockdep_annotate_inode_mutex_key() reinitialising the lock
		 * after a filehandle lookup has already found the inode in
		 * cache before it has been unlocked via unlock_new_inode().
		 */
		lockdep_set_class(&inode->i_rwsem,
				  &inode->i_sb->s_type->i_mutex_dir_key);
		lockdep_set_class(&ip->i_lock.mr_lock, &xfs_dir_ilock_class);
	} else {
		lockdep_set_class(&ip->i_lock.mr_lock, &xfs_nondir_ilock_class);
	}

	/*
	 * Ensure all page cache allocations are done from GFP_NOFS context to
	 * prevent direct reclaim recursion back into the filesystem and blowing
	 * stacks or deadlocking.
	 */
	gfp_mask = mapping_gfp_mask(inode->i_mapping);
	mapping_set_gfp_mask(inode->i_mapping, (gfp_mask & ~(__GFP_FS)));

	/*
	 * If there is no attribute fork no ACL can exist on this inode,
	 * and it can't have any file capabilities attached to it either.
	 */
	if (!xfs_inode_has_attr_fork(ip)) {
		inode_has_no_xattr(inode);
		cache_no_acl(inode);
	}
}

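/*
 * Install the inode, file and address space operations that match this
 * inode's file type.
 */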
void
xfs_setup_iops(
	struct xfs_inode	*ip)
{
	struct inode		*inode = &ip->i_vnode;

	switch (inode->i_mode & S_IFMT) {
	case S_IFREG:
		inode->i_op = &xfs_inode_operations;
		inode->i_fop = &xfs_file_operations;
		if (IS_DAX(inode))
			inode->i_mapping->a_ops = &xfs_dax_aops;
		else
			inode->i_mapping->a_ops = &xfs_address_space_operations;
		break;
	case S_IFDIR:
		if (xfs_has_asciici(XFS_M(inode->i_sb)))
			inode->i_op = &xfs_dir_ci_inode_operations;
		else
			inode->i_op = &xfs_dir_inode_operations;
		inode->i_fop = &xfs_dir_file_operations;
		break;
	case S_IFLNK:
		inode->i_op = &xfs_symlink_inode_operations;
		break;
	default:
		inode->i_op = &xfs_inode_operations;
		init_special_inode(inode, inode->i_mode, inode->i_rdev);
		break;
	}
}