xfs: convert open coded corruption check to use XFS_IS_CORRUPT
fs/xfs/xfs_iops.c (linux-2.6-block.git)
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_acl.h"
#include "xfs_quota.h"
#include "xfs_attr.h"
#include "xfs_trans.h"
#include "xfs_trace.h"
#include "xfs_icache.h"
#include "xfs_symlink.h"
#include "xfs_dir2.h"
#include "xfs_iomap.h"
#include "xfs_error.h"

#include <linux/xattr.h>
#include <linux/posix_acl.h>
#include <linux/security.h>
#include <linux/iversion.h>

/*
 * Directories have different lock order w.r.t. mmap_sem compared to regular
 * files. This is due to readdir potentially triggering page faults on a user
 * buffer inside filldir(), and this happens with the ilock on the directory
 * held. For regular files, the lock order is the other way around - the
 * mmap_sem is taken during the page fault, and then we lock the ilock to do
 * block mapping. Hence we need a different class for the directory ilock so
 * that lockdep can tell them apart.
 */
static struct lock_class_key xfs_nondir_ilock_class;
static struct lock_class_key xfs_dir_ilock_class;

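/*
 * Illustrative sketch (not part of the original file): these two lockdep
 * class keys take effect when xfs_setup_inode(), further down in this file,
 * tags the inode's ilock with the class matching its type, roughly:
 *
 *	if (S_ISDIR(inode->i_mode))
 *		lockdep_set_class(&ip->i_lock.mr_lock, &xfs_dir_ilock_class);
 *	else
 *		lockdep_set_class(&ip->i_lock.mr_lock, &xfs_nondir_ilock_class);
 */
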
static int
xfs_initxattrs(
	struct inode		*inode,
	const struct xattr	*xattr_array,
	void			*fs_info)
{
	const struct xattr	*xattr;
	struct xfs_inode	*ip = XFS_I(inode);
	int			error = 0;

	for (xattr = xattr_array; xattr->name != NULL; xattr++) {
		error = xfs_attr_set(ip, xattr->name, xattr->value,
				     xattr->value_len, ATTR_SECURE);
		if (error < 0)
			break;
	}
	return error;
}

/*
 * Hook in SELinux. This is not quite correct yet; what we really need
 * here (as we do for default ACLs) is a mechanism by which creation of
 * these attrs can be journalled at inode creation time (along with the
 * inode, of course, such that log replay can't cause these to be lost).
 */

STATIC int
xfs_init_security(
	struct inode		*inode,
	struct inode		*dir,
	const struct qstr	*qstr)
{
	return security_inode_init_security(inode, dir, qstr,
					     &xfs_initxattrs, NULL);
}

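/*
 * Illustrative call flow (not part of the original file): the LSM core
 * builds the array of security xattrs for the new inode and hands it back
 * to the callback registered above, roughly:
 *
 *	xfs_init_security(inode, dir, qstr)
 *	  -> security_inode_init_security(inode, dir, qstr,
 *					  &xfs_initxattrs, NULL)
 *	       -> xfs_initxattrs(inode, xattr_array, NULL)
 *		    -> xfs_attr_set(ip, name, value, len, ATTR_SECURE)
 */
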
static void
xfs_dentry_to_name(
	struct xfs_name	*namep,
	struct dentry	*dentry)
{
	namep->name = dentry->d_name.name;
	namep->len = dentry->d_name.len;
	namep->type = XFS_DIR3_FT_UNKNOWN;
}

static int
xfs_dentry_mode_to_name(
	struct xfs_name	*namep,
	struct dentry	*dentry,
	int		mode)
{
	namep->name = dentry->d_name.name;
	namep->len = dentry->d_name.len;
	namep->type = xfs_mode_to_ftype(mode);

	if (unlikely(namep->type == XFS_DIR3_FT_UNKNOWN))
		return -EFSCORRUPTED;

	return 0;
}

STATIC void
xfs_cleanup_inode(
	struct inode	*dir,
	struct inode	*inode,
	struct dentry	*dentry)
{
	struct xfs_name	teardown;

	/* Oh, the horror.
	 * If we can't add the ACL or we fail in
	 * xfs_init_security we must back out.
	 * ENOSPC can hit here, among other things.
	 */
	xfs_dentry_to_name(&teardown, dentry);

	xfs_remove(XFS_I(dir), &teardown, XFS_I(inode));
}

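/*
 * Illustrative sketch (not part of the original file) of how the creation
 * paths below unwind through this helper when post-create setup fails,
 * roughly:
 *
 *	error = xfs_init_security(inode, dir, &dentry->d_name);
 *	if (unlikely(error))
 *		goto out_cleanup_inode;
 *	...
 * out_cleanup_inode:
 *	xfs_finish_inode_setup(ip);
 *	xfs_cleanup_inode(dir, inode, dentry);
 *	xfs_irele(ip);
 */
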
1da177e4 122STATIC int
d540e43b 123xfs_generic_create(
1da177e4
LT
124 struct inode *dir,
125 struct dentry *dentry,
1a67aafb 126 umode_t mode,
d540e43b
BF
127 dev_t rdev,
128 bool tmpfile) /* unnamed file */
1da177e4 129{
db0bb7ba 130 struct inode *inode;
979ebab1 131 struct xfs_inode *ip = NULL;
2401dc29 132 struct posix_acl *default_acl, *acl;
556b8b16 133 struct xfs_name name;
1da177e4
LT
134 int error;
135
136 /*
137 * Irix uses Missed'em'V split, but doesn't want to see
138 * the upper 5 bits of (14bit) major.
139 */
517b5e8c
CH
140 if (S_ISCHR(mode) || S_ISBLK(mode)) {
141 if (unlikely(!sysv_valid_dev(rdev) || MAJOR(rdev) & ~0x1ff))
142 return -EINVAL;
517b5e8c
CH
143 } else {
144 rdev = 0;
145 }
1da177e4 146
2401dc29
CH
147 error = posix_acl_create(dir, &mode, &default_acl, &acl);
148 if (error)
149 return error;
1da177e4 150
fab8eef8
AG
151 /* Verify mode is valid also for tmpfile case */
152 error = xfs_dentry_mode_to_name(&name, dentry, mode);
153 if (unlikely(error))
154 goto out_free_acl;
155
d540e43b 156 if (!tmpfile) {
d540e43b
BF
157 error = xfs_create(XFS_I(dir), &name, mode, rdev, &ip);
158 } else {
a1f69417 159 error = xfs_create_tmpfile(XFS_I(dir), mode, &ip);
d540e43b 160 }
db0bb7ba
CH
161 if (unlikely(error))
162 goto out_free_acl;
446ada4a 163
01651646 164 inode = VFS_I(ip);
979ebab1 165
2a7dba39 166 error = xfs_init_security(inode, dir, &dentry->d_name);
db0bb7ba
CH
167 if (unlikely(error))
168 goto out_cleanup_inode;
169
2401dc29 170#ifdef CONFIG_XFS_POSIX_ACL
db0bb7ba 171 if (default_acl) {
8ba35875 172 error = __xfs_set_acl(inode, default_acl, ACL_TYPE_DEFAULT);
2401dc29 173 if (error)
db0bb7ba 174 goto out_cleanup_inode;
1da177e4 175 }
2401dc29 176 if (acl) {
8ba35875 177 error = __xfs_set_acl(inode, acl, ACL_TYPE_ACCESS);
2401dc29
CH
178 if (error)
179 goto out_cleanup_inode;
180 }
181#endif
1da177e4 182
2b3d1d41
CH
183 xfs_setup_iops(ip);
184
c4a6bf7f
DW
185 if (tmpfile) {
186 /*
187 * The VFS requires that any inode fed to d_tmpfile must have
188 * nlink == 1 so that it can decrement the nlink in d_tmpfile.
189 * However, we created the temp file with nlink == 0 because
190 * we're not allowed to put an inode with nlink > 0 on the
191 * unlinked list. Therefore we have to set nlink to 1 so that
192 * d_tmpfile can immediately set it back to zero.
193 */
194 set_nlink(inode, 1);
d540e43b 195 d_tmpfile(dentry, inode);
c4a6bf7f 196 } else
d540e43b
BF
197 d_instantiate(dentry, inode);
198
58c90473
DC
199 xfs_finish_inode_setup(ip);
200
2401dc29
CH
201 out_free_acl:
202 if (default_acl)
203 posix_acl_release(default_acl);
204 if (acl)
205 posix_acl_release(acl);
2451337d 206 return error;
db0bb7ba
CH
207
208 out_cleanup_inode:
58c90473 209 xfs_finish_inode_setup(ip);
d540e43b
BF
210 if (!tmpfile)
211 xfs_cleanup_inode(dir, inode, dentry);
44a8736b 212 xfs_irele(ip);
2401dc29 213 goto out_free_acl;
1da177e4
LT
214}
215
d540e43b
BF
216STATIC int
217xfs_vn_mknod(
218 struct inode *dir,
219 struct dentry *dentry,
220 umode_t mode,
221 dev_t rdev)
222{
223 return xfs_generic_create(dir, dentry, mode, rdev, false);
224}
225
1da177e4 226STATIC int
416c6d5b 227xfs_vn_create(
1da177e4
LT
228 struct inode *dir,
229 struct dentry *dentry,
4acdaf27 230 umode_t mode,
ebfc3b49 231 bool flags)
1da177e4 232{
416c6d5b 233 return xfs_vn_mknod(dir, dentry, mode, 0);
1da177e4
LT
234}
235
236STATIC int
416c6d5b 237xfs_vn_mkdir(
1da177e4
LT
238 struct inode *dir,
239 struct dentry *dentry,
18bb1db3 240 umode_t mode)
1da177e4 241{
416c6d5b 242 return xfs_vn_mknod(dir, dentry, mode|S_IFDIR, 0);
1da177e4
LT
243}
244
245STATIC struct dentry *
416c6d5b 246xfs_vn_lookup(
1da177e4
LT
247 struct inode *dir,
248 struct dentry *dentry,
00cd8dd3 249 unsigned int flags)
1da177e4 250{
b113a6d3 251 struct inode *inode;
ef1f5e7a 252 struct xfs_inode *cip;
556b8b16 253 struct xfs_name name;
1da177e4
LT
254 int error;
255
256 if (dentry->d_name.len >= MAXNAMELEN)
257 return ERR_PTR(-ENAMETOOLONG);
258
fab8eef8 259 xfs_dentry_to_name(&name, dentry);
384f3ced 260 error = xfs_lookup(XFS_I(dir), &name, &cip, NULL);
b113a6d3
AV
261 if (likely(!error))
262 inode = VFS_I(cip);
263 else if (likely(error == -ENOENT))
264 inode = NULL;
265 else
266 inode = ERR_PTR(error);
267 return d_splice_alias(inode, dentry);
1da177e4
LT
268}
269
384f3ced
BN
270STATIC struct dentry *
271xfs_vn_ci_lookup(
272 struct inode *dir,
273 struct dentry *dentry,
00cd8dd3 274 unsigned int flags)
384f3ced
BN
275{
276 struct xfs_inode *ip;
277 struct xfs_name xname;
278 struct xfs_name ci_name;
279 struct qstr dname;
280 int error;
281
282 if (dentry->d_name.len >= MAXNAMELEN)
283 return ERR_PTR(-ENAMETOOLONG);
284
fab8eef8 285 xfs_dentry_to_name(&xname, dentry);
384f3ced
BN
286 error = xfs_lookup(XFS_I(dir), &xname, &ip, &ci_name);
287 if (unlikely(error)) {
2451337d
DC
288 if (unlikely(error != -ENOENT))
289 return ERR_PTR(error);
866d5dc9
BN
290 /*
291 * call d_add(dentry, NULL) here when d_drop_negative_children
292 * is called in xfs_vn_mknod (ie. allow negative dentries
293 * with CI filesystems).
294 */
384f3ced
BN
295 return NULL;
296 }
297
298 /* if exact match, just splice and exit */
299 if (!ci_name.name)
01651646 300 return d_splice_alias(VFS_I(ip), dentry);
384f3ced
BN
301
302 /* else case-insensitive match... */
303 dname.name = ci_name.name;
304 dname.len = ci_name.len;
e45b590b 305 dentry = d_add_ci(dentry, VFS_I(ip), &dname);
384f3ced
BN
306 kmem_free(ci_name.name);
307 return dentry;
308}
309
1da177e4 310STATIC int
416c6d5b 311xfs_vn_link(
1da177e4
LT
312 struct dentry *old_dentry,
313 struct inode *dir,
314 struct dentry *dentry)
315{
2b0143b5 316 struct inode *inode = d_inode(old_dentry);
556b8b16 317 struct xfs_name name;
1da177e4
LT
318 int error;
319
fab8eef8
AG
320 error = xfs_dentry_mode_to_name(&name, dentry, inode->i_mode);
321 if (unlikely(error))
322 return error;
1da177e4 323
556b8b16 324 error = xfs_link(XFS_I(dir), XFS_I(inode), &name);
d9424b3c 325 if (unlikely(error))
2451337d 326 return error;
a3da7896 327
7de9c6ee 328 ihold(inode);
a3da7896
CH
329 d_instantiate(dentry, inode);
330 return 0;
1da177e4
LT
331}
332
333STATIC int
416c6d5b 334xfs_vn_unlink(
1da177e4
LT
335 struct inode *dir,
336 struct dentry *dentry)
337{
556b8b16 338 struct xfs_name name;
1da177e4
LT
339 int error;
340
fab8eef8 341 xfs_dentry_to_name(&name, dentry);
1da177e4 342
2b0143b5 343 error = xfs_remove(XFS_I(dir), &name, XFS_I(d_inode(dentry)));
e5700704
CH
344 if (error)
345 return error;
346
347 /*
348 * With unlink, the VFS makes the dentry "negative": no inode,
349 * but still hashed. This is incompatible with case-insensitive
350 * mode, so invalidate (unhash) the dentry in CI-mode.
351 */
352 if (xfs_sb_version_hasasciici(&XFS_M(dir->i_sb)->m_sb))
353 d_invalidate(dentry);
354 return 0;
1da177e4
LT
355}
356
357STATIC int
416c6d5b 358xfs_vn_symlink(
1da177e4
LT
359 struct inode *dir,
360 struct dentry *dentry,
361 const char *symname)
362{
3937be5b
CH
363 struct inode *inode;
364 struct xfs_inode *cip = NULL;
556b8b16 365 struct xfs_name name;
1da177e4 366 int error;
576b1d67 367 umode_t mode;
1da177e4 368
3e5daf05 369 mode = S_IFLNK |
ce3b0f8d 370 (irix_symlink_mode ? 0777 & ~current_umask() : S_IRWXUGO);
fab8eef8
AG
371 error = xfs_dentry_mode_to_name(&name, dentry, mode);
372 if (unlikely(error))
373 goto out;
1da177e4 374
6c77b0ea 375 error = xfs_symlink(XFS_I(dir), &name, symname, mode, &cip);
3937be5b
CH
376 if (unlikely(error))
377 goto out;
378
01651646 379 inode = VFS_I(cip);
3937be5b 380
2a7dba39 381 error = xfs_init_security(inode, dir, &dentry->d_name);
3937be5b
CH
382 if (unlikely(error))
383 goto out_cleanup_inode;
384
2b3d1d41
CH
385 xfs_setup_iops(cip);
386
3937be5b 387 d_instantiate(dentry, inode);
58c90473 388 xfs_finish_inode_setup(cip);
3937be5b
CH
389 return 0;
390
391 out_cleanup_inode:
58c90473 392 xfs_finish_inode_setup(cip);
8f112e3b 393 xfs_cleanup_inode(dir, inode, dentry);
44a8736b 394 xfs_irele(cip);
3937be5b 395 out:
2451337d 396 return error;
1da177e4
LT
397}
398
1da177e4 399STATIC int
416c6d5b 400xfs_vn_rename(
1da177e4
LT
401 struct inode *odir,
402 struct dentry *odentry,
403 struct inode *ndir,
dbe1b5ca
CM
404 struct dentry *ndentry,
405 unsigned int flags)
1da177e4 406{
2b0143b5 407 struct inode *new_inode = d_inode(ndentry);
d31a1825 408 int omode = 0;
fab8eef8 409 int error;
556b8b16
BN
410 struct xfs_name oname;
411 struct xfs_name nname;
1da177e4 412
7dcf5c3e 413 if (flags & ~(RENAME_NOREPLACE | RENAME_EXCHANGE | RENAME_WHITEOUT))
dbe1b5ca
CM
414 return -EINVAL;
415
d31a1825
CM
416 /* if we are exchanging files, we need to set i_mode of both files */
417 if (flags & RENAME_EXCHANGE)
2b0143b5 418 omode = d_inode(ndentry)->i_mode;
d31a1825 419
fab8eef8
AG
420 error = xfs_dentry_mode_to_name(&oname, odentry, omode);
421 if (omode && unlikely(error))
422 return error;
423
424 error = xfs_dentry_mode_to_name(&nname, ndentry,
425 d_inode(odentry)->i_mode);
426 if (unlikely(error))
427 return error;
556b8b16 428
2b0143b5 429 return xfs_rename(XFS_I(odir), &oname, XFS_I(d_inode(odentry)),
dbe1b5ca 430 XFS_I(ndir), &nname,
d31a1825 431 new_inode ? XFS_I(new_inode) : NULL, flags);
1da177e4
LT
432}
433
434/*
435 * careful here - this function can get called recursively, so
436 * we need to be very careful about how much stack we use.
437 * uio is kmalloced for this reason...
438 */
680baacb 439STATIC const char *
6b255391 440xfs_vn_get_link(
1da177e4 441 struct dentry *dentry,
6b255391 442 struct inode *inode,
fceef393 443 struct delayed_call *done)
1da177e4 444{
1da177e4 445 char *link;
804c83c3 446 int error = -ENOMEM;
1da177e4 447
6b255391
AV
448 if (!dentry)
449 return ERR_PTR(-ECHILD);
450
6eb0b8df 451 link = kmalloc(XFS_SYMLINK_MAXLEN+1, GFP_KERNEL);
804c83c3
CH
452 if (!link)
453 goto out_err;
1da177e4 454
2b0143b5 455 error = xfs_readlink(XFS_I(d_inode(dentry)), link);
804c83c3
CH
456 if (unlikely(error))
457 goto out_kfree;
1da177e4 458
fceef393
AV
459 set_delayed_call(done, kfree_link, link);
460 return link;
804c83c3
CH
461
462 out_kfree:
463 kfree(link);
464 out_err:
680baacb 465 return ERR_PTR(error);
1da177e4
LT
466}
467
STATIC const char *
xfs_vn_get_link_inline(
	struct dentry		*dentry,
	struct inode		*inode,
	struct delayed_call	*done)
{
	struct xfs_inode	*ip = XFS_I(inode);
	char			*link;

	ASSERT(ip->i_df.if_flags & XFS_IFINLINE);

	/*
	 * The VFS crashes on a NULL pointer, so return -EFSCORRUPTED if
	 * if_data is junk.
	 */
	link = ip->i_df.if_u1.if_data;
	if (XFS_IS_CORRUPT(ip->i_mount, !link))
		return ERR_PTR(-EFSCORRUPTED);
	return link;
}

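/*
 * Illustrative sketch (not part of the original file) of the conversion
 * named in the commit subject. An open-coded corruption check of the
 * general form:
 *
 *	if (!link) {
 *		XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, ip->i_mount);
 *		return ERR_PTR(-EFSCORRUPTED);
 *	}
 *
 * collapses into the single macro use seen above, which both reports the
 * corruption and evaluates to true when the condition fires:
 *
 *	if (XFS_IS_CORRUPT(ip->i_mount, !link))
 *		return ERR_PTR(-EFSCORRUPTED);
 *
 * The "before" form is a sketch of the general pattern, not necessarily the
 * exact code this commit replaced.
 */
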
static uint32_t
xfs_stat_blksize(
	struct xfs_inode	*ip)
{
	struct xfs_mount	*mp = ip->i_mount;

	/*
	 * If the file blocks are being allocated from a realtime volume, then
	 * always return the realtime extent size.
	 */
	if (XFS_IS_REALTIME_INODE(ip))
		return xfs_get_extsz_hint(ip) << mp->m_sb.sb_blocklog;

	/*
	 * Allow large block sizes to be reported to userspace programs if the
	 * "largeio" mount option is used.
	 *
	 * If compatibility mode is specified, simply return the basic unit of
	 * caching so that we don't get inefficient read/modify/write I/O from
	 * user apps. Otherwise....
	 *
	 * If the underlying volume is a stripe, then return the stripe width
	 * in bytes as the recommended I/O size. If it is not a stripe and
	 * we've set a default buffered I/O size, return that; otherwise
	 * return the compat default.
	 */
	if (mp->m_flags & XFS_MOUNT_LARGEIO) {
		if (mp->m_swidth)
			return mp->m_swidth << mp->m_sb.sb_blocklog;
		if (mp->m_flags & XFS_MOUNT_ALLOCSIZE)
			return 1U << mp->m_allocsize_log;
	}

	return PAGE_SIZE;
}

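/*
 * Hypothetical userspace view of the value computed above (illustrative
 * only; the path is made up):
 *
 *	struct stat sb;
 *
 *	if (stat("/mnt/xfs/datafile", &sb) == 0)
 *		printf("preferred I/O size: %ld bytes\n", (long)sb.st_blksize);
 *
 * st_blksize is filled from the stat->blksize value that xfs_vn_getattr()
 * below derives from this helper.
 */
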
1da177e4 525STATIC int
416c6d5b 526xfs_vn_getattr(
a528d35e
DH
527 const struct path *path,
528 struct kstat *stat,
529 u32 request_mask,
530 unsigned int query_flags)
1da177e4 531{
a528d35e 532 struct inode *inode = d_inode(path->dentry);
c43f4087
CH
533 struct xfs_inode *ip = XFS_I(inode);
534 struct xfs_mount *mp = ip->i_mount;
535
cca28fb8 536 trace_xfs_getattr(ip);
c43f4087
CH
537
538 if (XFS_FORCED_SHUTDOWN(mp))
b474c7ae 539 return -EIO;
c43f4087
CH
540
541 stat->size = XFS_ISIZE(ip);
542 stat->dev = inode->i_sb->s_dev;
c19b3b05 543 stat->mode = inode->i_mode;
54d7b5c1 544 stat->nlink = inode->i_nlink;
7aab1b28
DE
545 stat->uid = inode->i_uid;
546 stat->gid = inode->i_gid;
c43f4087 547 stat->ino = ip->i_ino;
c43f4087 548 stat->atime = inode->i_atime;
f9581b14
CH
549 stat->mtime = inode->i_mtime;
550 stat->ctime = inode->i_ctime;
c43f4087
CH
551 stat->blocks =
552 XFS_FSB_TO_BB(mp, ip->i_d.di_nblocks + ip->i_delayed_blks);
553
5f955f26
DW
554 if (ip->i_d.di_version == 3) {
555 if (request_mask & STATX_BTIME) {
556 stat->result_mask |= STATX_BTIME;
557 stat->btime.tv_sec = ip->i_d.di_crtime.t_sec;
558 stat->btime.tv_nsec = ip->i_d.di_crtime.t_nsec;
559 }
560 }
561
1b9598c8
LR
562 /*
563 * Note: If you add another clause to set an attribute flag, please
564 * update attributes_mask below.
565 */
5f955f26
DW
566 if (ip->i_d.di_flags & XFS_DIFLAG_IMMUTABLE)
567 stat->attributes |= STATX_ATTR_IMMUTABLE;
568 if (ip->i_d.di_flags & XFS_DIFLAG_APPEND)
569 stat->attributes |= STATX_ATTR_APPEND;
570 if (ip->i_d.di_flags & XFS_DIFLAG_NODUMP)
571 stat->attributes |= STATX_ATTR_NODUMP;
c43f4087 572
1b9598c8
LR
573 stat->attributes_mask |= (STATX_ATTR_IMMUTABLE |
574 STATX_ATTR_APPEND |
575 STATX_ATTR_NODUMP);
576
c43f4087
CH
577 switch (inode->i_mode & S_IFMT) {
578 case S_IFBLK:
579 case S_IFCHR:
580 stat->blksize = BLKDEV_IOSIZE;
66f36464 581 stat->rdev = inode->i_rdev;
c43f4087
CH
582 break;
583 default:
dd2d535e 584 stat->blksize = xfs_stat_blksize(ip);
c43f4087
CH
585 stat->rdev = 0;
586 break;
69e23b9a 587 }
c43f4087
CH
588
589 return 0;
1da177e4
LT
590}
591
56c19e89
DC
592static void
593xfs_setattr_mode(
56c19e89
DC
594 struct xfs_inode *ip,
595 struct iattr *iattr)
596{
0c3d88df
CH
597 struct inode *inode = VFS_I(ip);
598 umode_t mode = iattr->ia_mode;
56c19e89 599
56c19e89
DC
600 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
601
56c19e89
DC
602 inode->i_mode &= S_IFMT;
603 inode->i_mode |= mode & ~S_IFMT;
604}
605
52785112 606void
c91c46c1
CH
607xfs_setattr_time(
608 struct xfs_inode *ip,
609 struct iattr *iattr)
610{
611 struct inode *inode = VFS_I(ip);
612
613 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
614
3987848c 615 if (iattr->ia_valid & ATTR_ATIME)
c91c46c1 616 inode->i_atime = iattr->ia_atime;
3987848c 617 if (iattr->ia_valid & ATTR_CTIME)
c91c46c1 618 inode->i_ctime = iattr->ia_ctime;
3987848c 619 if (iattr->ia_valid & ATTR_MTIME)
c91c46c1 620 inode->i_mtime = iattr->ia_mtime;
c91c46c1
CH
621}
622
69bca807
JK
623static int
624xfs_vn_change_ok(
625 struct dentry *dentry,
626 struct iattr *iattr)
627{
31051c85 628 struct xfs_mount *mp = XFS_I(d_inode(dentry))->i_mount;
69bca807
JK
629
630 if (mp->m_flags & XFS_MOUNT_RDONLY)
631 return -EROFS;
632
633 if (XFS_FORCED_SHUTDOWN(mp))
634 return -EIO;
635
31051c85 636 return setattr_prepare(dentry, iattr);
69bca807
JK
637}
638
639/*
640 * Set non-size attributes of an inode.
641 *
642 * Caution: The caller of this function is responsible for calling
31051c85 643 * setattr_prepare() or otherwise verifying the change is fine.
69bca807 644 */
c4ed4243
CH
645int
646xfs_setattr_nonsize(
647 struct xfs_inode *ip,
648 struct iattr *iattr,
649 int flags)
650{
651 xfs_mount_t *mp = ip->i_mount;
652 struct inode *inode = VFS_I(ip);
653 int mask = iattr->ia_valid;
654 xfs_trans_t *tp;
655 int error;
7aab1b28
DE
656 kuid_t uid = GLOBAL_ROOT_UID, iuid = GLOBAL_ROOT_UID;
657 kgid_t gid = GLOBAL_ROOT_GID, igid = GLOBAL_ROOT_GID;
c4ed4243
CH
658 struct xfs_dquot *udqp = NULL, *gdqp = NULL;
659 struct xfs_dquot *olddquot1 = NULL, *olddquot2 = NULL;
660
c4ed4243
CH
661 ASSERT((mask & ATTR_SIZE) == 0);
662
663 /*
664 * If disk quotas is on, we make sure that the dquots do exist on disk,
665 * before we start any other transactions. Trying to do this later
666 * is messy. We don't care to take a readlock to look at the ids
667 * in inode here, because we can't hold it across the trans_reserve.
668 * If the IDs do change before we take the ilock, we're covered
669 * because the i_*dquot fields will get updated anyway.
670 */
671 if (XFS_IS_QUOTA_ON(mp) && (mask & (ATTR_UID|ATTR_GID))) {
672 uint qflags = 0;
673
674 if ((mask & ATTR_UID) && XFS_IS_UQUOTA_ON(mp)) {
675 uid = iattr->ia_uid;
676 qflags |= XFS_QMOPT_UQUOTA;
677 } else {
7aab1b28 678 uid = inode->i_uid;
c4ed4243
CH
679 }
680 if ((mask & ATTR_GID) && XFS_IS_GQUOTA_ON(mp)) {
681 gid = iattr->ia_gid;
682 qflags |= XFS_QMOPT_GQUOTA;
683 } else {
7aab1b28 684 gid = inode->i_gid;
c4ed4243
CH
685 }
686
687 /*
688 * We take a reference when we initialize udqp and gdqp,
689 * so it is important that we never blindly double trip on
690 * the same variable. See xfs_create() for an example.
691 */
692 ASSERT(udqp == NULL);
693 ASSERT(gdqp == NULL);
7aab1b28
DE
694 error = xfs_qm_vop_dqalloc(ip, xfs_kuid_to_uid(uid),
695 xfs_kgid_to_gid(gid),
696 xfs_get_projid(ip),
697 qflags, &udqp, &gdqp, NULL);
c4ed4243
CH
698 if (error)
699 return error;
700 }
701
253f4911 702 error = xfs_trans_alloc(mp, &M_RES(mp)->tr_ichange, 0, 0, 0, &tp);
c4ed4243 703 if (error)
253f4911 704 goto out_dqrele;
c4ed4243
CH
705
706 xfs_ilock(ip, XFS_ILOCK_EXCL);
253f4911 707 xfs_trans_ijoin(tp, ip, 0);
c4ed4243
CH
708
709 /*
710 * Change file ownership. Must be the owner or privileged.
711 */
712 if (mask & (ATTR_UID|ATTR_GID)) {
713 /*
714 * These IDs could have changed since we last looked at them.
715 * But, we're assured that if the ownership did change
716 * while we didn't have the inode locked, inode's dquot(s)
717 * would have changed also.
718 */
7aab1b28
DE
719 iuid = inode->i_uid;
720 igid = inode->i_gid;
c4ed4243
CH
721 gid = (mask & ATTR_GID) ? iattr->ia_gid : igid;
722 uid = (mask & ATTR_UID) ? iattr->ia_uid : iuid;
723
724 /*
725 * Do a quota reservation only if uid/gid is actually
726 * going to change.
727 */
728 if (XFS_IS_QUOTA_RUNNING(mp) &&
7aab1b28
DE
729 ((XFS_IS_UQUOTA_ON(mp) && !uid_eq(iuid, uid)) ||
730 (XFS_IS_GQUOTA_ON(mp) && !gid_eq(igid, gid)))) {
c4ed4243
CH
731 ASSERT(tp);
732 error = xfs_qm_vop_chown_reserve(tp, ip, udqp, gdqp,
92f8ff73 733 NULL, capable(CAP_FOWNER) ?
c4ed4243
CH
734 XFS_QMOPT_FORCE_RES : 0);
735 if (error) /* out of quota */
253f4911 736 goto out_cancel;
c4ed4243
CH
737 }
738 }
739
c4ed4243
CH
740 /*
741 * Change file ownership. Must be the owner or privileged.
742 */
743 if (mask & (ATTR_UID|ATTR_GID)) {
744 /*
745 * CAP_FSETID overrides the following restrictions:
746 *
747 * The set-user-ID and set-group-ID bits of a file will be
748 * cleared upon successful return from chown()
749 */
c19b3b05 750 if ((inode->i_mode & (S_ISUID|S_ISGID)) &&
c4ed4243 751 !capable(CAP_FSETID))
c19b3b05 752 inode->i_mode &= ~(S_ISUID|S_ISGID);
c4ed4243
CH
753
754 /*
755 * Change the ownerships and register quota modifications
756 * in the transaction.
757 */
7aab1b28 758 if (!uid_eq(iuid, uid)) {
c4ed4243
CH
759 if (XFS_IS_QUOTA_RUNNING(mp) && XFS_IS_UQUOTA_ON(mp)) {
760 ASSERT(mask & ATTR_UID);
761 ASSERT(udqp);
762 olddquot1 = xfs_qm_vop_chown(tp, ip,
763 &ip->i_udquot, udqp);
764 }
7aab1b28 765 ip->i_d.di_uid = xfs_kuid_to_uid(uid);
c4ed4243
CH
766 inode->i_uid = uid;
767 }
7aab1b28 768 if (!gid_eq(igid, gid)) {
c4ed4243 769 if (XFS_IS_QUOTA_RUNNING(mp) && XFS_IS_GQUOTA_ON(mp)) {
5a01dd54
JL
770 ASSERT(xfs_sb_version_has_pquotino(&mp->m_sb) ||
771 !XFS_IS_PQUOTA_ON(mp));
c4ed4243
CH
772 ASSERT(mask & ATTR_GID);
773 ASSERT(gdqp);
774 olddquot2 = xfs_qm_vop_chown(tp, ip,
775 &ip->i_gdquot, gdqp);
776 }
7aab1b28 777 ip->i_d.di_gid = xfs_kgid_to_gid(gid);
c4ed4243
CH
778 inode->i_gid = gid;
779 }
780 }
781
56c19e89 782 if (mask & ATTR_MODE)
0c3d88df 783 xfs_setattr_mode(ip, iattr);
c91c46c1
CH
784 if (mask & (ATTR_ATIME|ATTR_CTIME|ATTR_MTIME))
785 xfs_setattr_time(ip, iattr);
c4ed4243
CH
786
787 xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
788
ff6d6af2 789 XFS_STATS_INC(mp, xs_ig_attrchg);
c4ed4243
CH
790
791 if (mp->m_flags & XFS_MOUNT_WSYNC)
792 xfs_trans_set_sync(tp);
70393313 793 error = xfs_trans_commit(tp);
c4ed4243
CH
794
795 xfs_iunlock(ip, XFS_ILOCK_EXCL);
796
797 /*
798 * Release any dquot(s) the inode had kept before chown.
799 */
800 xfs_qm_dqrele(olddquot1);
801 xfs_qm_dqrele(olddquot2);
802 xfs_qm_dqrele(udqp);
803 xfs_qm_dqrele(gdqp);
804
805 if (error)
b474c7ae 806 return error;
c4ed4243
CH
807
808 /*
809 * XXX(hch): Updating the ACL entries is not atomic vs the i_mode
810 * update. We could avoid this with linked transactions
811 * and passing down the transaction pointer all the way
812 * to attr_set. No previous user of the generic
813 * Posix ACL code seems to care about this issue either.
814 */
815 if ((mask & ATTR_MODE) && !(flags & XFS_ATTR_NOACL)) {
2451337d 816 error = posix_acl_chmod(inode, inode->i_mode);
c4ed4243 817 if (error)
b474c7ae 818 return error;
c4ed4243
CH
819 }
820
821 return 0;
822
253f4911 823out_cancel:
4906e215 824 xfs_trans_cancel(tp);
1fb254aa 825 xfs_iunlock(ip, XFS_ILOCK_EXCL);
253f4911 826out_dqrele:
c4ed4243
CH
827 xfs_qm_dqrele(udqp);
828 xfs_qm_dqrele(gdqp);
829 return error;
830}
831
69bca807
JK
832int
833xfs_vn_setattr_nonsize(
834 struct dentry *dentry,
835 struct iattr *iattr)
836{
837 struct xfs_inode *ip = XFS_I(d_inode(dentry));
838 int error;
839
840 trace_xfs_setattr(ip);
841
842 error = xfs_vn_change_ok(dentry, iattr);
843 if (error)
844 return error;
845 return xfs_setattr_nonsize(ip, iattr, 0);
846}
847
c4ed4243
CH
848/*
849 * Truncate file. Must have write permission and not be a directory.
69bca807
JK
850 *
851 * Caution: The caller of this function is responsible for calling
31051c85 852 * setattr_prepare() or otherwise verifying the change is fine.
c4ed4243 853 */
7bf7a193 854STATIC int
c4ed4243
CH
855xfs_setattr_size(
856 struct xfs_inode *ip,
76ca4c23 857 struct iattr *iattr)
c4ed4243
CH
858{
859 struct xfs_mount *mp = ip->i_mount;
860 struct inode *inode = VFS_I(ip);
673e8e59 861 xfs_off_t oldsize, newsize;
c4ed4243
CH
862 struct xfs_trans *tp;
863 int error;
f38996f5 864 uint lock_flags = 0;
5885ebda 865 bool did_zeroing = false;
c4ed4243 866
76ca4c23 867 ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL));
e8e9ad42 868 ASSERT(xfs_isilocked(ip, XFS_MMAPLOCK_EXCL));
c19b3b05 869 ASSERT(S_ISREG(inode->i_mode));
fe60a8a0
CH
870 ASSERT((iattr->ia_valid & (ATTR_UID|ATTR_GID|ATTR_ATIME|ATTR_ATIME_SET|
871 ATTR_MTIME_SET|ATTR_KILL_PRIV|ATTR_TIMES_SET)) == 0);
c4ed4243 872
ce7ae151 873 oldsize = inode->i_size;
673e8e59
CH
874 newsize = iattr->ia_size;
875
c4ed4243
CH
876 /*
877 * Short circuit the truncate case for zero length files.
878 */
673e8e59 879 if (newsize == 0 && oldsize == 0 && ip->i_d.di_nextents == 0) {
fe60a8a0 880 if (!(iattr->ia_valid & (ATTR_CTIME|ATTR_MTIME)))
76ca4c23 881 return 0;
681b1200
CH
882
883 /*
884 * Use the regular setattr path to update the timestamps.
885 */
681b1200
CH
886 iattr->ia_valid &= ~ATTR_SIZE;
887 return xfs_setattr_nonsize(ip, iattr, 0);
c4ed4243
CH
888 }
889
890 /*
891 * Make sure that the dquots are attached to the inode.
892 */
c14cfcca 893 error = xfs_qm_dqattach(ip);
c4ed4243 894 if (error)
76ca4c23 895 return error;
c4ed4243 896
f0c6bcba
CH
897 /*
898 * Wait for all direct I/O to complete.
899 */
900 inode_dio_wait(inode);
901
c4ed4243 902 /*
5885ebda
DC
903 * File data changes must be complete before we start the transaction to
904 * modify the inode. This needs to be done before joining the inode to
905 * the transaction because the inode cannot be unlocked once it is a
906 * part of the transaction.
907 *
f0c6bcba
CH
908 * Start with zeroing any data beyond EOF that we may expose on file
909 * extension, or zeroing out the rest of the block on a downward
910 * truncate.
c4ed4243 911 */
673e8e59 912 if (newsize > oldsize) {
f5c54717
CH
913 trace_xfs_zero_eof(ip, oldsize, newsize - oldsize);
914 error = iomap_zero_range(inode, oldsize, newsize - oldsize,
f150b423 915 &did_zeroing, &xfs_buffered_write_iomap_ops);
f0c6bcba 916 } else {
459f0fbc 917 error = iomap_truncate_page(inode, newsize, &did_zeroing,
f150b423 918 &xfs_buffered_write_iomap_ops);
c4ed4243 919 }
c4ed4243 920
f0c6bcba
CH
921 if (error)
922 return error;
923
49abc3a8 924 /*
0f9160b4
DC
925 * We've already locked out new page faults, so now we can safely remove
926 * pages from the page cache knowing they won't get refaulted until we
927 * drop the XFS_MMAP_EXCL lock after the extent manipulations are
928 * complete. The truncate_setsize() call also cleans partial EOF page
929 * PTEs on extending truncates and hence ensures sub-page block size
930 * filesystems are correctly handled, too.
49abc3a8 931 *
0f9160b4
DC
932 * We have to do all the page cache truncate work outside the
933 * transaction context as the "lock" order is page lock->log space
934 * reservation as defined by extent allocation in the writeback path.
253f4911 935 * Hence a truncate can fail with ENOMEM from xfs_trans_alloc(), but
0f9160b4
DC
936 * having already truncated the in-memory version of the file (i.e. made
937 * user visible changes). There's not much we can do about this, except
938 * to hope that the caller sees ENOMEM and retries the truncate
939 * operation.
350976ae
EG
940 *
941 * And we update in-core i_size and truncate page cache beyond newsize
942 * before writeback the [di_size, newsize] range, so we're guaranteed
943 * not to write stale data past the new EOF on truncate down.
49abc3a8 944 */
49abc3a8 945 truncate_setsize(inode, newsize);
c4ed4243 946
350976ae
EG
947 /*
948 * We are going to log the inode size change in this transaction so
949 * any previous writes that are beyond the on disk EOF and the new
950 * EOF that have not been written out need to be written here. If we
951 * do not write the data out, we expose ourselves to the null files
952 * problem. Note that this includes any block zeroing we did above;
953 * otherwise those blocks may not be zeroed after a crash.
954 */
955 if (did_zeroing ||
956 (newsize > ip->i_d.di_size && oldsize != ip->i_d.di_size)) {
957 error = filemap_write_and_wait_range(VFS_I(ip)->i_mapping,
958 ip->i_d.di_size, newsize - 1);
959 if (error)
960 return error;
961 }
962
253f4911 963 error = xfs_trans_alloc(mp, &M_RES(mp)->tr_itruncate, 0, 0, 0, &tp);
c4ed4243 964 if (error)
253f4911 965 return error;
c4ed4243 966
c4ed4243 967 lock_flags |= XFS_ILOCK_EXCL;
c4ed4243 968 xfs_ilock(ip, XFS_ILOCK_EXCL);
ddc3415a 969 xfs_trans_ijoin(tp, ip, 0);
c4ed4243
CH
970
971 /*
972 * Only change the c/mtime if we are changing the size or we are
973 * explicitly asked to change it. This handles the semantic difference
974 * between truncate() and ftruncate() as implemented in the VFS.
975 *
976 * The regular truncate() case without ATTR_CTIME and ATTR_MTIME is a
977 * special case where we need to update the times despite not having
978 * these flags set. For all other operations the VFS set these flags
979 * explicitly if it wants a timestamp update.
980 */
fe60a8a0
CH
981 if (newsize != oldsize &&
982 !(iattr->ia_valid & (ATTR_CTIME | ATTR_MTIME))) {
c4ed4243 983 iattr->ia_ctime = iattr->ia_mtime =
c2050a45 984 current_time(inode);
fe60a8a0 985 iattr->ia_valid |= ATTR_CTIME | ATTR_MTIME;
c4ed4243
CH
986 }
987
673e8e59
CH
988 /*
989 * The first thing we do is set the size to new_size permanently on
990 * disk. This way we don't have to worry about anyone ever being able
991 * to look at the data being freed even in the face of a crash.
992 * What we're getting around here is the case where we free a block, it
993 * is allocated to another file, it is written to, and then we crash.
994 * If the new data gets written to the file but the log buffers
995 * containing the free and reallocation don't, then we'd end up with
996 * garbage in the blocks being freed. As long as we make the new size
997 * permanent before actually freeing any blocks it doesn't matter if
998 * they get written to.
999 */
1000 ip->i_d.di_size = newsize;
673e8e59
CH
1001 xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
1002
1003 if (newsize <= oldsize) {
1004 error = xfs_itruncate_extents(&tp, ip, XFS_DATA_FORK, newsize);
c4ed4243 1005 if (error)
4906e215 1006 goto out_trans_cancel;
c4ed4243
CH
1007
1008 /*
1009 * Truncated "down", so we're removing references to old data
1010 * here - if we delay flushing for a long time, we expose
1011 * ourselves unduly to the notorious NULL files problem. So,
1012 * we mark this inode and flush it when the file is closed,
1013 * and do not wait the usual (long) time for writeout.
1014 */
1015 xfs_iflags_set(ip, XFS_ITRUNCATED);
27b52867
BF
1016
1017 /* A truncate down always removes post-EOF blocks. */
1018 xfs_inode_clear_eofblocks_tag(ip);
c4ed4243
CH
1019 }
1020
fe60a8a0 1021 if (iattr->ia_valid & ATTR_MODE)
0c3d88df 1022 xfs_setattr_mode(ip, iattr);
fe60a8a0 1023 if (iattr->ia_valid & (ATTR_ATIME|ATTR_CTIME|ATTR_MTIME))
c91c46c1 1024 xfs_setattr_time(ip, iattr);
c4ed4243
CH
1025
1026 xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
1027
ff6d6af2 1028 XFS_STATS_INC(mp, xs_ig_attrchg);
c4ed4243
CH
1029
1030 if (mp->m_flags & XFS_MOUNT_WSYNC)
1031 xfs_trans_set_sync(tp);
1032
70393313 1033 error = xfs_trans_commit(tp);
c4ed4243
CH
1034out_unlock:
1035 if (lock_flags)
1036 xfs_iunlock(ip, lock_flags);
1037 return error;
1038
c4ed4243 1039out_trans_cancel:
4906e215 1040 xfs_trans_cancel(tp);
c4ed4243
CH
1041 goto out_unlock;
1042}
1043
69bca807
JK
1044int
1045xfs_vn_setattr_size(
1046 struct dentry *dentry,
1047 struct iattr *iattr)
1048{
1049 struct xfs_inode *ip = XFS_I(d_inode(dentry));
1050 int error;
1051
1052 trace_xfs_setattr(ip);
1053
1054 error = xfs_vn_change_ok(dentry, iattr);
1055 if (error)
1056 return error;
1057 return xfs_setattr_size(ip, iattr);
1058}
1059
1da177e4 1060STATIC int
416c6d5b 1061xfs_vn_setattr(
76ca4c23
CH
1062 struct dentry *dentry,
1063 struct iattr *iattr)
1da177e4 1064{
76ca4c23
CH
1065 int error;
1066
1067 if (iattr->ia_valid & ATTR_SIZE) {
69eb5fa1
DW
1068 struct inode *inode = d_inode(dentry);
1069 struct xfs_inode *ip = XFS_I(inode);
c63a8eae 1070 uint iolock;
781355c6 1071
c63a8eae
DW
1072 xfs_ilock(ip, XFS_MMAPLOCK_EXCL);
1073 iolock = XFS_IOLOCK_EXCL | XFS_MMAPLOCK_EXCL;
781355c6 1074
69eb5fa1 1075 error = xfs_break_layouts(inode, &iolock, BREAK_UNMAP);
c63a8eae
DW
1076 if (error) {
1077 xfs_iunlock(ip, XFS_MMAPLOCK_EXCL);
65523218 1078 return error;
c63a8eae 1079 }
e8e9ad42 1080
0c187dc5 1081 error = xfs_vn_setattr_size(dentry, iattr);
65523218 1082 xfs_iunlock(ip, XFS_MMAPLOCK_EXCL);
76ca4c23 1083 } else {
69bca807 1084 error = xfs_vn_setattr_nonsize(dentry, iattr);
76ca4c23
CH
1085 }
1086
2451337d 1087 return error;
1da177e4
LT
1088}
1089
69ff2826
CH
1090STATIC int
1091xfs_vn_update_time(
1092 struct inode *inode,
95582b00 1093 struct timespec64 *now,
69ff2826
CH
1094 int flags)
1095{
1096 struct xfs_inode *ip = XFS_I(inode);
1097 struct xfs_mount *mp = ip->i_mount;
c3b1b131 1098 int log_flags = XFS_ILOG_TIMESTAMP;
69ff2826
CH
1099 struct xfs_trans *tp;
1100 int error;
1101
1102 trace_xfs_update_time(ip);
1103
c3b1b131
CH
1104 if (inode->i_sb->s_flags & SB_LAZYTIME) {
1105 if (!((flags & S_VERSION) &&
1106 inode_maybe_inc_iversion(inode, false)))
1107 return generic_update_time(inode, now, flags);
1108
1109 /* Capture the iversion update that just occurred */
1110 log_flags |= XFS_ILOG_CORE;
1111 }
1112
253f4911
CH
1113 error = xfs_trans_alloc(mp, &M_RES(mp)->tr_fsyncts, 0, 0, 0, &tp);
1114 if (error)
2451337d 1115 return error;
69ff2826
CH
1116
1117 xfs_ilock(ip, XFS_ILOCK_EXCL);
3987848c 1118 if (flags & S_CTIME)
69ff2826 1119 inode->i_ctime = *now;
3987848c 1120 if (flags & S_MTIME)
69ff2826 1121 inode->i_mtime = *now;
3987848c 1122 if (flags & S_ATIME)
69ff2826 1123 inode->i_atime = *now;
3987848c 1124
69ff2826 1125 xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
c3b1b131 1126 xfs_trans_log_inode(tp, ip, log_flags);
70393313 1127 return xfs_trans_commit(tp);
69ff2826
CH
1128}
1129
f35642e2
ES
1130STATIC int
1131xfs_vn_fiemap(
1132 struct inode *inode,
1133 struct fiemap_extent_info *fieinfo,
1134 u64 start,
1135 u64 length)
1136{
f35642e2
ES
1137 int error;
1138
d2bb140e 1139 xfs_ilock(XFS_I(inode), XFS_IOLOCK_SHARED);
1d4795e7
CH
1140 if (fieinfo->fi_flags & FIEMAP_FLAG_XATTR) {
1141 fieinfo->fi_flags &= ~FIEMAP_FLAG_XATTR;
1142 error = iomap_fiemap(inode, fieinfo, start, length,
1143 &xfs_xattr_iomap_ops);
1144 } else {
1145 error = iomap_fiemap(inode, fieinfo, start, length,
690c2a38 1146 &xfs_read_iomap_ops);
1d4795e7 1147 }
d2bb140e 1148 xfs_iunlock(XFS_I(inode), XFS_IOLOCK_SHARED);
f35642e2 1149
d2bb140e 1150 return error;
f35642e2
ES
1151}
1152
99b6436b
ZYW
1153STATIC int
1154xfs_vn_tmpfile(
1155 struct inode *dir,
1156 struct dentry *dentry,
1157 umode_t mode)
1158{
d540e43b 1159 return xfs_generic_create(dir, dentry, mode, 0, true);
99b6436b
ZYW
1160}
1161
41be8bed 1162static const struct inode_operations xfs_inode_operations = {
4e34e719 1163 .get_acl = xfs_get_acl,
2401dc29 1164 .set_acl = xfs_set_acl,
416c6d5b
NS
1165 .getattr = xfs_vn_getattr,
1166 .setattr = xfs_vn_setattr,
416c6d5b 1167 .listxattr = xfs_vn_listxattr,
f35642e2 1168 .fiemap = xfs_vn_fiemap,
69ff2826 1169 .update_time = xfs_vn_update_time,
1da177e4
LT
1170};
1171
41be8bed 1172static const struct inode_operations xfs_dir_inode_operations = {
416c6d5b
NS
1173 .create = xfs_vn_create,
1174 .lookup = xfs_vn_lookup,
1175 .link = xfs_vn_link,
1176 .unlink = xfs_vn_unlink,
1177 .symlink = xfs_vn_symlink,
1178 .mkdir = xfs_vn_mkdir,
	/*
	 * Yes, XFS uses the same method for rmdir and unlink.
	 *
	 * There are some subtle differences deeper in the code,
	 * but we use S_ISDIR to check for those.
	 */
1185 .rmdir = xfs_vn_unlink,
416c6d5b 1186 .mknod = xfs_vn_mknod,
2773bf00 1187 .rename = xfs_vn_rename,
4e34e719 1188 .get_acl = xfs_get_acl,
2401dc29 1189 .set_acl = xfs_set_acl,
416c6d5b
NS
1190 .getattr = xfs_vn_getattr,
1191 .setattr = xfs_vn_setattr,
416c6d5b 1192 .listxattr = xfs_vn_listxattr,
69ff2826 1193 .update_time = xfs_vn_update_time,
99b6436b 1194 .tmpfile = xfs_vn_tmpfile,
1da177e4
LT
1195};
1196
41be8bed 1197static const struct inode_operations xfs_dir_ci_inode_operations = {
384f3ced
BN
1198 .create = xfs_vn_create,
1199 .lookup = xfs_vn_ci_lookup,
1200 .link = xfs_vn_link,
1201 .unlink = xfs_vn_unlink,
1202 .symlink = xfs_vn_symlink,
1203 .mkdir = xfs_vn_mkdir,
	/*
	 * Yes, XFS uses the same method for rmdir and unlink.
	 *
	 * There are some subtle differences deeper in the code,
	 * but we use S_ISDIR to check for those.
	 */
1210 .rmdir = xfs_vn_unlink,
384f3ced 1211 .mknod = xfs_vn_mknod,
2773bf00 1212 .rename = xfs_vn_rename,
4e34e719 1213 .get_acl = xfs_get_acl,
2401dc29 1214 .set_acl = xfs_set_acl,
384f3ced
BN
1215 .getattr = xfs_vn_getattr,
1216 .setattr = xfs_vn_setattr,
384f3ced 1217 .listxattr = xfs_vn_listxattr,
69ff2826 1218 .update_time = xfs_vn_update_time,
99b6436b 1219 .tmpfile = xfs_vn_tmpfile,
384f3ced
BN
1220};
1221
41be8bed 1222static const struct inode_operations xfs_symlink_inode_operations = {
6b255391 1223 .get_link = xfs_vn_get_link,
416c6d5b
NS
1224 .getattr = xfs_vn_getattr,
1225 .setattr = xfs_vn_setattr,
416c6d5b 1226 .listxattr = xfs_vn_listxattr,
69ff2826 1227 .update_time = xfs_vn_update_time,
1da177e4 1228};
41be8bed 1229
30ee052e 1230static const struct inode_operations xfs_inline_symlink_inode_operations = {
30ee052e
CH
1231 .get_link = xfs_vn_get_link_inline,
1232 .getattr = xfs_vn_getattr,
1233 .setattr = xfs_vn_setattr,
30ee052e
CH
1234 .listxattr = xfs_vn_listxattr,
1235 .update_time = xfs_vn_update_time,
1236};
1237
/* Figure out if this file actually supports DAX. */
static bool
xfs_inode_supports_dax(
	struct xfs_inode	*ip)
{
	struct xfs_mount	*mp = ip->i_mount;

	/* Only supported on non-reflinked files. */
	if (!S_ISREG(VFS_I(ip)->i_mode) || xfs_is_reflink_inode(ip))
		return false;

	/* DAX mount option or DAX iflag must be set. */
	if (!(mp->m_flags & XFS_MOUNT_DAX) &&
	    !(ip->i_d.di_flags2 & XFS_DIFLAG2_DAX))
		return false;

	/* Block size must match page size */
	if (mp->m_sb.sb_blocksize != PAGE_SIZE)
		return false;

	/* Device has to support DAX too. */
	return xfs_inode_buftarg(ip)->bt_daxdev != NULL;
}

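/*
 * Illustrative sketch (not part of the original file): the result feeds the
 * S_DAX inode flag in xfs_diflags_to_iflags() just below, and IS_DAX() in
 * turn selects the address-space operations in xfs_setup_iops():
 *
 *	if (xfs_inode_supports_dax(ip))
 *		inode->i_flags |= S_DAX;
 *	...
 *	if (IS_DAX(inode))
 *		inode->i_mapping->a_ops = &xfs_dax_aops;
 *	else
 *		inode->i_mapping->a_ops = &xfs_address_space_operations;
 */
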
41be8bed
CH
1262STATIC void
1263xfs_diflags_to_iflags(
1264 struct inode *inode,
1265 struct xfs_inode *ip)
1266{
cbe4dab1
DC
1267 uint16_t flags = ip->i_d.di_flags;
1268
1269 inode->i_flags &= ~(S_IMMUTABLE | S_APPEND | S_SYNC |
1270 S_NOATIME | S_DAX);
1271
1272 if (flags & XFS_DIFLAG_IMMUTABLE)
41be8bed 1273 inode->i_flags |= S_IMMUTABLE;
cbe4dab1 1274 if (flags & XFS_DIFLAG_APPEND)
41be8bed 1275 inode->i_flags |= S_APPEND;
cbe4dab1 1276 if (flags & XFS_DIFLAG_SYNC)
41be8bed 1277 inode->i_flags |= S_SYNC;
cbe4dab1 1278 if (flags & XFS_DIFLAG_NOATIME)
41be8bed 1279 inode->i_flags |= S_NOATIME;
ba23cba9 1280 if (xfs_inode_supports_dax(ip))
cbe4dab1 1281 inode->i_flags |= S_DAX;
41be8bed
CH
1282}
1283
1284/*
2b3d1d41 1285 * Initialize the Linux inode.
bf904248 1286 *
58c90473
DC
1287 * When reading existing inodes from disk this is called directly from xfs_iget,
1288 * when creating a new inode it is called from xfs_ialloc after setting up the
1289 * inode. These callers have different criteria for clearing XFS_INEW, so leave
1290 * it up to the caller to deal with unlocking the inode appropriately.
41be8bed
CH
1291 */
1292void
1293xfs_setup_inode(
1294 struct xfs_inode *ip)
1295{
bf904248 1296 struct inode *inode = &ip->i_vnode;
ad22c7a0 1297 gfp_t gfp_mask;
bf904248
DC
1298
1299 inode->i_ino = ip->i_ino;
eaff8079 1300 inode->i_state = I_NEW;
646ec461
CH
1301
1302 inode_sb_list_add(inode);
c6f6cd06 1303 /* make the inode look hashed for the writeback code */
5bef9151 1304 inode_fake_hash(inode);
41be8bed 1305
7aab1b28
DE
1306 inode->i_uid = xfs_uid_to_kuid(ip->i_d.di_uid);
1307 inode->i_gid = xfs_gid_to_kgid(ip->i_d.di_gid);
41be8bed 1308
41be8bed 1309 i_size_write(inode, ip->i_d.di_size);
41be8bed 1310 xfs_diflags_to_iflags(inode, ip);
41be8bed 1311
2b3d1d41 1312 if (S_ISDIR(inode->i_mode)) {
ef215e39
DC
1313 /*
1314 * We set the i_rwsem class here to avoid potential races with
1315 * lockdep_annotate_inode_mutex_key() reinitialising the lock
1316 * after a filehandle lookup has already found the inode in
1317 * cache before it has been unlocked via unlock_new_inode().
1318 */
1319 lockdep_set_class(&inode->i_rwsem,
1320 &inode->i_sb->s_type->i_mutex_dir_key);
93a8614e 1321 lockdep_set_class(&ip->i_lock.mr_lock, &xfs_dir_ilock_class);
2b3d1d41 1322 } else {
2b3d1d41 1323 lockdep_set_class(&ip->i_lock.mr_lock, &xfs_nondir_ilock_class);
41be8bed
CH
1324 }
1325
ad22c7a0
DC
1326 /*
1327 * Ensure all page cache allocations are done from GFP_NOFS context to
1328 * prevent direct reclaim recursion back into the filesystem and blowing
1329 * stacks or deadlocking.
1330 */
1331 gfp_mask = mapping_gfp_mask(inode->i_mapping);
1332 mapping_set_gfp_mask(inode->i_mapping, (gfp_mask & ~(__GFP_FS)));
1333
510792ee
CH
1334 /*
1335 * If there is no attribute fork no ACL can exist on this inode,
1336 * and it can't have any file capabilities attached to it either.
1337 */
1338 if (!XFS_IFORK_Q(ip)) {
1339 inode_has_no_xattr(inode);
6311b108 1340 cache_no_acl(inode);
510792ee 1341 }
41be8bed 1342}
2b3d1d41
CH
1343
1344void
1345xfs_setup_iops(
1346 struct xfs_inode *ip)
1347{
1348 struct inode *inode = &ip->i_vnode;
1349
41be8bed
CH
1350 switch (inode->i_mode & S_IFMT) {
1351 case S_IFREG:
1352 inode->i_op = &xfs_inode_operations;
1353 inode->i_fop = &xfs_file_operations;
6e2608df
DW
1354 if (IS_DAX(inode))
1355 inode->i_mapping->a_ops = &xfs_dax_aops;
1356 else
1357 inode->i_mapping->a_ops = &xfs_address_space_operations;
41be8bed
CH
1358 break;
1359 case S_IFDIR:
1360 if (xfs_sb_version_hasasciici(&XFS_M(inode->i_sb)->m_sb))
1361 inode->i_op = &xfs_dir_ci_inode_operations;
1362 else
1363 inode->i_op = &xfs_dir_inode_operations;
1364 inode->i_fop = &xfs_dir_file_operations;
1365 break;
1366 case S_IFLNK:
30ee052e
CH
1367 if (ip->i_df.if_flags & XFS_IFINLINE)
1368 inode->i_op = &xfs_inline_symlink_inode_operations;
1369 else
1370 inode->i_op = &xfs_symlink_inode_operations;
41be8bed
CH
1371 break;
1372 default:
1373 inode->i_op = &xfs_inode_operations;
1374 init_special_inode(inode, inode->i_mode, inode->i_rdev);
1375 break;
1376 }
41be8bed 1377}