/*
  FUSE: Filesystem in Userspace
  Copyright (C) 2001-2008  Miklos Szeredi <miklos@szeredi.hu>

  This program can be distributed under the terms of the GNU GPL.
  See the file COPYING.
*/

#include "fuse_i.h"

#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/sched/signal.h>
#include <linux/module.h>
#include <linux/swap.h>
#include <linux/falloc.h>
#include <linux/uio.h>
#include <linux/fs.h>
#include <linux/filelock.h>
#include <linux/splice.h>
#include <linux/task_io_accounting_ops.h>

static int fuse_send_open(struct fuse_mount *fm, u64 nodeid,
			  unsigned int open_flags, int opcode,
			  struct fuse_open_out *outargp)
{
	struct fuse_open_in inarg;
	FUSE_ARGS(args);

	memset(&inarg, 0, sizeof(inarg));
	inarg.flags = open_flags & ~(O_CREAT | O_EXCL | O_NOCTTY);
	if (!fm->fc->atomic_o_trunc)
		inarg.flags &= ~O_TRUNC;

	if (fm->fc->handle_killpriv_v2 &&
	    (inarg.flags & O_TRUNC) && !capable(CAP_FSETID)) {
		inarg.open_flags |= FUSE_OPEN_KILL_SUIDGID;
	}

	args.opcode = opcode;
	args.nodeid = nodeid;
	args.in_numargs = 1;
	args.in_args[0].size = sizeof(inarg);
	args.in_args[0].value = &inarg;
	args.out_numargs = 1;
	args.out_args[0].size = sizeof(*outargp);
	args.out_args[0].value = outargp;

	return fuse_simple_request(fm, &args);
}
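
/*
 * Illustrative note (a sketch, not part of the original file): the request
 * built above carries one input argument (struct fuse_open_in) and expects
 * one output argument (struct fuse_open_out) back from the server, so a
 * typical caller looks like:
 *
 *	struct fuse_open_out outarg;
 *	int err = fuse_send_open(fm, get_node_id(inode), file->f_flags,
 *				 FUSE_OPEN, &outarg);
 */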

struct fuse_file *fuse_file_alloc(struct fuse_mount *fm, bool release)
{
	struct fuse_file *ff;

	ff = kzalloc(sizeof(struct fuse_file), GFP_KERNEL_ACCOUNT);
	if (unlikely(!ff))
		return NULL;

	ff->fm = fm;
	if (release) {
		ff->args = kzalloc(sizeof(*ff->args), GFP_KERNEL_ACCOUNT);
		if (!ff->args) {
			kfree(ff);
			return NULL;
		}
	}

	INIT_LIST_HEAD(&ff->write_entry);
	refcount_set(&ff->count, 1);
	RB_CLEAR_NODE(&ff->polled_node);
	init_waitqueue_head(&ff->poll_wait);

	ff->kh = atomic64_inc_return(&fm->fc->khctr);

	return ff;
}

void fuse_file_free(struct fuse_file *ff)
{
	kfree(ff->args);
	kfree(ff);
}

static struct fuse_file *fuse_file_get(struct fuse_file *ff)
{
	refcount_inc(&ff->count);
	return ff;
}

static void fuse_release_end(struct fuse_mount *fm, struct fuse_args *args,
			     int error)
{
	struct fuse_release_args *ra = container_of(args, typeof(*ra), args);

	iput(ra->inode);
	kfree(ra);
}

static void fuse_file_put(struct fuse_file *ff, bool sync)
{
	if (refcount_dec_and_test(&ff->count)) {
		struct fuse_release_args *ra = &ff->args->release_args;
		struct fuse_args *args = (ra ? &ra->args : NULL);

		if (ra && ra->inode)
			fuse_file_io_release(ff, ra->inode);

		if (!args) {
			/* Do nothing when server does not implement 'open' */
		} else if (sync) {
			fuse_simple_request(ff->fm, args);
			fuse_release_end(ff->fm, args, 0);
		} else {
			args->end = fuse_release_end;
			if (fuse_simple_background(ff->fm, args,
						   GFP_KERNEL | __GFP_NOFAIL))
				fuse_release_end(ff->fm, args, -ENOTCONN);
		}
		kfree(ff);
	}
}

struct fuse_file *fuse_file_open(struct fuse_mount *fm, u64 nodeid,
				 unsigned int open_flags, bool isdir)
{
	struct fuse_conn *fc = fm->fc;
	struct fuse_file *ff;
	int opcode = isdir ? FUSE_OPENDIR : FUSE_OPEN;
	bool open = isdir ? !fc->no_opendir : !fc->no_open;

	ff = fuse_file_alloc(fm, open);
	if (!ff)
		return ERR_PTR(-ENOMEM);

	ff->fh = 0;
	/* Default for no-open */
	ff->open_flags = FOPEN_KEEP_CACHE | (isdir ? FOPEN_CACHE_DIR : 0);
	if (open) {
		/* Store outarg for fuse_finish_open() */
		struct fuse_open_out *outargp = &ff->args->open_outarg;
		int err;

		err = fuse_send_open(fm, nodeid, open_flags, opcode, outargp);
		if (!err) {
			ff->fh = outargp->fh;
			ff->open_flags = outargp->open_flags;
		} else if (err != -ENOSYS) {
			fuse_file_free(ff);
			return ERR_PTR(err);
		} else {
			/* No release needed */
			kfree(ff->args);
			ff->args = NULL;
			if (isdir)
				fc->no_opendir = 1;
			else
				fc->no_open = 1;
		}
	}

	if (isdir)
		ff->open_flags &= ~FOPEN_DIRECT_IO;

	ff->nodeid = nodeid;

	return ff;
}

int fuse_do_open(struct fuse_mount *fm, u64 nodeid, struct file *file,
		 bool isdir)
{
	struct fuse_file *ff = fuse_file_open(fm, nodeid, file->f_flags, isdir);

	if (!IS_ERR(ff))
		file->private_data = ff;

	return PTR_ERR_OR_ZERO(ff);
}
EXPORT_SYMBOL_GPL(fuse_do_open);
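
/*
 * Illustrative sketch (not part of the original file): a minimal caller of
 * fuse_do_open(), mirroring what fuse_open() below does for regular files.
 * The truncation and locking handling of the real fuse_open() is omitted.
 */
static int __maybe_unused fuse_do_open_example(struct inode *inode,
					       struct file *file)
{
	struct fuse_mount *fm = get_fuse_mount(inode);
	int err;

	/* Sends FUSE_OPEN and stores the fuse_file in file->private_data */
	err = fuse_do_open(fm, get_node_id(inode), file, false);
	if (!err)
		err = fuse_finish_open(inode, file);

	return err;
}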

static void fuse_link_write_file(struct file *file)
{
	struct inode *inode = file_inode(file);
	struct fuse_inode *fi = get_fuse_inode(inode);
	struct fuse_file *ff = file->private_data;
	/*
	 * file may be written through mmap, so chain it onto the
	 * inode's write_files list
	 */
	spin_lock(&fi->lock);
	if (list_empty(&ff->write_entry))
		list_add(&ff->write_entry, &fi->write_files);
	spin_unlock(&fi->lock);
}

int fuse_finish_open(struct inode *inode, struct file *file)
{
	struct fuse_file *ff = file->private_data;
	struct fuse_conn *fc = get_fuse_conn(inode);
	int err;

	err = fuse_file_io_open(file, inode);
	if (err)
		return err;

	if (ff->open_flags & FOPEN_STREAM)
		stream_open(inode, file);
	else if (ff->open_flags & FOPEN_NONSEEKABLE)
		nonseekable_open(inode, file);

	if ((file->f_mode & FMODE_WRITE) && fc->writeback_cache)
		fuse_link_write_file(file);

	return 0;
}

static void fuse_truncate_update_attr(struct inode *inode, struct file *file)
{
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_inode *fi = get_fuse_inode(inode);

	spin_lock(&fi->lock);
	fi->attr_version = atomic64_inc_return(&fc->attr_version);
	i_size_write(inode, 0);
	spin_unlock(&fi->lock);
	file_update_time(file);
	fuse_invalidate_attr_mask(inode, FUSE_STATX_MODSIZE);
}

static int fuse_open(struct inode *inode, struct file *file)
{
	struct fuse_mount *fm = get_fuse_mount(inode);
	struct fuse_inode *fi = get_fuse_inode(inode);
	struct fuse_conn *fc = fm->fc;
	struct fuse_file *ff;
	int err;
	bool is_truncate = (file->f_flags & O_TRUNC) && fc->atomic_o_trunc;
	bool is_wb_truncate = is_truncate && fc->writeback_cache;
	bool dax_truncate = is_truncate && FUSE_IS_DAX(inode);

	if (fuse_is_bad(inode))
		return -EIO;

	err = generic_file_open(inode, file);
	if (err)
		return err;

	if (is_wb_truncate || dax_truncate)
		inode_lock(inode);

	if (dax_truncate) {
		filemap_invalidate_lock(inode->i_mapping);
		err = fuse_dax_break_layouts(inode, 0, 0);
		if (err)
			goto out_inode_unlock;
	}

	if (is_wb_truncate || dax_truncate)
		fuse_set_nowrite(inode);

	err = fuse_do_open(fm, get_node_id(inode), file, false);
	if (!err) {
		ff = file->private_data;
		err = fuse_finish_open(inode, file);
		if (err)
			fuse_sync_release(fi, ff, file->f_flags);
		else if (is_truncate)
			fuse_truncate_update_attr(inode, file);
	}

	if (is_wb_truncate || dax_truncate)
		fuse_release_nowrite(inode);
	if (!err) {
		if (is_truncate)
			truncate_pagecache(inode, 0);
		else if (!(ff->open_flags & FOPEN_KEEP_CACHE))
			invalidate_inode_pages2(inode->i_mapping);
	}
	if (dax_truncate)
		filemap_invalidate_unlock(inode->i_mapping);
out_inode_unlock:
	if (is_wb_truncate || dax_truncate)
		inode_unlock(inode);

	return err;
}

static void fuse_prepare_release(struct fuse_inode *fi, struct fuse_file *ff,
				 unsigned int flags, int opcode, bool sync)
{
	struct fuse_conn *fc = ff->fm->fc;
	struct fuse_release_args *ra = &ff->args->release_args;

	if (fuse_file_passthrough(ff))
		fuse_passthrough_release(ff, fuse_inode_backing(fi));

	/* Inode is NULL on error path of fuse_create_open() */
	if (likely(fi)) {
		spin_lock(&fi->lock);
		list_del(&ff->write_entry);
		spin_unlock(&fi->lock);
	}
	spin_lock(&fc->lock);
	if (!RB_EMPTY_NODE(&ff->polled_node))
		rb_erase(&ff->polled_node, &fc->polled_files);
	spin_unlock(&fc->lock);

	wake_up_interruptible_all(&ff->poll_wait);

	if (!ra)
		return;

	/* ff->args was used for open outarg */
	memset(ff->args, 0, sizeof(*ff->args));
	ra->inarg.fh = ff->fh;
	ra->inarg.flags = flags;
	ra->args.in_numargs = 1;
	ra->args.in_args[0].size = sizeof(struct fuse_release_in);
	ra->args.in_args[0].value = &ra->inarg;
	ra->args.opcode = opcode;
	ra->args.nodeid = ff->nodeid;
	ra->args.force = true;
	ra->args.nocreds = true;

	/*
	 * Hold inode until release is finished.
	 * From fuse_sync_release() the refcount is 1 and everything's
	 * synchronous, so we are fine with not doing igrab() here.
	 */
	ra->inode = sync ? NULL : igrab(&fi->inode);
}

void fuse_file_release(struct inode *inode, struct fuse_file *ff,
		       unsigned int open_flags, fl_owner_t id, bool isdir)
{
	struct fuse_inode *fi = get_fuse_inode(inode);
	struct fuse_release_args *ra = &ff->args->release_args;
	int opcode = isdir ? FUSE_RELEASEDIR : FUSE_RELEASE;

	fuse_prepare_release(fi, ff, open_flags, opcode, false);

	if (ra && ff->flock) {
		ra->inarg.release_flags |= FUSE_RELEASE_FLOCK_UNLOCK;
		ra->inarg.lock_owner = fuse_lock_owner_id(ff->fm->fc, id);
	}

	/*
	 * Normally this will send the RELEASE request, however if
	 * some asynchronous READ or WRITE requests are outstanding,
	 * the sending will be delayed.
	 *
	 * Make the release synchronous if this is a fuseblk mount,
	 * synchronous RELEASE is allowed (and desirable) in this case
	 * because the server can be trusted not to screw up.
	 */
	fuse_file_put(ff, ff->fm->fc->destroy);
}

void fuse_release_common(struct file *file, bool isdir)
{
	fuse_file_release(file_inode(file), file->private_data, file->f_flags,
			  (fl_owner_t) file, isdir);
}

static int fuse_release(struct inode *inode, struct file *file)
{
	struct fuse_conn *fc = get_fuse_conn(inode);

	/*
	 * Dirty pages might remain despite write_inode_now() call from
	 * fuse_flush() due to writes racing with the close.
	 */
	if (fc->writeback_cache)
		write_inode_now(inode, 1);

	fuse_release_common(file, false);

	/* return value is ignored by VFS */
	return 0;
}

void fuse_sync_release(struct fuse_inode *fi, struct fuse_file *ff,
		       unsigned int flags)
{
	WARN_ON(refcount_read(&ff->count) > 1);
	fuse_prepare_release(fi, ff, flags, FUSE_RELEASE, true);
	fuse_file_put(ff, true);
}
EXPORT_SYMBOL_GPL(fuse_sync_release);

/*
 * Scramble the ID space with XTEA, so that the value of the files_struct
 * pointer is not exposed to userspace.
 */
u64 fuse_lock_owner_id(struct fuse_conn *fc, fl_owner_t id)
{
	u32 *k = fc->scramble_key;
	u64 v = (unsigned long) id;
	u32 v0 = v;
	u32 v1 = v >> 32;
	u32 sum = 0;
	int i;

	for (i = 0; i < 32; i++) {
		v0 += ((v1 << 4 ^ v1 >> 5) + v1) ^ (sum + k[sum & 3]);
		sum += 0x9E3779B9;
		v1 += ((v0 << 4 ^ v0 >> 5) + v0) ^ (sum + k[sum>>11 & 3]);
	}

	return (u64) v0 + ((u64) v1 << 32);
}
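
/*
 * Example usage (a sketch, not in the original file): callers pass the VFS
 * lock owner, e.g. current->files or (fl_owner_t) file, and embed the
 * scrambled result in a request, as fuse_flush() does below:
 *
 *	inarg.lock_owner = fuse_lock_owner_id(fm->fc, id);
 */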

struct fuse_writepage_args {
	struct fuse_io_args ia;
	struct rb_node writepages_entry;
	struct list_head queue_entry;
	struct fuse_writepage_args *next;
	struct inode *inode;
	struct fuse_sync_bucket *bucket;
};

static struct fuse_writepage_args *fuse_find_writeback(struct fuse_inode *fi,
					pgoff_t idx_from, pgoff_t idx_to)
{
	struct rb_node *n;

	n = fi->writepages.rb_node;

	while (n) {
		struct fuse_writepage_args *wpa;
		pgoff_t curr_index;

		wpa = rb_entry(n, struct fuse_writepage_args, writepages_entry);
		WARN_ON(get_fuse_inode(wpa->inode) != fi);
		curr_index = wpa->ia.write.in.offset >> PAGE_SHIFT;
		if (idx_from >= curr_index + wpa->ia.ap.num_pages)
			n = n->rb_right;
		else if (idx_to < curr_index)
			n = n->rb_left;
		else
			return wpa;
	}
	return NULL;
}

/*
 * Check if any page in a range is under writeback
 *
 * This is currently done by walking the list of writepage requests
 * for the inode, which can be pretty inefficient.
 */
static bool fuse_range_is_writeback(struct inode *inode, pgoff_t idx_from,
				    pgoff_t idx_to)
{
	struct fuse_inode *fi = get_fuse_inode(inode);
	bool found;

	spin_lock(&fi->lock);
	found = fuse_find_writeback(fi, idx_from, idx_to);
	spin_unlock(&fi->lock);

	return found;
}

static inline bool fuse_page_is_writeback(struct inode *inode, pgoff_t index)
{
	return fuse_range_is_writeback(inode, index, index);
}

/*
 * Wait for page writeback to be completed.
 *
 * Since fuse doesn't rely on the VM writeback tracking, this has to
 * use some other means.
 */
static void fuse_wait_on_page_writeback(struct inode *inode, pgoff_t index)
{
	struct fuse_inode *fi = get_fuse_inode(inode);

	wait_event(fi->page_waitq, !fuse_page_is_writeback(inode, index));
}

/*
 * Wait for all pending writepages on the inode to finish.
 *
 * This is currently done by blocking further writes with FUSE_NOWRITE
 * and waiting for all sent writes to complete.
 *
 * This must be called under i_mutex, otherwise the FUSE_NOWRITE usage
 * could conflict with truncation.
 */
static void fuse_sync_writes(struct inode *inode)
{
	fuse_set_nowrite(inode);
	fuse_release_nowrite(inode);
}

static int fuse_flush(struct file *file, fl_owner_t id)
{
	struct inode *inode = file_inode(file);
	struct fuse_mount *fm = get_fuse_mount(inode);
	struct fuse_file *ff = file->private_data;
	struct fuse_flush_in inarg;
	FUSE_ARGS(args);
	int err;

	if (fuse_is_bad(inode))
		return -EIO;

	if (ff->open_flags & FOPEN_NOFLUSH && !fm->fc->writeback_cache)
		return 0;

	err = write_inode_now(inode, 1);
	if (err)
		return err;

	inode_lock(inode);
	fuse_sync_writes(inode);
	inode_unlock(inode);

	err = filemap_check_errors(file->f_mapping);
	if (err)
		return err;

	err = 0;
	if (fm->fc->no_flush)
		goto inval_attr_out;

	memset(&inarg, 0, sizeof(inarg));
	inarg.fh = ff->fh;
	inarg.lock_owner = fuse_lock_owner_id(fm->fc, id);
	args.opcode = FUSE_FLUSH;
	args.nodeid = get_node_id(inode);
	args.in_numargs = 1;
	args.in_args[0].size = sizeof(inarg);
	args.in_args[0].value = &inarg;
	args.force = true;

	err = fuse_simple_request(fm, &args);
	if (err == -ENOSYS) {
		fm->fc->no_flush = 1;
		err = 0;
	}

inval_attr_out:
	/*
	 * i_blocks is not maintained by fuse in memory; if writeback cache
	 * is enabled, i_blocks from the cached attr may not be accurate.
	 */
	if (!err && fm->fc->writeback_cache)
		fuse_invalidate_attr_mask(inode, STATX_BLOCKS);
	return err;
}

int fuse_fsync_common(struct file *file, loff_t start, loff_t end,
		      int datasync, int opcode)
{
	struct inode *inode = file->f_mapping->host;
	struct fuse_mount *fm = get_fuse_mount(inode);
	struct fuse_file *ff = file->private_data;
	FUSE_ARGS(args);
	struct fuse_fsync_in inarg;

	memset(&inarg, 0, sizeof(inarg));
	inarg.fh = ff->fh;
	inarg.fsync_flags = datasync ? FUSE_FSYNC_FDATASYNC : 0;
	args.opcode = opcode;
	args.nodeid = get_node_id(inode);
	args.in_numargs = 1;
	args.in_args[0].size = sizeof(inarg);
	args.in_args[0].value = &inarg;
	return fuse_simple_request(fm, &args);
}

static int fuse_fsync(struct file *file, loff_t start, loff_t end,
		      int datasync)
{
	struct inode *inode = file->f_mapping->host;
	struct fuse_conn *fc = get_fuse_conn(inode);
	int err;

	if (fuse_is_bad(inode))
		return -EIO;

	inode_lock(inode);

	/*
	 * Start writeback against all dirty pages of the inode, then
	 * wait for all outstanding writes, before sending the FSYNC
	 * request.
	 */
	err = file_write_and_wait_range(file, start, end);
	if (err)
		goto out;

	fuse_sync_writes(inode);

	/*
	 * Due to the implementation of fuse writeback,
	 * file_write_and_wait_range() does not catch errors.
	 * We have to do this directly after fuse_sync_writes().
	 */
	err = file_check_and_advance_wb_err(file);
	if (err)
		goto out;

	err = sync_inode_metadata(inode, 1);
	if (err)
		goto out;

	if (fc->no_fsync)
		goto out;

	err = fuse_fsync_common(file, start, end, datasync, FUSE_FSYNC);
	if (err == -ENOSYS) {
		fc->no_fsync = 1;
		err = 0;
	}
out:
	inode_unlock(inode);

	return err;
}

void fuse_read_args_fill(struct fuse_io_args *ia, struct file *file, loff_t pos,
			 size_t count, int opcode)
{
	struct fuse_file *ff = file->private_data;
	struct fuse_args *args = &ia->ap.args;

	ia->read.in.fh = ff->fh;
	ia->read.in.offset = pos;
	ia->read.in.size = count;
	ia->read.in.flags = file->f_flags;
	args->opcode = opcode;
	args->nodeid = ff->nodeid;
	args->in_numargs = 1;
	args->in_args[0].size = sizeof(ia->read.in);
	args->in_args[0].value = &ia->read.in;
	args->out_argvar = true;
	args->out_numargs = 1;
	args->out_args[0].size = count;
}

static void fuse_release_user_pages(struct fuse_args_pages *ap,
				    bool should_dirty)
{
	unsigned int i;

	for (i = 0; i < ap->num_pages; i++) {
		if (should_dirty)
			set_page_dirty_lock(ap->pages[i]);
		if (ap->args.is_pinned)
			unpin_user_page(ap->pages[i]);
	}
}

static void fuse_io_release(struct kref *kref)
{
	kfree(container_of(kref, struct fuse_io_priv, refcnt));
}

static ssize_t fuse_get_res_by_io(struct fuse_io_priv *io)
{
	if (io->err)
		return io->err;

	if (io->bytes >= 0 && io->write)
		return -EIO;

	return io->bytes < 0 ? io->size : io->bytes;
}

/*
 * In case of short read, the caller sets 'pos' to the position of
 * actual end of fuse request in IO request. Otherwise, if bytes_requested
 * == bytes_transferred or rw == WRITE, the caller sets 'pos' to -1.
 *
 * An example:
 * User requested DIO read of 64K. It was split into two 32K fuse requests,
 * both submitted asynchronously. The first of them was ACKed by userspace as
 * fully completed (req->out.args[0].size == 32K) resulting in pos == -1. The
 * second request was ACKed as short, e.g. only 1K was read, resulting in
 * pos == 33K.
 *
 * Thus, when all fuse requests are completed, the minimal non-negative 'pos'
 * will be equal to the length of the longest contiguous fragment of
 * transferred data starting from the beginning of IO request.
 */
static void fuse_aio_complete(struct fuse_io_priv *io, int err, ssize_t pos)
{
	int left;

	spin_lock(&io->lock);
	if (err)
		io->err = io->err ? : err;
	else if (pos >= 0 && (io->bytes < 0 || pos < io->bytes))
		io->bytes = pos;

	left = --io->reqs;
	if (!left && io->blocking)
		complete(io->done);
	spin_unlock(&io->lock);

	if (!left && !io->blocking) {
		ssize_t res = fuse_get_res_by_io(io);

		if (res >= 0) {
			struct inode *inode = file_inode(io->iocb->ki_filp);
			struct fuse_conn *fc = get_fuse_conn(inode);
			struct fuse_inode *fi = get_fuse_inode(inode);

			spin_lock(&fi->lock);
			fi->attr_version = atomic64_inc_return(&fc->attr_version);
			spin_unlock(&fi->lock);
		}

		io->iocb->ki_complete(io->iocb, res);
	}

	kref_put(&io->refcnt, fuse_io_release);
}

static struct fuse_io_args *fuse_io_alloc(struct fuse_io_priv *io,
					  unsigned int npages)
{
	struct fuse_io_args *ia;

	ia = kzalloc(sizeof(*ia), GFP_KERNEL);
	if (ia) {
		ia->io = io;
		ia->ap.pages = fuse_pages_alloc(npages, GFP_KERNEL,
						&ia->ap.descs);
		if (!ia->ap.pages) {
			kfree(ia);
			ia = NULL;
		}
	}
	return ia;
}

static void fuse_io_free(struct fuse_io_args *ia)
{
	kfree(ia->ap.pages);
	kfree(ia);
}

static void fuse_aio_complete_req(struct fuse_mount *fm, struct fuse_args *args,
				  int err)
{
	struct fuse_io_args *ia = container_of(args, typeof(*ia), ap.args);
	struct fuse_io_priv *io = ia->io;
	ssize_t pos = -1;

	fuse_release_user_pages(&ia->ap, io->should_dirty);

	if (err) {
		/* Nothing */
	} else if (io->write) {
		if (ia->write.out.size > ia->write.in.size) {
			err = -EIO;
		} else if (ia->write.in.size != ia->write.out.size) {
			pos = ia->write.in.offset - io->offset +
				ia->write.out.size;
		}
	} else {
		u32 outsize = args->out_args[0].size;

		if (ia->read.in.size != outsize)
			pos = ia->read.in.offset - io->offset + outsize;
	}

	fuse_aio_complete(io, err, pos);
	fuse_io_free(ia);
}
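
/*
 * Worked example (illustrative, not in the original file): for the 64K DIO
 * read described before fuse_aio_complete(), the short second request has
 * ia->read.in.offset == io->offset + 32K and outsize == 1K, so
 * pos = 32K + 1K = 33K relative to the start of the IO request.
 */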

static ssize_t fuse_async_req_send(struct fuse_mount *fm,
				   struct fuse_io_args *ia, size_t num_bytes)
{
	ssize_t err;
	struct fuse_io_priv *io = ia->io;

	spin_lock(&io->lock);
	kref_get(&io->refcnt);
	io->size += num_bytes;
	io->reqs++;
	spin_unlock(&io->lock);

	ia->ap.args.end = fuse_aio_complete_req;
	ia->ap.args.may_block = io->should_dirty;
	err = fuse_simple_background(fm, &ia->ap.args, GFP_KERNEL);
	if (err)
		fuse_aio_complete_req(fm, &ia->ap.args, err);

	return num_bytes;
}

static ssize_t fuse_send_read(struct fuse_io_args *ia, loff_t pos, size_t count,
			      fl_owner_t owner)
{
	struct file *file = ia->io->iocb->ki_filp;
	struct fuse_file *ff = file->private_data;
	struct fuse_mount *fm = ff->fm;

	fuse_read_args_fill(ia, file, pos, count, FUSE_READ);
	if (owner != NULL) {
		ia->read.in.read_flags |= FUSE_READ_LOCKOWNER;
		ia->read.in.lock_owner = fuse_lock_owner_id(fm->fc, owner);
	}

	if (ia->io->async)
		return fuse_async_req_send(fm, ia, count);

	return fuse_simple_request(fm, &ia->ap.args);
}

static void fuse_read_update_size(struct inode *inode, loff_t size,
				  u64 attr_ver)
{
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_inode *fi = get_fuse_inode(inode);

	spin_lock(&fi->lock);
	if (attr_ver >= fi->attr_version && size < inode->i_size &&
	    !test_bit(FUSE_I_SIZE_UNSTABLE, &fi->state)) {
		fi->attr_version = atomic64_inc_return(&fc->attr_version);
		i_size_write(inode, size);
	}
	spin_unlock(&fi->lock);
}
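
/*
 * The attr_ver check above guards against a race: the cached size is only
 * shrunk if no newer attributes (e.g. from a concurrent extending write)
 * have been installed since the read was issued.
 */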

static void fuse_short_read(struct inode *inode, u64 attr_ver, size_t num_read,
			    struct fuse_args_pages *ap)
{
	struct fuse_conn *fc = get_fuse_conn(inode);

	/*
	 * If writeback_cache is enabled, a short read means there's a hole in
	 * the file. Some data after the hole is in page cache, but has not
	 * reached the client fs yet. So the hole is not present there.
	 */
	if (!fc->writeback_cache) {
		loff_t pos = page_offset(ap->pages[0]) + num_read;
		fuse_read_update_size(inode, pos, attr_ver);
	}
}

static int fuse_do_readpage(struct file *file, struct page *page)
{
	struct inode *inode = page->mapping->host;
	struct fuse_mount *fm = get_fuse_mount(inode);
	loff_t pos = page_offset(page);
	struct fuse_page_desc desc = { .length = PAGE_SIZE };
	struct fuse_io_args ia = {
		.ap.args.page_zeroing = true,
		.ap.args.out_pages = true,
		.ap.num_pages = 1,
		.ap.pages = &page,
		.ap.descs = &desc,
	};
	ssize_t res;
	u64 attr_ver;

	/*
	 * Page writeback can extend beyond the lifetime of the
	 * page-cache page, so make sure we read a properly synced
	 * page.
	 */
	fuse_wait_on_page_writeback(inode, page->index);

	attr_ver = fuse_get_attr_version(fm->fc);

	/* Don't overflow end offset */
	if (pos + (desc.length - 1) == LLONG_MAX)
		desc.length--;

	fuse_read_args_fill(&ia, file, pos, desc.length, FUSE_READ);
	res = fuse_simple_request(fm, &ia.ap.args);
	if (res < 0)
		return res;
	/*
	 * Short read means EOF. If file size is larger, truncate it
	 */
	if (res < desc.length)
		fuse_short_read(inode, attr_ver, res, &ia.ap);

	SetPageUptodate(page);

	return 0;
}

static int fuse_read_folio(struct file *file, struct folio *folio)
{
	struct page *page = &folio->page;
	struct inode *inode = page->mapping->host;
	int err;

	err = -EIO;
	if (fuse_is_bad(inode))
		goto out;

	err = fuse_do_readpage(file, page);
	fuse_invalidate_atime(inode);
 out:
	unlock_page(page);
	return err;
}

static void fuse_readpages_end(struct fuse_mount *fm, struct fuse_args *args,
			       int err)
{
	int i;
	struct fuse_io_args *ia = container_of(args, typeof(*ia), ap.args);
	struct fuse_args_pages *ap = &ia->ap;
	size_t count = ia->read.in.size;
	size_t num_read = args->out_args[0].size;
	struct address_space *mapping = NULL;

	for (i = 0; mapping == NULL && i < ap->num_pages; i++)
		mapping = ap->pages[i]->mapping;

	if (mapping) {
		struct inode *inode = mapping->host;

		/*
		 * Short read means EOF. If file size is larger, truncate it
		 */
		if (!err && num_read < count)
			fuse_short_read(inode, ia->read.attr_ver, num_read, ap);

		fuse_invalidate_atime(inode);
	}

	for (i = 0; i < ap->num_pages; i++) {
		struct folio *folio = page_folio(ap->pages[i]);

		folio_end_read(folio, !err);
		folio_put(folio);
	}
	if (ia->ff)
		fuse_file_put(ia->ff, false);

	fuse_io_free(ia);
}

static void fuse_send_readpages(struct fuse_io_args *ia, struct file *file)
{
	struct fuse_file *ff = file->private_data;
	struct fuse_mount *fm = ff->fm;
	struct fuse_args_pages *ap = &ia->ap;
	loff_t pos = page_offset(ap->pages[0]);
	size_t count = ap->num_pages << PAGE_SHIFT;
	ssize_t res;
	int err;

	ap->args.out_pages = true;
	ap->args.page_zeroing = true;
	ap->args.page_replace = true;

	/* Don't overflow end offset */
	if (pos + (count - 1) == LLONG_MAX) {
		count--;
		ap->descs[ap->num_pages - 1].length--;
	}
	WARN_ON((loff_t) (pos + count) < 0);

	fuse_read_args_fill(ia, file, pos, count, FUSE_READ);
	ia->read.attr_ver = fuse_get_attr_version(fm->fc);
	if (fm->fc->async_read) {
		ia->ff = fuse_file_get(ff);
		ap->args.end = fuse_readpages_end;
		err = fuse_simple_background(fm, &ap->args, GFP_KERNEL);
		if (!err)
			return;
	} else {
		res = fuse_simple_request(fm, &ap->args);
		err = res < 0 ? res : 0;
	}
	fuse_readpages_end(fm, &ap->args, err);
}

static void fuse_readahead(struct readahead_control *rac)
{
	struct inode *inode = rac->mapping->host;
	struct fuse_conn *fc = get_fuse_conn(inode);
	unsigned int i, max_pages, nr_pages = 0;

	if (fuse_is_bad(inode))
		return;

	max_pages = min_t(unsigned int, fc->max_pages,
			  fc->max_read / PAGE_SIZE);

	for (;;) {
		struct fuse_io_args *ia;
		struct fuse_args_pages *ap;

		if (fc->num_background >= fc->congestion_threshold &&
		    rac->ra->async_size >= readahead_count(rac))
			/*
			 * Congested and only async pages left, so skip the
			 * rest.
			 */
			break;

		nr_pages = readahead_count(rac) - nr_pages;
		if (nr_pages > max_pages)
			nr_pages = max_pages;
		if (nr_pages == 0)
			break;
		ia = fuse_io_alloc(NULL, nr_pages);
		if (!ia)
			return;
		ap = &ia->ap;
		nr_pages = __readahead_batch(rac, ap->pages, nr_pages);
		for (i = 0; i < nr_pages; i++) {
			fuse_wait_on_page_writeback(inode,
						    readahead_index(rac) + i);
			ap->descs[i].length = PAGE_SIZE;
		}
		ap->num_pages = nr_pages;
		fuse_send_readpages(ia, rac->file);
	}
}

static ssize_t fuse_cache_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	struct inode *inode = iocb->ki_filp->f_mapping->host;
	struct fuse_conn *fc = get_fuse_conn(inode);

	/*
	 * In auto invalidate mode, always update attributes on read.
	 * Otherwise, only update if we attempt to read past EOF (to ensure
	 * i_size is up to date).
	 */
	if (fc->auto_inval_data ||
	    (iocb->ki_pos + iov_iter_count(to) > i_size_read(inode))) {
		int err;
		err = fuse_update_attributes(inode, iocb->ki_filp, STATX_SIZE);
		if (err)
			return err;
	}

	return generic_file_read_iter(iocb, to);
}

static void fuse_write_args_fill(struct fuse_io_args *ia, struct fuse_file *ff,
				 loff_t pos, size_t count)
{
	struct fuse_args *args = &ia->ap.args;

	ia->write.in.fh = ff->fh;
	ia->write.in.offset = pos;
	ia->write.in.size = count;
	args->opcode = FUSE_WRITE;
	args->nodeid = ff->nodeid;
	args->in_numargs = 2;
	if (ff->fm->fc->minor < 9)
		args->in_args[0].size = FUSE_COMPAT_WRITE_IN_SIZE;
	else
		args->in_args[0].size = sizeof(ia->write.in);
	args->in_args[0].value = &ia->write.in;
	args->in_args[1].size = count;
	args->out_numargs = 1;
	args->out_args[0].size = sizeof(ia->write.out);
	args->out_args[0].value = &ia->write.out;
}

static unsigned int fuse_write_flags(struct kiocb *iocb)
{
	unsigned int flags = iocb->ki_filp->f_flags;

	if (iocb_is_dsync(iocb))
		flags |= O_DSYNC;
	if (iocb->ki_flags & IOCB_SYNC)
		flags |= O_SYNC;

	return flags;
}

static ssize_t fuse_send_write(struct fuse_io_args *ia, loff_t pos,
			       size_t count, fl_owner_t owner)
{
	struct kiocb *iocb = ia->io->iocb;
	struct file *file = iocb->ki_filp;
	struct fuse_file *ff = file->private_data;
	struct fuse_mount *fm = ff->fm;
	struct fuse_write_in *inarg = &ia->write.in;
	ssize_t err;

	fuse_write_args_fill(ia, ff, pos, count);
	inarg->flags = fuse_write_flags(iocb);
	if (owner != NULL) {
		inarg->write_flags |= FUSE_WRITE_LOCKOWNER;
		inarg->lock_owner = fuse_lock_owner_id(fm->fc, owner);
	}

	if (ia->io->async)
		return fuse_async_req_send(fm, ia, count);

	err = fuse_simple_request(fm, &ia->ap.args);
	if (!err && ia->write.out.size > count)
		err = -EIO;

	return err ?: ia->write.out.size;
}

bool fuse_write_update_attr(struct inode *inode, loff_t pos, ssize_t written)
{
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_inode *fi = get_fuse_inode(inode);
	bool ret = false;

	spin_lock(&fi->lock);
	fi->attr_version = atomic64_inc_return(&fc->attr_version);
	if (written > 0 && pos > inode->i_size) {
		i_size_write(inode, pos);
		ret = true;
	}
	spin_unlock(&fi->lock);

	fuse_invalidate_attr_mask(inode, FUSE_STATX_MODSIZE);

	return ret;
}

static ssize_t fuse_send_write_pages(struct fuse_io_args *ia,
				     struct kiocb *iocb, struct inode *inode,
				     loff_t pos, size_t count)
{
	struct fuse_args_pages *ap = &ia->ap;
	struct file *file = iocb->ki_filp;
	struct fuse_file *ff = file->private_data;
	struct fuse_mount *fm = ff->fm;
	unsigned int offset, i;
	bool short_write;
	int err;

	for (i = 0; i < ap->num_pages; i++)
		fuse_wait_on_page_writeback(inode, ap->pages[i]->index);

	fuse_write_args_fill(ia, ff, pos, count);
	ia->write.in.flags = fuse_write_flags(iocb);
	if (fm->fc->handle_killpriv_v2 && !capable(CAP_FSETID))
		ia->write.in.write_flags |= FUSE_WRITE_KILL_SUIDGID;

	err = fuse_simple_request(fm, &ap->args);
	if (!err && ia->write.out.size > count)
		err = -EIO;

	short_write = ia->write.out.size < count;
	offset = ap->descs[0].offset;
	count = ia->write.out.size;
	for (i = 0; i < ap->num_pages; i++) {
		struct page *page = ap->pages[i];

		if (err) {
			ClearPageUptodate(page);
		} else {
			if (count >= PAGE_SIZE - offset)
				count -= PAGE_SIZE - offset;
			else {
				if (short_write)
					ClearPageUptodate(page);
				count = 0;
			}
			offset = 0;
		}
		if (ia->write.page_locked && (i == ap->num_pages - 1))
			unlock_page(page);
		put_page(page);
	}

	return err;
}

static ssize_t fuse_fill_write_pages(struct fuse_io_args *ia,
				     struct address_space *mapping,
				     struct iov_iter *ii, loff_t pos,
				     unsigned int max_pages)
{
	struct fuse_args_pages *ap = &ia->ap;
	struct fuse_conn *fc = get_fuse_conn(mapping->host);
	unsigned offset = pos & (PAGE_SIZE - 1);
	size_t count = 0;
	int err;

	ap->args.in_pages = true;
	ap->descs[0].offset = offset;

	do {
		size_t tmp;
		struct page *page;
		pgoff_t index = pos >> PAGE_SHIFT;
		size_t bytes = min_t(size_t, PAGE_SIZE - offset,
				     iov_iter_count(ii));

		bytes = min_t(size_t, bytes, fc->max_write - count);

 again:
		err = -EFAULT;
		if (fault_in_iov_iter_readable(ii, bytes))
			break;

		err = -ENOMEM;
		page = grab_cache_page_write_begin(mapping, index);
		if (!page)
			break;

		if (mapping_writably_mapped(mapping))
			flush_dcache_page(page);

		tmp = copy_page_from_iter_atomic(page, offset, bytes, ii);
		flush_dcache_page(page);

		if (!tmp) {
			unlock_page(page);
			put_page(page);
			goto again;
		}

		err = 0;
		ap->pages[ap->num_pages] = page;
		ap->descs[ap->num_pages].length = tmp;
		ap->num_pages++;

		count += tmp;
		pos += tmp;
		offset += tmp;
		if (offset == PAGE_SIZE)
			offset = 0;

		/* If we copied full page, mark it uptodate */
		if (tmp == PAGE_SIZE)
			SetPageUptodate(page);

		if (PageUptodate(page)) {
			unlock_page(page);
		} else {
			ia->write.page_locked = true;
			break;
		}
		if (!fc->big_writes)
			break;
	} while (iov_iter_count(ii) && count < fc->max_write &&
		 ap->num_pages < max_pages && offset == 0);

	return count > 0 ? count : err;
}

static inline unsigned int fuse_wr_pages(loff_t pos, size_t len,
					 unsigned int max_pages)
{
	return min_t(unsigned int,
		     ((pos + len - 1) >> PAGE_SHIFT) -
		     (pos >> PAGE_SHIFT) + 1,
		     max_pages);
}
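
/*
 * Worked example (illustrative): with 4K pages, a write of len = 6000 bytes
 * at pos = 3000 touches pages 0..2, so fuse_wr_pages() returns
 * min(((3000 + 6000 - 1) >> 12) - (3000 >> 12) + 1, max_pages)
 * = min(3, max_pages).
 */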

static ssize_t fuse_perform_write(struct kiocb *iocb, struct iov_iter *ii)
{
	struct address_space *mapping = iocb->ki_filp->f_mapping;
	struct inode *inode = mapping->host;
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_inode *fi = get_fuse_inode(inode);
	loff_t pos = iocb->ki_pos;
	int err = 0;
	ssize_t res = 0;

	if (inode->i_size < pos + iov_iter_count(ii))
		set_bit(FUSE_I_SIZE_UNSTABLE, &fi->state);

	do {
		ssize_t count;
		struct fuse_io_args ia = {};
		struct fuse_args_pages *ap = &ia.ap;
		unsigned int nr_pages = fuse_wr_pages(pos, iov_iter_count(ii),
						      fc->max_pages);

		ap->pages = fuse_pages_alloc(nr_pages, GFP_KERNEL, &ap->descs);
		if (!ap->pages) {
			err = -ENOMEM;
			break;
		}

		count = fuse_fill_write_pages(&ia, mapping, ii, pos, nr_pages);
		if (count <= 0) {
			err = count;
		} else {
			err = fuse_send_write_pages(&ia, iocb, inode,
						    pos, count);
			if (!err) {
				size_t num_written = ia.write.out.size;

				res += num_written;
				pos += num_written;

				/* break out of the loop on short write */
				if (num_written != count)
					err = -EIO;
			}
		}
		kfree(ap->pages);
	} while (!err && iov_iter_count(ii));

	fuse_write_update_attr(inode, pos, res);
	clear_bit(FUSE_I_SIZE_UNSTABLE, &fi->state);

	if (!res)
		return err;
	iocb->ki_pos += res;
	return res;
}

static bool fuse_io_past_eof(struct kiocb *iocb, struct iov_iter *iter)
{
	struct inode *inode = file_inode(iocb->ki_filp);

	return iocb->ki_pos + iov_iter_count(iter) > i_size_read(inode);
}

/*
 * @return true if an exclusive lock for direct IO writes is needed
 */
static bool fuse_dio_wr_exclusive_lock(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct fuse_file *ff = file->private_data;
	struct inode *inode = file_inode(iocb->ki_filp);
	struct fuse_inode *fi = get_fuse_inode(inode);

	/* Server side has to advise that it supports parallel dio writes. */
	if (!(ff->open_flags & FOPEN_PARALLEL_DIRECT_WRITES))
		return true;

	/*
	 * Append will need to know the eventual EOF - always needs an
	 * exclusive lock.
	 */
	if (iocb->ki_flags & IOCB_APPEND)
		return true;

	/* shared locks are not allowed with parallel page cache IO */
	if (test_bit(FUSE_I_CACHE_IO_MODE, &fi->state))
		return true;

	/* Parallel dio beyond EOF is not supported, at least for now. */
	if (fuse_io_past_eof(iocb, from))
		return true;

	return false;
}
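
/*
 * In short: a shared lock is used only when the server advertised
 * FOPEN_PARALLEL_DIRECT_WRITES, the write is not appending, the inode is
 * not in caching IO mode, and the write stays within i_size.
 */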

static void fuse_dio_lock(struct kiocb *iocb, struct iov_iter *from,
			  bool *exclusive)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	struct fuse_inode *fi = get_fuse_inode(inode);

	*exclusive = fuse_dio_wr_exclusive_lock(iocb, from);
	if (*exclusive) {
		inode_lock(inode);
	} else {
		inode_lock_shared(inode);
		/*
		 * New parallel dio is allowed only if the inode is not in
		 * caching mode and it denies new opens in caching mode. This
		 * check should be performed only after taking the shared
		 * inode lock. The previous past-eof check was done without
		 * the inode lock and might have raced, so check it again.
		 */
		if (fuse_io_past_eof(iocb, from) ||
		    fuse_inode_uncached_io_start(fi, NULL) != 0) {
			inode_unlock_shared(inode);
			inode_lock(inode);
			*exclusive = true;
		}
	}
}

static void fuse_dio_unlock(struct kiocb *iocb, bool exclusive)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	struct fuse_inode *fi = get_fuse_inode(inode);

	if (exclusive) {
		inode_unlock(inode);
	} else {
		/* Allow opens in caching mode after last parallel dio end */
		fuse_inode_uncached_io_end(fi);
		inode_unlock_shared(inode);
	}
}

static ssize_t fuse_cache_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct address_space *mapping = file->f_mapping;
	ssize_t written = 0;
	struct inode *inode = mapping->host;
	ssize_t err, count;
	struct fuse_conn *fc = get_fuse_conn(inode);

	if (fc->writeback_cache) {
		/* Update size (EOF optimization) and mode (SUID clearing) */
		err = fuse_update_attributes(mapping->host, file,
					     STATX_SIZE | STATX_MODE);
		if (err)
			return err;

		if (fc->handle_killpriv_v2 &&
		    setattr_should_drop_suidgid(&nop_mnt_idmap,
						file_inode(file))) {
			goto writethrough;
		}

		return generic_file_write_iter(iocb, from);
	}

writethrough:
	inode_lock(inode);

	err = count = generic_write_checks(iocb, from);
	if (err <= 0)
		goto out;

	task_io_account_write(count);

	err = file_remove_privs(file);
	if (err)
		goto out;

	err = file_update_time(file);
	if (err)
		goto out;

	if (iocb->ki_flags & IOCB_DIRECT) {
		written = generic_file_direct_write(iocb, from);
		if (written < 0 || !iov_iter_count(from))
			goto out;
		written = direct_write_fallback(iocb, from, written,
						fuse_perform_write(iocb, from));
	} else {
		written = fuse_perform_write(iocb, from);
	}
out:
	inode_unlock(inode);
	if (written > 0)
		written = generic_write_sync(iocb, written);

	return written ? written : err;
}

static inline unsigned long fuse_get_user_addr(const struct iov_iter *ii)
{
	return (unsigned long)iter_iov(ii)->iov_base + ii->iov_offset;
}

static inline size_t fuse_get_frag_size(const struct iov_iter *ii,
					size_t max_size)
{
	return min(iov_iter_single_seg_count(ii), max_size);
}

static int fuse_get_user_pages(struct fuse_args_pages *ap, struct iov_iter *ii,
			       size_t *nbytesp, int write,
			       unsigned int max_pages)
{
	size_t nbytes = 0;  /* # bytes already packed in req */
	ssize_t ret = 0;

	/* Special case for kernel I/O: can copy directly into the buffer */
	if (iov_iter_is_kvec(ii)) {
		unsigned long user_addr = fuse_get_user_addr(ii);
		size_t frag_size = fuse_get_frag_size(ii, *nbytesp);

		if (write)
			ap->args.in_args[1].value = (void *) user_addr;
		else
			ap->args.out_args[0].value = (void *) user_addr;

		iov_iter_advance(ii, frag_size);
		*nbytesp = frag_size;
		return 0;
	}

	while (nbytes < *nbytesp && ap->num_pages < max_pages) {
		unsigned npages;
		size_t start;
		struct page **pt_pages;

		pt_pages = &ap->pages[ap->num_pages];
		ret = iov_iter_extract_pages(ii, &pt_pages,
					     *nbytesp - nbytes,
					     max_pages - ap->num_pages,
					     0, &start);
		if (ret < 0)
			break;

		nbytes += ret;

		ret += start;
		npages = DIV_ROUND_UP(ret, PAGE_SIZE);

		ap->descs[ap->num_pages].offset = start;
		fuse_page_descs_length_init(ap->descs, ap->num_pages, npages);

		ap->num_pages += npages;
		ap->descs[ap->num_pages - 1].length -=
			(PAGE_SIZE - ret) & (PAGE_SIZE - 1);
	}

	ap->args.is_pinned = iov_iter_extract_will_pin(ii);
	ap->args.user_pages = true;
	if (write)
		ap->args.in_pages = true;
	else
		ap->args.out_pages = true;

	*nbytesp = nbytes;

	return ret < 0 ? ret : 0;
}

ssize_t fuse_direct_io(struct fuse_io_priv *io, struct iov_iter *iter,
		       loff_t *ppos, int flags)
{
	int write = flags & FUSE_DIO_WRITE;
	int cuse = flags & FUSE_DIO_CUSE;
	struct file *file = io->iocb->ki_filp;
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	struct fuse_file *ff = file->private_data;
	struct fuse_conn *fc = ff->fm->fc;
	size_t nmax = write ? fc->max_write : fc->max_read;
	loff_t pos = *ppos;
	size_t count = iov_iter_count(iter);
	pgoff_t idx_from = pos >> PAGE_SHIFT;
	pgoff_t idx_to = (pos + count - 1) >> PAGE_SHIFT;
	ssize_t res = 0;
	int err = 0;
	struct fuse_io_args *ia;
	unsigned int max_pages;
	bool fopen_direct_io = ff->open_flags & FOPEN_DIRECT_IO;

	max_pages = iov_iter_npages(iter, fc->max_pages);
	ia = fuse_io_alloc(io, max_pages);
	if (!ia)
		return -ENOMEM;

	if (fopen_direct_io && fc->direct_io_allow_mmap) {
		res = filemap_write_and_wait_range(mapping, pos, pos + count - 1);
		if (res) {
			fuse_io_free(ia);
			return res;
		}
	}
	if (!cuse && fuse_range_is_writeback(inode, idx_from, idx_to)) {
		if (!write)
			inode_lock(inode);
		fuse_sync_writes(inode);
		if (!write)
			inode_unlock(inode);
	}

	if (fopen_direct_io && write) {
		res = invalidate_inode_pages2_range(mapping, idx_from, idx_to);
		if (res) {
			fuse_io_free(ia);
			return res;
		}
	}

	io->should_dirty = !write && user_backed_iter(iter);
	while (count) {
		ssize_t nres;
		fl_owner_t owner = current->files;
		size_t nbytes = min(count, nmax);

		err = fuse_get_user_pages(&ia->ap, iter, &nbytes, write,
					  max_pages);
		if (err && !nbytes)
			break;

		if (write) {
			if (!capable(CAP_FSETID))
				ia->write.in.write_flags |= FUSE_WRITE_KILL_SUIDGID;

			nres = fuse_send_write(ia, pos, nbytes, owner);
		} else {
			nres = fuse_send_read(ia, pos, nbytes, owner);
		}

		if (!io->async || nres < 0) {
			fuse_release_user_pages(&ia->ap, io->should_dirty);
			fuse_io_free(ia);
		}
		ia = NULL;
		if (nres < 0) {
			iov_iter_revert(iter, nbytes);
			err = nres;
			break;
		}
		WARN_ON(nres > nbytes);

		count -= nres;
		res += nres;
		pos += nres;
		if (nres != nbytes) {
			iov_iter_revert(iter, nbytes - nres);
			break;
		}
		if (count) {
			max_pages = iov_iter_npages(iter, fc->max_pages);
			ia = fuse_io_alloc(io, max_pages);
			if (!ia)
				break;
		}
	}
	if (ia)
		fuse_io_free(ia);
	if (res > 0)
		*ppos = pos;

	return res > 0 ? res : err;
}
EXPORT_SYMBOL_GPL(fuse_direct_io);

static ssize_t __fuse_direct_read(struct fuse_io_priv *io,
				  struct iov_iter *iter,
				  loff_t *ppos)
{
	ssize_t res;
	struct inode *inode = file_inode(io->iocb->ki_filp);

	res = fuse_direct_io(io, iter, ppos, 0);

	fuse_invalidate_atime(inode);

	return res;
}

static ssize_t fuse_direct_IO(struct kiocb *iocb, struct iov_iter *iter);

static ssize_t fuse_direct_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	ssize_t res;

	if (!is_sync_kiocb(iocb) && iocb->ki_flags & IOCB_DIRECT) {
		res = fuse_direct_IO(iocb, to);
	} else {
		struct fuse_io_priv io = FUSE_IO_PRIV_SYNC(iocb);

		res = __fuse_direct_read(&io, to, &iocb->ki_pos);
	}

	return res;
}

static ssize_t fuse_direct_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	struct fuse_io_priv io = FUSE_IO_PRIV_SYNC(iocb);
	ssize_t res;
	bool exclusive;

	fuse_dio_lock(iocb, from, &exclusive);
	res = generic_write_checks(iocb, from);
	if (res > 0) {
		task_io_account_write(res);
		if (!is_sync_kiocb(iocb) && iocb->ki_flags & IOCB_DIRECT) {
			res = fuse_direct_IO(iocb, from);
		} else {
			res = fuse_direct_io(&io, from, &iocb->ki_pos,
					     FUSE_DIO_WRITE);
			fuse_write_update_attr(inode, iocb->ki_pos, res);
		}
	}
	fuse_dio_unlock(iocb, exclusive);

	return res;
}

static ssize_t fuse_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	struct file *file = iocb->ki_filp;
	struct fuse_file *ff = file->private_data;
	struct inode *inode = file_inode(file);

	if (fuse_is_bad(inode))
		return -EIO;

	if (FUSE_IS_DAX(inode))
		return fuse_dax_read_iter(iocb, to);

	/* FOPEN_DIRECT_IO overrides FOPEN_PASSTHROUGH */
	if (ff->open_flags & FOPEN_DIRECT_IO)
		return fuse_direct_read_iter(iocb, to);
	else if (fuse_file_passthrough(ff))
		return fuse_passthrough_read_iter(iocb, to);
	else
		return fuse_cache_read_iter(iocb, to);
}

static ssize_t fuse_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct fuse_file *ff = file->private_data;
	struct inode *inode = file_inode(file);

	if (fuse_is_bad(inode))
		return -EIO;

	if (FUSE_IS_DAX(inode))
		return fuse_dax_write_iter(iocb, from);

	/* FOPEN_DIRECT_IO overrides FOPEN_PASSTHROUGH */
	if (ff->open_flags & FOPEN_DIRECT_IO)
		return fuse_direct_write_iter(iocb, from);
	else if (fuse_file_passthrough(ff))
		return fuse_passthrough_write_iter(iocb, from);
	else
		return fuse_cache_write_iter(iocb, from);
}

static ssize_t fuse_splice_read(struct file *in, loff_t *ppos,
				struct pipe_inode_info *pipe, size_t len,
				unsigned int flags)
{
	struct fuse_file *ff = in->private_data;

	/* FOPEN_DIRECT_IO overrides FOPEN_PASSTHROUGH */
	if (fuse_file_passthrough(ff) && !(ff->open_flags & FOPEN_DIRECT_IO))
		return fuse_passthrough_splice_read(in, ppos, pipe, len, flags);
	else
		return filemap_splice_read(in, ppos, pipe, len, flags);
}

static ssize_t fuse_splice_write(struct pipe_inode_info *pipe, struct file *out,
				 loff_t *ppos, size_t len, unsigned int flags)
{
	struct fuse_file *ff = out->private_data;

	/* FOPEN_DIRECT_IO overrides FOPEN_PASSTHROUGH */
	if (fuse_file_passthrough(ff) && !(ff->open_flags & FOPEN_DIRECT_IO))
		return fuse_passthrough_splice_write(pipe, out, ppos, len, flags);
	else
		return iter_file_splice_write(pipe, out, ppos, len, flags);
}

static void fuse_writepage_free(struct fuse_writepage_args *wpa)
{
	struct fuse_args_pages *ap = &wpa->ia.ap;
	int i;

	if (wpa->bucket)
		fuse_sync_bucket_dec(wpa->bucket);

	for (i = 0; i < ap->num_pages; i++)
		__free_page(ap->pages[i]);

	if (wpa->ia.ff)
		fuse_file_put(wpa->ia.ff, false);

	kfree(ap->pages);
	kfree(wpa);
}

static void fuse_writepage_finish(struct fuse_mount *fm,
				  struct fuse_writepage_args *wpa)
{
	struct fuse_args_pages *ap = &wpa->ia.ap;
	struct inode *inode = wpa->inode;
	struct fuse_inode *fi = get_fuse_inode(inode);
	struct backing_dev_info *bdi = inode_to_bdi(inode);
	int i;

	for (i = 0; i < ap->num_pages; i++) {
		dec_wb_stat(&bdi->wb, WB_WRITEBACK);
		dec_node_page_state(ap->pages[i], NR_WRITEBACK_TEMP);
		wb_writeout_inc(&bdi->wb);
	}
	wake_up(&fi->page_waitq);
}
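
/*
 * The pages counted down here are the temporary copies made by the
 * writepage paths further down, which is why they are accounted as
 * NR_WRITEBACK_TEMP rather than NR_WRITEBACK.
 */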

/* Called under fi->lock, may release and reacquire it */
static void fuse_send_writepage(struct fuse_mount *fm,
				struct fuse_writepage_args *wpa, loff_t size)
__releases(fi->lock)
__acquires(fi->lock)
{
	struct fuse_writepage_args *aux, *next;
	struct fuse_inode *fi = get_fuse_inode(wpa->inode);
	struct fuse_write_in *inarg = &wpa->ia.write.in;
	struct fuse_args *args = &wpa->ia.ap.args;
	__u64 data_size = wpa->ia.ap.num_pages * PAGE_SIZE;
	int err;

	fi->writectr++;
	if (inarg->offset + data_size <= size) {
		inarg->size = data_size;
	} else if (inarg->offset < size) {
		inarg->size = size - inarg->offset;
	} else {
		/* Got truncated off completely */
		goto out_free;
	}

	args->in_args[1].size = inarg->size;
	args->force = true;
	args->nocreds = true;

	err = fuse_simple_background(fm, args, GFP_ATOMIC);
	if (err == -ENOMEM) {
		spin_unlock(&fi->lock);
		err = fuse_simple_background(fm, args, GFP_NOFS | __GFP_NOFAIL);
		spin_lock(&fi->lock);
	}

	/* Fails on broken connection only */
	if (unlikely(err))
		goto out_free;

	return;

out_free:
	fi->writectr--;
	rb_erase(&wpa->writepages_entry, &fi->writepages);
	fuse_writepage_finish(fm, wpa);
	spin_unlock(&fi->lock);

	/* After fuse_writepage_finish() aux request list is private */
	for (aux = wpa->next; aux; aux = next) {
		next = aux->next;
		aux->next = NULL;
		fuse_writepage_free(aux);
	}

	fuse_writepage_free(wpa);
	spin_lock(&fi->lock);
}
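
/*
 * Crop example for fuse_send_writepage(): a queued request covering
 * bytes 4096..12287 (offset 4096, data_size 8192) against a file
 * truncated to 6000 bytes is sent with size = 6000 - 4096 = 1904;
 * a request lying entirely beyond EOF is dropped instead.
 */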

/*
 * If fi->writectr is positive (no truncate or fsync going on) send
 * all queued writepage requests.
 *
 * Called with fi->lock
 */
void fuse_flush_writepages(struct inode *inode)
__releases(fi->lock)
__acquires(fi->lock)
{
	struct fuse_mount *fm = get_fuse_mount(inode);
	struct fuse_inode *fi = get_fuse_inode(inode);
	loff_t crop = i_size_read(inode);
	struct fuse_writepage_args *wpa;

	while (fi->writectr >= 0 && !list_empty(&fi->queued_writes)) {
		wpa = list_entry(fi->queued_writes.next,
				 struct fuse_writepage_args, queue_entry);
		list_del_init(&wpa->queue_entry);
		fuse_send_writepage(fm, wpa, crop);
	}
}

static struct fuse_writepage_args *fuse_insert_writeback(struct rb_root *root,
						struct fuse_writepage_args *wpa)
{
	pgoff_t idx_from = wpa->ia.write.in.offset >> PAGE_SHIFT;
	pgoff_t idx_to = idx_from + wpa->ia.ap.num_pages - 1;
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;

	WARN_ON(!wpa->ia.ap.num_pages);
	while (*p) {
		struct fuse_writepage_args *curr;
		pgoff_t curr_index;

		parent = *p;
		curr = rb_entry(parent, struct fuse_writepage_args,
				writepages_entry);
		WARN_ON(curr->inode != wpa->inode);
		curr_index = curr->ia.write.in.offset >> PAGE_SHIFT;

		if (idx_from >= curr_index + curr->ia.ap.num_pages)
			p = &(*p)->rb_right;
		else if (idx_to < curr_index)
			p = &(*p)->rb_left;
		else
			return curr;
	}

	rb_link_node(&wpa->writepages_entry, parent, p);
	rb_insert_color(&wpa->writepages_entry, root);
	return NULL;
}

static void tree_insert(struct rb_root *root, struct fuse_writepage_args *wpa)
{
	WARN_ON(fuse_insert_writeback(root, wpa));
}
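
/*
 * fi->writepages holds non-overlapping page-index ranges, keyed by the
 * range each request covers.  E.g. requests for indexes [0..3] and
 * [4..7] can coexist, but inserting [2..5] overlaps the first entry
 * and returns it instead of linking the new node.
 */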

static void fuse_writepage_end(struct fuse_mount *fm, struct fuse_args *args,
			       int error)
{
	struct fuse_writepage_args *wpa =
		container_of(args, typeof(*wpa), ia.ap.args);
	struct inode *inode = wpa->inode;
	struct fuse_inode *fi = get_fuse_inode(inode);
	struct fuse_conn *fc = get_fuse_conn(inode);

	mapping_set_error(inode->i_mapping, error);
	/*
	 * A writeback finished and this might have updated mtime/ctime on
	 * the server, making local mtime/ctime stale.  Hence invalidate attrs.
	 * Do this only if writeback_cache is not enabled.  If writeback_cache
	 * is enabled, we trust local ctime/mtime.
	 */
	if (!fc->writeback_cache)
		fuse_invalidate_attr_mask(inode, FUSE_STATX_MODIFY);
	spin_lock(&fi->lock);
	rb_erase(&wpa->writepages_entry, &fi->writepages);
	while (wpa->next) {
		struct fuse_mount *fm = get_fuse_mount(inode);
		struct fuse_write_in *inarg = &wpa->ia.write.in;
		struct fuse_writepage_args *next = wpa->next;

		wpa->next = next->next;
		next->next = NULL;
		next->ia.ff = fuse_file_get(wpa->ia.ff);
		tree_insert(&fi->writepages, next);

		/*
		 * Skip fuse_flush_writepages() to make it easy to crop requests
		 * based on primary request size.
		 *
		 * 1st case (trivial): there are no concurrent activities using
		 * fuse_set/release_nowrite.  Then we're on the safe side
		 * because fuse_flush_writepages() would call
		 * fuse_send_writepage() anyway.
		 *
		 * 2nd case: someone called fuse_set_nowrite and it is waiting
		 * now for completion of all in-flight requests.  This happens
		 * rarely and no more than once per page, so this should be
		 * okay.
		 *
		 * 3rd case: someone (e.g. fuse_do_setattr()) is in the middle
		 * of the fuse_set_nowrite..fuse_release_nowrite section.  The
		 * fact that fuse_set_nowrite returned implies that all
		 * in-flight requests were completed along with all of their
		 * secondary requests.  Further primary requests are blocked by
		 * negative writectr.  Hence there cannot be any in-flight
		 * requests and no invocations of fuse_writepage_end() while
		 * we're in the fuse_set_nowrite..fuse_release_nowrite section.
		 */
		fuse_send_writepage(fm, next, inarg->offset + inarg->size);
	}
	fi->writectr--;
	fuse_writepage_finish(fm, wpa);
	spin_unlock(&fi->lock);
	fuse_writepage_free(wpa);
}

static struct fuse_file *__fuse_write_file_get(struct fuse_inode *fi)
{
	struct fuse_file *ff;

	spin_lock(&fi->lock);
	ff = list_first_entry_or_null(&fi->write_files, struct fuse_file,
				      write_entry);
	if (ff)
		fuse_file_get(ff);
	spin_unlock(&fi->lock);

	return ff;
}

static struct fuse_file *fuse_write_file_get(struct fuse_inode *fi)
{
	struct fuse_file *ff = __fuse_write_file_get(fi);
	WARN_ON(!ff);
	return ff;
}

int fuse_write_inode(struct inode *inode, struct writeback_control *wbc)
{
	struct fuse_inode *fi = get_fuse_inode(inode);
	struct fuse_file *ff;
	int err;

	/*
	 * Inode is always written before the last reference is dropped and
	 * hence this should not be reached from reclaim.
	 *
	 * Writing back the inode from reclaim can deadlock if the request
	 * processing itself needs an allocation.  Allocations triggering
	 * reclaim while serving a request can't be prevented, because it can
	 * involve any number of unrelated userspace processes.
	 */
	WARN_ON(wbc->for_reclaim);

	ff = __fuse_write_file_get(fi);
	err = fuse_flush_times(inode, ff);
	if (ff)
		fuse_file_put(ff, false);

	return err;
}

static struct fuse_writepage_args *fuse_writepage_args_alloc(void)
{
	struct fuse_writepage_args *wpa;
	struct fuse_args_pages *ap;

	wpa = kzalloc(sizeof(*wpa), GFP_NOFS);
	if (wpa) {
		ap = &wpa->ia.ap;
		ap->num_pages = 0;
		ap->pages = fuse_pages_alloc(1, GFP_NOFS, &ap->descs);
		if (!ap->pages) {
			kfree(wpa);
			wpa = NULL;
		}
	}
	return wpa;
}

static void fuse_writepage_add_to_bucket(struct fuse_conn *fc,
					 struct fuse_writepage_args *wpa)
{
	if (!fc->sync_fs)
		return;

	rcu_read_lock();
	/* Prevent resurrection of dead bucket in unlikely race with syncfs */
	do {
		wpa->bucket = rcu_dereference(fc->curr_bucket);
	} while (unlikely(!atomic_inc_not_zero(&wpa->bucket->count)));
	rcu_read_unlock();
}
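
/*
 * The bucket reference taken above ties the request to the syncfs
 * epoch current at queueing time; the matching fuse_sync_bucket_dec()
 * in fuse_writepage_free() is what lets syncfs() detect that all
 * writes from its epoch have drained.
 */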

static int fuse_writepage_locked(struct folio *folio)
{
	struct address_space *mapping = folio->mapping;
	struct inode *inode = mapping->host;
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_inode *fi = get_fuse_inode(inode);
	struct fuse_writepage_args *wpa;
	struct fuse_args_pages *ap;
	struct folio *tmp_folio;
	int error = -ENOMEM;

	folio_start_writeback(folio);

	wpa = fuse_writepage_args_alloc();
	if (!wpa)
		goto err;
	ap = &wpa->ia.ap;

	tmp_folio = folio_alloc(GFP_NOFS | __GFP_HIGHMEM, 0);
	if (!tmp_folio)
		goto err_free;

	error = -EIO;
	wpa->ia.ff = fuse_write_file_get(fi);
	if (!wpa->ia.ff)
		goto err_nofile;

	fuse_writepage_add_to_bucket(fc, wpa);
	fuse_write_args_fill(&wpa->ia, wpa->ia.ff, folio_pos(folio), 0);

	folio_copy(tmp_folio, folio);
	wpa->ia.write.in.write_flags |= FUSE_WRITE_CACHE;
	wpa->next = NULL;
	ap->args.in_pages = true;
	ap->num_pages = 1;
	ap->pages[0] = &tmp_folio->page;
	ap->descs[0].offset = 0;
	ap->descs[0].length = PAGE_SIZE;
	ap->args.end = fuse_writepage_end;
	wpa->inode = inode;

	inc_wb_stat(&inode_to_bdi(inode)->wb, WB_WRITEBACK);
	node_stat_add_folio(tmp_folio, NR_WRITEBACK_TEMP);

	spin_lock(&fi->lock);
	tree_insert(&fi->writepages, wpa);
	list_add_tail(&wpa->queue_entry, &fi->queued_writes);
	fuse_flush_writepages(inode);
	spin_unlock(&fi->lock);

	folio_end_writeback(folio);

	return 0;

err_nofile:
	folio_put(tmp_folio);
err_free:
	kfree(wpa);
err:
	mapping_set_error(folio->mapping, error);
	folio_end_writeback(folio);
	return error;
}
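
/*
 * Design note: writing back a copy of the folio means its writeback
 * state can be cleared as soon as the copy is queued, so a slow or
 * stuck userspace server only stalls tasks touching this file, not
 * global writeback (see the comment above fuse_page_mkwrite() below).
 */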

struct fuse_fill_wb_data {
	struct fuse_writepage_args *wpa;
	struct fuse_file *ff;
	struct inode *inode;
	struct page **orig_pages;
	unsigned int max_pages;
};

static bool fuse_pages_realloc(struct fuse_fill_wb_data *data)
{
	struct fuse_args_pages *ap = &data->wpa->ia.ap;
	struct fuse_conn *fc = get_fuse_conn(data->inode);
	struct page **pages;
	struct fuse_page_desc *descs;
	unsigned int npages = min_t(unsigned int,
				    max_t(unsigned int, data->max_pages * 2,
					  FUSE_DEFAULT_MAX_PAGES_PER_REQ),
				    fc->max_pages);
	WARN_ON(npages <= data->max_pages);

	pages = fuse_pages_alloc(npages, GFP_NOFS, &descs);
	if (!pages)
		return false;

	memcpy(pages, ap->pages, sizeof(struct page *) * ap->num_pages);
	memcpy(descs, ap->descs, sizeof(struct fuse_page_desc) * ap->num_pages);
	kfree(ap->pages);
	ap->pages = pages;
	ap->descs = descs;
	data->max_pages = npages;

	return true;
}

static void fuse_writepages_send(struct fuse_fill_wb_data *data)
{
	struct fuse_writepage_args *wpa = data->wpa;
	struct inode *inode = data->inode;
	struct fuse_inode *fi = get_fuse_inode(inode);
	int num_pages = wpa->ia.ap.num_pages;
	int i;

	wpa->ia.ff = fuse_file_get(data->ff);
	spin_lock(&fi->lock);
	list_add_tail(&wpa->queue_entry, &fi->queued_writes);
	fuse_flush_writepages(inode);
	spin_unlock(&fi->lock);

	for (i = 0; i < num_pages; i++)
		end_page_writeback(data->orig_pages[i]);
}

/*
 * Check under fi->lock if the page is under writeback, and insert it onto the
 * rb_tree if not. Otherwise iterate auxiliary write requests, to see if there's
 * one already added for a page at this offset. If there's none, then insert
 * this new request onto the auxiliary list, otherwise reuse the existing one by
 * swapping the new temp page with the old one.
 */
static bool fuse_writepage_add(struct fuse_writepage_args *new_wpa,
			       struct page *page)
{
	struct fuse_inode *fi = get_fuse_inode(new_wpa->inode);
	struct fuse_writepage_args *tmp;
	struct fuse_writepage_args *old_wpa;
	struct fuse_args_pages *new_ap = &new_wpa->ia.ap;

	WARN_ON(new_ap->num_pages != 0);
	new_ap->num_pages = 1;

	spin_lock(&fi->lock);
	old_wpa = fuse_insert_writeback(&fi->writepages, new_wpa);
	if (!old_wpa) {
		spin_unlock(&fi->lock);
		return true;
	}

	for (tmp = old_wpa->next; tmp; tmp = tmp->next) {
		pgoff_t curr_index;

		WARN_ON(tmp->inode != new_wpa->inode);
		curr_index = tmp->ia.write.in.offset >> PAGE_SHIFT;
		if (curr_index == page->index) {
			WARN_ON(tmp->ia.ap.num_pages != 1);
			swap(tmp->ia.ap.pages[0], new_ap->pages[0]);
			break;
		}
	}

	if (!tmp) {
		new_wpa->next = old_wpa->next;
		old_wpa->next = new_wpa;
	}

	spin_unlock(&fi->lock);

	if (tmp) {
		struct backing_dev_info *bdi = inode_to_bdi(new_wpa->inode);

		dec_wb_stat(&bdi->wb, WB_WRITEBACK);
		dec_node_page_state(new_ap->pages[0], NR_WRITEBACK_TEMP);
		wb_writeout_inc(&bdi->wb);
		fuse_writepage_free(new_wpa);
	}

	return false;
}

static bool fuse_writepage_need_send(struct fuse_conn *fc, struct page *page,
				     struct fuse_args_pages *ap,
				     struct fuse_fill_wb_data *data)
{
	WARN_ON(!ap->num_pages);

	/*
	 * Being under writeback is unlikely but possible.  For example direct
	 * read to an mmaped fuse file will set the page dirty twice; once when
	 * the pages are faulted with get_user_pages(), and then after the read
	 * completed.
	 */
	if (fuse_page_is_writeback(data->inode, page->index))
		return true;

	/* Reached max pages */
	if (ap->num_pages == fc->max_pages)
		return true;

	/* Reached max write bytes */
	if ((ap->num_pages + 1) * PAGE_SIZE > fc->max_write)
		return true;

	/* Discontinuity */
	if (data->orig_pages[ap->num_pages - 1]->index + 1 != page->index)
		return true;

	/* Need to grow the pages array?  If so, did the expansion fail? */
	if (ap->num_pages == data->max_pages && !fuse_pages_realloc(data))
		return true;

	return false;
}
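
/*
 * Size-limit example: with 4K pages and fc->max_write == 16384, a
 * request is flushed once it holds four pages even if fc->max_pages
 * would allow more, since a fifth page would exceed max_write.
 */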

static int fuse_writepages_fill(struct folio *folio,
				struct writeback_control *wbc, void *_data)
{
	struct fuse_fill_wb_data *data = _data;
	struct fuse_writepage_args *wpa = data->wpa;
	struct fuse_args_pages *ap = &wpa->ia.ap;
	struct inode *inode = data->inode;
	struct fuse_inode *fi = get_fuse_inode(inode);
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct page *tmp_page;
	int err;

	if (!data->ff) {
		err = -EIO;
		data->ff = fuse_write_file_get(fi);
		if (!data->ff)
			goto out_unlock;
	}

	if (wpa && fuse_writepage_need_send(fc, &folio->page, ap, data)) {
		fuse_writepages_send(data);
		data->wpa = NULL;
	}

	err = -ENOMEM;
	tmp_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
	if (!tmp_page)
		goto out_unlock;

	/*
	 * The page must not be redirtied until the writeout is completed
	 * (i.e. userspace has sent a reply to the write request).  Otherwise
	 * there could be more than one temporary page instance for each real
	 * page.
	 *
	 * This is ensured by holding the page lock in page_mkwrite() while
	 * checking fuse_page_is_writeback().  We already hold the page lock
	 * since clear_page_dirty_for_io() and keep it held until we add the
	 * request to the fi->writepages list and increment ap->num_pages.
	 * After this fuse_page_is_writeback() will indicate that the page is
	 * under writeback, so we can release the page lock.
	 */
	if (data->wpa == NULL) {
		err = -ENOMEM;
		wpa = fuse_writepage_args_alloc();
		if (!wpa) {
			__free_page(tmp_page);
			goto out_unlock;
		}
		fuse_writepage_add_to_bucket(fc, wpa);

		data->max_pages = 1;

		ap = &wpa->ia.ap;
		fuse_write_args_fill(&wpa->ia, data->ff, folio_pos(folio), 0);
		wpa->ia.write.in.write_flags |= FUSE_WRITE_CACHE;
		wpa->next = NULL;
		ap->args.in_pages = true;
		ap->args.end = fuse_writepage_end;
		ap->num_pages = 0;
		wpa->inode = inode;
	}
	folio_start_writeback(folio);

	copy_highpage(tmp_page, &folio->page);
	ap->pages[ap->num_pages] = tmp_page;
	ap->descs[ap->num_pages].offset = 0;
	ap->descs[ap->num_pages].length = PAGE_SIZE;
	data->orig_pages[ap->num_pages] = &folio->page;

	inc_wb_stat(&inode_to_bdi(inode)->wb, WB_WRITEBACK);
	inc_node_page_state(tmp_page, NR_WRITEBACK_TEMP);

	err = 0;
	if (data->wpa) {
		/*
		 * Protected by fi->lock against concurrent access by
		 * fuse_page_is_writeback().
		 */
		spin_lock(&fi->lock);
		ap->num_pages++;
		spin_unlock(&fi->lock);
	} else if (fuse_writepage_add(wpa, &folio->page)) {
		data->wpa = wpa;
	} else {
		folio_end_writeback(folio);
	}
out_unlock:
	folio_unlock(folio);

	return err;
}

static int fuse_writepages(struct address_space *mapping,
			   struct writeback_control *wbc)
{
	struct inode *inode = mapping->host;
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_fill_wb_data data;
	int err;

	err = -EIO;
	if (fuse_is_bad(inode))
		goto out;

	if (wbc->sync_mode == WB_SYNC_NONE &&
	    fc->num_background >= fc->congestion_threshold)
		return 0;

	data.inode = inode;
	data.wpa = NULL;
	data.ff = NULL;

	err = -ENOMEM;
	data.orig_pages = kcalloc(fc->max_pages,
				  sizeof(struct page *),
				  GFP_NOFS);
	if (!data.orig_pages)
		goto out;

	err = write_cache_pages(mapping, wbc, fuse_writepages_fill, &data);
	if (data.wpa) {
		WARN_ON(!data.wpa->ia.ap.num_pages);
		fuse_writepages_send(&data);
	}
	if (data.ff)
		fuse_file_put(data.ff, false);

	kfree(data.orig_pages);
out:
	return err;
}

/*
 * It would be worthwhile to make sure that space is reserved on disk for
 * the write, but how to implement it without degrading performance needs
 * more thought.
 */
static int fuse_write_begin(struct file *file, struct address_space *mapping,
			    loff_t pos, unsigned len, struct page **pagep, void **fsdata)
{
	pgoff_t index = pos >> PAGE_SHIFT;
	struct fuse_conn *fc = get_fuse_conn(file_inode(file));
	struct page *page;
	loff_t fsize;
	int err = -ENOMEM;

	WARN_ON(!fc->writeback_cache);

	page = grab_cache_page_write_begin(mapping, index);
	if (!page)
		goto error;

	fuse_wait_on_page_writeback(mapping->host, page->index);

	if (PageUptodate(page) || len == PAGE_SIZE)
		goto success;
	/*
	 * Check if the start of this page comes after the end of file, in
	 * which case the readpage can be optimized away.
	 */
	fsize = i_size_read(mapping->host);
	if (fsize <= (pos & PAGE_MASK)) {
		size_t off = pos & ~PAGE_MASK;
		if (off)
			zero_user_segment(page, 0, off);
		goto success;
	}
	err = fuse_do_readpage(file, page);
	if (err)
		goto cleanup;
success:
	*pagep = page;
	return 0;

cleanup:
	unlock_page(page);
	put_page(page);
error:
	return err;
}

static int fuse_write_end(struct file *file, struct address_space *mapping,
			  loff_t pos, unsigned len, unsigned copied,
			  struct page *page, void *fsdata)
{
	struct inode *inode = page->mapping->host;

	/* Haven't copied anything?  Skip zeroing, size extending, dirtying. */
	if (!copied)
		goto unlock;

	pos += copied;
	if (!PageUptodate(page)) {
		/* Zero any unwritten bytes at the end of the page */
		size_t endoff = pos & ~PAGE_MASK;
		if (endoff)
			zero_user_segment(page, endoff, PAGE_SIZE);
		SetPageUptodate(page);
	}

	if (pos > inode->i_size)
		i_size_write(inode, pos);

	set_page_dirty(page);

unlock:
	unlock_page(page);
	put_page(page);

	return copied;
}
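
/*
 * Zeroing example: a 100-byte write at offset 0 into a page that was
 * never read (not uptodate) leaves bytes 100..4095 undefined, so they
 * are zeroed above before the page is marked uptodate and dirtied;
 * otherwise stale data could later be written back.
 */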

static int fuse_launder_folio(struct folio *folio)
{
	int err = 0;
	if (folio_clear_dirty_for_io(folio)) {
		struct inode *inode = folio->mapping->host;

		/* Serialize with pending writeback for the same page */
		fuse_wait_on_page_writeback(inode, folio->index);
		err = fuse_writepage_locked(folio);
		if (!err)
			fuse_wait_on_page_writeback(inode, folio->index);
	}
	return err;
}

/*
 * Write back dirty data/metadata now (there may not be any suitable
 * open files later for data)
 */
static void fuse_vma_close(struct vm_area_struct *vma)
{
	int err;

	err = write_inode_now(vma->vm_file->f_mapping->host, 1);
	mapping_set_error(vma->vm_file->f_mapping, err);
}

/*
 * Wait for writeback against this page to complete before allowing it
 * to be marked dirty again, and hence written back again, possibly
 * before the previous writepage completed.
 *
 * Block here, instead of in ->writepage(), so that the userspace fs
 * can only block processes actually operating on the filesystem.
 *
 * Otherwise unprivileged userspace fs would be able to block
 * unrelated:
 *
 * - page migration
 * - sync(2)
 * - try_to_free_pages() with order > PAGE_ALLOC_COSTLY_ORDER
 */
static vm_fault_t fuse_page_mkwrite(struct vm_fault *vmf)
{
	struct page *page = vmf->page;
	struct inode *inode = file_inode(vmf->vma->vm_file);

	file_update_time(vmf->vma->vm_file);
	lock_page(page);
	if (page->mapping != inode->i_mapping) {
		unlock_page(page);
		return VM_FAULT_NOPAGE;
	}

	fuse_wait_on_page_writeback(inode, page->index);
	return VM_FAULT_LOCKED;
}

static const struct vm_operations_struct fuse_file_vm_ops = {
	.close		= fuse_vma_close,
	.fault		= filemap_fault,
	.map_pages	= filemap_map_pages,
	.page_mkwrite	= fuse_page_mkwrite,
};

static int fuse_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct fuse_file *ff = file->private_data;
	struct fuse_conn *fc = ff->fm->fc;
	struct inode *inode = file_inode(file);
	int rc;

	/* DAX mmap is superior to direct_io mmap */
	if (FUSE_IS_DAX(inode))
		return fuse_dax_mmap(file, vma);

	/*
	 * If the inode is in passthrough io mode, because it has some file
	 * open in passthrough mode, either mmap to the backing file or fail
	 * the mmap, because mixing cached mmap and passthrough io mode is
	 * not allowed.
	 */
	if (fuse_file_passthrough(ff))
		return fuse_passthrough_mmap(file, vma);
	else if (fuse_inode_backing(get_fuse_inode(inode)))
		return -ENODEV;

	/*
	 * FOPEN_DIRECT_IO handling is special compared to O_DIRECT, as it
	 * does not allow MAP_SHARED mmap without FUSE_DIRECT_IO_ALLOW_MMAP.
	 */
	if (ff->open_flags & FOPEN_DIRECT_IO) {
		/*
		 * Can't provide the coherency needed for MAP_SHARED
		 * if FUSE_DIRECT_IO_ALLOW_MMAP isn't set.
		 */
		if ((vma->vm_flags & VM_MAYSHARE) && !fc->direct_io_allow_mmap)
			return -ENODEV;

		invalidate_inode_pages2(file->f_mapping);

		if (!(vma->vm_flags & VM_MAYSHARE)) {
			/* MAP_PRIVATE */
			return generic_file_mmap(file, vma);
		}

		/*
		 * First mmap of a direct_io file enters caching inode io mode.
		 * Also waits for parallel dio writers to go into serial mode
		 * (exclusive instead of shared lock).
		 * After the first mmap, the inode stays in caching io mode
		 * until the direct_io file is released.
		 */
		rc = fuse_file_cached_io_open(inode, ff);
		if (rc)
			return rc;
	}

	if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_MAYWRITE))
		fuse_link_write_file(file);

	file_accessed(file);
	vma->vm_ops = &fuse_file_vm_ops;
	return 0;
}
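
/*
 * Summary of the mmap policy above: DAX and passthrough files delegate
 * to their own mmap implementations; FOPEN_DIRECT_IO files always
 * allow MAP_PRIVATE, but MAP_SHARED only if the server set
 * FUSE_DIRECT_IO_ALLOW_MMAP, since a shared mapping forces the inode
 * into caching io mode.
 */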

static int convert_fuse_file_lock(struct fuse_conn *fc,
				  const struct fuse_file_lock *ffl,
				  struct file_lock *fl)
{
	switch (ffl->type) {
	case F_UNLCK:
		break;

	case F_RDLCK:
	case F_WRLCK:
		if (ffl->start > OFFSET_MAX || ffl->end > OFFSET_MAX ||
		    ffl->end < ffl->start)
			return -EIO;

		fl->fl_start = ffl->start;
		fl->fl_end = ffl->end;

		/*
		 * Convert pid into init's pid namespace.  The locks API will
		 * translate it into the caller's pid namespace.
		 */
		rcu_read_lock();
		fl->c.flc_pid = pid_nr_ns(find_pid_ns(ffl->pid, fc->pid_ns), &init_pid_ns);
		rcu_read_unlock();
		break;

	default:
		return -EIO;
	}
	fl->c.flc_type = ffl->type;
	return 0;
}

static void fuse_lk_fill(struct fuse_args *args, struct file *file,
			 const struct file_lock *fl, int opcode, pid_t pid,
			 int flock, struct fuse_lk_in *inarg)
{
	struct inode *inode = file_inode(file);
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_file *ff = file->private_data;

	memset(inarg, 0, sizeof(*inarg));
	inarg->fh = ff->fh;
	inarg->owner = fuse_lock_owner_id(fc, fl->c.flc_owner);
	inarg->lk.start = fl->fl_start;
	inarg->lk.end = fl->fl_end;
	inarg->lk.type = fl->c.flc_type;
	inarg->lk.pid = pid;
	if (flock)
		inarg->lk_flags |= FUSE_LK_FLOCK;
	args->opcode = opcode;
	args->nodeid = get_node_id(inode);
	args->in_numargs = 1;
	args->in_args[0].size = sizeof(*inarg);
	args->in_args[0].value = inarg;
}

static int fuse_getlk(struct file *file, struct file_lock *fl)
{
	struct inode *inode = file_inode(file);
	struct fuse_mount *fm = get_fuse_mount(inode);
	FUSE_ARGS(args);
	struct fuse_lk_in inarg;
	struct fuse_lk_out outarg;
	int err;

	fuse_lk_fill(&args, file, fl, FUSE_GETLK, 0, 0, &inarg);
	args.out_numargs = 1;
	args.out_args[0].size = sizeof(outarg);
	args.out_args[0].value = &outarg;
	err = fuse_simple_request(fm, &args);
	if (!err)
		err = convert_fuse_file_lock(fm->fc, &outarg.lk, fl);

	return err;
}

static int fuse_setlk(struct file *file, struct file_lock *fl, int flock)
{
	struct inode *inode = file_inode(file);
	struct fuse_mount *fm = get_fuse_mount(inode);
	FUSE_ARGS(args);
	struct fuse_lk_in inarg;
	int opcode = (fl->c.flc_flags & FL_SLEEP) ? FUSE_SETLKW : FUSE_SETLK;
	struct pid *pid = fl->c.flc_type != F_UNLCK ? task_tgid(current) : NULL;
	pid_t pid_nr = pid_nr_ns(pid, fm->fc->pid_ns);
	int err;

	if (fl->fl_lmops && fl->fl_lmops->lm_grant) {
		/* NLM needs asynchronous locks, which we don't support yet */
		return -ENOLCK;
	}

	fuse_lk_fill(&args, file, fl, opcode, pid_nr, flock, &inarg);
	err = fuse_simple_request(fm, &args);

	/* locking is restartable */
	if (err == -EINTR)
		err = -ERESTARTSYS;

	return err;
}

static int fuse_file_lock(struct file *file, int cmd, struct file_lock *fl)
{
	struct inode *inode = file_inode(file);
	struct fuse_conn *fc = get_fuse_conn(inode);
	int err;

	if (cmd == F_CANCELLK) {
		err = 0;
	} else if (cmd == F_GETLK) {
		if (fc->no_lock) {
			posix_test_lock(file, fl);
			err = 0;
		} else
			err = fuse_getlk(file, fl);
	} else {
		if (fc->no_lock)
			err = posix_lock_file(file, fl, NULL);
		else
			err = fuse_setlk(file, fl, 0);
	}
	return err;
}

static int fuse_file_flock(struct file *file, int cmd, struct file_lock *fl)
{
	struct inode *inode = file_inode(file);
	struct fuse_conn *fc = get_fuse_conn(inode);
	int err;

	if (fc->no_flock) {
		err = locks_lock_file_wait(file, fl);
	} else {
		struct fuse_file *ff = file->private_data;

		/* emulate flock with POSIX locks */
		ff->flock = true;
		err = fuse_setlk(file, fl, 1);
	}

	return err;
}

static sector_t fuse_bmap(struct address_space *mapping, sector_t block)
{
	struct inode *inode = mapping->host;
	struct fuse_mount *fm = get_fuse_mount(inode);
	FUSE_ARGS(args);
	struct fuse_bmap_in inarg;
	struct fuse_bmap_out outarg;
	int err;

	if (!inode->i_sb->s_bdev || fm->fc->no_bmap)
		return 0;

	memset(&inarg, 0, sizeof(inarg));
	inarg.block = block;
	inarg.blocksize = inode->i_sb->s_blocksize;
	args.opcode = FUSE_BMAP;
	args.nodeid = get_node_id(inode);
	args.in_numargs = 1;
	args.in_args[0].size = sizeof(inarg);
	args.in_args[0].value = &inarg;
	args.out_numargs = 1;
	args.out_args[0].size = sizeof(outarg);
	args.out_args[0].value = &outarg;
	err = fuse_simple_request(fm, &args);
	if (err == -ENOSYS)
		fm->fc->no_bmap = 1;

	return err ? 0 : outarg.block;
}
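
/*
 * fuse_bmap() backs the FIBMAP ioctl on block-device based
 * filesystems.  Returning 0 both on error and when the server lacks
 * FUSE_BMAP support follows the bmap convention that 0 means
 * "no mapping".
 */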

static loff_t fuse_lseek(struct file *file, loff_t offset, int whence)
{
	struct inode *inode = file->f_mapping->host;
	struct fuse_mount *fm = get_fuse_mount(inode);
	struct fuse_file *ff = file->private_data;
	FUSE_ARGS(args);
	struct fuse_lseek_in inarg = {
		.fh = ff->fh,
		.offset = offset,
		.whence = whence
	};
	struct fuse_lseek_out outarg;
	int err;

	if (fm->fc->no_lseek)
		goto fallback;

	args.opcode = FUSE_LSEEK;
	args.nodeid = ff->nodeid;
	args.in_numargs = 1;
	args.in_args[0].size = sizeof(inarg);
	args.in_args[0].value = &inarg;
	args.out_numargs = 1;
	args.out_args[0].size = sizeof(outarg);
	args.out_args[0].value = &outarg;
	err = fuse_simple_request(fm, &args);
	if (err) {
		if (err == -ENOSYS) {
			fm->fc->no_lseek = 1;
			goto fallback;
		}
		return err;
	}

	return vfs_setpos(file, outarg.offset, inode->i_sb->s_maxbytes);

fallback:
	err = fuse_update_attributes(inode, file, STATX_SIZE);
	if (!err)
		return generic_file_llseek(file, offset, whence);
	else
		return err;
}

static loff_t fuse_file_llseek(struct file *file, loff_t offset, int whence)
{
	loff_t retval;
	struct inode *inode = file_inode(file);

	switch (whence) {
	case SEEK_SET:
	case SEEK_CUR:
		/* No i_mutex protection necessary for SEEK_CUR and SEEK_SET */
		retval = generic_file_llseek(file, offset, whence);
		break;
	case SEEK_END:
		inode_lock(inode);
		retval = fuse_update_attributes(inode, file, STATX_SIZE);
		if (!retval)
			retval = generic_file_llseek(file, offset, whence);
		inode_unlock(inode);
		break;
	case SEEK_HOLE:
	case SEEK_DATA:
		inode_lock(inode);
		retval = fuse_lseek(file, offset, whence);
		inode_unlock(inode);
		break;
	default:
		retval = -EINVAL;
	}

	return retval;
}

/*
 * All files which have been polled are linked to RB tree
 * fuse_conn->polled_files which is indexed by kh.  Walk the tree and
 * find the matching one.
 */
static struct rb_node **fuse_find_polled_node(struct fuse_conn *fc, u64 kh,
					      struct rb_node **parent_out)
{
	struct rb_node **link = &fc->polled_files.rb_node;
	struct rb_node *last = NULL;

	while (*link) {
		struct fuse_file *ff;

		last = *link;
		ff = rb_entry(last, struct fuse_file, polled_node);

		if (kh < ff->kh)
			link = &last->rb_left;
		else if (kh > ff->kh)
			link = &last->rb_right;
		else
			return link;
	}

	if (parent_out)
		*parent_out = last;
	return link;
}

/*
 * The file is about to be polled.  Make sure it's on the polled_files
 * RB tree.  Note that files once added to the polled_files tree are
 * not removed before the file is released.  This is because a file
 * polled once is likely to be polled again.
 */
static void fuse_register_polled_file(struct fuse_conn *fc,
				      struct fuse_file *ff)
{
	spin_lock(&fc->lock);
	if (RB_EMPTY_NODE(&ff->polled_node)) {
		struct rb_node **link, *parent;

		link = fuse_find_polled_node(fc, ff->kh, &parent);
		BUG_ON(*link);
		rb_link_node(&ff->polled_node, parent, link);
		rb_insert_color(&ff->polled_node, &fc->polled_files);
	}
	spin_unlock(&fc->lock);
}

__poll_t fuse_file_poll(struct file *file, poll_table *wait)
{
	struct fuse_file *ff = file->private_data;
	struct fuse_mount *fm = ff->fm;
	struct fuse_poll_in inarg = { .fh = ff->fh, .kh = ff->kh };
	struct fuse_poll_out outarg;
	FUSE_ARGS(args);
	int err;

	if (fm->fc->no_poll)
		return DEFAULT_POLLMASK;

	poll_wait(file, &ff->poll_wait, wait);
	inarg.events = mangle_poll(poll_requested_events(wait));

	/*
	 * Ask for notification iff there's someone waiting for it.
	 * The client may ignore the flag and always notify.
	 */
	if (waitqueue_active(&ff->poll_wait)) {
		inarg.flags |= FUSE_POLL_SCHEDULE_NOTIFY;
		fuse_register_polled_file(fm->fc, ff);
	}

	args.opcode = FUSE_POLL;
	args.nodeid = ff->nodeid;
	args.in_numargs = 1;
	args.in_args[0].size = sizeof(inarg);
	args.in_args[0].value = &inarg;
	args.out_numargs = 1;
	args.out_args[0].size = sizeof(outarg);
	args.out_args[0].value = &outarg;
	err = fuse_simple_request(fm, &args);

	if (!err)
		return demangle_poll(outarg.revents);
	if (err == -ENOSYS) {
		fm->fc->no_poll = 1;
		return DEFAULT_POLLMASK;
	}
	return EPOLLERR;
}
EXPORT_SYMBOL_GPL(fuse_file_poll);

/*
 * This is called from fuse_handle_notify() on FUSE_NOTIFY_POLL and
 * wakes up the poll waiters.
 */
int fuse_notify_poll_wakeup(struct fuse_conn *fc,
			    struct fuse_notify_poll_wakeup_out *outarg)
{
	u64 kh = outarg->kh;
	struct rb_node **link;

	spin_lock(&fc->lock);

	link = fuse_find_polled_node(fc, kh, NULL);
	if (*link) {
		struct fuse_file *ff;

		ff = rb_entry(*link, struct fuse_file, polled_node);
		wake_up_interruptible_sync(&ff->poll_wait);
	}

	spin_unlock(&fc->lock);
	return 0;
}

static void fuse_do_truncate(struct file *file)
{
	struct inode *inode = file->f_mapping->host;
	struct iattr attr;

	attr.ia_valid = ATTR_SIZE;
	attr.ia_size = i_size_read(inode);

	attr.ia_file = file;
	attr.ia_valid |= ATTR_FILE;

	fuse_do_setattr(file_dentry(file), &attr, file);
}

static inline loff_t fuse_round_up(struct fuse_conn *fc, loff_t off)
{
	return round_up(off, fc->max_pages << PAGE_SHIFT);
}
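
/*
 * Worked example: with 4K pages and fc->max_pages == 256 the rounding
 * granule is 256 << 12 == 1 MiB, so fuse_round_up(fc, 5000) == 1048576.
 * Rounding a shortened read up to a whole granule keeps the async
 * short-read optimization in fuse_direct_IO() below aligned to request
 * boundaries.
 */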

static ssize_t
fuse_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
{
	DECLARE_COMPLETION_ONSTACK(wait);
	ssize_t ret = 0;
	struct file *file = iocb->ki_filp;
	struct fuse_file *ff = file->private_data;
	loff_t pos = 0;
	struct inode *inode;
	loff_t i_size;
	size_t count = iov_iter_count(iter), shortened = 0;
	loff_t offset = iocb->ki_pos;
	struct fuse_io_priv *io;

	pos = offset;
	inode = file->f_mapping->host;
	i_size = i_size_read(inode);

	if ((iov_iter_rw(iter) == READ) && (offset >= i_size))
		return 0;

	io = kmalloc(sizeof(struct fuse_io_priv), GFP_KERNEL);
	if (!io)
		return -ENOMEM;
	spin_lock_init(&io->lock);
	kref_init(&io->refcnt);
	io->reqs = 1;
	io->bytes = -1;
	io->size = 0;
	io->offset = offset;
	io->write = (iov_iter_rw(iter) == WRITE);
	io->err = 0;
	/*
	 * By default, we want to optimize all I/Os with async request
	 * submission to the client filesystem if supported.
	 */
	io->async = ff->fm->fc->async_dio;
	io->iocb = iocb;
	io->blocking = is_sync_kiocb(iocb);

	/* optimization for short read */
	if (io->async && !io->write && offset + count > i_size) {
		iov_iter_truncate(iter, fuse_round_up(ff->fm->fc, i_size - offset));
		shortened = count - iov_iter_count(iter);
		count -= shortened;
	}

	/*
	 * We cannot asynchronously extend the size of a file.
	 * In such a case the aio will behave exactly like sync io.
	 */
	if ((offset + count > i_size) && io->write)
		io->blocking = true;

	if (io->async && io->blocking) {
		/*
		 * Additional reference to keep io around after
		 * calling fuse_aio_complete()
		 */
		kref_get(&io->refcnt);
		io->done = &wait;
	}

	if (iov_iter_rw(iter) == WRITE) {
		ret = fuse_direct_io(io, iter, &pos, FUSE_DIO_WRITE);
		fuse_invalidate_attr_mask(inode, FUSE_STATX_MODSIZE);
	} else {
		ret = __fuse_direct_read(io, iter, &pos);
	}
	iov_iter_reexpand(iter, iov_iter_count(iter) + shortened);

	if (io->async) {
		bool blocking = io->blocking;

		fuse_aio_complete(io, ret < 0 ? ret : 0, -1);

		/* we have a non-extending, async request, so return */
		if (!blocking)
			return -EIOCBQUEUED;

		wait_for_completion(&wait);
		ret = fuse_get_res_by_io(io);
	}

	kref_put(&io->refcnt, fuse_io_release);

	if (iov_iter_rw(iter) == WRITE) {
		fuse_write_update_attr(inode, pos, ret);
		/* For extending writes we already hold exclusive lock */
		if (ret < 0 && offset + count > i_size)
			fuse_do_truncate(file);
	}

	return ret;
}
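
/*
 * Short-read example (4K pages, fc->max_pages == 256): an async 2 MiB
 * read at offset 0 of a 100-byte file is truncated to one 1 MiB
 * granule before submission, and the iterator is re-expanded by
 * "shortened" afterwards, so the caller sees the original request
 * length with a short result.
 */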

static int fuse_writeback_range(struct inode *inode, loff_t start, loff_t end)
{
	int err = filemap_write_and_wait_range(inode->i_mapping, start, LLONG_MAX);

	if (!err)
		fuse_sync_writes(inode);

	return err;
}

static long fuse_file_fallocate(struct file *file, int mode, loff_t offset,
				loff_t length)
{
	struct fuse_file *ff = file->private_data;
	struct inode *inode = file_inode(file);
	struct fuse_inode *fi = get_fuse_inode(inode);
	struct fuse_mount *fm = ff->fm;
	FUSE_ARGS(args);
	struct fuse_fallocate_in inarg = {
		.fh = ff->fh,
		.offset = offset,
		.length = length,
		.mode = mode
	};
	int err;
	bool block_faults = FUSE_IS_DAX(inode) &&
		(!(mode & FALLOC_FL_KEEP_SIZE) ||
		 (mode & (FALLOC_FL_PUNCH_HOLE | FALLOC_FL_ZERO_RANGE)));

	if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE |
		     FALLOC_FL_ZERO_RANGE))
		return -EOPNOTSUPP;

	if (fm->fc->no_fallocate)
		return -EOPNOTSUPP;

	inode_lock(inode);
	if (block_faults) {
		filemap_invalidate_lock(inode->i_mapping);
		err = fuse_dax_break_layouts(inode, 0, 0);
		if (err)
			goto out;
	}

	if (mode & (FALLOC_FL_PUNCH_HOLE | FALLOC_FL_ZERO_RANGE)) {
		loff_t endbyte = offset + length - 1;

		err = fuse_writeback_range(inode, offset, endbyte);
		if (err)
			goto out;
	}

	if (!(mode & FALLOC_FL_KEEP_SIZE) &&
	    offset + length > i_size_read(inode)) {
		err = inode_newsize_ok(inode, offset + length);
		if (err)
			goto out;
	}

	err = file_modified(file);
	if (err)
		goto out;

	if (!(mode & FALLOC_FL_KEEP_SIZE))
		set_bit(FUSE_I_SIZE_UNSTABLE, &fi->state);

	args.opcode = FUSE_FALLOCATE;
	args.nodeid = ff->nodeid;
	args.in_numargs = 1;
	args.in_args[0].size = sizeof(inarg);
	args.in_args[0].value = &inarg;
	err = fuse_simple_request(fm, &args);
	if (err == -ENOSYS) {
		fm->fc->no_fallocate = 1;
		err = -EOPNOTSUPP;
	}
	if (err)
		goto out;

	/* we could have extended the file */
	if (!(mode & FALLOC_FL_KEEP_SIZE)) {
		if (fuse_write_update_attr(inode, offset + length, length))
			file_update_time(file);
	}

	if (mode & (FALLOC_FL_PUNCH_HOLE | FALLOC_FL_ZERO_RANGE))
		truncate_pagecache_range(inode, offset, offset + length - 1);

	fuse_invalidate_attr_mask(inode, FUSE_STATX_MODSIZE);

out:
	if (!(mode & FALLOC_FL_KEEP_SIZE))
		clear_bit(FUSE_I_SIZE_UNSTABLE, &fi->state);

	if (block_faults)
		filemap_invalidate_unlock(inode->i_mapping);

	inode_unlock(inode);

	fuse_flush_time_update(inode);

	return err;
}

static ssize_t __fuse_copy_file_range(struct file *file_in, loff_t pos_in,
				      struct file *file_out, loff_t pos_out,
				      size_t len, unsigned int flags)
{
	struct fuse_file *ff_in = file_in->private_data;
	struct fuse_file *ff_out = file_out->private_data;
	struct inode *inode_in = file_inode(file_in);
	struct inode *inode_out = file_inode(file_out);
	struct fuse_inode *fi_out = get_fuse_inode(inode_out);
	struct fuse_mount *fm = ff_in->fm;
	struct fuse_conn *fc = fm->fc;
	FUSE_ARGS(args);
	struct fuse_copy_file_range_in inarg = {
		.fh_in = ff_in->fh,
		.off_in = pos_in,
		.nodeid_out = ff_out->nodeid,
		.fh_out = ff_out->fh,
		.off_out = pos_out,
		.len = len,
		.flags = flags
	};
	struct fuse_write_out outarg;
	ssize_t err;
	/* mark unstable when write-back is not used, and file_out gets
	 * extended */
	bool is_unstable = (!fc->writeback_cache) &&
			   ((pos_out + len) > inode_out->i_size);

	if (fc->no_copy_file_range)
		return -EOPNOTSUPP;

	if (file_inode(file_in)->i_sb != file_inode(file_out)->i_sb)
		return -EXDEV;

	inode_lock(inode_in);
	err = fuse_writeback_range(inode_in, pos_in, pos_in + len - 1);
	inode_unlock(inode_in);
	if (err)
		return err;

	inode_lock(inode_out);

	err = file_modified(file_out);
	if (err)
		goto out;

	/*
	 * Write out dirty pages in the destination file before sending the COPY
	 * request to userspace.  After the request is completed, truncate off
	 * pages (including partial ones) from the cache that have been copied,
	 * since these contain stale data at that point.
	 *
	 * This should be mostly correct, but if the COPY writes to partial
	 * pages (at the start or end) and the parts not covered by the COPY are
	 * written through a memory map after calling fuse_writeback_range(),
	 * then these partial page modifications will be lost on truncation.
	 *
	 * It is unlikely that someone would rely on such mixed style
	 * modifications.  Yet this does give fewer guarantees than if the
	 * copying was performed with write(2).
	 *
	 * To fix this a mapping->invalidate_lock could be used to prevent new
	 * faults while the copy is ongoing.
	 */
	err = fuse_writeback_range(inode_out, pos_out, pos_out + len - 1);
	if (err)
		goto out;

	if (is_unstable)
		set_bit(FUSE_I_SIZE_UNSTABLE, &fi_out->state);

	args.opcode = FUSE_COPY_FILE_RANGE;
	args.nodeid = ff_in->nodeid;
	args.in_numargs = 1;
	args.in_args[0].size = sizeof(inarg);
	args.in_args[0].value = &inarg;
	args.out_numargs = 1;
	args.out_args[0].size = sizeof(outarg);
	args.out_args[0].value = &outarg;
	err = fuse_simple_request(fm, &args);
	if (err == -ENOSYS) {
		fc->no_copy_file_range = 1;
		err = -EOPNOTSUPP;
	}
	if (err)
		goto out;

	truncate_inode_pages_range(inode_out->i_mapping,
				   ALIGN_DOWN(pos_out, PAGE_SIZE),
				   ALIGN(pos_out + outarg.size, PAGE_SIZE) - 1);

	file_update_time(file_out);
	fuse_write_update_attr(inode_out, pos_out + outarg.size, outarg.size);

	err = outarg.size;
out:
	if (is_unstable)
		clear_bit(FUSE_I_SIZE_UNSTABLE, &fi_out->state);

	inode_unlock(inode_out);
	file_accessed(file_in);

	fuse_flush_time_update(inode_out);

	return err;
}
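
/*
 * Truncation example for the cache invalidation above (4K pages): a
 * successful 6000-byte copy to pos_out 1000 drops cached pages from
 * byte 0 (ALIGN_DOWN(1000, 4096)) through 8191 (ALIGN(7000, 4096) - 1),
 * i.e. whole pages including the partially copied head and tail.
 */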

static ssize_t fuse_copy_file_range(struct file *src_file, loff_t src_off,
				    struct file *dst_file, loff_t dst_off,
				    size_t len, unsigned int flags)
{
	ssize_t ret;

	ret = __fuse_copy_file_range(src_file, src_off, dst_file, dst_off,
				     len, flags);

	if (ret == -EOPNOTSUPP || ret == -EXDEV)
		ret = splice_copy_file_range(src_file, src_off, dst_file,
					     dst_off, len);
	return ret;
}

static const struct file_operations fuse_file_operations = {
	.llseek		= fuse_file_llseek,
	.read_iter	= fuse_file_read_iter,
	.write_iter	= fuse_file_write_iter,
	.mmap		= fuse_file_mmap,
	.open		= fuse_open,
	.flush		= fuse_flush,
	.release	= fuse_release,
	.fsync		= fuse_fsync,
	.lock		= fuse_file_lock,
	.get_unmapped_area = thp_get_unmapped_area,
	.flock		= fuse_file_flock,
	.splice_read	= fuse_splice_read,
	.splice_write	= fuse_splice_write,
	.unlocked_ioctl	= fuse_file_ioctl,
	.compat_ioctl	= fuse_file_compat_ioctl,
	.poll		= fuse_file_poll,
	.fallocate	= fuse_file_fallocate,
	.copy_file_range = fuse_copy_file_range,
};

static const struct address_space_operations fuse_file_aops = {
	.read_folio	= fuse_read_folio,
	.readahead	= fuse_readahead,
	.writepages	= fuse_writepages,
	.launder_folio	= fuse_launder_folio,
	.dirty_folio	= filemap_dirty_folio,
	.migrate_folio	= filemap_migrate_folio,
	.bmap		= fuse_bmap,
	.direct_IO	= fuse_direct_IO,
	.write_begin	= fuse_write_begin,
	.write_end	= fuse_write_end,
};

void fuse_init_file_inode(struct inode *inode, unsigned int flags)
{
	struct fuse_inode *fi = get_fuse_inode(inode);

	inode->i_fop = &fuse_file_operations;
	inode->i_data.a_ops = &fuse_file_aops;

	INIT_LIST_HEAD(&fi->write_files);
	INIT_LIST_HEAD(&fi->queued_writes);
	fi->writectr = 0;
	fi->iocachectr = 0;
	init_waitqueue_head(&fi->page_waitq);
	init_waitqueue_head(&fi->direct_io_waitq);
	fi->writepages = RB_ROOT;

	if (IS_ENABLED(CONFIG_FUSE_DAX))
		fuse_dax_inode_init(inode, flags);
}