fuse: remove an unnecessary if statement
[linux-2.6-block.git] / fs/fuse/file.c
1/*
2 FUSE: Filesystem in Userspace
1729a16c 3 Copyright (C) 2001-2008 Miklos Szeredi <miklos@szeredi.hu>
4
5 This program can be distributed under the terms of the GNU GPL.
6 See the file COPYING.
7*/
8
9#include "fuse_i.h"
10
11#include <linux/pagemap.h>
12#include <linux/slab.h>
13#include <linux/kernel.h>
e8edc6e0 14#include <linux/sched.h>
7a36094d 15#include <linux/sched/signal.h>
08cbf542 16#include <linux/module.h>
478e0841 17#include <linux/swap.h>
3634a632 18#include <linux/falloc.h>
e2e40f2c 19#include <linux/uio.h>
31070f6c 20#include <linux/fs.h>
5970e15d 21#include <linux/filelock.h>
705bcfcb 22#include <linux/splice.h>
2e3f7dd0 23#include <linux/task_io_accounting_ops.h>
b6aeaded 24
25static int fuse_send_open(struct fuse_mount *fm, u64 nodeid,
26 unsigned int open_flags, int opcode,
27 struct fuse_open_out *outargp)
b6aeaded 28{
b6aeaded 29 struct fuse_open_in inarg;
7078187a 30 FUSE_ARGS(args);
31
32 memset(&inarg, 0, sizeof(inarg));
b9d54c6f 33 inarg.flags = open_flags & ~(O_CREAT | O_EXCL | O_NOCTTY);
fcee216b 34 if (!fm->fc->atomic_o_trunc)
6ff958ed 35 inarg.flags &= ~O_TRUNC;
36
37 if (fm->fc->handle_killpriv_v2 &&
38 (inarg.flags & O_TRUNC) && !capable(CAP_FSETID)) {
39 inarg.open_flags |= FUSE_OPEN_KILL_SUIDGID;
40 }
41
42 args.opcode = opcode;
43 args.nodeid = nodeid;
44 args.in_numargs = 1;
45 args.in_args[0].size = sizeof(inarg);
46 args.in_args[0].value = &inarg;
47 args.out_numargs = 1;
48 args.out_args[0].size = sizeof(*outargp);
49 args.out_args[0].value = outargp;
fd72faac 50
fcee216b 51 return fuse_simple_request(fm, &args);
52}
53
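/*
 * Allocate and initialize a fuse_file.  When @release is true, ff->args
 * is allocated up front as well, so that a RELEASE request can later be
 * sent without another allocation; on failure everything is freed and
 * NULL is returned.
 */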
e26ee4ef 54struct fuse_file *fuse_file_alloc(struct fuse_mount *fm, bool release)
55{
56 struct fuse_file *ff;
6b2db28a 57
dc69e98c 58 ff = kzalloc(sizeof(struct fuse_file), GFP_KERNEL_ACCOUNT);
59 if (unlikely(!ff))
60 return NULL;
61
fcee216b 62 ff->fm = fm;
e26ee4ef 63 if (release) {
64 ff->args = kzalloc(sizeof(*ff->args), GFP_KERNEL_ACCOUNT);
65 if (!ff->args) {
66 kfree(ff);
67 return NULL;
68 }
fd72faac 69 }
70
71 INIT_LIST_HEAD(&ff->write_entry);
5d7bc7e8 72 mutex_init(&ff->readdir.lock);
4e8c2eb5 73 refcount_set(&ff->count, 1);
74 RB_CLEAR_NODE(&ff->polled_node);
75 init_waitqueue_head(&ff->poll_wait);
76
fcee216b 77 ff->kh = atomic64_inc_return(&fm->fc->khctr);
6b2db28a 78
79 return ff;
80}
81
82void fuse_file_free(struct fuse_file *ff)
83{
fc8ff397 84 kfree(ff->args);
5d7bc7e8 85 mutex_destroy(&ff->readdir.lock);
86 kfree(ff);
87}
88
267d8444 89static struct fuse_file *fuse_file_get(struct fuse_file *ff)
c756e0a4 90{
4e8c2eb5 91 refcount_inc(&ff->count);
92 return ff;
93}
94
fcee216b 95static void fuse_release_end(struct fuse_mount *fm, struct fuse_args *args,
4cb54866 96 int error)
819c4b3b 97{
98 struct fuse_release_args *ra = container_of(args, typeof(*ra), args);
99
100 iput(ra->inode);
101 kfree(ra);
102}
103
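/*
 * Drop a reference to a fuse_file.  On the final put the RELEASE request
 * prepared in ff->args is sent: synchronously when @sync is set, otherwise
 * as a background request completed by fuse_release_end().  If the server
 * never implemented open there are no args and nothing is sent.
 */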
e26ee4ef 104static void fuse_file_put(struct fuse_file *ff, bool sync)
c756e0a4 105{
4e8c2eb5 106 if (refcount_dec_and_test(&ff->count)) {
fc8ff397 107 struct fuse_release_args *ra = &ff->args->release_args;
e26ee4ef 108 struct fuse_args *args = (ra ? &ra->args : NULL);
8b0797a4 109
110 if (ra && ra->inode)
111 fuse_file_io_release(ff, ra->inode);
112
113 if (!args) {
114 /* Do nothing when server does not implement 'open' */
7678ac50 115 } else if (sync) {
116 fuse_simple_request(ff->fm, args);
117 fuse_release_end(ff->fm, args, 0);
5a18ec17 118 } else {
4cb54866 119 args->end = fuse_release_end;
fcee216b 120 if (fuse_simple_background(ff->fm, args,
4cb54866 121 GFP_KERNEL | __GFP_NOFAIL))
fcee216b 122 fuse_release_end(ff->fm, args, -ENOTCONN);
5a18ec17 123 }
124 kfree(ff);
125 }
126}
127
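/*
 * Open a file or directory on the server.  FUSE_OPEN/FUSE_OPENDIR is only
 * sent if the server has not previously answered -ENOSYS for that opcode;
 * in the -ENOSYS case default open flags are used and the opt-out is
 * remembered in fc->no_open/fc->no_opendir.  Returns the new fuse_file or
 * an ERR_PTR().
 */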
128struct fuse_file *fuse_file_open(struct fuse_mount *fm, u64 nodeid,
129 unsigned int open_flags, bool isdir)
91fe96b4 130{
fcee216b 131 struct fuse_conn *fc = fm->fc;
91fe96b4 132 struct fuse_file *ff;
91fe96b4 133 int opcode = isdir ? FUSE_OPENDIR : FUSE_OPEN;
e26ee4ef 134 bool open = isdir ? !fc->no_opendir : !fc->no_open;
91fe96b4 135
e26ee4ef 136 ff = fuse_file_alloc(fm, open);
91fe96b4 137 if (!ff)
b9d54c6f 138 return ERR_PTR(-ENOMEM);
91fe96b4 139
7678ac50 140 ff->fh = 0;
141 /* Default for no-open */
142 ff->open_flags = FOPEN_KEEP_CACHE | (isdir ? FOPEN_CACHE_DIR : 0);
e26ee4ef 143 if (open) {
144 /* Store outarg for fuse_finish_open() */
145 struct fuse_open_out *outargp = &ff->args->open_outarg;
146 int err;
147
fc8ff397 148 err = fuse_send_open(fm, nodeid, open_flags, opcode, outargp);
7678ac50 149 if (!err) {
150 ff->fh = outargp->fh;
151 ff->open_flags = outargp->open_flags;
d9a9ea94 152 } else if (err != -ENOSYS) {
7678ac50 153 fuse_file_free(ff);
b9d54c6f 154 return ERR_PTR(err);
7678ac50 155 } else {
e26ee4ef 156 /* No release needed */
157 kfree(ff->args);
158 ff->args = NULL;
159 if (isdir)
160 fc->no_opendir = 1;
161 else
162 fc->no_open = 1;
7678ac50 163 }
164 }
165
166 if (isdir)
7678ac50 167 ff->open_flags &= ~FOPEN_DIRECT_IO;
91fe96b4 168
91fe96b4 169 ff->nodeid = nodeid;
91fe96b4 170
171 return ff;
172}
173
174int fuse_do_open(struct fuse_mount *fm, u64 nodeid, struct file *file,
175 bool isdir)
176{
177 struct fuse_file *ff = fuse_file_open(fm, nodeid, file->f_flags, isdir);
178
179 if (!IS_ERR(ff))
180 file->private_data = ff;
181
182 return PTR_ERR_OR_ZERO(ff);
91fe96b4 183}
08cbf542 184EXPORT_SYMBOL_GPL(fuse_do_open);
91fe96b4 185
186static void fuse_link_write_file(struct file *file)
187{
188 struct inode *inode = file_inode(file);
189 struct fuse_inode *fi = get_fuse_inode(inode);
190 struct fuse_file *ff = file->private_data;
191 /*
192 * file may be written through mmap, so chain it onto the
 193 * inode's write_files list
194 */
f15ecfef 195 spin_lock(&fi->lock);
196 if (list_empty(&ff->write_entry))
197 list_add(&ff->write_entry, &fi->write_files);
f15ecfef 198 spin_unlock(&fi->lock);
199}
200
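/*
 * Per-open setup after a successful FUSE_OPEN reply: honour FOPEN_STREAM
 * and FOPEN_NONSEEKABLE, and with writeback cache enabled link the file
 * onto the inode's write_files list so that writeback has an open file to
 * write through.
 */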
d2c487f1 201int fuse_finish_open(struct inode *inode, struct file *file)
fd72faac 202{
c7b7143c 203 struct fuse_file *ff = file->private_data;
a0822c55 204 struct fuse_conn *fc = get_fuse_conn(inode);
205 int err;
206
207 err = fuse_file_io_open(file, inode);
208 if (err)
209 return err;
c7b7143c 210
211 if (ff->open_flags & FOPEN_STREAM)
212 stream_open(inode, file);
213 else if (ff->open_flags & FOPEN_NONSEEKABLE)
a7c1b990 214 nonseekable_open(inode, file);
76224355 215
216 if ((file->f_mode & FMODE_WRITE) && fc->writeback_cache)
217 fuse_link_write_file(file);
218
219 return 0;
220}
221
222static void fuse_truncate_update_attr(struct inode *inode, struct file *file)
223{
224 struct fuse_conn *fc = get_fuse_conn(inode);
225 struct fuse_inode *fi = get_fuse_inode(inode);
226
227 spin_lock(&fi->lock);
228 fi->attr_version = atomic64_inc_return(&fc->attr_version);
229 i_size_write(inode, 0);
230 spin_unlock(&fi->lock);
231 file_update_time(file);
232 fuse_invalidate_attr_mask(inode, FUSE_STATX_MODSIZE);
233}
234
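/*
 * Open with O_TRUNC needs care when the server supports atomic_o_trunc:
 * with writeback cache or DAX the inode is locked and further writes are
 * blocked (fuse_set_nowrite) while FUSE_OPEN carries the O_TRUNC, and on
 * success the local page cache and attributes are truncated to match.
 */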
7de64d52 235static int fuse_open(struct inode *inode, struct file *file)
fd72faac 236{
fcee216b 237 struct fuse_mount *fm = get_fuse_mount(inode);
d2c487f1 238 struct fuse_inode *fi = get_fuse_inode(inode);
fcee216b 239 struct fuse_conn *fc = fm->fc;
d2c487f1 240 struct fuse_file *ff;
b6aeaded 241 int err;
242 bool is_truncate = (file->f_flags & O_TRUNC) && fc->atomic_o_trunc;
243 bool is_wb_truncate = is_truncate && fc->writeback_cache;
244 bool dax_truncate = is_truncate && FUSE_IS_DAX(inode);
b6aeaded 245
246 if (fuse_is_bad(inode))
247 return -EIO;
248
249 err = generic_file_open(inode, file);
250 if (err)
251 return err;
252
2fdbb8dd 253 if (is_wb_truncate || dax_truncate)
5955102c 254 inode_lock(inode);
75caeecd 255
6ae330ca 256 if (dax_truncate) {
8bcbbe9c 257 filemap_invalidate_lock(inode->i_mapping);
258 err = fuse_dax_break_layouts(inode, 0, 0);
259 if (err)
2fdbb8dd 260 goto out_inode_unlock;
6ae330ca 261 }
b6aeaded 262
263 if (is_wb_truncate || dax_truncate)
264 fuse_set_nowrite(inode);
265
7de64d52 266 err = fuse_do_open(fm, get_node_id(inode), file, false);
0c9d7089 267 if (!err) {
268 ff = file->private_data;
269 err = fuse_finish_open(inode, file);
270 if (err)
271 fuse_sync_release(fi, ff, file->f_flags);
272 else if (is_truncate)
273 fuse_truncate_update_attr(inode, file);
274 }
91fe96b4 275
276 if (is_wb_truncate || dax_truncate)
277 fuse_release_nowrite(inode);
278 if (!err) {
0c9d7089 279 if (is_truncate)
280 truncate_pagecache(inode, 0);
281 else if (!(ff->open_flags & FOPEN_KEEP_CACHE))
282 invalidate_inode_pages2(inode->i_mapping);
283 }
6ae330ca 284 if (dax_truncate)
8bcbbe9c 285 filemap_invalidate_unlock(inode->i_mapping);
286out_inode_unlock:
287 if (is_wb_truncate || dax_truncate)
5955102c 288 inode_unlock(inode);
289
290 return err;
291}
292
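/*
 * Detach the file from per-inode and per-connection bookkeeping (the
 * write_files list and the polled_files rbtree) and fill in the RELEASE
 * arguments in ff->args.  For an asynchronous release the inode is pinned
 * with igrab() until the request completes.
 */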
ebf84d0c 293static void fuse_prepare_release(struct fuse_inode *fi, struct fuse_file *ff,
e26ee4ef 294 unsigned int flags, int opcode, bool sync)
64c6d8ed 295{
fcee216b 296 struct fuse_conn *fc = ff->fm->fc;
fc8ff397 297 struct fuse_release_args *ra = &ff->args->release_args;
b6aeaded 298
299 if (fuse_file_passthrough(ff))
300 fuse_passthrough_release(ff, fuse_inode_backing(fi));
301
302 /* Inode is NULL on error path of fuse_create_open() */
303 if (likely(fi)) {
304 spin_lock(&fi->lock);
305 list_del(&ff->write_entry);
306 spin_unlock(&fi->lock);
307 }
8b0797a4 308 spin_lock(&fc->lock);
309 if (!RB_EMPTY_NODE(&ff->polled_node))
310 rb_erase(&ff->polled_node, &fc->polled_files);
311 spin_unlock(&fc->lock);
312
357ccf2b 313 wake_up_interruptible_all(&ff->poll_wait);
8b0797a4 314
315 if (!ra)
316 return;
317
318 /* ff->args was used for open outarg */
319 memset(ff->args, 0, sizeof(*ff->args));
320 ra->inarg.fh = ff->fh;
321 ra->inarg.flags = flags;
322 ra->args.in_numargs = 1;
323 ra->args.in_args[0].size = sizeof(struct fuse_release_in);
324 ra->args.in_args[0].value = &ra->inarg;
325 ra->args.opcode = opcode;
326 ra->args.nodeid = ff->nodeid;
327 ra->args.force = true;
328 ra->args.nocreds = true;
329
330 /*
331 * Hold inode until release is finished.
332 * From fuse_sync_release() the refcount is 1 and everything's
333 * synchronous, so we are fine with not doing igrab() here.
334 */
335 ra->inode = sync ? NULL : igrab(&fi->inode);
336}
337
338void fuse_file_release(struct inode *inode, struct fuse_file *ff,
339 unsigned int open_flags, fl_owner_t id, bool isdir)
fd72faac 340{
b9d54c6f 341 struct fuse_inode *fi = get_fuse_inode(inode);
fc8ff397 342 struct fuse_release_args *ra = &ff->args->release_args;
2e64ff15 343 int opcode = isdir ? FUSE_RELEASEDIR : FUSE_RELEASE;
6b2db28a 344
e26ee4ef 345 fuse_prepare_release(fi, ff, open_flags, opcode, false);
6b2db28a 346
e26ee4ef 347 if (ra && ff->flock) {
4cb54866 348 ra->inarg.release_flags |= FUSE_RELEASE_FLOCK_UNLOCK;
b9d54c6f 349 ra->inarg.lock_owner = fuse_lock_owner_id(ff->fm->fc, id);
37fb3a30 350 }
6b2db28a 351
352 /*
 353 * Normally this will send the RELEASE request; however, if
354 * some asynchronous READ or WRITE requests are outstanding,
355 * the sending will be delayed.
356 *
 357 * Make the release synchronous if this is a fuseblk mount, since
358 * synchronous RELEASE is allowed (and desirable) in this case
359 * because the server can be trusted not to screw up.
6b2db28a 360 */
e26ee4ef 361 fuse_file_put(ff, ff->fm->fc->destroy);
362}
363
364void fuse_release_common(struct file *file, bool isdir)
365{
366 fuse_file_release(file_inode(file), file->private_data, file->f_flags,
367 (fl_owner_t) file, isdir);
368}
369
370static int fuse_release(struct inode *inode, struct file *file)
371{
372 struct fuse_conn *fc = get_fuse_conn(inode);
373
374 /*
375 * Dirty pages might remain despite write_inode_now() call from
376 * fuse_flush() due to writes racing with the close.
377 */
378 if (fc->writeback_cache)
379 write_inode_now(inode, 1);
380
2e64ff15 381 fuse_release_common(file, false);
382
383 /* return value is ignored by VFS */
384 return 0;
385}
386
387void fuse_sync_release(struct fuse_inode *fi, struct fuse_file *ff,
388 unsigned int flags)
8b0797a4 389{
4e8c2eb5 390 WARN_ON(refcount_read(&ff->count) > 1);
391 fuse_prepare_release(fi, ff, flags, FUSE_RELEASE, true);
392 fuse_file_put(ff, true);
04730fef 393}
08cbf542 394EXPORT_SYMBOL_GPL(fuse_sync_release);
04730fef 395
71421259 396/*
397 * Scramble the ID space with XTEA, so that the value of the files_struct
398 * pointer is not exposed to userspace.
71421259 399 */
f3332114 400u64 fuse_lock_owner_id(struct fuse_conn *fc, fl_owner_t id)
71421259 401{
402 u32 *k = fc->scramble_key;
403 u64 v = (unsigned long) id;
404 u32 v0 = v;
405 u32 v1 = v >> 32;
406 u32 sum = 0;
407 int i;
408
409 for (i = 0; i < 32; i++) {
410 v0 += ((v1 << 4 ^ v1 >> 5) + v1) ^ (sum + k[sum & 3]);
411 sum += 0x9E3779B9;
412 v1 += ((v0 << 4 ^ v0 >> 5) + v0) ^ (sum + k[sum>>11 & 3]);
413 }
414
415 return (u64) v0 + ((u64) v1 << 32);
416}
417
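/*
 * State for one writeback request built from temporary pages: the WRITE
 * args and page vector live in 'ia', 'writepages_entry' indexes the
 * request in the per-inode rbtree of in-flight writepages, and 'next'
 * chains auxiliary requests covering the same pages.
 */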
418struct fuse_writepage_args {
419 struct fuse_io_args ia;
6b2fb799 420 struct rb_node writepages_entry;
421 struct list_head queue_entry;
422 struct fuse_writepage_args *next;
423 struct inode *inode;
660585b5 424 struct fuse_sync_bucket *bucket;
425};
426
427static struct fuse_writepage_args *fuse_find_writeback(struct fuse_inode *fi,
428 pgoff_t idx_from, pgoff_t idx_to)
429{
430 struct rb_node *n;
431
432 n = fi->writepages.rb_node;
2fe93bd4 433
434 while (n) {
435 struct fuse_writepage_args *wpa;
436 pgoff_t curr_index;
437
6b2fb799 438 wpa = rb_entry(n, struct fuse_writepage_args, writepages_entry);
439 WARN_ON(get_fuse_inode(wpa->inode) != fi);
440 curr_index = wpa->ia.write.in.offset >> PAGE_SHIFT;
441 if (idx_from >= curr_index + wpa->ia.ap.num_pages)
442 n = n->rb_right;
443 else if (idx_to < curr_index)
444 n = n->rb_left;
445 else
33826ebb 446 return wpa;
447 }
448 return NULL;
449}
450
3be5a52b 451/*
ea8cd333 452 * Check if any page in a range is under writeback
453 *
454 * This is currently done by walking the list of writepage requests
455 * for the inode, which can be pretty inefficient.
456 */
457static bool fuse_range_is_writeback(struct inode *inode, pgoff_t idx_from,
458 pgoff_t idx_to)
3be5a52b 459{
3be5a52b 460 struct fuse_inode *fi = get_fuse_inode(inode);
2fe93bd4 461 bool found;
3be5a52b 462
f15ecfef 463 spin_lock(&fi->lock);
2fe93bd4 464 found = fuse_find_writeback(fi, idx_from, idx_to);
f15ecfef 465 spin_unlock(&fi->lock);
466
467 return found;
468}
469
470static inline bool fuse_page_is_writeback(struct inode *inode, pgoff_t index)
471{
472 return fuse_range_is_writeback(inode, index, index);
473}
474
475/*
476 * Wait for page writeback to be completed.
477 *
478 * Since fuse doesn't rely on the VM writeback tracking, this has to
479 * use some other means.
480 */
17b2cbe2 481static void fuse_wait_on_page_writeback(struct inode *inode, pgoff_t index)
482{
483 struct fuse_inode *fi = get_fuse_inode(inode);
484
485 wait_event(fi->page_waitq, !fuse_page_is_writeback(inode, index));
486}
487
488/*
489 * Wait for all pending writepages on the inode to finish.
490 *
491 * This is currently done by blocking further writes with FUSE_NOWRITE
492 * and waiting for all sent writes to complete.
493 *
494 * This must be called under i_mutex, otherwise the FUSE_NOWRITE usage
495 * could conflict with truncation.
496 */
497static void fuse_sync_writes(struct inode *inode)
498{
499 fuse_set_nowrite(inode);
500 fuse_release_nowrite(inode);
501}
502
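/*
 * ->flush(): write back and wait for dirty data, then tell the server
 * about the close with FUSE_FLUSH unless the server has declared it
 * unimplemented.  With writeback cache, i_blocks may be stale afterwards,
 * so that attribute is invalidated.
 */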
91ec6c85 503static int fuse_flush(struct file *file, fl_owner_t id)
5a8bee63 504{
91ec6c85 505 struct inode *inode = file_inode(file);
5a8bee63 506 struct fuse_mount *fm = get_fuse_mount(inode);
507 struct fuse_file *ff = file->private_data;
508 struct fuse_flush_in inarg;
509 FUSE_ARGS(args);
510 int err;
511
512 if (fuse_is_bad(inode))
513 return -EIO;
514
515 if (ff->open_flags & FOPEN_NOFLUSH && !fm->fc->writeback_cache)
516 return 0;
a390ccb3 517
1e18bda8 518 err = write_inode_now(inode, 1);
fe38d7df 519 if (err)
91ec6c85 520 return err;
fe38d7df 521
5955102c 522 inode_lock(inode);
fe38d7df 523 fuse_sync_writes(inode);
5955102c 524 inode_unlock(inode);
fe38d7df 525
91ec6c85 526 err = filemap_check_errors(file->f_mapping);
9ebce595 527 if (err)
91ec6c85 528 return err;
9ebce595 529
614c026e 530 err = 0;
fcee216b 531 if (fm->fc->no_flush)
532 goto inval_attr_out;
533
534 memset(&inarg, 0, sizeof(inarg));
535 inarg.fh = ff->fh;
536 inarg.lock_owner = fuse_lock_owner_id(fm->fc, id);
537 args.opcode = FUSE_FLUSH;
538 args.nodeid = get_node_id(inode);
539 args.in_numargs = 1;
540 args.in_args[0].size = sizeof(inarg);
541 args.in_args[0].value = &inarg;
542 args.force = true;
543
544 err = fuse_simple_request(fm, &args);
b6aeaded 545 if (err == -ENOSYS) {
fcee216b 546 fm->fc->no_flush = 1;
547 err = 0;
548 }
549
550inval_attr_out:
551 /*
552 * In memory i_blocks is not maintained by fuse, if writeback cache is
553 * enabled, i_blocks from cached attr may not be accurate.
554 */
fcee216b 555 if (!err && fm->fc->writeback_cache)
fa5eee57 556 fuse_invalidate_attr_mask(inode, STATX_BLOCKS);
557 return err;
558}
559
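/*
 * Send FUSE_FSYNC or FUSE_FSYNCDIR for the given range; the datasync flag
 * is forwarded as FUSE_FSYNC_FDATASYNC.  Callers are expected to have
 * flushed dirty pages first (see fuse_fsync()).
 */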
02c24a82 560int fuse_fsync_common(struct file *file, loff_t start, loff_t end,
a9c2d1e8 561 int datasync, int opcode)
b6aeaded 562{
7ea80859 563 struct inode *inode = file->f_mapping->host;
fcee216b 564 struct fuse_mount *fm = get_fuse_mount(inode);
b6aeaded 565 struct fuse_file *ff = file->private_data;
7078187a 566 FUSE_ARGS(args);
b6aeaded 567 struct fuse_fsync_in inarg;
568
569 memset(&inarg, 0, sizeof(inarg));
570 inarg.fh = ff->fh;
154603fe 571 inarg.fsync_flags = datasync ? FUSE_FSYNC_FDATASYNC : 0;
572 args.opcode = opcode;
573 args.nodeid = get_node_id(inode);
574 args.in_numargs = 1;
575 args.in_args[0].size = sizeof(inarg);
576 args.in_args[0].value = &inarg;
fcee216b 577 return fuse_simple_request(fm, &args);
578}
579
580static int fuse_fsync(struct file *file, loff_t start, loff_t end,
581 int datasync)
582{
583 struct inode *inode = file->f_mapping->host;
584 struct fuse_conn *fc = get_fuse_conn(inode);
585 int err;
586
5d069dbe 587 if (fuse_is_bad(inode))
588 return -EIO;
589
5955102c 590 inode_lock(inode);
02c24a82 591
592 /*
593 * Start writeback against all dirty pages of the inode, then
594 * wait for all outstanding writes, before sending the FSYNC
595 * request.
596 */
7e51fe1d 597 err = file_write_and_wait_range(file, start, end);
3be5a52b 598 if (err)
02c24a82 599 goto out;
600
601 fuse_sync_writes(inode);
602
603 /*
604 * Due to implementation of fuse writeback
7e51fe1d 605 * file_write_and_wait_range() does not catch errors.
606 * We have to do this directly after fuse_sync_writes()
607 */
7e51fe1d 608 err = file_check_and_advance_wb_err(file);
609 if (err)
610 goto out;
611
612 err = sync_inode_metadata(inode, 1);
613 if (err)
614 goto out;
3be5a52b 615
a9c2d1e8 616 if (fc->no_fsync)
22401e7b 617 goto out;
b0aa7606 618
a9c2d1e8 619 err = fuse_fsync_common(file, start, end, datasync, FUSE_FSYNC);
b6aeaded 620 if (err == -ENOSYS) {
a9c2d1e8 621 fc->no_fsync = 1;
622 err = 0;
623 }
02c24a82 624out:
5955102c 625 inode_unlock(inode);
b6aeaded 626
a9c2d1e8 627 return err;
628}
629
630void fuse_read_args_fill(struct fuse_io_args *ia, struct file *file, loff_t pos,
631 size_t count, int opcode)
632{
633 struct fuse_file *ff = file->private_data;
634 struct fuse_args *args = &ia->ap.args;
635
636 ia->read.in.fh = ff->fh;
637 ia->read.in.offset = pos;
638 ia->read.in.size = count;
639 ia->read.in.flags = file->f_flags;
640 args->opcode = opcode;
641 args->nodeid = ff->nodeid;
642 args->in_numargs = 1;
643 args->in_args[0].size = sizeof(ia->read.in);
644 args->in_args[0].value = &ia->read.in;
645 args->out_argvar = true;
646 args->out_numargs = 1;
647 args->out_args[0].size = count;
648}
649
650static void fuse_release_user_pages(struct fuse_args_pages *ap,
651 bool should_dirty)
187c5c36 652{
45ac96ed 653 unsigned int i;
187c5c36 654
45ac96ed 655 for (i = 0; i < ap->num_pages; i++) {
8fba54ae 656 if (should_dirty)
657 set_page_dirty_lock(ap->pages[i]);
658 put_page(ap->pages[i]);
659 }
660}
661
662static void fuse_io_release(struct kref *kref)
663{
664 kfree(container_of(kref, struct fuse_io_priv, refcnt));
665}
666
667static ssize_t fuse_get_res_by_io(struct fuse_io_priv *io)
668{
669 if (io->err)
670 return io->err;
671
672 if (io->bytes >= 0 && io->write)
673 return -EIO;
674
675 return io->bytes < 0 ? io->size : io->bytes;
676}
677
06bbb761 678/*
679 * In case of short read, the caller sets 'pos' to the position of
680 * actual end of fuse request in IO request. Otherwise, if bytes_requested
681 * == bytes_transferred or rw == WRITE, the caller sets 'pos' to -1.
682 *
683 * An example:
c4e0cd4e 684 * User requested DIO read of 64K. It was split into two 32K fuse requests,
685 * both submitted asynchronously. The first of them was ACKed by userspace as
686 * fully completed (req->out.args[0].size == 32K) resulting in pos == -1. The
687 * second request was ACKed as short, e.g. only 1K was read, resulting in
688 * pos == 33K.
689 *
690 * Thus, when all fuse requests are completed, the minimal non-negative 'pos'
691 * will be equal to the length of the longest contiguous fragment of
692 * transferred data starting from the beginning of IO request.
693 */
694static void fuse_aio_complete(struct fuse_io_priv *io, int err, ssize_t pos)
695{
696 int left;
697
698 spin_lock(&io->lock);
699 if (err)
700 io->err = io->err ? : err;
701 else if (pos >= 0 && (io->bytes < 0 || pos < io->bytes))
702 io->bytes = pos;
703
704 left = --io->reqs;
7879c4e5 705 if (!left && io->blocking)
9d5722b7 706 complete(io->done);
707 spin_unlock(&io->lock);
708
7879c4e5 709 if (!left && !io->blocking) {
9d5722b7 710 ssize_t res = fuse_get_res_by_io(io);
01e9d11a 711
712 if (res >= 0) {
713 struct inode *inode = file_inode(io->iocb->ki_filp);
714 struct fuse_conn *fc = get_fuse_conn(inode);
715 struct fuse_inode *fi = get_fuse_inode(inode);
01e9d11a 716
f15ecfef 717 spin_lock(&fi->lock);
4510d86f 718 fi->attr_version = atomic64_inc_return(&fc->attr_version);
f15ecfef 719 spin_unlock(&fi->lock);
720 }
721
6b19b766 722 io->iocb->ki_complete(io->iocb, res);
01e9d11a 723 }
724
725 kref_put(&io->refcnt, fuse_io_release);
726}
727
728static struct fuse_io_args *fuse_io_alloc(struct fuse_io_priv *io,
729 unsigned int npages)
730{
731 struct fuse_io_args *ia;
732
733 ia = kzalloc(sizeof(*ia), GFP_KERNEL);
734 if (ia) {
735 ia->io = io;
736 ia->ap.pages = fuse_pages_alloc(npages, GFP_KERNEL,
737 &ia->ap.descs);
738 if (!ia->ap.pages) {
739 kfree(ia);
740 ia = NULL;
741 }
742 }
743 return ia;
744}
745
746static void fuse_io_free(struct fuse_io_args *ia)
01e9d11a 747{
748 kfree(ia->ap.pages);
749 kfree(ia);
750}
751
fcee216b 752static void fuse_aio_complete_req(struct fuse_mount *fm, struct fuse_args *args,
753 int err)
754{
755 struct fuse_io_args *ia = container_of(args, typeof(*ia), ap.args);
756 struct fuse_io_priv *io = ia->io;
757 ssize_t pos = -1;
758
45ac96ed 759 fuse_release_user_pages(&ia->ap, io->should_dirty);
01e9d11a 760
761 if (err) {
762 /* Nothing */
763 } else if (io->write) {
764 if (ia->write.out.size > ia->write.in.size) {
765 err = -EIO;
766 } else if (ia->write.in.size != ia->write.out.size) {
767 pos = ia->write.in.offset - io->offset +
768 ia->write.out.size;
769 }
01e9d11a 770 } else {
771 u32 outsize = args->out_args[0].size;
772
773 if (ia->read.in.size != outsize)
774 pos = ia->read.in.offset - io->offset + outsize;
775 }
776
777 fuse_aio_complete(io, err, pos);
778 fuse_io_free(ia);
779}
780
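/*
 * Queue one asynchronous READ or WRITE fragment: account the bytes in the
 * parent fuse_io_priv, then submit the request in the background with
 * fuse_aio_complete_req() as the completion callback.  Returns the number
 * of bytes submitted.
 */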
fcee216b 781static ssize_t fuse_async_req_send(struct fuse_mount *fm,
45ac96ed 782 struct fuse_io_args *ia, size_t num_bytes)
01e9d11a 783{
784 ssize_t err;
785 struct fuse_io_priv *io = ia->io;
786
01e9d11a 787 spin_lock(&io->lock);
744742d6 788 kref_get(&io->refcnt);
789 io->size += num_bytes;
790 io->reqs++;
791 spin_unlock(&io->lock);
792
45ac96ed 793 ia->ap.args.end = fuse_aio_complete_req;
bb737bbe 794 ia->ap.args.may_block = io->should_dirty;
fcee216b 795 err = fuse_simple_background(fm, &ia->ap.args, GFP_KERNEL);
f1ebdeff 796 if (err)
fcee216b 797 fuse_aio_complete_req(fm, &ia->ap.args, err);
01e9d11a 798
f1ebdeff 799 return num_bytes;
800}
801
802static ssize_t fuse_send_read(struct fuse_io_args *ia, loff_t pos, size_t count,
803 fl_owner_t owner)
04730fef 804{
45ac96ed 805 struct file *file = ia->io->iocb->ki_filp;
2106cb18 806 struct fuse_file *ff = file->private_data;
fcee216b 807 struct fuse_mount *fm = ff->fm;
f3332114 808
45ac96ed 809 fuse_read_args_fill(ia, file, pos, count, FUSE_READ);
f3332114 810 if (owner != NULL) {
45ac96ed 811 ia->read.in.read_flags |= FUSE_READ_LOCKOWNER;
fcee216b 812 ia->read.in.lock_owner = fuse_lock_owner_id(fm->fc, owner);
f3332114 813 }
36cf66ed 814
45ac96ed 815 if (ia->io->async)
fcee216b 816 return fuse_async_req_send(fm, ia, count);
36cf66ed 817
fcee216b 818 return fuse_simple_request(fm, &ia->ap.args);
819}
820
821static void fuse_read_update_size(struct inode *inode, loff_t size,
822 u64 attr_ver)
823{
824 struct fuse_conn *fc = get_fuse_conn(inode);
825 struct fuse_inode *fi = get_fuse_inode(inode);
826
f15ecfef 827 spin_lock(&fi->lock);
484ce657 828 if (attr_ver >= fi->attr_version && size < inode->i_size &&
06a7c3c2 829 !test_bit(FUSE_I_SIZE_UNSTABLE, &fi->state)) {
4510d86f 830 fi->attr_version = atomic64_inc_return(&fc->attr_version);
831 i_size_write(inode, size);
832 }
f15ecfef 833 spin_unlock(&fi->lock);
834}
835
a0d45d84 836static void fuse_short_read(struct inode *inode, u64 attr_ver, size_t num_read,
134831e3 837 struct fuse_args_pages *ap)
a92adc82 838{
839 struct fuse_conn *fc = get_fuse_conn(inode);
840
841 /*
842 * If writeback_cache is enabled, a short read means there's a hole in
843 * the file. Some data after the hole is in page cache, but has not
844 * reached the client fs yet. So the hole is not present there.
845 */
846 if (!fc->writeback_cache) {
134831e3 847 loff_t pos = page_offset(ap->pages[0]) + num_read;
848 fuse_read_update_size(inode, pos, attr_ver);
849 }
850}
851
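/*
 * Synchronously read one page with a single FUSE_READ request.  A short
 * reply means EOF, in which case the cached i_size may be trimmed via
 * fuse_short_read().
 */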
482fce55 852static int fuse_do_readpage(struct file *file, struct page *page)
853{
854 struct inode *inode = page->mapping->host;
fcee216b 855 struct fuse_mount *fm = get_fuse_mount(inode);
5c5c5e51 856 loff_t pos = page_offset(page);
857 struct fuse_page_desc desc = { .length = PAGE_SIZE };
858 struct fuse_io_args ia = {
859 .ap.args.page_zeroing = true,
860 .ap.args.out_pages = true,
861 .ap.num_pages = 1,
862 .ap.pages = &page,
863 .ap.descs = &desc,
864 };
865 ssize_t res;
5c5c5e51 866 u64 attr_ver;
248d86e8 867
3be5a52b 868 /*
25985edc 869 * Page writeback can extend beyond the lifetime of the
870 * page-cache page, so make sure we read a properly synced
871 * page.
872 */
873 fuse_wait_on_page_writeback(inode, page->index);
874
fcee216b 875 attr_ver = fuse_get_attr_version(fm->fc);
5c5c5e51 876
877 /* Don't overflow end offset */
878 if (pos + (desc.length - 1) == LLONG_MAX)
879 desc.length--;
880
00793ca5 881 fuse_read_args_fill(&ia, file, pos, desc.length, FUSE_READ);
fcee216b 882 res = fuse_simple_request(fm, &ia.ap.args);
883 if (res < 0)
884 return res;
885 /*
886 * Short read means EOF. If file size is larger, truncate it
887 */
888 if (res < desc.length)
134831e3 889 fuse_short_read(inode, attr_ver, res, &ia.ap);
5c5c5e51 890
00793ca5 891 SetPageUptodate(page);
482fce55 892
00793ca5 893 return 0;
894}
895
5efd00e4 896static int fuse_read_folio(struct file *file, struct folio *folio)
482fce55 897{
5efd00e4 898 struct page *page = &folio->page;
899 struct inode *inode = page->mapping->host;
900 int err;
901
902 err = -EIO;
5d069dbe 903 if (fuse_is_bad(inode))
904 goto out;
905
906 err = fuse_do_readpage(file, page);
451418fc 907 fuse_invalidate_atime(inode);
908 out:
909 unlock_page(page);
910 return err;
911}
912
fcee216b 913static void fuse_readpages_end(struct fuse_mount *fm, struct fuse_args *args,
134831e3 914 int err)
db50b96c 915{
c1aa96a5 916 int i;
917 struct fuse_io_args *ia = container_of(args, typeof(*ia), ap.args);
918 struct fuse_args_pages *ap = &ia->ap;
919 size_t count = ia->read.in.size;
920 size_t num_read = args->out_args[0].size;
ce534fb0 921 struct address_space *mapping = NULL;
c1aa96a5 922
134831e3
MS
923 for (i = 0; mapping == NULL && i < ap->num_pages; i++)
924 mapping = ap->pages[i]->mapping;
5c5c5e51 925
926 if (mapping) {
927 struct inode *inode = mapping->host;
928
929 /*
930 * Short read means EOF. If file size is larger, truncate it
931 */
932 if (!err && num_read < count)
933 fuse_short_read(inode, ia->read.attr_ver, num_read, ap);
ce534fb0 934
451418fc 935 fuse_invalidate_atime(inode);
ce534fb0 936 }
c1aa96a5 937
938 for (i = 0; i < ap->num_pages; i++) {
939 struct page *page = ap->pages[i];
940
941 if (!err)
db50b96c 942 SetPageUptodate(page);
943 else
944 SetPageError(page);
db50b96c 945 unlock_page(page);
09cbfeaf 946 put_page(page);
db50b96c 947 }
134831e3 948 if (ia->ff)
e26ee4ef 949 fuse_file_put(ia->ff, false);
950
951 fuse_io_free(ia);
952}
953
134831e3 954static void fuse_send_readpages(struct fuse_io_args *ia, struct file *file)
c1aa96a5 955{
2106cb18 956 struct fuse_file *ff = file->private_data;
fcee216b 957 struct fuse_mount *fm = ff->fm;
958 struct fuse_args_pages *ap = &ia->ap;
959 loff_t pos = page_offset(ap->pages[0]);
960 size_t count = ap->num_pages << PAGE_SHIFT;
7df1e988 961 ssize_t res;
962 int err;
963
964 ap->args.out_pages = true;
965 ap->args.page_zeroing = true;
966 ap->args.page_replace = true;
967
968 /* Don't overflow end offset */
969 if (pos + (count - 1) == LLONG_MAX) {
970 count--;
971 ap->descs[ap->num_pages - 1].length--;
972 }
973 WARN_ON((loff_t) (pos + count) < 0);
974
134831e3 975 fuse_read_args_fill(ia, file, pos, count, FUSE_READ);
976 ia->read.attr_ver = fuse_get_attr_version(fm->fc);
977 if (fm->fc->async_read) {
978 ia->ff = fuse_file_get(ff);
979 ap->args.end = fuse_readpages_end;
fcee216b 980 err = fuse_simple_background(fm, &ap->args, GFP_KERNEL);
981 if (!err)
982 return;
9cd68455 983 } else {
fcee216b 984 res = fuse_simple_request(fm, &ap->args);
7df1e988 985 err = res < 0 ? res : 0;
9cd68455 986 }
fcee216b 987 fuse_readpages_end(fm, &ap->args, err);
988}
989
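/*
 * Readahead: pack the window into FUSE_READ requests of at most
 * fc->max_pages pages (bounded by max_read), skipping the purely
 * asynchronous part of the window while the connection is congested.
 */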
76a0294e 990static void fuse_readahead(struct readahead_control *rac)
db50b96c 991{
76a0294e 992 struct inode *inode = rac->mapping->host;
db50b96c 993 struct fuse_conn *fc = get_fuse_conn(inode);
76a0294e 994 unsigned int i, max_pages, nr_pages = 0;
db50b96c 995
5d069dbe 996 if (fuse_is_bad(inode))
76a0294e 997 return;
248d86e8 998
999 max_pages = min_t(unsigned int, fc->max_pages,
1000 fc->max_read / PAGE_SIZE);
db50b96c 1001
1002 for (;;) {
1003 struct fuse_io_args *ia;
1004 struct fuse_args_pages *ap;
1005
1006 if (fc->num_background >= fc->congestion_threshold &&
1007 rac->ra->async_size >= readahead_count(rac))
1008 /*
1009 * Congested and only async pages left, so skip the
1010 * rest.
1011 */
1012 break;
1013
1014 nr_pages = readahead_count(rac) - nr_pages;
1015 if (nr_pages > max_pages)
1016 nr_pages = max_pages;
1017 if (nr_pages == 0)
1018 break;
1019 ia = fuse_io_alloc(NULL, nr_pages);
1020 if (!ia)
1021 return;
1022 ap = &ia->ap;
1023 nr_pages = __readahead_batch(rac, ap->pages, nr_pages);
1024 for (i = 0; i < nr_pages; i++) {
1025 fuse_wait_on_page_writeback(inode,
1026 readahead_index(rac) + i);
1027 ap->descs[i].length = PAGE_SIZE;
1028 }
1029 ap->num_pages = nr_pages;
1030 fuse_send_readpages(ia, rac->file);
d3406ffa 1031 }
1032}
1033
55752a3a 1034static ssize_t fuse_cache_read_iter(struct kiocb *iocb, struct iov_iter *to)
1035{
1036 struct inode *inode = iocb->ki_filp->f_mapping->host;
a8894274 1037 struct fuse_conn *fc = get_fuse_conn(inode);
bcb4be80 1038
1039 /*
1040 * In auto invalidate mode, always update attributes on read.
1041 * Otherwise, only update if we attempt to read past EOF (to ensure
1042 * i_size is up to date).
1043 */
1044 if (fc->auto_inval_data ||
37c20f16 1045 (iocb->ki_pos + iov_iter_count(to) > i_size_read(inode))) {
bcb4be80 1046 int err;
c6c745b8 1047 err = fuse_update_attributes(inode, iocb->ki_filp, STATX_SIZE);
1048 if (err)
1049 return err;
1050 }
1051
37c20f16 1052 return generic_file_read_iter(iocb, to);
1053}
1054
1055static void fuse_write_args_fill(struct fuse_io_args *ia, struct fuse_file *ff,
1056 loff_t pos, size_t count)
1057{
1058 struct fuse_args *args = &ia->ap.args;
1059
1060 ia->write.in.fh = ff->fh;
1061 ia->write.in.offset = pos;
1062 ia->write.in.size = count;
1063 args->opcode = FUSE_WRITE;
1064 args->nodeid = ff->nodeid;
1065 args->in_numargs = 2;
fcee216b 1066 if (ff->fm->fc->minor < 9)
1067 args->in_args[0].size = FUSE_COMPAT_WRITE_IN_SIZE;
1068 else
1069 args->in_args[0].size = sizeof(ia->write.in);
1070 args->in_args[0].value = &ia->write.in;
1071 args->in_args[1].size = count;
1072 args->out_numargs = 1;
1073 args->out_args[0].size = sizeof(ia->write.out);
1074 args->out_args[0].value = &ia->write.out;
1075}
1076
1077static unsigned int fuse_write_flags(struct kiocb *iocb)
1078{
1079 unsigned int flags = iocb->ki_filp->f_flags;
1080
91b94c5d 1081 if (iocb_is_dsync(iocb))
1082 flags |= O_DSYNC;
1083 if (iocb->ki_flags & IOCB_SYNC)
1084 flags |= O_SYNC;
1085
1086 return flags;
1087}
1088
1089static ssize_t fuse_send_write(struct fuse_io_args *ia, loff_t pos,
1090 size_t count, fl_owner_t owner)
b25e82e5 1091{
45ac96ed 1092 struct kiocb *iocb = ia->io->iocb;
e1c0eecb 1093 struct file *file = iocb->ki_filp;
2106cb18 1094 struct fuse_file *ff = file->private_data;
fcee216b 1095 struct fuse_mount *fm = ff->fm;
1096 struct fuse_write_in *inarg = &ia->write.in;
1097 ssize_t err;
2d698b07 1098
45ac96ed 1099 fuse_write_args_fill(ia, ff, pos, count);
338f2e3f 1100 inarg->flags = fuse_write_flags(iocb);
f3332114 1101 if (owner != NULL) {
f3332114 1102 inarg->write_flags |= FUSE_WRITE_LOCKOWNER;
fcee216b 1103 inarg->lock_owner = fuse_lock_owner_id(fm->fc, owner);
f3332114 1104 }
36cf66ed 1105
45ac96ed 1106 if (ia->io->async)
fcee216b 1107 return fuse_async_req_send(fm, ia, count);
45ac96ed 1108
fcee216b 1109 err = fuse_simple_request(fm, &ia->ap.args);
1110 if (!err && ia->write.out.size > count)
1111 err = -EIO;
36cf66ed 1112
45ac96ed 1113 return err ?: ia->write.out.size;
1114}
1115
d347739a 1116bool fuse_write_update_attr(struct inode *inode, loff_t pos, ssize_t written)
1117{
1118 struct fuse_conn *fc = get_fuse_conn(inode);
1119 struct fuse_inode *fi = get_fuse_inode(inode);
b0aa7606 1120 bool ret = false;
854512ec 1121
f15ecfef 1122 spin_lock(&fi->lock);
4510d86f 1123 fi->attr_version = atomic64_inc_return(&fc->attr_version);
d347739a 1124 if (written > 0 && pos > inode->i_size) {
854512ec 1125 i_size_write(inode, pos);
1126 ret = true;
1127 }
f15ecfef 1128 spin_unlock(&fi->lock);
b0aa7606 1129
1130 fuse_invalidate_attr_mask(inode, FUSE_STATX_MODSIZE);
1131
b0aa7606 1132 return ret;
1133}
1134
1135static ssize_t fuse_send_write_pages(struct fuse_io_args *ia,
1136 struct kiocb *iocb, struct inode *inode,
1137 loff_t pos, size_t count)
ea9b9907 1138{
1139 struct fuse_args_pages *ap = &ia->ap;
1140 struct file *file = iocb->ki_filp;
1141 struct fuse_file *ff = file->private_data;
fcee216b 1142 struct fuse_mount *fm = ff->fm;
338f2e3f 1143 unsigned int offset, i;
4f06dd92 1144 bool short_write;
338f2e3f 1145 int err;
ea9b9907 1146
1147 for (i = 0; i < ap->num_pages; i++)
1148 fuse_wait_on_page_writeback(inode, ap->pages[i]->index);
ea9b9907 1149
1150 fuse_write_args_fill(ia, ff, pos, count);
1151 ia->write.in.flags = fuse_write_flags(iocb);
1152 if (fm->fc->handle_killpriv_v2 && !capable(CAP_FSETID))
1153 ia->write.in.write_flags |= FUSE_WRITE_KILL_SUIDGID;
ea9b9907 1154
fcee216b 1155 err = fuse_simple_request(fm, &ap->args);
1156 if (!err && ia->write.out.size > count)
1157 err = -EIO;
338f2e3f 1158
4f06dd92 1159 short_write = ia->write.out.size < count;
1160 offset = ap->descs[0].offset;
1161 count = ia->write.out.size;
1162 for (i = 0; i < ap->num_pages; i++) {
1163 struct page *page = ap->pages[i];
ea9b9907 1164
1165 if (err) {
1166 ClearPageUptodate(page);
1167 } else {
1168 if (count >= PAGE_SIZE - offset)
1169 count -= PAGE_SIZE - offset;
1170 else {
1171 if (short_write)
1172 ClearPageUptodate(page);
1173 count = 0;
1174 }
1175 offset = 0;
1176 }
1177 if (ia->write.page_locked && (i == ap->num_pages - 1))
1178 unlock_page(page);
09cbfeaf 1179 put_page(page);
1180 }
1181
338f2e3f 1182 return err;
1183}
1184
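/*
 * Copy data from the iterator into freshly grabbed page cache pages for
 * one WRITE request, stopping at max_pages, fc->max_write or the first
 * partially copied page.  Fully copied pages are marked uptodate and
 * unlocked; a partially filled page is left locked and flagged in
 * ia->write.page_locked.
 */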
4f06dd92 1185static ssize_t fuse_fill_write_pages(struct fuse_io_args *ia,
1186 struct address_space *mapping,
1187 struct iov_iter *ii, loff_t pos,
1188 unsigned int max_pages)
ea9b9907 1189{
4f06dd92 1190 struct fuse_args_pages *ap = &ia->ap;
ea9b9907 1191 struct fuse_conn *fc = get_fuse_conn(mapping->host);
09cbfeaf 1192 unsigned offset = pos & (PAGE_SIZE - 1);
1193 size_t count = 0;
1194 int err;
1195
1196 ap->args.in_pages = true;
1197 ap->descs[0].offset = offset;
1198
1199 do {
1200 size_t tmp;
1201 struct page *page;
1202 pgoff_t index = pos >> PAGE_SHIFT;
1203 size_t bytes = min_t(size_t, PAGE_SIZE - offset,
1204 iov_iter_count(ii));
1205
1206 bytes = min_t(size_t, bytes, fc->max_write - count);
1207
1208 again:
1209 err = -EFAULT;
a6294593 1210 if (fault_in_iov_iter_readable(ii, bytes))
1211 break;
1212
1213 err = -ENOMEM;
b7446e7c 1214 page = grab_cache_page_write_begin(mapping, index);
1215 if (!page)
1216 break;
1217
931e80e4 1218 if (mapping_writably_mapped(mapping))
1219 flush_dcache_page(page);
1220
f0b65f39 1221 tmp = copy_page_from_iter_atomic(page, offset, bytes, ii);
1222 flush_dcache_page(page);
1223
1224 if (!tmp) {
1225 unlock_page(page);
09cbfeaf 1226 put_page(page);
1227 goto again;
1228 }
1229
1230 err = 0;
1231 ap->pages[ap->num_pages] = page;
1232 ap->descs[ap->num_pages].length = tmp;
1233 ap->num_pages++;
ea9b9907 1234
1235 count += tmp;
1236 pos += tmp;
1237 offset += tmp;
09cbfeaf 1238 if (offset == PAGE_SIZE)
1239 offset = 0;
1240
1241 /* If we copied full page, mark it uptodate */
1242 if (tmp == PAGE_SIZE)
1243 SetPageUptodate(page);
1244
1245 if (PageUptodate(page)) {
1246 unlock_page(page);
1247 } else {
1248 ia->write.page_locked = true;
1249 break;
1250 }
1251 if (!fc->big_writes)
1252 break;
ea9b9907 1253 } while (iov_iter_count(ii) && count < fc->max_write &&
338f2e3f 1254 ap->num_pages < max_pages && offset == 0);
1255
1256 return count > 0 ? count : err;
1257}
1258
1259static inline unsigned int fuse_wr_pages(loff_t pos, size_t len,
1260 unsigned int max_pages)
d07f09f5 1261{
5da784cc 1262 return min_t(unsigned int,
1263 ((pos + len - 1) >> PAGE_SHIFT) -
1264 (pos >> PAGE_SHIFT) + 1,
5da784cc 1265 max_pages);
1266}
1267
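/*
 * Writethrough path: repeatedly fill a page vector from the iterator and
 * send it with a synchronous WRITE, updating i_size and advancing
 * iocb->ki_pos by the number of bytes the server actually accepted.
 */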
596df33d 1268static ssize_t fuse_perform_write(struct kiocb *iocb, struct iov_iter *ii)
ea9b9907 1269{
596df33d 1270 struct address_space *mapping = iocb->ki_filp->f_mapping;
1271 struct inode *inode = mapping->host;
1272 struct fuse_conn *fc = get_fuse_conn(inode);
06a7c3c2 1273 struct fuse_inode *fi = get_fuse_inode(inode);
596df33d 1274 loff_t pos = iocb->ki_pos;
1275 int err = 0;
1276 ssize_t res = 0;
1277
1278 if (inode->i_size < pos + iov_iter_count(ii))
1279 set_bit(FUSE_I_SIZE_UNSTABLE, &fi->state);
1280
ea9b9907 1281 do {
ea9b9907 1282 ssize_t count;
1283 struct fuse_io_args ia = {};
1284 struct fuse_args_pages *ap = &ia.ap;
1285 unsigned int nr_pages = fuse_wr_pages(pos, iov_iter_count(ii),
1286 fc->max_pages);
ea9b9907 1287
1288 ap->pages = fuse_pages_alloc(nr_pages, GFP_KERNEL, &ap->descs);
1289 if (!ap->pages) {
1290 err = -ENOMEM;
1291 break;
1292 }
1293
4f06dd92 1294 count = fuse_fill_write_pages(&ia, mapping, ii, pos, nr_pages);
1295 if (count <= 0) {
1296 err = count;
1297 } else {
1298 err = fuse_send_write_pages(&ia, iocb, inode,
1299 pos, count);
ea9b9907 1300 if (!err) {
1301 size_t num_written = ia.write.out.size;
1302
1303 res += num_written;
1304 pos += num_written;
1305
1306 /* break out of the loop on short write */
1307 if (num_written != count)
1308 err = -EIO;
1309 }
1310 }
338f2e3f 1311 kfree(ap->pages);
1312 } while (!err && iov_iter_count(ii));
1313
d347739a 1314 fuse_write_update_attr(inode, pos, res);
06a7c3c2 1315 clear_bit(FUSE_I_SIZE_UNSTABLE, &fi->state);
ea9b9907 1316
1317 if (!res)
1318 return err;
1319 iocb->ki_pos += res;
1320 return res;
1321}
1322
1323static bool fuse_io_past_eof(struct kiocb *iocb, struct iov_iter *iter)
1324{
1325 struct inode *inode = file_inode(iocb->ki_filp);
1326
1327 return iocb->ki_pos + iov_iter_count(iter) > i_size_read(inode);
1328}
1329
1330/*
1331 * @return true if an exclusive lock for direct IO writes is needed
1332 */
1333static bool fuse_dio_wr_exclusive_lock(struct kiocb *iocb, struct iov_iter *from)
1334{
1335 struct file *file = iocb->ki_filp;
1336 struct fuse_file *ff = file->private_data;
1337 struct inode *inode = file_inode(iocb->ki_filp);
205c1d80 1338 struct fuse_inode *fi = get_fuse_inode(inode);
1339
1340 /* Server side has to advise that it supports parallel dio writes. */
1341 if (!(ff->open_flags & FOPEN_PARALLEL_DIRECT_WRITES))
1342 return true;
1343
1344 /*
1345 * Append will need to know the eventual EOF - always needs an
1346 * exclusive lock.
1347 */
1348 if (iocb->ki_flags & IOCB_APPEND)
1349 return true;
1350
1351 /* shared locks are not allowed with parallel page cache IO */
1352 if (test_bit(FUSE_I_CACHE_IO_MODE, &fi->state))
1353 return false;
1354
1355 /* Parallel dio beyond EOF is not supported, at least for now. */
1356 if (fuse_io_past_eof(iocb, from))
1357 return true;
1358
1359 return false;
1360}
1361
1362static void fuse_dio_lock(struct kiocb *iocb, struct iov_iter *from,
1363 bool *exclusive)
1364{
1365 struct inode *inode = file_inode(iocb->ki_filp);
205c1d80 1366 struct fuse_file *ff = iocb->ki_filp->private_data;
1367
1368 *exclusive = fuse_dio_wr_exclusive_lock(iocb, from);
1369 if (*exclusive) {
1370 inode_lock(inode);
1371 } else {
1372 inode_lock_shared(inode);
1373 /*
 1374 * New parallel dio allowed only if inode is not in caching
1375 * mode and denies new opens in caching mode. This check
1376 * should be performed only after taking shared inode lock.
1377 * Previous past eof check was without inode lock and might
1378 * have raced, so check it again.
9bbb6717 1379 */
205c1d80 1380 if (fuse_io_past_eof(iocb, from) ||
4a90451b 1381 fuse_file_uncached_io_start(inode, ff, NULL) != 0) {
9bbb6717
BS
1382 inode_unlock_shared(inode);
1383 inode_lock(inode);
1384 *exclusive = true;
1385 }
1386 }
1387}
1388
205c1d80 1389static void fuse_dio_unlock(struct kiocb *iocb, bool exclusive)
9bbb6717 1390{
205c1d80
AG
1391 struct inode *inode = file_inode(iocb->ki_filp);
1392 struct fuse_file *ff = iocb->ki_filp->private_data;
1393
9bbb6717
BS
1394 if (exclusive) {
1395 inode_unlock(inode);
1396 } else {
205c1d80
AG
1397 /* Allow opens in caching mode after last parallel dio end */
1398 fuse_file_uncached_io_end(inode, ff);
9bbb6717
BS
1399 inode_unlock_shared(inode);
1400 }
1401}
1402
55752a3a 1403static ssize_t fuse_cache_write_iter(struct kiocb *iocb, struct iov_iter *from)
ea9b9907
NP
1404{
1405 struct file *file = iocb->ki_filp;
1406 struct address_space *mapping = file->f_mapping;
ea9b9907
NP
1407 ssize_t written = 0;
1408 struct inode *inode = mapping->host;
2e3f7dd0 1409 ssize_t err, count;
8981bdfd 1410 struct fuse_conn *fc = get_fuse_conn(inode);
ea9b9907 1411
8981bdfd 1412 if (fc->writeback_cache) {
4d99ff8f 1413 /* Update size (EOF optimization) and mode (SUID clearing) */
c6c745b8
MS
1414 err = fuse_update_attributes(mapping->host, file,
1415 STATX_SIZE | STATX_MODE);
4d99ff8f
PE
1416 if (err)
1417 return err;
1418
8981bdfd 1419 if (fc->handle_killpriv_v2 &&
9452e93e
CB
1420 setattr_should_drop_suidgid(&nop_mnt_idmap,
1421 file_inode(file))) {
8981bdfd
VG
1422 goto writethrough;
1423 }
1424
84c3d55c 1425 return generic_file_write_iter(iocb, from);
4d99ff8f
PE
1426 }
1427
8981bdfd 1428writethrough:
5955102c 1429 inode_lock(inode);
ea9b9907 1430
2e3f7dd0 1431 err = count = generic_write_checks(iocb, from);
3309dd04 1432 if (err <= 0)
ea9b9907
NP
1433 goto out;
1434
2e3f7dd0
ZJ
1435 task_io_account_write(count);
1436
5fa8e0a1 1437 err = file_remove_privs(file);
ea9b9907
NP
1438 if (err)
1439 goto out;
1440
c3b2da31
JB
1441 err = file_update_time(file);
1442 if (err)
1443 goto out;
ea9b9907 1444
2ba48ce5 1445 if (iocb->ki_flags & IOCB_DIRECT) {
1af5bb49 1446 written = generic_file_direct_write(iocb, from);
84c3d55c 1447 if (written < 0 || !iov_iter_count(from))
4273b793 1448 goto out;
64d1b4dd
CH
1449 written = direct_write_fallback(iocb, from, written,
1450 fuse_perform_write(iocb, from));
4273b793 1451 } else {
596df33d 1452 written = fuse_perform_write(iocb, from);
4273b793 1453 }
ea9b9907 1454out:
5955102c 1455 inode_unlock(inode);
e1c0eecb
MS
1456 if (written > 0)
1457 written = generic_write_sync(iocb, written);
ea9b9907
NP
1458
1459 return written ? written : err;
1460}
1461
7c190c8b
MP
1462static inline unsigned long fuse_get_user_addr(const struct iov_iter *ii)
1463{
de4f5fed 1464 return (unsigned long)iter_iov(ii)->iov_base + ii->iov_offset;
7c190c8b
MP
1465}
1466
1467static inline size_t fuse_get_frag_size(const struct iov_iter *ii,
1468 size_t max_size)
1469{
1470 return min(iov_iter_single_seg_count(ii), max_size);
1471}
1472
45ac96ed
MS
1473static int fuse_get_user_pages(struct fuse_args_pages *ap, struct iov_iter *ii,
1474 size_t *nbytesp, int write,
1475 unsigned int max_pages)
413ef8cb 1476{
7c190c8b 1477 size_t nbytes = 0; /* # bytes already packed in req */
742f9927 1478 ssize_t ret = 0;
b98d023a 1479
f4975c67 1480 /* Special case for kernel I/O: can copy directly into the buffer */
00e23707 1481 if (iov_iter_is_kvec(ii)) {
7c190c8b
MP
1482 unsigned long user_addr = fuse_get_user_addr(ii);
1483 size_t frag_size = fuse_get_frag_size(ii, *nbytesp);
1484
f4975c67 1485 if (write)
45ac96ed 1486 ap->args.in_args[1].value = (void *) user_addr;
f4975c67 1487 else
45ac96ed 1488 ap->args.out_args[0].value = (void *) user_addr;
f4975c67 1489
b98d023a
MP
1490 iov_iter_advance(ii, frag_size);
1491 *nbytesp = frag_size;
f4975c67
MS
1492 return 0;
1493 }
413ef8cb 1494
45ac96ed 1495 while (nbytes < *nbytesp && ap->num_pages < max_pages) {
7c190c8b 1496 unsigned npages;
f67da30c 1497 size_t start;
1ef255e2 1498 ret = iov_iter_get_pages2(ii, &ap->pages[ap->num_pages],
2c80929c 1499 *nbytesp - nbytes,
45ac96ed 1500 max_pages - ap->num_pages,
c7f3888a 1501 &start);
7c190c8b 1502 if (ret < 0)
742f9927 1503 break;
7c190c8b 1504
c9c37e2e 1505 nbytes += ret;
7c190c8b 1506
c9c37e2e 1507 ret += start;
6c88632b 1508 npages = DIV_ROUND_UP(ret, PAGE_SIZE);
7c190c8b 1509
45ac96ed
MS
1510 ap->descs[ap->num_pages].offset = start;
1511 fuse_page_descs_length_init(ap->descs, ap->num_pages, npages);
7c190c8b 1512
45ac96ed
MS
1513 ap->num_pages += npages;
1514 ap->descs[ap->num_pages - 1].length -=
c9c37e2e 1515 (PAGE_SIZE - ret) & (PAGE_SIZE - 1);
7c190c8b 1516 }
f4975c67 1517
0c4bcfde 1518 ap->args.user_pages = true;
f4975c67 1519 if (write)
cabdb4fa 1520 ap->args.in_pages = true;
f4975c67 1521 else
cabdb4fa 1522 ap->args.out_pages = true;
f4975c67 1523
7c190c8b 1524 *nbytesp = nbytes;
f4975c67 1525
2c932d4c 1526 return ret < 0 ? ret : 0;
413ef8cb
MS
1527}
1528
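/*
 * Core of the direct I/O path (used for both O_DIRECT and FOPEN_DIRECT_IO):
 * split the iterator into requests of at most max_read/max_write bytes,
 * pin the user pages, and either perform the requests synchronously or,
 * for async I/O, submit them and let fuse_aio_complete_req() collect the
 * results.
 */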
d22a943f
AV
1529ssize_t fuse_direct_io(struct fuse_io_priv *io, struct iov_iter *iter,
1530 loff_t *ppos, int flags)
413ef8cb 1531{
ea8cd333
PE
1532 int write = flags & FUSE_DIO_WRITE;
1533 int cuse = flags & FUSE_DIO_CUSE;
e1c0eecb 1534 struct file *file = io->iocb->ki_filp;
80e4f252
HX
1535 struct address_space *mapping = file->f_mapping;
1536 struct inode *inode = mapping->host;
2106cb18 1537 struct fuse_file *ff = file->private_data;
fcee216b 1538 struct fuse_conn *fc = ff->fm->fc;
413ef8cb
MS
1539 size_t nmax = write ? fc->max_write : fc->max_read;
1540 loff_t pos = *ppos;
d22a943f 1541 size_t count = iov_iter_count(iter);
09cbfeaf
KS
1542 pgoff_t idx_from = pos >> PAGE_SHIFT;
1543 pgoff_t idx_to = (pos + count - 1) >> PAGE_SHIFT;
413ef8cb 1544 ssize_t res = 0;
742f9927 1545 int err = 0;
45ac96ed
MS
1546 struct fuse_io_args *ia;
1547 unsigned int max_pages;
80e4f252 1548 bool fopen_direct_io = ff->open_flags & FOPEN_DIRECT_IO;
248d86e8 1549
45ac96ed
MS
1550 max_pages = iov_iter_npages(iter, fc->max_pages);
1551 ia = fuse_io_alloc(io, max_pages);
1552 if (!ia)
1553 return -ENOMEM;
413ef8cb 1554
c55e0a55 1555 if (fopen_direct_io && fc->direct_io_allow_mmap) {
b5a2a3a0
HX
1556 res = filemap_write_and_wait_range(mapping, pos, pos + count - 1);
1557 if (res) {
1558 fuse_io_free(ia);
1559 return res;
1560 }
1561 }
ea8cd333
PE
1562 if (!cuse && fuse_range_is_writeback(inode, idx_from, idx_to)) {
1563 if (!write)
5955102c 1564 inode_lock(inode);
ea8cd333
PE
1565 fuse_sync_writes(inode);
1566 if (!write)
5955102c 1567 inode_unlock(inode);
ea8cd333
PE
1568 }
1569
80e4f252
HX
1570 if (fopen_direct_io && write) {
1571 res = invalidate_inode_pages2_range(mapping, idx_from, idx_to);
1572 if (res) {
1573 fuse_io_free(ia);
1574 return res;
1575 }
1576 }
1577
fcb14cb1 1578 io->should_dirty = !write && user_backed_iter(iter);
413ef8cb 1579 while (count) {
45ac96ed 1580 ssize_t nres;
2106cb18 1581 fl_owner_t owner = current->files;
f4975c67 1582 size_t nbytes = min(count, nmax);
45ac96ed
MS
1583
1584 err = fuse_get_user_pages(&ia->ap, iter, &nbytes, write,
1585 max_pages);
742f9927 1586 if (err && !nbytes)
413ef8cb 1587 break;
f4975c67 1588
4a2abf99 1589 if (write) {
45ac96ed 1590 if (!capable(CAP_FSETID))
10c52c84 1591 ia->write.in.write_flags |= FUSE_WRITE_KILL_SUIDGID;
4a2abf99 1592
45ac96ed 1593 nres = fuse_send_write(ia, pos, nbytes, owner);
4a2abf99 1594 } else {
45ac96ed 1595 nres = fuse_send_read(ia, pos, nbytes, owner);
4a2abf99 1596 }
2106cb18 1597
45ac96ed
MS
1598 if (!io->async || nres < 0) {
1599 fuse_release_user_pages(&ia->ap, io->should_dirty);
1600 fuse_io_free(ia);
1601 }
1602 ia = NULL;
1603 if (nres < 0) {
f658adee 1604 iov_iter_revert(iter, nbytes);
45ac96ed 1605 err = nres;
413ef8cb
MS
1606 break;
1607 }
45ac96ed
MS
1608 WARN_ON(nres > nbytes);
1609
413ef8cb
MS
1610 count -= nres;
1611 res += nres;
1612 pos += nres;
f658adee
MS
1613 if (nres != nbytes) {
1614 iov_iter_revert(iter, nbytes - nres);
413ef8cb 1615 break;
f658adee 1616 }
56cf34ff 1617 if (count) {
45ac96ed
MS
1618 max_pages = iov_iter_npages(iter, fc->max_pages);
1619 ia = fuse_io_alloc(io, max_pages);
1620 if (!ia)
56cf34ff
MS
1621 break;
1622 }
413ef8cb 1623 }
45ac96ed
MS
1624 if (ia)
1625 fuse_io_free(ia);
d09cb9d7 1626 if (res > 0)
413ef8cb 1627 *ppos = pos;
413ef8cb 1628
742f9927 1629 return res > 0 ? res : err;
413ef8cb 1630}
08cbf542 1631EXPORT_SYMBOL_GPL(fuse_direct_io);
413ef8cb 1632
36cf66ed 1633static ssize_t __fuse_direct_read(struct fuse_io_priv *io,
d22a943f
AV
1634 struct iov_iter *iter,
1635 loff_t *ppos)
413ef8cb 1636{
d09cb9d7 1637 ssize_t res;
e1c0eecb 1638 struct inode *inode = file_inode(io->iocb->ki_filp);
d09cb9d7 1639
d22a943f 1640 res = fuse_direct_io(io, iter, ppos, 0);
d09cb9d7 1641
9a2eb24d 1642 fuse_invalidate_atime(inode);
d09cb9d7
MS
1643
1644 return res;
413ef8cb
MS
1645}
1646
23c94e1c
MR
1647static ssize_t fuse_direct_IO(struct kiocb *iocb, struct iov_iter *iter);
1648
15316263 1649static ssize_t fuse_direct_read_iter(struct kiocb *iocb, struct iov_iter *to)
b98d023a 1650{
23c94e1c
MR
1651 ssize_t res;
1652
1653 if (!is_sync_kiocb(iocb) && iocb->ki_flags & IOCB_DIRECT) {
23c94e1c
MR
1654 res = fuse_direct_IO(iocb, to);
1655 } else {
1656 struct fuse_io_priv io = FUSE_IO_PRIV_SYNC(iocb);
1657
1658 res = __fuse_direct_read(&io, to, &iocb->ki_pos);
1659 }
1660
1661 return res;
b98d023a
MP
1662}
1663
15316263 1664static ssize_t fuse_direct_write_iter(struct kiocb *iocb, struct iov_iter *from)
4273b793 1665{
e1c0eecb
MS
1666 struct inode *inode = file_inode(iocb->ki_filp);
1667 struct fuse_io_priv io = FUSE_IO_PRIV_SYNC(iocb);
15316263 1668 ssize_t res;
9bbb6717 1669 bool exclusive;
4273b793 1670
9bbb6717 1671 fuse_dio_lock(iocb, from, &exclusive);
3309dd04 1672 res = generic_write_checks(iocb, from);
23c94e1c 1673 if (res > 0) {
2e3f7dd0 1674 task_io_account_write(res);
23c94e1c
MR
1675 if (!is_sync_kiocb(iocb) && iocb->ki_flags & IOCB_DIRECT) {
1676 res = fuse_direct_IO(iocb, from);
1677 } else {
1678 res = fuse_direct_io(&io, from, &iocb->ki_pos,
1679 FUSE_DIO_WRITE);
d347739a 1680 fuse_write_update_attr(inode, iocb->ki_pos, res);
23c94e1c
MR
1681 }
1682 }
205c1d80 1683 fuse_dio_unlock(iocb, exclusive);
4273b793
AA
1684
1685 return res;
1686}
1687
55752a3a
MS
1688static ssize_t fuse_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
1689{
2f7b6f5b
MS
1690 struct file *file = iocb->ki_filp;
1691 struct fuse_file *ff = file->private_data;
c2d0ad00 1692 struct inode *inode = file_inode(file);
2f7b6f5b 1693
5d069dbe 1694 if (fuse_is_bad(inode))
2f7b6f5b 1695 return -EIO;
55752a3a 1696
c2d0ad00
VG
1697 if (FUSE_IS_DAX(inode))
1698 return fuse_dax_read_iter(iocb, to);
1699
57e1176e
AG
1700 /* FOPEN_DIRECT_IO overrides FOPEN_PASSTHROUGH */
1701 if (ff->open_flags & FOPEN_DIRECT_IO)
55752a3a 1702 return fuse_direct_read_iter(iocb, to);
57e1176e
AG
1703 else if (fuse_file_passthrough(ff))
1704 return fuse_passthrough_read_iter(iocb, to);
1705 else
1706 return fuse_cache_read_iter(iocb, to);
55752a3a
MS
1707}
1708
1709static ssize_t fuse_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
1710{
2f7b6f5b
MS
1711 struct file *file = iocb->ki_filp;
1712 struct fuse_file *ff = file->private_data;
c2d0ad00 1713 struct inode *inode = file_inode(file);
2f7b6f5b 1714
5d069dbe 1715 if (fuse_is_bad(inode))
2f7b6f5b 1716 return -EIO;
55752a3a 1717
c2d0ad00
VG
1718 if (FUSE_IS_DAX(inode))
1719 return fuse_dax_write_iter(iocb, from);
1720
57e1176e
AG
1721 /* FOPEN_DIRECT_IO overrides FOPEN_PASSTHROUGH */
1722 if (ff->open_flags & FOPEN_DIRECT_IO)
55752a3a 1723 return fuse_direct_write_iter(iocb, from);
57e1176e
AG
1724 else if (fuse_file_passthrough(ff))
1725 return fuse_passthrough_write_iter(iocb, from);
1726 else
1727 return fuse_cache_write_iter(iocb, from);
55752a3a
MS
1728}
1729
5ca73468
AG
1730static ssize_t fuse_splice_read(struct file *in, loff_t *ppos,
1731 struct pipe_inode_info *pipe, size_t len,
1732 unsigned int flags)
1733{
1734 struct fuse_file *ff = in->private_data;
1735
1736 /* FOPEN_DIRECT_IO overrides FOPEN_PASSTHROUGH */
1737 if (fuse_file_passthrough(ff) && !(ff->open_flags & FOPEN_DIRECT_IO))
1738 return fuse_passthrough_splice_read(in, ppos, pipe, len, flags);
1739 else
1740 return filemap_splice_read(in, ppos, pipe, len, flags);
1741}
1742
1743static ssize_t fuse_splice_write(struct pipe_inode_info *pipe, struct file *out,
1744 loff_t *ppos, size_t len, unsigned int flags)
1745{
1746 struct fuse_file *ff = out->private_data;
1747
1748 /* FOPEN_DIRECT_IO overrides FOPEN_PASSTHROUGH */
1749 if (fuse_file_passthrough(ff) && !(ff->open_flags & FOPEN_DIRECT_IO))
1750 return fuse_passthrough_splice_write(pipe, out, ppos, len, flags);
1751 else
1752 return iter_file_splice_write(pipe, out, ppos, len, flags);
1753}
1754
33826ebb 1755static void fuse_writepage_free(struct fuse_writepage_args *wpa)
b6aeaded 1756{
33826ebb 1757 struct fuse_args_pages *ap = &wpa->ia.ap;
385b1268
PE
1758 int i;
1759
660585b5
MS
1760 if (wpa->bucket)
1761 fuse_sync_bucket_dec(wpa->bucket);
1762
33826ebb
MS
1763 for (i = 0; i < ap->num_pages; i++)
1764 __free_page(ap->pages[i]);
1765
1766 if (wpa->ia.ff)
e26ee4ef 1767 fuse_file_put(wpa->ia.ff, false);
8b284dc4 1768
33826ebb
MS
1769 kfree(ap->pages);
1770 kfree(wpa);
3be5a52b
MS
1771}
1772
fcee216b 1773static void fuse_writepage_finish(struct fuse_mount *fm,
33826ebb 1774 struct fuse_writepage_args *wpa)
3be5a52b 1775{
33826ebb
MS
1776 struct fuse_args_pages *ap = &wpa->ia.ap;
1777 struct inode *inode = wpa->inode;
3be5a52b 1778 struct fuse_inode *fi = get_fuse_inode(inode);
de1414a6 1779 struct backing_dev_info *bdi = inode_to_bdi(inode);
385b1268 1780 int i;
3be5a52b 1781
33826ebb 1782 for (i = 0; i < ap->num_pages; i++) {
93f78d88 1783 dec_wb_stat(&bdi->wb, WB_WRITEBACK);
33826ebb 1784 dec_node_page_state(ap->pages[i], NR_WRITEBACK_TEMP);
93f78d88 1785 wb_writeout_inc(&bdi->wb);
385b1268 1786 }
3be5a52b
MS
1787 wake_up(&fi->page_waitq);
1788}
1789
f15ecfef 1790/* Called under fi->lock, may release and reacquire it */
fcee216b 1791static void fuse_send_writepage(struct fuse_mount *fm,
33826ebb 1792 struct fuse_writepage_args *wpa, loff_t size)
f15ecfef
KT
1793__releases(fi->lock)
1794__acquires(fi->lock)
3be5a52b 1795{
33826ebb
MS
1796 struct fuse_writepage_args *aux, *next;
1797 struct fuse_inode *fi = get_fuse_inode(wpa->inode);
1798 struct fuse_write_in *inarg = &wpa->ia.write.in;
1799 struct fuse_args *args = &wpa->ia.ap.args;
1800 __u64 data_size = wpa->ia.ap.num_pages * PAGE_SIZE;
1801 int err;
3be5a52b 1802
33826ebb 1803 fi->writectr++;
385b1268
PE
1804 if (inarg->offset + data_size <= size) {
1805 inarg->size = data_size;
3be5a52b 1806 } else if (inarg->offset < size) {
385b1268 1807 inarg->size = size - inarg->offset;
3be5a52b
MS
1808 } else {
1809 /* Got truncated off completely */
1810 goto out_free;
b6aeaded 1811 }
3be5a52b 1812
33826ebb
MS
1813 args->in_args[1].size = inarg->size;
1814 args->force = true;
1815 args->nocreds = true;
1816
fcee216b 1817 err = fuse_simple_background(fm, args, GFP_ATOMIC);
33826ebb
MS
1818 if (err == -ENOMEM) {
1819 spin_unlock(&fi->lock);
fcee216b 1820 err = fuse_simple_background(fm, args, GFP_NOFS | __GFP_NOFAIL);
33826ebb
MS
1821 spin_lock(&fi->lock);
1822 }
1823
f15ecfef 1824 /* Fails on broken connection only */
33826ebb 1825 if (unlikely(err))
f15ecfef
KT
1826 goto out_free;
1827
3be5a52b
MS
1828 return;
1829
1830 out_free:
33826ebb 1831 fi->writectr--;
69a6487a 1832 rb_erase(&wpa->writepages_entry, &fi->writepages);
fcee216b 1833 fuse_writepage_finish(fm, wpa);
f15ecfef 1834 spin_unlock(&fi->lock);
e2653bd5
MS
1835
1836 /* After fuse_writepage_finish() aux request list is private */
33826ebb
MS
1837 for (aux = wpa->next; aux; aux = next) {
1838 next = aux->next;
1839 aux->next = NULL;
1840 fuse_writepage_free(aux);
e2653bd5
MS
1841 }
1842
33826ebb 1843 fuse_writepage_free(wpa);
f15ecfef 1844 spin_lock(&fi->lock);
b6aeaded
MS
1845}
1846
3be5a52b
MS
1847/*
1848 * If fi->writectr is positive (no truncate or fsync going on) send
1849 * all queued writepage requests.
1850 *
f15ecfef 1851 * Called with fi->lock held
3be5a52b
MS
1852 */
1853void fuse_flush_writepages(struct inode *inode)
f15ecfef
KT
1854__releases(fi->lock)
1855__acquires(fi->lock)
b6aeaded 1856{
fcee216b 1857 struct fuse_mount *fm = get_fuse_mount(inode);
3be5a52b 1858 struct fuse_inode *fi = get_fuse_inode(inode);
9de5be06 1859 loff_t crop = i_size_read(inode);
33826ebb 1860 struct fuse_writepage_args *wpa;
3be5a52b
MS
1861
1862 while (fi->writectr >= 0 && !list_empty(&fi->queued_writes)) {
33826ebb
MS
1863 wpa = list_entry(fi->queued_writes.next,
1864 struct fuse_writepage_args, queue_entry);
1865 list_del_init(&wpa->queue_entry);
fcee216b 1866 fuse_send_writepage(fm, wpa, crop);
3be5a52b
MS
1867 }
1868}
1869
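/*
 * Editor's note, inferred from the code below: insert a writepage request
 * into the per-inode rb-tree, ordered by page index.  If the request's page
 * range overlaps an entry already in the tree, nothing is inserted and the
 * overlapping entry is returned instead; on successful insertion NULL is
 * returned.
 */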
c146024e
MS
1870static struct fuse_writepage_args *fuse_insert_writeback(struct rb_root *root,
1871 struct fuse_writepage_args *wpa)
6b2fb799
MP
1872{
1873 pgoff_t idx_from = wpa->ia.write.in.offset >> PAGE_SHIFT;
1874 pgoff_t idx_to = idx_from + wpa->ia.ap.num_pages - 1;
1875 struct rb_node **p = &root->rb_node;
1876 struct rb_node *parent = NULL;
1877
1878 WARN_ON(!wpa->ia.ap.num_pages);
1879 while (*p) {
1880 struct fuse_writepage_args *curr;
1881 pgoff_t curr_index;
1882
1883 parent = *p;
1884 curr = rb_entry(parent, struct fuse_writepage_args,
1885 writepages_entry);
1886 WARN_ON(curr->inode != wpa->inode);
1887 curr_index = curr->ia.write.in.offset >> PAGE_SHIFT;
1888
1889 if (idx_from >= curr_index + curr->ia.ap.num_pages)
1890 p = &(*p)->rb_right;
1891 else if (idx_to < curr_index)
1892 p = &(*p)->rb_left;
1893 else
c146024e 1894 return curr;
6b2fb799
MP
1895 }
1896
1897 rb_link_node(&wpa->writepages_entry, parent, p);
1898 rb_insert_color(&wpa->writepages_entry, root);
c146024e
MS
1899 return NULL;
1900}
1901
1902static void tree_insert(struct rb_root *root, struct fuse_writepage_args *wpa)
1903{
1904 WARN_ON(fuse_insert_writeback(root, wpa));
6b2fb799
MP
1905}
1906
fcee216b 1907static void fuse_writepage_end(struct fuse_mount *fm, struct fuse_args *args,
33826ebb 1908 int error)
3be5a52b 1909{
33826ebb
MS
1910 struct fuse_writepage_args *wpa =
1911 container_of(args, typeof(*wpa), ia.ap.args);
1912 struct inode *inode = wpa->inode;
3be5a52b 1913 struct fuse_inode *fi = get_fuse_inode(inode);
3466958b 1914 struct fuse_conn *fc = get_fuse_conn(inode);
3be5a52b 1915
33826ebb 1916 mapping_set_error(inode->i_mapping, error);
3466958b
VG
1917 /*
 1918 * A writeback finished and this might have updated mtime/ctime on the
 1919 * server, making local mtime/ctime stale. Hence invalidate attrs.
1920 * Do this only if writeback_cache is not enabled. If writeback_cache
1921 * is enabled, we trust local ctime/mtime.
1922 */
1923 if (!fc->writeback_cache)
fa5eee57 1924 fuse_invalidate_attr_mask(inode, FUSE_STATX_MODIFY);
f15ecfef 1925 spin_lock(&fi->lock);
69a6487a 1926 rb_erase(&wpa->writepages_entry, &fi->writepages);
33826ebb 1927 while (wpa->next) {
fcee216b 1928 struct fuse_mount *fm = get_fuse_mount(inode);
33826ebb
MS
1929 struct fuse_write_in *inarg = &wpa->ia.write.in;
1930 struct fuse_writepage_args *next = wpa->next;
1931
1932 wpa->next = next->next;
1933 next->next = NULL;
1934 next->ia.ff = fuse_file_get(wpa->ia.ff);
6b2fb799 1935 tree_insert(&fi->writepages, next);
6eaf4782
MP
1936
1937 /*
1938 * Skip fuse_flush_writepages() to make it easy to crop requests
1939 * based on primary request size.
1940 *
1941 * 1st case (trivial): there are no concurrent activities using
 1942 * fuse_set/release_nowrite. Then we're on the safe side because
1943 * fuse_flush_writepages() would call fuse_send_writepage()
1944 * anyway.
1945 *
1946 * 2nd case: someone called fuse_set_nowrite and it is waiting
1947 * now for completion of all in-flight requests. This happens
1948 * rarely and no more than once per page, so this should be
1949 * okay.
1950 *
1951 * 3rd case: someone (e.g. fuse_do_setattr()) is in the middle
1952 * of fuse_set_nowrite..fuse_release_nowrite section. The fact
1953 * that fuse_set_nowrite returned implies that all in-flight
1954 * requests were completed along with all of their secondary
1955 * requests. Further primary requests are blocked by negative
1956 * writectr. Hence there cannot be any in-flight requests and
1957 * no invocations of fuse_writepage_end() while we're in
1958 * fuse_set_nowrite..fuse_release_nowrite section.
1959 */
fcee216b 1960 fuse_send_writepage(fm, next, inarg->offset + inarg->size);
8b284dc4 1961 }
3be5a52b 1962 fi->writectr--;
fcee216b 1963 fuse_writepage_finish(fm, wpa);
f15ecfef 1964 spin_unlock(&fi->lock);
33826ebb 1965 fuse_writepage_free(wpa);
3be5a52b
MS
1966}
1967
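/*
 * Editor's note, summarizing the helpers below: grab a reference to the
 * first entry on fi->write_files (a fuse_file usable for writeback), or
 * return NULL if the list is empty.  fuse_write_file_get() additionally
 * warns when no such file exists.
 */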
a9667ac8 1968static struct fuse_file *__fuse_write_file_get(struct fuse_inode *fi)
adcadfa8 1969{
84840efc 1970 struct fuse_file *ff;
adcadfa8 1971
f15ecfef 1972 spin_lock(&fi->lock);
84840efc
MS
1973 ff = list_first_entry_or_null(&fi->write_files, struct fuse_file,
1974 write_entry);
1975 if (ff)
72523425 1976 fuse_file_get(ff);
f15ecfef 1977 spin_unlock(&fi->lock);
adcadfa8
PE
1978
1979 return ff;
1980}
1981
a9667ac8 1982static struct fuse_file *fuse_write_file_get(struct fuse_inode *fi)
1e18bda8 1983{
a9667ac8 1984 struct fuse_file *ff = __fuse_write_file_get(fi);
1e18bda8
MS
1985 WARN_ON(!ff);
1986 return ff;
1987}
1988
1989int fuse_write_inode(struct inode *inode, struct writeback_control *wbc)
1990{
1e18bda8
MS
1991 struct fuse_inode *fi = get_fuse_inode(inode);
1992 struct fuse_file *ff;
1993 int err;
1994
5c791fe1
MS
1995 /*
1996 * Inode is always written before the last reference is dropped and
1997 * hence this should not be reached from reclaim.
1998 *
1999 * Writing back the inode from reclaim can deadlock if the request
2000 * processing itself needs an allocation. Allocations triggering
2001 * reclaim while serving a request can't be prevented, because it can
2002 * involve any number of unrelated userspace processes.
2003 */
2004 WARN_ON(wbc->for_reclaim);
2005
a9667ac8 2006 ff = __fuse_write_file_get(fi);
ab9e13f7 2007 err = fuse_flush_times(inode, ff);
1e18bda8 2008 if (ff)
e26ee4ef 2009 fuse_file_put(ff, false);
1e18bda8
MS
2010
2011 return err;
2012}
2013
33826ebb
MS
2014static struct fuse_writepage_args *fuse_writepage_args_alloc(void)
2015{
2016 struct fuse_writepage_args *wpa;
2017 struct fuse_args_pages *ap;
2018
2019 wpa = kzalloc(sizeof(*wpa), GFP_NOFS);
2020 if (wpa) {
2021 ap = &wpa->ia.ap;
2022 ap->num_pages = 0;
2023 ap->pages = fuse_pages_alloc(1, GFP_NOFS, &ap->descs);
2024 if (!ap->pages) {
2025 kfree(wpa);
2026 wpa = NULL;
2027 }
2028 }
2029 return wpa;
2030
2031}
2032
660585b5
MS
2033static void fuse_writepage_add_to_bucket(struct fuse_conn *fc,
2034 struct fuse_writepage_args *wpa)
2035{
2036 if (!fc->sync_fs)
2037 return;
2038
2039 rcu_read_lock();
2040 /* Prevent resurrection of dead bucket in unlikely race with syncfs */
2041 do {
2042 wpa->bucket = rcu_dereference(fc->curr_bucket);
2043 } while (unlikely(!atomic_inc_not_zero(&wpa->bucket->count)));
2044 rcu_read_unlock();
2045}
2046
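/*
 * Editor's note on the function below: it writes back a single locked folio
 * by copying its contents into a freshly allocated temporary folio, which is
 * what actually gets sent to userspace.  Writeback on the original folio is
 * ended immediately, while the temporary page is accounted as
 * NR_WRITEBACK_TEMP until the server's reply lets it be freed.
 */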
e0887e09 2047static int fuse_writepage_locked(struct folio *folio)
3be5a52b 2048{
e0887e09 2049 struct address_space *mapping = folio->mapping;
3be5a52b
MS
2050 struct inode *inode = mapping->host;
2051 struct fuse_conn *fc = get_fuse_conn(inode);
2052 struct fuse_inode *fi = get_fuse_inode(inode);
33826ebb
MS
2053 struct fuse_writepage_args *wpa;
2054 struct fuse_args_pages *ap;
e0887e09 2055 struct folio *tmp_folio;
72523425 2056 int error = -ENOMEM;
3be5a52b 2057
e0887e09 2058 folio_start_writeback(folio);
3be5a52b 2059
33826ebb
MS
2060 wpa = fuse_writepage_args_alloc();
2061 if (!wpa)
3be5a52b 2062 goto err;
33826ebb 2063 ap = &wpa->ia.ap;
3be5a52b 2064
e0887e09
MWO
2065 tmp_folio = folio_alloc(GFP_NOFS | __GFP_HIGHMEM, 0);
2066 if (!tmp_folio)
3be5a52b
MS
2067 goto err_free;
2068
72523425 2069 error = -EIO;
a9667ac8 2070 wpa->ia.ff = fuse_write_file_get(fi);
33826ebb 2071 if (!wpa->ia.ff)
27f1b363 2072 goto err_nofile;
72523425 2073
660585b5 2074 fuse_writepage_add_to_bucket(fc, wpa);
e0887e09 2075 fuse_write_args_fill(&wpa->ia, wpa->ia.ff, folio_pos(folio), 0);
3be5a52b 2076
e0887e09 2077 folio_copy(tmp_folio, folio);
33826ebb
MS
2078 wpa->ia.write.in.write_flags |= FUSE_WRITE_CACHE;
2079 wpa->next = NULL;
2080 ap->args.in_pages = true;
2081 ap->num_pages = 1;
e0887e09 2082 ap->pages[0] = &tmp_folio->page;
33826ebb
MS
2083 ap->descs[0].offset = 0;
2084 ap->descs[0].length = PAGE_SIZE;
2085 ap->args.end = fuse_writepage_end;
2086 wpa->inode = inode;
3be5a52b 2087
93f78d88 2088 inc_wb_stat(&inode_to_bdi(inode)->wb, WB_WRITEBACK);
e0887e09 2089 node_stat_add_folio(tmp_folio, NR_WRITEBACK_TEMP);
3be5a52b 2090
f15ecfef 2091 spin_lock(&fi->lock);
6b2fb799 2092 tree_insert(&fi->writepages, wpa);
33826ebb 2093 list_add_tail(&wpa->queue_entry, &fi->queued_writes);
3be5a52b 2094 fuse_flush_writepages(inode);
f15ecfef 2095 spin_unlock(&fi->lock);
3be5a52b 2096
e0887e09 2097 folio_end_writeback(folio);
4a4ac4eb 2098
3be5a52b
MS
2099 return 0;
2100
27f1b363 2101err_nofile:
e0887e09 2102 folio_put(tmp_folio);
3be5a52b 2103err_free:
33826ebb 2104 kfree(wpa);
3be5a52b 2105err:
e0887e09
MWO
2106 mapping_set_error(folio->mapping, error);
2107 folio_end_writeback(folio);
72523425 2108 return error;
3be5a52b
MS
2109}
2110
26d614df 2111struct fuse_fill_wb_data {
33826ebb 2112 struct fuse_writepage_args *wpa;
26d614df
PE
2113 struct fuse_file *ff;
2114 struct inode *inode;
2d033eaa 2115 struct page **orig_pages;
33826ebb 2116 unsigned int max_pages;
26d614df
PE
2117};
2118
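/*
 * Editor's note on fuse_pages_realloc() below: it grows the pages[]/descs[]
 * arrays of the writepage request being assembled.  The capacity at least
 * doubles (jumping straight to FUSE_DEFAULT_MAX_PAGES_PER_REQ on the first
 * growth) but never exceeds fc->max_pages.  Returning false means the larger
 * arrays could not be allocated, in which case the caller sends the request
 * with the pages it already has.
 */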
33826ebb
MS
2119static bool fuse_pages_realloc(struct fuse_fill_wb_data *data)
2120{
2121 struct fuse_args_pages *ap = &data->wpa->ia.ap;
2122 struct fuse_conn *fc = get_fuse_conn(data->inode);
2123 struct page **pages;
2124 struct fuse_page_desc *descs;
2125 unsigned int npages = min_t(unsigned int,
2126 max_t(unsigned int, data->max_pages * 2,
2127 FUSE_DEFAULT_MAX_PAGES_PER_REQ),
2128 fc->max_pages);
2129 WARN_ON(npages <= data->max_pages);
2130
2131 pages = fuse_pages_alloc(npages, GFP_NOFS, &descs);
2132 if (!pages)
2133 return false;
2134
2135 memcpy(pages, ap->pages, sizeof(struct page *) * ap->num_pages);
2136 memcpy(descs, ap->descs, sizeof(struct fuse_page_desc) * ap->num_pages);
2137 kfree(ap->pages);
2138 ap->pages = pages;
2139 ap->descs = descs;
2140 data->max_pages = npages;
2141
2142 return true;
2143}
2144
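/*
 * Editor's note on fuse_writepages_send() below: it queues the request
 * assembled by fuse_writepages_fill() by taking a reference on the writing
 * fuse_file, adding the request to fi->queued_writes and kicking
 * fuse_flush_writepages(), then ends writeback on the original pages (the
 * temporary copies keep the data alive until userspace replies).
 */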
26d614df
PE
2145static void fuse_writepages_send(struct fuse_fill_wb_data *data)
2146{
33826ebb 2147 struct fuse_writepage_args *wpa = data->wpa;
26d614df 2148 struct inode *inode = data->inode;
26d614df 2149 struct fuse_inode *fi = get_fuse_inode(inode);
33826ebb 2150 int num_pages = wpa->ia.ap.num_pages;
2d033eaa 2151 int i;
26d614df 2152
33826ebb 2153 wpa->ia.ff = fuse_file_get(data->ff);
f15ecfef 2154 spin_lock(&fi->lock);
33826ebb 2155 list_add_tail(&wpa->queue_entry, &fi->queued_writes);
26d614df 2156 fuse_flush_writepages(inode);
f15ecfef 2157 spin_unlock(&fi->lock);
2d033eaa
MP
2158
2159 for (i = 0; i < num_pages; i++)
2160 end_page_writeback(data->orig_pages[i]);
26d614df
PE
2161}
2162
7f305ca1 2163/*
c146024e
MS
2164 * Check under fi->lock if the page is under writeback, and insert it onto the
 2165 * rb_tree if not. Otherwise iterate the auxiliary write requests to see if there's
419234d5
MS
2166 * one already added for a page at this offset. If there's none, then insert
2167 * this new request onto the auxiliary list, otherwise reuse the existing one by
c146024e 2168 * swapping the new temp page with the old one.
7f305ca1 2169 */
c146024e
MS
2170static bool fuse_writepage_add(struct fuse_writepage_args *new_wpa,
2171 struct page *page)
8b284dc4 2172{
33826ebb
MS
2173 struct fuse_inode *fi = get_fuse_inode(new_wpa->inode);
2174 struct fuse_writepage_args *tmp;
2175 struct fuse_writepage_args *old_wpa;
2176 struct fuse_args_pages *new_ap = &new_wpa->ia.ap;
8b284dc4 2177
33826ebb 2178 WARN_ON(new_ap->num_pages != 0);
c146024e 2179 new_ap->num_pages = 1;
8b284dc4 2180
f15ecfef 2181 spin_lock(&fi->lock);
c146024e 2182 old_wpa = fuse_insert_writeback(&fi->writepages, new_wpa);
33826ebb 2183 if (!old_wpa) {
f15ecfef 2184 spin_unlock(&fi->lock);
c146024e 2185 return true;
f6011081 2186 }
8b284dc4 2187
33826ebb 2188 for (tmp = old_wpa->next; tmp; tmp = tmp->next) {
7f305ca1
MS
2189 pgoff_t curr_index;
2190
33826ebb
MS
2191 WARN_ON(tmp->inode != new_wpa->inode);
2192 curr_index = tmp->ia.write.in.offset >> PAGE_SHIFT;
419234d5 2193 if (curr_index == page->index) {
33826ebb
MS
2194 WARN_ON(tmp->ia.ap.num_pages != 1);
2195 swap(tmp->ia.ap.pages[0], new_ap->pages[0]);
7f305ca1 2196 break;
8b284dc4
MS
2197 }
2198 }
2199
7f305ca1 2200 if (!tmp) {
33826ebb
MS
2201 new_wpa->next = old_wpa->next;
2202 old_wpa->next = new_wpa;
7f305ca1 2203 }
41b6e41f 2204
f15ecfef 2205 spin_unlock(&fi->lock);
7f305ca1
MS
2206
2207 if (tmp) {
33826ebb 2208 struct backing_dev_info *bdi = inode_to_bdi(new_wpa->inode);
8b284dc4 2209
93f78d88 2210 dec_wb_stat(&bdi->wb, WB_WRITEBACK);
33826ebb 2211 dec_node_page_state(new_ap->pages[0], NR_WRITEBACK_TEMP);
93f78d88 2212 wb_writeout_inc(&bdi->wb);
33826ebb 2213 fuse_writepage_free(new_wpa);
8b284dc4 2214 }
7f305ca1 2215
c146024e 2216 return false;
8b284dc4
MS
2217}
2218
6ddf3af9
MS
2219static bool fuse_writepage_need_send(struct fuse_conn *fc, struct page *page,
2220 struct fuse_args_pages *ap,
2221 struct fuse_fill_wb_data *data)
2222{
2223 WARN_ON(!ap->num_pages);
2224
2225 /*
 2226 * Being under writeback is unlikely but possible. For example, a direct
 2227 * read into an mmapped fuse file will set the page dirty twice: once when
 2228 * the pages are faulted with get_user_pages(), and then again after the
 2229 * read has completed.
2230 */
2231 if (fuse_page_is_writeback(data->inode, page->index))
2232 return true;
2233
2234 /* Reached max pages */
2235 if (ap->num_pages == fc->max_pages)
2236 return true;
2237
2238 /* Reached max write bytes */
2239 if ((ap->num_pages + 1) * PAGE_SIZE > fc->max_write)
2240 return true;
2241
2242 /* Discontinuity */
2243 if (data->orig_pages[ap->num_pages - 1]->index + 1 != page->index)
2244 return true;
2245
2246 /* Need to grow the pages array? If so, did the expansion fail? */
2247 if (ap->num_pages == data->max_pages && !fuse_pages_realloc(data))
2248 return true;
2249
2250 return false;
2251}
2252
d585bdbe 2253static int fuse_writepages_fill(struct folio *folio,
26d614df
PE
2254 struct writeback_control *wbc, void *_data)
2255{
2256 struct fuse_fill_wb_data *data = _data;
33826ebb
MS
2257 struct fuse_writepage_args *wpa = data->wpa;
2258 struct fuse_args_pages *ap = &wpa->ia.ap;
26d614df 2259 struct inode *inode = data->inode;
f15ecfef 2260 struct fuse_inode *fi = get_fuse_inode(inode);
26d614df
PE
2261 struct fuse_conn *fc = get_fuse_conn(inode);
2262 struct page *tmp_page;
2263 int err;
2264
2265 if (!data->ff) {
2266 err = -EIO;
a9667ac8 2267 data->ff = fuse_write_file_get(fi);
26d614df
PE
2268 if (!data->ff)
2269 goto out_unlock;
2270 }
2271
d585bdbe 2272 if (wpa && fuse_writepage_need_send(fc, &folio->page, ap, data)) {
8b284dc4 2273 fuse_writepages_send(data);
33826ebb 2274 data->wpa = NULL;
26d614df 2275 }
e52a8250 2276
26d614df
PE
2277 err = -ENOMEM;
2278 tmp_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
2279 if (!tmp_page)
2280 goto out_unlock;
2281
2282 /*
2283 * The page must not be redirtied until the writeout is completed
2284 * (i.e. userspace has sent a reply to the write request). Otherwise
2285 * there could be more than one temporary page instance for each real
2286 * page.
2287 *
2288 * This is ensured by holding the page lock in page_mkwrite() while
2289 * checking fuse_page_is_writeback(). We already hold the page lock
2290 * since clear_page_dirty_for_io() and keep it held until we add the
33826ebb 2291 * request to the fi->writepages list and increment ap->num_pages.
26d614df
PE
2292 * After this fuse_page_is_writeback() will indicate that the page is
2293 * under writeback, so we can release the page lock.
2294 */
33826ebb 2295 if (data->wpa == NULL) {
26d614df 2296 err = -ENOMEM;
33826ebb
MS
2297 wpa = fuse_writepage_args_alloc();
2298 if (!wpa) {
26d614df
PE
2299 __free_page(tmp_page);
2300 goto out_unlock;
2301 }
660585b5
MS
2302 fuse_writepage_add_to_bucket(fc, wpa);
2303
33826ebb 2304 data->max_pages = 1;
26d614df 2305
33826ebb 2306 ap = &wpa->ia.ap;
d585bdbe 2307 fuse_write_args_fill(&wpa->ia, data->ff, folio_pos(folio), 0);
33826ebb
MS
2308 wpa->ia.write.in.write_flags |= FUSE_WRITE_CACHE;
2309 wpa->next = NULL;
2310 ap->args.in_pages = true;
2311 ap->args.end = fuse_writepage_end;
2312 ap->num_pages = 0;
2313 wpa->inode = inode;
26d614df 2314 }
d585bdbe 2315 folio_start_writeback(folio);
26d614df 2316
d585bdbe 2317 copy_highpage(tmp_page, &folio->page);
33826ebb
MS
2318 ap->pages[ap->num_pages] = tmp_page;
2319 ap->descs[ap->num_pages].offset = 0;
2320 ap->descs[ap->num_pages].length = PAGE_SIZE;
d585bdbe 2321 data->orig_pages[ap->num_pages] = &folio->page;
26d614df 2322
93f78d88 2323 inc_wb_stat(&inode_to_bdi(inode)->wb, WB_WRITEBACK);
11fb9989 2324 inc_node_page_state(tmp_page, NR_WRITEBACK_TEMP);
8b284dc4
MS
2325
2326 err = 0;
c146024e
MS
2327 if (data->wpa) {
2328 /*
2329 * Protected by fi->lock against concurrent access by
2330 * fuse_page_is_writeback().
2331 */
2332 spin_lock(&fi->lock);
2333 ap->num_pages++;
2334 spin_unlock(&fi->lock);
d585bdbe 2335 } else if (fuse_writepage_add(wpa, &folio->page)) {
c146024e
MS
2336 data->wpa = wpa;
2337 } else {
d585bdbe 2338 folio_end_writeback(folio);
8b284dc4 2339 }
26d614df 2340out_unlock:
d585bdbe 2341 folio_unlock(folio);
26d614df
PE
2342
2343 return err;
2344}
2345
2346static int fuse_writepages(struct address_space *mapping,
2347 struct writeback_control *wbc)
2348{
2349 struct inode *inode = mapping->host;
5da784cc 2350 struct fuse_conn *fc = get_fuse_conn(inode);
26d614df
PE
2351 struct fuse_fill_wb_data data;
2352 int err;
2353
2354 err = -EIO;
5d069dbe 2355 if (fuse_is_bad(inode))
26d614df
PE
2356 goto out;
2357
670d21c6
N
2358 if (wbc->sync_mode == WB_SYNC_NONE &&
2359 fc->num_background >= fc->congestion_threshold)
2360 return 0;
2361
26d614df 2362 data.inode = inode;
33826ebb 2363 data.wpa = NULL;
26d614df
PE
2364 data.ff = NULL;
2365
2d033eaa 2366 err = -ENOMEM;
5da784cc 2367 data.orig_pages = kcalloc(fc->max_pages,
f2b3455e 2368 sizeof(struct page *),
2d033eaa
MP
2369 GFP_NOFS);
2370 if (!data.orig_pages)
2371 goto out;
2372
26d614df 2373 err = write_cache_pages(mapping, wbc, fuse_writepages_fill, &data);
33826ebb 2374 if (data.wpa) {
33826ebb 2375 WARN_ON(!data.wpa->ia.ap.num_pages);
26d614df 2376 fuse_writepages_send(&data);
26d614df
PE
2377 }
2378 if (data.ff)
e26ee4ef 2379 fuse_file_put(data.ff, false);
2d033eaa
MP
2380
2381 kfree(data.orig_pages);
26d614df
PE
2382out:
2383 return err;
2384}
2385
6b12c1b3
PE
2386/*
 2387 * It would be worthwhile to ensure that space is reserved on disk for the
 2388 * write, but how to do that without killing performance needs more thought.
2389 */
2390static int fuse_write_begin(struct file *file, struct address_space *mapping,
9d6b0cd7 2391 loff_t pos, unsigned len, struct page **pagep, void **fsdata)
6b12c1b3 2392{
09cbfeaf 2393 pgoff_t index = pos >> PAGE_SHIFT;
a455589f 2394 struct fuse_conn *fc = get_fuse_conn(file_inode(file));
6b12c1b3
PE
2395 struct page *page;
2396 loff_t fsize;
2397 int err = -ENOMEM;
2398
2399 WARN_ON(!fc->writeback_cache);
2400
b7446e7c 2401 page = grab_cache_page_write_begin(mapping, index);
6b12c1b3
PE
2402 if (!page)
2403 goto error;
2404
2405 fuse_wait_on_page_writeback(mapping->host, page->index);
2406
09cbfeaf 2407 if (PageUptodate(page) || len == PAGE_SIZE)
6b12c1b3
PE
2408 goto success;
2409 /*
 2410 * Check if the start of this page comes after the end of file, in which
2411 * case the readpage can be optimized away.
2412 */
2413 fsize = i_size_read(mapping->host);
09cbfeaf
KS
2414 if (fsize <= (pos & PAGE_MASK)) {
2415 size_t off = pos & ~PAGE_MASK;
6b12c1b3
PE
2416 if (off)
2417 zero_user_segment(page, 0, off);
2418 goto success;
2419 }
2420 err = fuse_do_readpage(file, page);
2421 if (err)
2422 goto cleanup;
2423success:
2424 *pagep = page;
2425 return 0;
2426
2427cleanup:
2428 unlock_page(page);
09cbfeaf 2429 put_page(page);
6b12c1b3
PE
2430error:
2431 return err;
2432}
2433
2434static int fuse_write_end(struct file *file, struct address_space *mapping,
2435 loff_t pos, unsigned len, unsigned copied,
2436 struct page *page, void *fsdata)
2437{
2438 struct inode *inode = page->mapping->host;
2439
59c3b76c
MS
2440 /* Haven't copied anything? Skip zeroing, size extending, dirtying. */
2441 if (!copied)
2442 goto unlock;
2443
8c56e03d 2444 pos += copied;
6b12c1b3
PE
2445 if (!PageUptodate(page)) {
2446 /* Zero any unwritten bytes at the end of the page */
8c56e03d 2447 size_t endoff = pos & ~PAGE_MASK;
6b12c1b3 2448 if (endoff)
09cbfeaf 2449 zero_user_segment(page, endoff, PAGE_SIZE);
6b12c1b3
PE
2450 SetPageUptodate(page);
2451 }
2452
8c56e03d
MS
2453 if (pos > inode->i_size)
2454 i_size_write(inode, pos);
2455
6b12c1b3 2456 set_page_dirty(page);
59c3b76c
MS
2457
2458unlock:
6b12c1b3 2459 unlock_page(page);
09cbfeaf 2460 put_page(page);
6b12c1b3
PE
2461
2462 return copied;
2463}
2464
2bf06b8e 2465static int fuse_launder_folio(struct folio *folio)
3be5a52b
MS
2466{
2467 int err = 0;
2bf06b8e
MWO
2468 if (folio_clear_dirty_for_io(folio)) {
2469 struct inode *inode = folio->mapping->host;
3993382b
MS
2470
2471 /* Serialize with pending writeback for the same page */
2bf06b8e 2472 fuse_wait_on_page_writeback(inode, folio->index);
e0887e09 2473 err = fuse_writepage_locked(folio);
3be5a52b 2474 if (!err)
2bf06b8e 2475 fuse_wait_on_page_writeback(inode, folio->index);
3be5a52b
MS
2476 }
2477 return err;
2478}
2479
2480/*
36ea2337
MS
2481 * Write back dirty data/metadata now (there may not be any suitable
2482 * open files later for data)
3be5a52b
MS
2483 */
2484static void fuse_vma_close(struct vm_area_struct *vma)
2485{
36ea2337
MS
2486 int err;
2487
2488 err = write_inode_now(vma->vm_file->f_mapping->host, 1);
2489 mapping_set_error(vma->vm_file->f_mapping, err);
3be5a52b
MS
2490}
2491
2492/*
2493 * Wait for writeback against this page to complete before allowing it
2494 * to be marked dirty again, and hence written back again, possibly
2495 * before the previous writepage completed.
2496 *
2497 * Block here, instead of in ->writepage(), so that the userspace fs
2498 * can only block processes actually operating on the filesystem.
2499 *
2500 * Otherwise unprivileged userspace fs would be able to block
2501 * unrelated:
2502 *
2503 * - page migration
2504 * - sync(2)
2505 * - try_to_free_pages() with order > PAGE_ALLOC_COSTLY_ORDER
2506 */
46fb504a 2507static vm_fault_t fuse_page_mkwrite(struct vm_fault *vmf)
3be5a52b 2508{
c2ec175c 2509 struct page *page = vmf->page;
11bac800 2510 struct inode *inode = file_inode(vmf->vma->vm_file);
cca24370 2511
11bac800 2512 file_update_time(vmf->vma->vm_file);
cca24370
MS
2513 lock_page(page);
2514 if (page->mapping != inode->i_mapping) {
2515 unlock_page(page);
2516 return VM_FAULT_NOPAGE;
2517 }
3be5a52b
MS
2518
2519 fuse_wait_on_page_writeback(inode, page->index);
cca24370 2520 return VM_FAULT_LOCKED;
3be5a52b
MS
2521}
2522
f0f37e2f 2523static const struct vm_operations_struct fuse_file_vm_ops = {
3be5a52b
MS
2524 .close = fuse_vma_close,
2525 .fault = filemap_fault,
f1820361 2526 .map_pages = filemap_map_pages,
3be5a52b
MS
2527 .page_mkwrite = fuse_page_mkwrite,
2528};
2529
2530static int fuse_file_mmap(struct file *file, struct vm_area_struct *vma)
2531{
55752a3a 2532 struct fuse_file *ff = file->private_data;
e78662e8 2533 struct fuse_conn *fc = ff->fm->fc;
fda0b98e 2534 struct inode *inode = file_inode(file);
cb098dd2 2535 int rc;
55752a3a 2536
2a9a609a 2537 /* DAX mmap is superior to direct_io mmap */
fda0b98e 2538 if (FUSE_IS_DAX(inode))
2a9a609a
SH
2539 return fuse_dax_mmap(file, vma);
2540
fda0b98e
AG
2541 /*
 2542 * If the inode is in passthrough io mode because it has some file open
 2543 * in passthrough mode, either mmap to the backing file or fail the mmap,
 2544 * because mixing cached mmap and passthrough io mode is not allowed.
2545 */
4a90451b 2546 if (fuse_file_passthrough(ff))
fda0b98e
AG
2547 return fuse_passthrough_mmap(file, vma);
2548 else if (fuse_inode_backing(get_fuse_inode(inode)))
4a90451b
AG
2549 return -ENODEV;
2550
205c1d80
AG
2551 /*
2552 * FOPEN_DIRECT_IO handling is special compared to O_DIRECT,
 2553 * as it does not allow MAP_SHARED mmap without FUSE_DIRECT_IO_ALLOW_MMAP.
2554 */
55752a3a 2555 if (ff->open_flags & FOPEN_DIRECT_IO) {
9511176b
BS
2556 /*
2557 * Can't provide the coherency needed for MAP_SHARED
c55e0a55 2558 * if FUSE_DIRECT_IO_ALLOW_MMAP isn't set.
e78662e8 2559 */
c55e0a55 2560 if ((vma->vm_flags & VM_MAYSHARE) && !fc->direct_io_allow_mmap)
55752a3a
MS
2561 return -ENODEV;
2562
2563 invalidate_inode_pages2(file->f_mapping);
2564
9511176b
BS
2565 if (!(vma->vm_flags & VM_MAYSHARE)) {
2566 /* MAP_PRIVATE */
2567 return generic_file_mmap(file, vma);
2568 }
cb098dd2 2569
205c1d80
AG
2570 /*
2571 * First mmap of direct_io file enters caching inode io mode.
2572 * Also waits for parallel dio writers to go into serial mode
2573 * (exclusive instead of shared lock).
2574 */
fda0b98e 2575 rc = fuse_file_cached_io_start(inode, ff);
cb098dd2
AG
2576 if (rc)
2577 return rc;
55752a3a
MS
2578 }
2579
650b22b9
PE
2580 if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_MAYWRITE))
2581 fuse_link_write_file(file);
2582
3be5a52b
MS
2583 file_accessed(file);
2584 vma->vm_ops = &fuse_file_vm_ops;
b6aeaded
MS
2585 return 0;
2586}
2587
0b6e9ea0
SF
2588static int convert_fuse_file_lock(struct fuse_conn *fc,
2589 const struct fuse_file_lock *ffl,
71421259
MS
2590 struct file_lock *fl)
2591{
2592 switch (ffl->type) {
2593 case F_UNLCK:
2594 break;
2595
2596 case F_RDLCK:
2597 case F_WRLCK:
2598 if (ffl->start > OFFSET_MAX || ffl->end > OFFSET_MAX ||
2599 ffl->end < ffl->start)
2600 return -EIO;
2601
2602 fl->fl_start = ffl->start;
2603 fl->fl_end = ffl->end;
0b6e9ea0
SF
2604
2605 /*
9d5b86ac
BC
2606 * Convert pid into init's pid namespace. The locks API will
2607 * translate it into the caller's pid namespace.
0b6e9ea0
SF
2608 */
2609 rcu_read_lock();
9d5b86ac 2610 fl->fl_pid = pid_nr_ns(find_pid_ns(ffl->pid, fc->pid_ns), &init_pid_ns);
0b6e9ea0 2611 rcu_read_unlock();
71421259
MS
2612 break;
2613
2614 default:
2615 return -EIO;
2616 }
2617 fl->fl_type = ffl->type;
2618 return 0;
2619}
2620
7078187a 2621static void fuse_lk_fill(struct fuse_args *args, struct file *file,
a9ff4f87 2622 const struct file_lock *fl, int opcode, pid_t pid,
7078187a 2623 int flock, struct fuse_lk_in *inarg)
71421259 2624{
6131ffaa 2625 struct inode *inode = file_inode(file);
9c8ef561 2626 struct fuse_conn *fc = get_fuse_conn(inode);
71421259 2627 struct fuse_file *ff = file->private_data;
7078187a
MS
2628
2629 memset(inarg, 0, sizeof(*inarg));
2630 inarg->fh = ff->fh;
2631 inarg->owner = fuse_lock_owner_id(fc, fl->fl_owner);
2632 inarg->lk.start = fl->fl_start;
2633 inarg->lk.end = fl->fl_end;
2634 inarg->lk.type = fl->fl_type;
2635 inarg->lk.pid = pid;
a9ff4f87 2636 if (flock)
7078187a 2637 inarg->lk_flags |= FUSE_LK_FLOCK;
d5b48543
MS
2638 args->opcode = opcode;
2639 args->nodeid = get_node_id(inode);
2640 args->in_numargs = 1;
2641 args->in_args[0].size = sizeof(*inarg);
2642 args->in_args[0].value = inarg;
71421259
MS
2643}
2644
2645static int fuse_getlk(struct file *file, struct file_lock *fl)
2646{
6131ffaa 2647 struct inode *inode = file_inode(file);
fcee216b 2648 struct fuse_mount *fm = get_fuse_mount(inode);
7078187a
MS
2649 FUSE_ARGS(args);
2650 struct fuse_lk_in inarg;
71421259
MS
2651 struct fuse_lk_out outarg;
2652 int err;
2653
7078187a 2654 fuse_lk_fill(&args, file, fl, FUSE_GETLK, 0, 0, &inarg);
d5b48543
MS
2655 args.out_numargs = 1;
2656 args.out_args[0].size = sizeof(outarg);
2657 args.out_args[0].value = &outarg;
fcee216b 2658 err = fuse_simple_request(fm, &args);
71421259 2659 if (!err)
fcee216b 2660 err = convert_fuse_file_lock(fm->fc, &outarg.lk, fl);
71421259
MS
2661
2662 return err;
2663}
2664
a9ff4f87 2665static int fuse_setlk(struct file *file, struct file_lock *fl, int flock)
71421259 2666{
6131ffaa 2667 struct inode *inode = file_inode(file);
fcee216b 2668 struct fuse_mount *fm = get_fuse_mount(inode);
7078187a
MS
2669 FUSE_ARGS(args);
2670 struct fuse_lk_in inarg;
71421259 2671 int opcode = (fl->fl_flags & FL_SLEEP) ? FUSE_SETLKW : FUSE_SETLK;
0b6e9ea0 2672 struct pid *pid = fl->fl_type != F_UNLCK ? task_tgid(current) : NULL;
fcee216b 2673 pid_t pid_nr = pid_nr_ns(pid, fm->fc->pid_ns);
71421259
MS
2674 int err;
2675
8fb47a4f 2676 if (fl->fl_lmops && fl->fl_lmops->lm_grant) {
48e90761
MS
2677 /* NLM needs asynchronous locks, which we don't support yet */
2678 return -ENOLCK;
2679 }
2680
0b6e9ea0 2681 fuse_lk_fill(&args, file, fl, opcode, pid_nr, flock, &inarg);
fcee216b 2682 err = fuse_simple_request(fm, &args);
71421259 2683
a4d27e75
MS
2684 /* locking is restartable */
2685 if (err == -EINTR)
2686 err = -ERESTARTSYS;
7078187a 2687
71421259
MS
2688 return err;
2689}
2690
2691static int fuse_file_lock(struct file *file, int cmd, struct file_lock *fl)
2692{
6131ffaa 2693 struct inode *inode = file_inode(file);
71421259
MS
2694 struct fuse_conn *fc = get_fuse_conn(inode);
2695 int err;
2696
48e90761
MS
2697 if (cmd == F_CANCELLK) {
2698 err = 0;
2699 } else if (cmd == F_GETLK) {
71421259 2700 if (fc->no_lock) {
9d6a8c5c 2701 posix_test_lock(file, fl);
71421259
MS
2702 err = 0;
2703 } else
2704 err = fuse_getlk(file, fl);
2705 } else {
2706 if (fc->no_lock)
48e90761 2707 err = posix_lock_file(file, fl, NULL);
71421259 2708 else
a9ff4f87 2709 err = fuse_setlk(file, fl, 0);
71421259
MS
2710 }
2711 return err;
2712}
2713
a9ff4f87
MS
2714static int fuse_file_flock(struct file *file, int cmd, struct file_lock *fl)
2715{
6131ffaa 2716 struct inode *inode = file_inode(file);
a9ff4f87
MS
2717 struct fuse_conn *fc = get_fuse_conn(inode);
2718 int err;
2719
37fb3a30 2720 if (fc->no_flock) {
4f656367 2721 err = locks_lock_file_wait(file, fl);
a9ff4f87 2722 } else {
37fb3a30
MS
2723 struct fuse_file *ff = file->private_data;
2724
a9ff4f87 2725 /* emulate flock with POSIX locks */
37fb3a30 2726 ff->flock = true;
a9ff4f87
MS
2727 err = fuse_setlk(file, fl, 1);
2728 }
2729
2730 return err;
2731}
2732
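/*
 * Editor's note on fuse_bmap() below: FIBMAP-style mapping of a logical
 * block to a device block via FUSE_BMAP.  It returns 0 ("unmapped") when the
 * filesystem is not backed by a block device, when the server does not
 * implement the operation, or when the request fails.
 */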
b2d2272f
MS
2733static sector_t fuse_bmap(struct address_space *mapping, sector_t block)
2734{
2735 struct inode *inode = mapping->host;
fcee216b 2736 struct fuse_mount *fm = get_fuse_mount(inode);
7078187a 2737 FUSE_ARGS(args);
b2d2272f
MS
2738 struct fuse_bmap_in inarg;
2739 struct fuse_bmap_out outarg;
2740 int err;
2741
fcee216b 2742 if (!inode->i_sb->s_bdev || fm->fc->no_bmap)
b2d2272f
MS
2743 return 0;
2744
b2d2272f
MS
2745 memset(&inarg, 0, sizeof(inarg));
2746 inarg.block = block;
2747 inarg.blocksize = inode->i_sb->s_blocksize;
d5b48543
MS
2748 args.opcode = FUSE_BMAP;
2749 args.nodeid = get_node_id(inode);
2750 args.in_numargs = 1;
2751 args.in_args[0].size = sizeof(inarg);
2752 args.in_args[0].value = &inarg;
2753 args.out_numargs = 1;
2754 args.out_args[0].size = sizeof(outarg);
2755 args.out_args[0].value = &outarg;
fcee216b 2756 err = fuse_simple_request(fm, &args);
b2d2272f 2757 if (err == -ENOSYS)
fcee216b 2758 fm->fc->no_bmap = 1;
b2d2272f
MS
2759
2760 return err ? 0 : outarg.block;
2761}
2762
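/*
 * Editor's note on fuse_lseek() below: SEEK_HOLE/SEEK_DATA are handled via
 * the FUSE_LSEEK request.  If the server does not implement it, the code
 * falls back to refreshing the size attribute and doing a plain
 * generic_file_llseek().
 */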
0b5da8db
R
2763static loff_t fuse_lseek(struct file *file, loff_t offset, int whence)
2764{
2765 struct inode *inode = file->f_mapping->host;
fcee216b 2766 struct fuse_mount *fm = get_fuse_mount(inode);
0b5da8db
R
2767 struct fuse_file *ff = file->private_data;
2768 FUSE_ARGS(args);
2769 struct fuse_lseek_in inarg = {
2770 .fh = ff->fh,
2771 .offset = offset,
2772 .whence = whence
2773 };
2774 struct fuse_lseek_out outarg;
2775 int err;
2776
fcee216b 2777 if (fm->fc->no_lseek)
0b5da8db
R
2778 goto fallback;
2779
d5b48543
MS
2780 args.opcode = FUSE_LSEEK;
2781 args.nodeid = ff->nodeid;
2782 args.in_numargs = 1;
2783 args.in_args[0].size = sizeof(inarg);
2784 args.in_args[0].value = &inarg;
2785 args.out_numargs = 1;
2786 args.out_args[0].size = sizeof(outarg);
2787 args.out_args[0].value = &outarg;
fcee216b 2788 err = fuse_simple_request(fm, &args);
0b5da8db
R
2789 if (err) {
2790 if (err == -ENOSYS) {
fcee216b 2791 fm->fc->no_lseek = 1;
0b5da8db
R
2792 goto fallback;
2793 }
2794 return err;
2795 }
2796
2797 return vfs_setpos(file, outarg.offset, inode->i_sb->s_maxbytes);
2798
2799fallback:
c6c745b8 2800 err = fuse_update_attributes(inode, file, STATX_SIZE);
0b5da8db
R
2801 if (!err)
2802 return generic_file_llseek(file, offset, whence);
2803 else
2804 return err;
2805}
2806
965c8e59 2807static loff_t fuse_file_llseek(struct file *file, loff_t offset, int whence)
5559b8f4
MS
2808{
2809 loff_t retval;
6131ffaa 2810 struct inode *inode = file_inode(file);
5559b8f4 2811
0b5da8db
R
2812 switch (whence) {
2813 case SEEK_SET:
2814 case SEEK_CUR:
2815 /* No i_mutex protection necessary for SEEK_CUR and SEEK_SET */
965c8e59 2816 retval = generic_file_llseek(file, offset, whence);
0b5da8db
R
2817 break;
2818 case SEEK_END:
5955102c 2819 inode_lock(inode);
c6c745b8 2820 retval = fuse_update_attributes(inode, file, STATX_SIZE);
0b5da8db
R
2821 if (!retval)
2822 retval = generic_file_llseek(file, offset, whence);
5955102c 2823 inode_unlock(inode);
0b5da8db
R
2824 break;
2825 case SEEK_HOLE:
2826 case SEEK_DATA:
5955102c 2827 inode_lock(inode);
0b5da8db 2828 retval = fuse_lseek(file, offset, whence);
5955102c 2829 inode_unlock(inode);
0b5da8db
R
2830 break;
2831 default:
2832 retval = -EINVAL;
2833 }
c07c3d19 2834
5559b8f4
MS
2835 return retval;
2836}
2837
95668a69
TH
2838/*
2839 * All files which have been polled are linked to RB tree
2840 * fuse_conn->polled_files which is indexed by kh. Walk the tree and
2841 * find the matching one.
2842 */
2843static struct rb_node **fuse_find_polled_node(struct fuse_conn *fc, u64 kh,
2844 struct rb_node **parent_out)
2845{
2846 struct rb_node **link = &fc->polled_files.rb_node;
2847 struct rb_node *last = NULL;
2848
2849 while (*link) {
2850 struct fuse_file *ff;
2851
2852 last = *link;
2853 ff = rb_entry(last, struct fuse_file, polled_node);
2854
2855 if (kh < ff->kh)
2856 link = &last->rb_left;
2857 else if (kh > ff->kh)
2858 link = &last->rb_right;
2859 else
2860 return link;
2861 }
2862
2863 if (parent_out)
2864 *parent_out = last;
2865 return link;
2866}
2867
2868/*
2869 * The file is about to be polled. Make sure it's on the polled_files
2870 * RB tree. Note that files once added to the polled_files tree are
2871 * not removed before the file is released. This is because a file
2872 * polled once is likely to be polled again.
2873 */
2874static void fuse_register_polled_file(struct fuse_conn *fc,
2875 struct fuse_file *ff)
2876{
2877 spin_lock(&fc->lock);
2878 if (RB_EMPTY_NODE(&ff->polled_node)) {
3f649ab7 2879 struct rb_node **link, *parent;
95668a69
TH
2880
2881 link = fuse_find_polled_node(fc, ff->kh, &parent);
2882 BUG_ON(*link);
2883 rb_link_node(&ff->polled_node, parent, link);
2884 rb_insert_color(&ff->polled_node, &fc->polled_files);
2885 }
2886 spin_unlock(&fc->lock);
2887}
2888
076ccb76 2889__poll_t fuse_file_poll(struct file *file, poll_table *wait)
95668a69 2890{
95668a69 2891 struct fuse_file *ff = file->private_data;
fcee216b 2892 struct fuse_mount *fm = ff->fm;
95668a69
TH
2893 struct fuse_poll_in inarg = { .fh = ff->fh, .kh = ff->kh };
2894 struct fuse_poll_out outarg;
7078187a 2895 FUSE_ARGS(args);
95668a69
TH
2896 int err;
2897
fcee216b 2898 if (fm->fc->no_poll)
95668a69
TH
2899 return DEFAULT_POLLMASK;
2900
2901 poll_wait(file, &ff->poll_wait, wait);
c71d227f 2902 inarg.events = mangle_poll(poll_requested_events(wait));
95668a69
TH
2903
2904 /*
2905 * Ask for notification iff there's someone waiting for it.
2906 * The client may ignore the flag and always notify.
2907 */
2908 if (waitqueue_active(&ff->poll_wait)) {
2909 inarg.flags |= FUSE_POLL_SCHEDULE_NOTIFY;
fcee216b 2910 fuse_register_polled_file(fm->fc, ff);
95668a69
TH
2911 }
2912
d5b48543
MS
2913 args.opcode = FUSE_POLL;
2914 args.nodeid = ff->nodeid;
2915 args.in_numargs = 1;
2916 args.in_args[0].size = sizeof(inarg);
2917 args.in_args[0].value = &inarg;
2918 args.out_numargs = 1;
2919 args.out_args[0].size = sizeof(outarg);
2920 args.out_args[0].value = &outarg;
fcee216b 2921 err = fuse_simple_request(fm, &args);
95668a69
TH
2922
2923 if (!err)
c71d227f 2924 return demangle_poll(outarg.revents);
95668a69 2925 if (err == -ENOSYS) {
fcee216b 2926 fm->fc->no_poll = 1;
95668a69
TH
2927 return DEFAULT_POLLMASK;
2928 }
a9a08845 2929 return EPOLLERR;
95668a69 2930}
08cbf542 2931EXPORT_SYMBOL_GPL(fuse_file_poll);
95668a69
TH
2932
2933/*
2934 * This is called from fuse_handle_notify() on FUSE_NOTIFY_POLL and
2935 * wakes up the poll waiters.
2936 */
2937int fuse_notify_poll_wakeup(struct fuse_conn *fc,
2938 struct fuse_notify_poll_wakeup_out *outarg)
2939{
2940 u64 kh = outarg->kh;
2941 struct rb_node **link;
2942
2943 spin_lock(&fc->lock);
2944
2945 link = fuse_find_polled_node(fc, kh, NULL);
2946 if (*link) {
2947 struct fuse_file *ff;
2948
2949 ff = rb_entry(*link, struct fuse_file, polled_node);
2950 wake_up_interruptible_sync(&ff->poll_wait);
2951 }
2952
2953 spin_unlock(&fc->lock);
2954 return 0;
2955}
2956
efb9fa9e
MP
2957static void fuse_do_truncate(struct file *file)
2958{
2959 struct inode *inode = file->f_mapping->host;
2960 struct iattr attr;
2961
2962 attr.ia_valid = ATTR_SIZE;
2963 attr.ia_size = i_size_read(inode);
2964
2965 attr.ia_file = file;
2966 attr.ia_valid |= ATTR_FILE;
2967
62490330 2968 fuse_do_setattr(file_dentry(file), &attr, file);
efb9fa9e
MP
2969}
2970
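/*
 * Editor's note on fuse_round_up() below: it rounds @off up to a multiple of
 * the maximum request payload.  For example, with 4 KiB pages and
 * fc->max_pages == 256 (values chosen purely for illustration), @off is
 * rounded up to the next 1 MiB boundary.
 */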
5da784cc 2971static inline loff_t fuse_round_up(struct fuse_conn *fc, loff_t off)
e5c5f05d 2972{
5da784cc 2973 return round_up(off, fc->max_pages << PAGE_SHIFT);
e5c5f05d
MP
2974}
2975
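/*
 * Editor's note on fuse_direct_IO() below: this is the ->direct_IO()
 * implementation, also called directly for async O_DIRECT kiocbs.  Requests
 * are submitted asynchronously when the connection supports async_dio; for
 * synchronous kiocbs and size-extending writes the call still waits for
 * completion, otherwise -EIOCBQUEUED is returned.
 */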
4273b793 2976static ssize_t
c8b8e32d 2977fuse_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
4273b793 2978{
9d5722b7 2979 DECLARE_COMPLETION_ONSTACK(wait);
4273b793 2980 ssize_t ret = 0;
60b9df7a
MS
2981 struct file *file = iocb->ki_filp;
2982 struct fuse_file *ff = file->private_data;
4273b793 2983 loff_t pos = 0;
bcba24cc
MP
2984 struct inode *inode;
2985 loff_t i_size;
933a3752 2986 size_t count = iov_iter_count(iter), shortened = 0;
c8b8e32d 2987 loff_t offset = iocb->ki_pos;
36cf66ed 2988 struct fuse_io_priv *io;
4273b793 2989
4273b793 2990 pos = offset;
bcba24cc
MP
2991 inode = file->f_mapping->host;
2992 i_size = i_size_read(inode);
4273b793 2993
933a3752 2994 if ((iov_iter_rw(iter) == READ) && (offset >= i_size))
9fe55eea
SW
2995 return 0;
2996
bcba24cc 2997 io = kmalloc(sizeof(struct fuse_io_priv), GFP_KERNEL);
36cf66ed
MP
2998 if (!io)
2999 return -ENOMEM;
bcba24cc 3000 spin_lock_init(&io->lock);
744742d6 3001 kref_init(&io->refcnt);
bcba24cc
MP
3002 io->reqs = 1;
3003 io->bytes = -1;
3004 io->size = 0;
3005 io->offset = offset;
6f673763 3006 io->write = (iov_iter_rw(iter) == WRITE);
bcba24cc 3007 io->err = 0;
bcba24cc
MP
3008 /*
3009 * By default, we want to optimize all I/Os with async request
60b9df7a 3010 * submission to the client filesystem if supported.
bcba24cc 3011 */
69456535 3012 io->async = ff->fm->fc->async_dio;
bcba24cc 3013 io->iocb = iocb;
7879c4e5 3014 io->blocking = is_sync_kiocb(iocb);
bcba24cc 3015
933a3752
AV
3016 /* optimization for short read */
3017 if (io->async && !io->write && offset + count > i_size) {
69456535 3018 iov_iter_truncate(iter, fuse_round_up(ff->fm->fc, i_size - offset));
933a3752
AV
3019 shortened = count - iov_iter_count(iter);
3020 count -= shortened;
3021 }
3022
bcba24cc 3023 /*
7879c4e5
AS
3024 * We cannot asynchronously extend the size of a file.
 3025 * In such a case the aio will behave exactly like sync io.
bcba24cc 3026 */
933a3752 3027 if ((offset + count > i_size) && io->write)
7879c4e5 3028 io->blocking = true;
4273b793 3029
7879c4e5 3030 if (io->async && io->blocking) {
744742d6
SF
3031 /*
3032 * Additional reference to keep io around after
3033 * calling fuse_aio_complete()
3034 */
3035 kref_get(&io->refcnt);
9d5722b7 3036 io->done = &wait;
744742d6 3037 }
9d5722b7 3038
6f673763 3039 if (iov_iter_rw(iter) == WRITE) {
6b775b18 3040 ret = fuse_direct_io(io, iter, &pos, FUSE_DIO_WRITE);
fa5eee57 3041 fuse_invalidate_attr_mask(inode, FUSE_STATX_MODSIZE);
812408fb 3042 } else {
d22a943f 3043 ret = __fuse_direct_read(io, iter, &pos);
812408fb 3044 }
933a3752 3045 iov_iter_reexpand(iter, iov_iter_count(iter) + shortened);
36cf66ed 3046
bcba24cc 3047 if (io->async) {
ebacb812
LC
3048 bool blocking = io->blocking;
3049
bcba24cc
MP
3050 fuse_aio_complete(io, ret < 0 ? ret : 0, -1);
3051
3052 /* we have a non-extending, async request, so return */
ebacb812 3053 if (!blocking)
bcba24cc
MP
3054 return -EIOCBQUEUED;
3055
9d5722b7
CH
3056 wait_for_completion(&wait);
3057 ret = fuse_get_res_by_io(io);
bcba24cc
MP
3058 }
3059
744742d6 3060 kref_put(&io->refcnt, fuse_io_release);
9d5722b7 3061
6f673763 3062 if (iov_iter_rw(iter) == WRITE) {
d347739a 3063 fuse_write_update_attr(inode, pos, ret);
15352405 3064 /* For extending writes we already hold exclusive lock */
d347739a 3065 if (ret < 0 && offset + count > i_size)
efb9fa9e
MP
3066 fuse_do_truncate(file);
3067 }
4273b793
AA
3068
3069 return ret;
3070}
3071
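/*
 * Editor's note on fuse_writeback_range() below: it flushes dirty pagecache
 * from @start onwards (note that @end is not used; the flush always extends
 * to LLONG_MAX) and then calls fuse_sync_writes() so that writeback requests
 * already in flight are covered as well.
 */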
26eb3bae
MS
3072static int fuse_writeback_range(struct inode *inode, loff_t start, loff_t end)
3073{
e388164e 3074 int err = filemap_write_and_wait_range(inode->i_mapping, start, LLONG_MAX);
26eb3bae
MS
3075
3076 if (!err)
3077 fuse_sync_writes(inode);
3078
3079 return err;
3080}
3081
cdadb11c
MS
3082static long fuse_file_fallocate(struct file *file, int mode, loff_t offset,
3083 loff_t length)
05ba1f08
AP
3084{
3085 struct fuse_file *ff = file->private_data;
1c68271c 3086 struct inode *inode = file_inode(file);
0ab08f57 3087 struct fuse_inode *fi = get_fuse_inode(inode);
fcee216b 3088 struct fuse_mount *fm = ff->fm;
7078187a 3089 FUSE_ARGS(args);
05ba1f08
AP
3090 struct fuse_fallocate_in inarg = {
3091 .fh = ff->fh,
3092 .offset = offset,
3093 .length = length,
3094 .mode = mode
3095 };
3096 int err;
44361e8c
MS
3097 bool block_faults = FUSE_IS_DAX(inode) &&
3098 (!(mode & FALLOC_FL_KEEP_SIZE) ||
3099 (mode & (FALLOC_FL_PUNCH_HOLE | FALLOC_FL_ZERO_RANGE)));
6ae330ca 3100
6b1bdb56
RJ
3101 if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE |
3102 FALLOC_FL_ZERO_RANGE))
4adb8302
MS
3103 return -EOPNOTSUPP;
3104
fcee216b 3105 if (fm->fc->no_fallocate)
519c6040
MS
3106 return -EOPNOTSUPP;
3107
44361e8c
MS
3108 inode_lock(inode);
3109 if (block_faults) {
3110 filemap_invalidate_lock(inode->i_mapping);
3111 err = fuse_dax_break_layouts(inode, 0, 0);
3112 if (err)
3113 goto out;
3114 }
6ae330ca 3115
44361e8c
MS
3116 if (mode & (FALLOC_FL_PUNCH_HOLE | FALLOC_FL_ZERO_RANGE)) {
3117 loff_t endbyte = offset + length - 1;
26eb3bae 3118
44361e8c
MS
3119 err = fuse_writeback_range(inode, offset, endbyte);
3120 if (err)
3121 goto out;
3634a632
BF
3122 }
3123
0cbade02
LB
3124 if (!(mode & FALLOC_FL_KEEP_SIZE) &&
3125 offset + length > i_size_read(inode)) {
3126 err = inode_newsize_ok(inode, offset + length);
3127 if (err)
35d6fcbb 3128 goto out;
0cbade02
LB
3129 }
3130
4a6f278d
MS
3131 err = file_modified(file);
3132 if (err)
3133 goto out;
3134
0ab08f57
MP
3135 if (!(mode & FALLOC_FL_KEEP_SIZE))
3136 set_bit(FUSE_I_SIZE_UNSTABLE, &fi->state);
3137
d5b48543
MS
3138 args.opcode = FUSE_FALLOCATE;
3139 args.nodeid = ff->nodeid;
3140 args.in_numargs = 1;
3141 args.in_args[0].size = sizeof(inarg);
3142 args.in_args[0].value = &inarg;
fcee216b 3143 err = fuse_simple_request(fm, &args);
519c6040 3144 if (err == -ENOSYS) {
fcee216b 3145 fm->fc->no_fallocate = 1;
519c6040
MS
3146 err = -EOPNOTSUPP;
3147 }
bee6c307
BF
3148 if (err)
3149 goto out;
3150
3151 /* we could have extended the file */
b0aa7606 3152 if (!(mode & FALLOC_FL_KEEP_SIZE)) {
20235b43 3153 if (fuse_write_update_attr(inode, offset + length, length))
93d2269d 3154 file_update_time(file);
b0aa7606 3155 }
bee6c307 3156
6b1bdb56 3157 if (mode & (FALLOC_FL_PUNCH_HOLE | FALLOC_FL_ZERO_RANGE))
bee6c307
BF
3158 truncate_pagecache_range(inode, offset, offset + length - 1);
3159
fa5eee57 3160 fuse_invalidate_attr_mask(inode, FUSE_STATX_MODSIZE);
bee6c307 3161
3634a632 3162out:
0ab08f57
MP
3163 if (!(mode & FALLOC_FL_KEEP_SIZE))
3164 clear_bit(FUSE_I_SIZE_UNSTABLE, &fi->state);
3165
6ae330ca 3166 if (block_faults)
8bcbbe9c 3167 filemap_invalidate_unlock(inode->i_mapping);
6ae330ca 3168
44361e8c 3169 inode_unlock(inode);
3634a632 3170
5c791fe1
MS
3171 fuse_flush_time_update(inode);
3172
05ba1f08
AP
3173 return err;
3174}
05ba1f08 3175
64bf5ff5
DC
3176static ssize_t __fuse_copy_file_range(struct file *file_in, loff_t pos_in,
3177 struct file *file_out, loff_t pos_out,
3178 size_t len, unsigned int flags)
88bc7d50
NV
3179{
3180 struct fuse_file *ff_in = file_in->private_data;
3181 struct fuse_file *ff_out = file_out->private_data;
a2bc9236 3182 struct inode *inode_in = file_inode(file_in);
88bc7d50
NV
3183 struct inode *inode_out = file_inode(file_out);
3184 struct fuse_inode *fi_out = get_fuse_inode(inode_out);
fcee216b
MR
3185 struct fuse_mount *fm = ff_in->fm;
3186 struct fuse_conn *fc = fm->fc;
88bc7d50
NV
3187 FUSE_ARGS(args);
3188 struct fuse_copy_file_range_in inarg = {
3189 .fh_in = ff_in->fh,
3190 .off_in = pos_in,
3191 .nodeid_out = ff_out->nodeid,
3192 .fh_out = ff_out->fh,
3193 .off_out = pos_out,
3194 .len = len,
3195 .flags = flags
3196 };
3197 struct fuse_write_out outarg;
3198 ssize_t err;
3199 /* mark unstable when write-back is not used, and file_out gets
3200 * extended */
3201 bool is_unstable = (!fc->writeback_cache) &&
3202 ((pos_out + len) > inode_out->i_size);
3203
3204 if (fc->no_copy_file_range)
3205 return -EOPNOTSUPP;
3206
5dae222a
AG
3207 if (file_inode(file_in)->i_sb != file_inode(file_out)->i_sb)
3208 return -EXDEV;
3209
2c4656df
MS
3210 inode_lock(inode_in);
3211 err = fuse_writeback_range(inode_in, pos_in, pos_in + len - 1);
3212 inode_unlock(inode_in);
3213 if (err)
3214 return err;
a2bc9236 3215
88bc7d50
NV
3216 inode_lock(inode_out);
3217
fe0da9c0
AG
3218 err = file_modified(file_out);
3219 if (err)
3220 goto out;
3221
9b46418c
MS
3222 /*
3223 * Write out dirty pages in the destination file before sending the COPY
3224 * request to userspace. After the request is completed, truncate off
3225 * pages (including partial ones) from the cache that have been copied,
3226 * since these contain stale data at that point.
3227 *
3228 * This should be mostly correct, but if the COPY writes to partial
3229 * pages (at the start or end) and the parts not covered by the COPY are
3230 * written through a memory map after calling fuse_writeback_range(),
3231 * then these partial page modifications will be lost on truncation.
3232 *
3233 * It is unlikely that someone would rely on such mixed style
 3234 * modifications. Yet this does give fewer guarantees than if the
3235 * copying was performed with write(2).
3236 *
8bcbbe9c 3237 * To fix this a mapping->invalidate_lock could be used to prevent new
9b46418c
MS
3238 * faults while the copy is ongoing.
3239 */
2c4656df
MS
3240 err = fuse_writeback_range(inode_out, pos_out, pos_out + len - 1);
3241 if (err)
3242 goto out;
88bc7d50
NV
3243
3244 if (is_unstable)
3245 set_bit(FUSE_I_SIZE_UNSTABLE, &fi_out->state);
3246
d5b48543
MS
3247 args.opcode = FUSE_COPY_FILE_RANGE;
3248 args.nodeid = ff_in->nodeid;
3249 args.in_numargs = 1;
3250 args.in_args[0].size = sizeof(inarg);
3251 args.in_args[0].value = &inarg;
3252 args.out_numargs = 1;
3253 args.out_args[0].size = sizeof(outarg);
3254 args.out_args[0].value = &outarg;
fcee216b 3255 err = fuse_simple_request(fm, &args);
88bc7d50
NV
3256 if (err == -ENOSYS) {
3257 fc->no_copy_file_range = 1;
3258 err = -EOPNOTSUPP;
3259 }
3260 if (err)
3261 goto out;
3262
9b46418c
MS
3263 truncate_inode_pages_range(inode_out->i_mapping,
3264 ALIGN_DOWN(pos_out, PAGE_SIZE),
3265 ALIGN(pos_out + outarg.size, PAGE_SIZE) - 1);
3266
20235b43
MS
3267 file_update_time(file_out);
3268 fuse_write_update_attr(inode_out, pos_out + outarg.size, outarg.size);
88bc7d50
NV
3269
3270 err = outarg.size;
3271out:
3272 if (is_unstable)
3273 clear_bit(FUSE_I_SIZE_UNSTABLE, &fi_out->state);
3274
3275 inode_unlock(inode_out);
fe0da9c0 3276 file_accessed(file_in);
88bc7d50 3277
5c791fe1
MS
3278 fuse_flush_time_update(inode_out);
3279
88bc7d50
NV
3280 return err;
3281}
3282
64bf5ff5
DC
3283static ssize_t fuse_copy_file_range(struct file *src_file, loff_t src_off,
3284 struct file *dst_file, loff_t dst_off,
3285 size_t len, unsigned int flags)
3286{
3287 ssize_t ret;
3288
3289 ret = __fuse_copy_file_range(src_file, src_off, dst_file, dst_off,
3290 len, flags);
3291
5dae222a 3292 if (ret == -EOPNOTSUPP || ret == -EXDEV)
705bcfcb
AG
3293 ret = splice_copy_file_range(src_file, src_off, dst_file,
3294 dst_off, len);
64bf5ff5
DC
3295 return ret;
3296}
3297
4b6f5d20 3298static const struct file_operations fuse_file_operations = {
5559b8f4 3299 .llseek = fuse_file_llseek,
37c20f16 3300 .read_iter = fuse_file_read_iter,
84c3d55c 3301 .write_iter = fuse_file_write_iter,
b6aeaded
MS
3302 .mmap = fuse_file_mmap,
3303 .open = fuse_open,
3304 .flush = fuse_flush,
3305 .release = fuse_release,
3306 .fsync = fuse_fsync,
71421259 3307 .lock = fuse_file_lock,
2a9a609a 3308 .get_unmapped_area = thp_get_unmapped_area,
a9ff4f87 3309 .flock = fuse_file_flock,
5ca73468
AG
3310 .splice_read = fuse_splice_read,
3311 .splice_write = fuse_splice_write,
59efec7b
TH
3312 .unlocked_ioctl = fuse_file_ioctl,
3313 .compat_ioctl = fuse_file_compat_ioctl,
95668a69 3314 .poll = fuse_file_poll,
05ba1f08 3315 .fallocate = fuse_file_fallocate,
d4136d60 3316 .copy_file_range = fuse_copy_file_range,
413ef8cb
MS
3317};
3318
f5e54d6e 3319static const struct address_space_operations fuse_file_aops = {
5efd00e4 3320 .read_folio = fuse_read_folio,
76a0294e 3321 .readahead = fuse_readahead,
26d614df 3322 .writepages = fuse_writepages,
2bf06b8e 3323 .launder_folio = fuse_launder_folio,
187c82cb 3324 .dirty_folio = filemap_dirty_folio,
e1c420ac 3325 .migrate_folio = filemap_migrate_folio,
b2d2272f 3326 .bmap = fuse_bmap,
4273b793 3327 .direct_IO = fuse_direct_IO,
6b12c1b3
PE
3328 .write_begin = fuse_write_begin,
3329 .write_end = fuse_write_end,
b6aeaded
MS
3330};
3331
93a497b9 3332void fuse_init_file_inode(struct inode *inode, unsigned int flags)
b6aeaded 3333{
ab2257e9
MS
3334 struct fuse_inode *fi = get_fuse_inode(inode);
3335
45323fb7
MS
3336 inode->i_fop = &fuse_file_operations;
3337 inode->i_data.a_ops = &fuse_file_aops;
ab2257e9
MS
3338
3339 INIT_LIST_HEAD(&fi->write_files);
3340 INIT_LIST_HEAD(&fi->queued_writes);
3341 fi->writectr = 0;
cb098dd2 3342 fi->iocachectr = 0;
ab2257e9 3343 init_waitqueue_head(&fi->page_waitq);
205c1d80 3344 init_waitqueue_head(&fi->direct_io_waitq);
6b2fb799 3345 fi->writepages = RB_ROOT;
c2d0ad00
VG
3346
3347 if (IS_ENABLED(CONFIG_FUSE_DAX))
93a497b9 3348 fuse_dax_inode_init(inode, flags);
b6aeaded 3349}