/*
  FUSE: Filesystem in Userspace
  Copyright (C) 2001-2008  Miklos Szeredi <miklos@szeredi.hu>

  This program can be distributed under the terms of the GNU GPL.
  See the file COPYING.
*/

#include "fuse_i.h"

#include <linux/init.h>
#include <linux/module.h>
#include <linux/poll.h>
#include <linux/sched/signal.h>
#include <linux/uio.h>
#include <linux/miscdevice.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/pipe_fs_i.h>
#include <linux/swap.h>
#include <linux/splice.h>
#include <linux/sched.h>

MODULE_ALIAS_MISCDEV(FUSE_MINOR);
MODULE_ALIAS("devname:fuse");

/* Ordinary requests have even IDs, while interrupt IDs are odd */
#define FUSE_INT_REQ_BIT (1ULL << 0)
#define FUSE_REQ_ID_STEP (1ULL << 1)

static struct kmem_cache *fuse_req_cachep;

static struct fuse_dev *fuse_get_dev(struct file *file)
{
	/*
	 * Lockless access is OK, because file->private_data is set
	 * once during mount and is valid until the file is released.
	 */
	return READ_ONCE(file->private_data);
}

static void fuse_request_init(struct fuse_mount *fm, struct fuse_req *req)
{
	INIT_LIST_HEAD(&req->list);
	INIT_LIST_HEAD(&req->intr_entry);
	init_waitqueue_head(&req->waitq);
	refcount_set(&req->count, 1);
	__set_bit(FR_PENDING, &req->flags);
	req->fm = fm;
}

static struct fuse_req *fuse_request_alloc(struct fuse_mount *fm, gfp_t flags)
{
	struct fuse_req *req = kmem_cache_zalloc(fuse_req_cachep, flags);
	if (req)
		fuse_request_init(fm, req);

	return req;
}

static void fuse_request_free(struct fuse_req *req)
{
	kmem_cache_free(fuse_req_cachep, req);
}

static void __fuse_get_request(struct fuse_req *req)
{
	refcount_inc(&req->count);
}

/* Must be called with > 1 refcount */
static void __fuse_put_request(struct fuse_req *req)
{
	refcount_dec(&req->count);
}

void fuse_set_initialized(struct fuse_conn *fc)
{
	/* Make sure stores before this are seen on another CPU */
	smp_wmb();
	fc->initialized = 1;
}

static bool fuse_block_alloc(struct fuse_conn *fc, bool for_background)
{
	return !fc->initialized || (for_background && fc->blocked);
}

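/*
 * Worked example of the predicate above (added commentary, not from the
 * original source): until the FUSE_INIT reply arrives, fc->initialized is
 * zero and *every* allocation blocks; once initialized, only background
 * allocations block, and only while fc->blocked is set because
 * max_background requests are already in flight.
 */
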
static void fuse_drop_waiting(struct fuse_conn *fc)
{
	/*
	 * Lockless check of fc->connected is okay, because
	 * atomic_dec_and_test() provides a memory barrier matched with
	 * the one in fuse_wait_aborted() to ensure no wake-up is missed.
	 */
	if (atomic_dec_and_test(&fc->num_waiting) &&
	    !READ_ONCE(fc->connected)) {
		/* wake up aborters */
		wake_up_all(&fc->blocked_waitq);
	}
}

static void fuse_put_request(struct fuse_req *req);

static struct fuse_req *fuse_get_req(struct fuse_mount *fm, bool for_background)
{
	struct fuse_conn *fc = fm->fc;
	struct fuse_req *req;
	int err;
	atomic_inc(&fc->num_waiting);

	if (fuse_block_alloc(fc, for_background)) {
		err = -EINTR;
		if (wait_event_killable_exclusive(fc->blocked_waitq,
				!fuse_block_alloc(fc, for_background)))
			goto out;
	}
	/* Matches smp_wmb() in fuse_set_initialized() */
	smp_rmb();

	err = -ENOTCONN;
	if (!fc->connected)
		goto out;

	err = -ECONNREFUSED;
	if (fc->conn_error)
		goto out;

	req = fuse_request_alloc(fm, GFP_KERNEL);
	err = -ENOMEM;
	if (!req) {
		if (for_background)
			wake_up(&fc->blocked_waitq);
		goto out;
	}

	req->in.h.uid = from_kuid(fc->user_ns, current_fsuid());
	req->in.h.gid = from_kgid(fc->user_ns, current_fsgid());
	req->in.h.pid = pid_nr_ns(task_pid(current), fc->pid_ns);

	__set_bit(FR_WAITING, &req->flags);
	if (for_background)
		__set_bit(FR_BACKGROUND, &req->flags);

	if (unlikely(req->in.h.uid == ((uid_t)-1) ||
		     req->in.h.gid == ((gid_t)-1))) {
		fuse_put_request(req);
		return ERR_PTR(-EOVERFLOW);
	}
	return req;

 out:
	fuse_drop_waiting(fc);
	return ERR_PTR(err);
}

static void fuse_put_request(struct fuse_req *req)
{
	struct fuse_conn *fc = req->fm->fc;

	if (refcount_dec_and_test(&req->count)) {
		if (test_bit(FR_BACKGROUND, &req->flags)) {
			/*
			 * We get here in the unlikely case that a background
			 * request was allocated but not sent
			 */
			spin_lock(&fc->bg_lock);
			if (!fc->blocked)
				wake_up(&fc->blocked_waitq);
			spin_unlock(&fc->bg_lock);
		}

		if (test_bit(FR_WAITING, &req->flags)) {
			__clear_bit(FR_WAITING, &req->flags);
			fuse_drop_waiting(fc);
		}

		fuse_request_free(req);
	}
}

unsigned int fuse_len_args(unsigned int numargs, struct fuse_arg *args)
{
	unsigned nbytes = 0;
	unsigned i;

	for (i = 0; i < numargs; i++)
		nbytes += args[i].size;

	return nbytes;
}
EXPORT_SYMBOL_GPL(fuse_len_args);

u64 fuse_get_unique(struct fuse_iqueue *fiq)
{
	fiq->reqctr += FUSE_REQ_ID_STEP;
	return fiq->reqctr;
}
EXPORT_SYMBOL_GPL(fuse_get_unique);

static unsigned int fuse_req_hash(u64 unique)
{
	return hash_long(unique & ~FUSE_INT_REQ_BIT, FUSE_PQ_HASH_BITS);
}

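/*
 * Illustration of the ID scheme (added commentary, derived from the
 * FUSE_REQ_ID_STEP and FUSE_INT_REQ_BIT definitions above):
 * fuse_get_unique() hands out 2, 4, 6, ... - always even.  The INTERRUPT
 * for request N is sent with ID N | FUSE_INT_REQ_BIT, so the interrupt
 * for request 4 travels as ID 5, and fuse_dev_do_write() recovers the
 * original request by masking FUSE_INT_REQ_BIT off before request_find().
 */
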
/*
 * A new request is available, wake fiq->waitq
 */
static void fuse_dev_wake_and_unlock(struct fuse_iqueue *fiq)
__releases(fiq->lock)
{
	wake_up(&fiq->waitq);
	kill_fasync(&fiq->fasync, SIGIO, POLL_IN);
	spin_unlock(&fiq->lock);
}

const struct fuse_iqueue_ops fuse_dev_fiq_ops = {
	.wake_forget_and_unlock		= fuse_dev_wake_and_unlock,
	.wake_interrupt_and_unlock	= fuse_dev_wake_and_unlock,
	.wake_pending_and_unlock	= fuse_dev_wake_and_unlock,
};
EXPORT_SYMBOL_GPL(fuse_dev_fiq_ops);

static void queue_request_and_unlock(struct fuse_iqueue *fiq,
				     struct fuse_req *req)
__releases(fiq->lock)
{
	req->in.h.len = sizeof(struct fuse_in_header) +
		fuse_len_args(req->args->in_numargs,
			      (struct fuse_arg *) req->args->in_args);
	list_add_tail(&req->list, &fiq->pending);
	fiq->ops->wake_pending_and_unlock(fiq);
}

void fuse_queue_forget(struct fuse_conn *fc, struct fuse_forget_link *forget,
		       u64 nodeid, u64 nlookup)
{
	struct fuse_iqueue *fiq = &fc->iq;

	forget->forget_one.nodeid = nodeid;
	forget->forget_one.nlookup = nlookup;

	spin_lock(&fiq->lock);
	if (fiq->connected) {
		fiq->forget_list_tail->next = forget;
		fiq->forget_list_tail = forget;
		fiq->ops->wake_forget_and_unlock(fiq);
	} else {
		kfree(forget);
		spin_unlock(&fiq->lock);
	}
}

static void flush_bg_queue(struct fuse_conn *fc)
{
	struct fuse_iqueue *fiq = &fc->iq;

	while (fc->active_background < fc->max_background &&
	       !list_empty(&fc->bg_queue)) {
		struct fuse_req *req;

		req = list_first_entry(&fc->bg_queue, struct fuse_req, list);
		list_del(&req->list);
		fc->active_background++;
		spin_lock(&fiq->lock);
		req->in.h.unique = fuse_get_unique(fiq);
		queue_request_and_unlock(fiq, req);
	}
}

/*
 * This function is called when a request is finished.  Either a reply
 * has arrived or it was aborted (and not yet sent) or some error
 * occurred during communication with userspace, or the device file
 * was closed.  The requester thread is woken up (if still waiting),
 * the 'end' callback is called if given, else the reference to the
 * request is released.
 */
void fuse_request_end(struct fuse_req *req)
{
	struct fuse_mount *fm = req->fm;
	struct fuse_conn *fc = fm->fc;
	struct fuse_iqueue *fiq = &fc->iq;

	if (test_and_set_bit(FR_FINISHED, &req->flags))
		goto put_request;

	/*
	 * test_and_set_bit() implies smp_mb() between bit
	 * changing and below FR_INTERRUPTED check. Pairs with
	 * smp_mb() from queue_interrupt().
	 */
	if (test_bit(FR_INTERRUPTED, &req->flags)) {
		spin_lock(&fiq->lock);
		list_del_init(&req->intr_entry);
		spin_unlock(&fiq->lock);
	}
	WARN_ON(test_bit(FR_PENDING, &req->flags));
	WARN_ON(test_bit(FR_SENT, &req->flags));
	if (test_bit(FR_BACKGROUND, &req->flags)) {
		spin_lock(&fc->bg_lock);
		clear_bit(FR_BACKGROUND, &req->flags);
		if (fc->num_background == fc->max_background) {
			fc->blocked = 0;
			wake_up(&fc->blocked_waitq);
		} else if (!fc->blocked) {
			/*
			 * Wake up next waiter, if any.  It's okay to use
			 * waitqueue_active(), as we've already synced up
			 * fc->blocked with waiters with the wake_up() call
			 * above.
			 */
			if (waitqueue_active(&fc->blocked_waitq))
				wake_up(&fc->blocked_waitq);
		}

		fc->num_background--;
		fc->active_background--;
		flush_bg_queue(fc);
		spin_unlock(&fc->bg_lock);
	} else {
		/* Wake up waiter sleeping in request_wait_answer() */
		wake_up(&req->waitq);
	}

	if (test_bit(FR_ASYNC, &req->flags))
		req->args->end(fm, req->args, req->out.h.error);
put_request:
	fuse_put_request(req);
}
EXPORT_SYMBOL_GPL(fuse_request_end);

static int queue_interrupt(struct fuse_req *req)
{
	struct fuse_iqueue *fiq = &req->fm->fc->iq;

	spin_lock(&fiq->lock);
	/* Check that we've actually sent a request to interrupt this req */
	if (unlikely(!test_bit(FR_INTERRUPTED, &req->flags))) {
		spin_unlock(&fiq->lock);
		return -EINVAL;
	}

	if (list_empty(&req->intr_entry)) {
		list_add_tail(&req->intr_entry, &fiq->interrupts);
		/*
		 * Pairs with smp_mb() implied by test_and_set_bit()
		 * from fuse_request_end().
		 */
		smp_mb();
		if (test_bit(FR_FINISHED, &req->flags)) {
			list_del_init(&req->intr_entry);
			spin_unlock(&fiq->lock);
			return 0;
		}
		fiq->ops->wake_interrupt_and_unlock(fiq);
	} else {
		spin_unlock(&fiq->lock);
	}
	return 0;
}

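/*
 * Added note on the interrupt round trip: userspace sees the interrupt as
 * a FUSE_INTERRUPT request (assembled in fuse_read_interrupt() below).  A
 * reply of -ENOSYS makes the kernel stop sending interrupts altogether,
 * while -EAGAIN asks for the interrupt to be requeued; both cases are
 * handled in fuse_dev_do_write().
 */
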
static void request_wait_answer(struct fuse_req *req)
{
	struct fuse_conn *fc = req->fm->fc;
	struct fuse_iqueue *fiq = &fc->iq;
	int err;

	if (!fc->no_interrupt) {
		/* Any signal may interrupt this */
		err = wait_event_interruptible(req->waitq,
					test_bit(FR_FINISHED, &req->flags));
		if (!err)
			return;

		set_bit(FR_INTERRUPTED, &req->flags);
		/* matches barrier in fuse_dev_do_read() */
		smp_mb__after_atomic();
		if (test_bit(FR_SENT, &req->flags))
			queue_interrupt(req);
	}

	if (!test_bit(FR_FORCE, &req->flags)) {
		/* Only fatal signals may interrupt this */
		err = wait_event_killable(req->waitq,
					test_bit(FR_FINISHED, &req->flags));
		if (!err)
			return;

		spin_lock(&fiq->lock);
		/* Request is not yet in userspace, bail out */
		if (test_bit(FR_PENDING, &req->flags)) {
			list_del(&req->list);
			spin_unlock(&fiq->lock);
			__fuse_put_request(req);
			req->out.h.error = -EINTR;
			return;
		}
		spin_unlock(&fiq->lock);
	}

	/*
	 * Either request is already in userspace, or it was forced.
	 * Wait it out.
	 */
	wait_event(req->waitq, test_bit(FR_FINISHED, &req->flags));
}

static void __fuse_request_send(struct fuse_req *req)
{
	struct fuse_iqueue *fiq = &req->fm->fc->iq;

	BUG_ON(test_bit(FR_BACKGROUND, &req->flags));
	spin_lock(&fiq->lock);
	if (!fiq->connected) {
		spin_unlock(&fiq->lock);
		req->out.h.error = -ENOTCONN;
	} else {
		req->in.h.unique = fuse_get_unique(fiq);
		/* acquire extra reference, since request is still needed
		   after fuse_request_end() */
		__fuse_get_request(req);
		queue_request_and_unlock(fiq, req);

		request_wait_answer(req);
		/* Pairs with smp_wmb() in fuse_request_end() */
		smp_rmb();
	}
}

static void fuse_adjust_compat(struct fuse_conn *fc, struct fuse_args *args)
{
	if (fc->minor < 4 && args->opcode == FUSE_STATFS)
		args->out_args[0].size = FUSE_COMPAT_STATFS_SIZE;

	if (fc->minor < 9) {
		switch (args->opcode) {
		case FUSE_LOOKUP:
		case FUSE_CREATE:
		case FUSE_MKNOD:
		case FUSE_MKDIR:
		case FUSE_SYMLINK:
		case FUSE_LINK:
			args->out_args[0].size = FUSE_COMPAT_ENTRY_OUT_SIZE;
			break;
		case FUSE_GETATTR:
		case FUSE_SETATTR:
			args->out_args[0].size = FUSE_COMPAT_ATTR_OUT_SIZE;
			break;
		}
	}
	if (fc->minor < 12) {
		switch (args->opcode) {
		case FUSE_CREATE:
			args->in_args[0].size = sizeof(struct fuse_open_in);
			break;
		case FUSE_MKNOD:
			args->in_args[0].size = FUSE_COMPAT_MKNOD_IN_SIZE;
			break;
		}
	}
}

static void fuse_force_creds(struct fuse_req *req)
{
	struct fuse_conn *fc = req->fm->fc;

	req->in.h.uid = from_kuid_munged(fc->user_ns, current_fsuid());
	req->in.h.gid = from_kgid_munged(fc->user_ns, current_fsgid());
	req->in.h.pid = pid_nr_ns(task_pid(current), fc->pid_ns);
}

static void fuse_args_to_req(struct fuse_req *req, struct fuse_args *args)
{
	req->in.h.opcode = args->opcode;
	req->in.h.nodeid = args->nodeid;
	req->args = args;
	if (args->is_ext)
		req->in.h.total_extlen = args->in_args[args->ext_idx].size / 8;
	if (args->end)
		__set_bit(FR_ASYNC, &req->flags);
}

ssize_t fuse_simple_request(struct fuse_mount *fm, struct fuse_args *args)
{
	struct fuse_conn *fc = fm->fc;
	struct fuse_req *req;
	ssize_t ret;

	if (args->force) {
		atomic_inc(&fc->num_waiting);
		req = fuse_request_alloc(fm, GFP_KERNEL | __GFP_NOFAIL);

		if (!args->nocreds)
			fuse_force_creds(req);

		__set_bit(FR_WAITING, &req->flags);
		__set_bit(FR_FORCE, &req->flags);
	} else {
		WARN_ON(args->nocreds);
		req = fuse_get_req(fm, false);
		if (IS_ERR(req))
			return PTR_ERR(req);
	}

	/* Needs to be done after fuse_get_req() so that fc->minor is valid */
	fuse_adjust_compat(fc, args);
	fuse_args_to_req(req, args);

	if (!args->noreply)
		__set_bit(FR_ISREPLY, &req->flags);
	__fuse_request_send(req);
	ret = req->out.h.error;
	if (!ret && args->out_argvar) {
		BUG_ON(args->out_numargs == 0);
		ret = args->out_args[args->out_numargs - 1].size;
	}
	fuse_put_request(req);

	return ret;
}

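/*
 * Usage sketch, added for illustration (modelled on callers elsewhere in
 * fs/fuse; FUSE_ARGS() and get_node_id() live in fuse_i.h, and the
 * inarg/outarg variables here are hypothetical):
 *
 *	FUSE_ARGS(args);
 *	args.opcode = FUSE_GETATTR;
 *	args.nodeid = get_node_id(inode);
 *	args.in_numargs = 1;
 *	args.in_args[0].size = sizeof(inarg);
 *	args.in_args[0].value = &inarg;
 *	args.out_numargs = 1;
 *	args.out_args[0].size = sizeof(outarg);
 *	args.out_args[0].value = &outarg;
 *	err = fuse_simple_request(fm, &args);
 *
 * A negative return is either a local failure or the error from the
 * daemon's reply; with args.out_argvar set it is the size of the last
 * out argument instead.
 */
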
static bool fuse_request_queue_background(struct fuse_req *req)
{
	struct fuse_mount *fm = req->fm;
	struct fuse_conn *fc = fm->fc;
	bool queued = false;

	WARN_ON(!test_bit(FR_BACKGROUND, &req->flags));
	if (!test_bit(FR_WAITING, &req->flags)) {
		__set_bit(FR_WAITING, &req->flags);
		atomic_inc(&fc->num_waiting);
	}
	__set_bit(FR_ISREPLY, &req->flags);
	spin_lock(&fc->bg_lock);
	if (likely(fc->connected)) {
		fc->num_background++;
		if (fc->num_background == fc->max_background)
			fc->blocked = 1;
		list_add_tail(&req->list, &fc->bg_queue);
		flush_bg_queue(fc);
		queued = true;
	}
	spin_unlock(&fc->bg_lock);

	return queued;
}

int fuse_simple_background(struct fuse_mount *fm, struct fuse_args *args,
			   gfp_t gfp_flags)
{
	struct fuse_req *req;

	if (args->force) {
		WARN_ON(!args->nocreds);
		req = fuse_request_alloc(fm, gfp_flags);
		if (!req)
			return -ENOMEM;
		__set_bit(FR_BACKGROUND, &req->flags);
	} else {
		WARN_ON(args->nocreds);
		req = fuse_get_req(fm, true);
		if (IS_ERR(req))
			return PTR_ERR(req);
	}

	fuse_args_to_req(req, args);

	if (!fuse_request_queue_background(req)) {
		fuse_put_request(req);
		return -ENOTCONN;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(fuse_simple_background);

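/*
 * Added note: unlike fuse_simple_request(), this returns as soon as the
 * request is queued on fc->bg_queue; completion is reported through the
 * caller-supplied args->end() callback (see the FR_ASYNC handling in
 * fuse_request_end()) rather than by blocking the submitter.
 */
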
static int fuse_simple_notify_reply(struct fuse_mount *fm,
				    struct fuse_args *args, u64 unique)
{
	struct fuse_req *req;
	struct fuse_iqueue *fiq = &fm->fc->iq;
	int err = 0;

	req = fuse_get_req(fm, false);
	if (IS_ERR(req))
		return PTR_ERR(req);

	__clear_bit(FR_ISREPLY, &req->flags);
	req->in.h.unique = unique;

	fuse_args_to_req(req, args);

	spin_lock(&fiq->lock);
	if (fiq->connected) {
		queue_request_and_unlock(fiq, req);
	} else {
		err = -ENODEV;
		spin_unlock(&fiq->lock);
		fuse_put_request(req);
	}

	return err;
}

/*
 * Lock the request.  Up to the next unlock_request() there mustn't be
 * anything that could cause a page-fault.  If the request was already
 * aborted bail out.
 */
static int lock_request(struct fuse_req *req)
{
	int err = 0;
	if (req) {
		spin_lock(&req->waitq.lock);
		if (test_bit(FR_ABORTED, &req->flags))
			err = -ENOENT;
		else
			set_bit(FR_LOCKED, &req->flags);
		spin_unlock(&req->waitq.lock);
	}
	return err;
}

/*
 * Unlock request.  If it was aborted while locked, caller is responsible
 * for unlocking and ending the request.
 */
static int unlock_request(struct fuse_req *req)
{
	int err = 0;
	if (req) {
		spin_lock(&req->waitq.lock);
		if (test_bit(FR_ABORTED, &req->flags))
			err = -ENOENT;
		else
			clear_bit(FR_LOCKED, &req->flags);
		spin_unlock(&req->waitq.lock);
	}
	return err;
}

struct fuse_copy_state {
	int write;
	struct fuse_req *req;
	struct iov_iter *iter;
	struct pipe_buffer *pipebufs;
	struct pipe_buffer *currbuf;
	struct pipe_inode_info *pipe;
	unsigned long nr_segs;
	struct page *pg;
	unsigned len;
	unsigned offset;
	unsigned move_pages:1;
};

static void fuse_copy_init(struct fuse_copy_state *cs, int write,
			   struct iov_iter *iter)
{
	memset(cs, 0, sizeof(*cs));
	cs->write = write;
	cs->iter = iter;
}

/* Unmap and put previous page of userspace buffer */
static void fuse_copy_finish(struct fuse_copy_state *cs)
{
	if (cs->currbuf) {
		struct pipe_buffer *buf = cs->currbuf;

		if (cs->write)
			buf->len = PAGE_SIZE - cs->len;
		cs->currbuf = NULL;
	} else if (cs->pg) {
		if (cs->write) {
			flush_dcache_page(cs->pg);
			set_page_dirty_lock(cs->pg);
		}
		put_page(cs->pg);
	}
	cs->pg = NULL;
}

/*
 * Get another pagefull of userspace buffer, and map it to kernel
 * address space, and lock request
 */
static int fuse_copy_fill(struct fuse_copy_state *cs)
{
	struct page *page;
	int err;

	err = unlock_request(cs->req);
	if (err)
		return err;

	fuse_copy_finish(cs);
	if (cs->pipebufs) {
		struct pipe_buffer *buf = cs->pipebufs;

		if (!cs->write) {
			err = pipe_buf_confirm(cs->pipe, buf);
			if (err)
				return err;

			BUG_ON(!cs->nr_segs);
			cs->currbuf = buf;
			cs->pg = buf->page;
			cs->offset = buf->offset;
			cs->len = buf->len;
			cs->pipebufs++;
			cs->nr_segs--;
		} else {
			if (cs->nr_segs >= cs->pipe->max_usage)
				return -EIO;

			page = alloc_page(GFP_HIGHUSER);
			if (!page)
				return -ENOMEM;

			buf->page = page;
			buf->offset = 0;
			buf->len = 0;

			cs->currbuf = buf;
			cs->pg = page;
			cs->offset = 0;
			cs->len = PAGE_SIZE;
			cs->pipebufs++;
			cs->nr_segs++;
		}
	} else {
		size_t off;
		err = iov_iter_get_pages2(cs->iter, &page, PAGE_SIZE, 1, &off);
		if (err < 0)
			return err;
		BUG_ON(!err);
		cs->len = err;
		cs->offset = off;
		cs->pg = page;
	}

	return lock_request(cs->req);
}

/* Do as much copy to/from userspace buffer as we can */
static int fuse_copy_do(struct fuse_copy_state *cs, void **val, unsigned *size)
{
	unsigned ncpy = min(*size, cs->len);
	if (val) {
		void *pgaddr = kmap_local_page(cs->pg);
		void *buf = pgaddr + cs->offset;

		if (cs->write)
			memcpy(buf, *val, ncpy);
		else
			memcpy(*val, buf, ncpy);

		kunmap_local(pgaddr);
		*val += ncpy;
	}
	*size -= ncpy;
	cs->len -= ncpy;
	cs->offset += ncpy;
	return ncpy;
}

static int fuse_check_folio(struct folio *folio)
{
	if (folio_mapped(folio) ||
	    folio->mapping != NULL ||
	    (folio->flags & PAGE_FLAGS_CHECK_AT_PREP &
	     ~(1 << PG_locked |
	       1 << PG_referenced |
	       1 << PG_uptodate |
	       1 << PG_lru |
	       1 << PG_active |
	       1 << PG_workingset |
	       1 << PG_reclaim |
	       1 << PG_waiters |
	       LRU_GEN_MASK | LRU_REFS_MASK))) {
		dump_page(&folio->page, "fuse: trying to steal weird page");
		return 1;
	}
	return 0;
}

static int fuse_try_move_page(struct fuse_copy_state *cs, struct page **pagep)
{
	int err;
	struct folio *oldfolio = page_folio(*pagep);
	struct folio *newfolio;
	struct pipe_buffer *buf = cs->pipebufs;

	folio_get(oldfolio);
	err = unlock_request(cs->req);
	if (err)
		goto out_put_old;

	fuse_copy_finish(cs);

	err = pipe_buf_confirm(cs->pipe, buf);
	if (err)
		goto out_put_old;

	BUG_ON(!cs->nr_segs);
	cs->currbuf = buf;
	cs->len = buf->len;
	cs->pipebufs++;
	cs->nr_segs--;

	if (cs->len != PAGE_SIZE)
		goto out_fallback;

	if (!pipe_buf_try_steal(cs->pipe, buf))
		goto out_fallback;

	newfolio = page_folio(buf->page);

	if (!folio_test_uptodate(newfolio))
		folio_mark_uptodate(newfolio);

	folio_clear_mappedtodisk(newfolio);

	if (fuse_check_folio(newfolio) != 0)
		goto out_fallback_unlock;

	/*
	 * This is a new and locked page, it shouldn't be mapped or
	 * have any special flags on it
	 */
	if (WARN_ON(folio_mapped(oldfolio)))
		goto out_fallback_unlock;
	if (WARN_ON(folio_has_private(oldfolio)))
		goto out_fallback_unlock;
	if (WARN_ON(folio_test_dirty(oldfolio) ||
		    folio_test_writeback(oldfolio)))
		goto out_fallback_unlock;
	if (WARN_ON(folio_test_mlocked(oldfolio)))
		goto out_fallback_unlock;

	replace_page_cache_folio(oldfolio, newfolio);

	folio_get(newfolio);

	if (!(buf->flags & PIPE_BUF_FLAG_LRU))
		folio_add_lru(newfolio);

	/*
	 * Release while we have extra ref on stolen page.  Otherwise
	 * anon_pipe_buf_release() might think the page can be reused.
	 */
	pipe_buf_release(cs->pipe, buf);

	err = 0;
	spin_lock(&cs->req->waitq.lock);
	if (test_bit(FR_ABORTED, &cs->req->flags))
		err = -ENOENT;
	else
		*pagep = &newfolio->page;
	spin_unlock(&cs->req->waitq.lock);

	if (err) {
		folio_unlock(newfolio);
		folio_put(newfolio);
		goto out_put_old;
	}

	folio_unlock(oldfolio);
	/* Drop ref for ap->pages[] array */
	folio_put(oldfolio);
	cs->len = 0;

	err = 0;
out_put_old:
	/* Drop ref obtained in this function */
	folio_put(oldfolio);
	return err;

out_fallback_unlock:
	folio_unlock(newfolio);
out_fallback:
	cs->pg = buf->page;
	cs->offset = buf->offset;

	err = lock_request(cs->req);
	if (!err)
		err = 1;

	goto out_put_old;
}

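/*
 * Added note: a return of 1 from fuse_try_move_page() means "could not
 * steal the pipe buffer, fall back to copying" - fuse_copy_page() only
 * treats err <= 0 as final, and the fallback path above has already
 * pointed cs->pg at the pipe buffer so the copy can proceed.
 */
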
static int fuse_ref_page(struct fuse_copy_state *cs, struct page *page,
			 unsigned offset, unsigned count)
{
	struct pipe_buffer *buf;
	int err;

	if (cs->nr_segs >= cs->pipe->max_usage)
		return -EIO;

	get_page(page);
	err = unlock_request(cs->req);
	if (err) {
		put_page(page);
		return err;
	}

	fuse_copy_finish(cs);

	buf = cs->pipebufs;
	buf->page = page;
	buf->offset = offset;
	buf->len = count;

	cs->pipebufs++;
	cs->nr_segs++;
	cs->len = 0;

	return 0;
}

/*
 * Copy a page in the request to/from the userspace buffer.  Must be
 * done atomically
 */
static int fuse_copy_page(struct fuse_copy_state *cs, struct page **pagep,
			  unsigned offset, unsigned count, int zeroing)
{
	int err;
	struct page *page = *pagep;

	if (page && zeroing && count < PAGE_SIZE)
		clear_highpage(page);

	while (count) {
		if (cs->write && cs->pipebufs && page) {
			/*
			 * Can't control lifetime of pipe buffers, so always
			 * copy user pages.
			 */
			if (cs->req->args->user_pages) {
				err = fuse_copy_fill(cs);
				if (err)
					return err;
			} else {
				return fuse_ref_page(cs, page, offset, count);
			}
		} else if (!cs->len) {
			if (cs->move_pages && page &&
			    offset == 0 && count == PAGE_SIZE) {
				err = fuse_try_move_page(cs, pagep);
				if (err <= 0)
					return err;
			} else {
				err = fuse_copy_fill(cs);
				if (err)
					return err;
			}
		}
		if (page) {
			void *mapaddr = kmap_local_page(page);
			void *buf = mapaddr + offset;
			offset += fuse_copy_do(cs, &buf, &count);
			kunmap_local(mapaddr);
		} else
			offset += fuse_copy_do(cs, NULL, &count);
	}
	if (page && !cs->write)
		flush_dcache_page(page);
	return 0;
}

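/*
 * Added summary of the three paths above: splicing a request page into
 * the pipe by reference (fuse_ref_page()), stealing a full pipe buffer in
 * place of the page cache page (fuse_try_move_page(), used on the write
 * path when cs->move_pages is set), or a plain kmap-and-memcpy through
 * fuse_copy_do().
 */
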
/* Copy pages in the request to/from userspace buffer */
static int fuse_copy_pages(struct fuse_copy_state *cs, unsigned nbytes,
			   int zeroing)
{
	unsigned i;
	struct fuse_req *req = cs->req;
	struct fuse_args_pages *ap = container_of(req->args, typeof(*ap), args);

	for (i = 0; i < ap->num_pages && (nbytes || zeroing); i++) {
		int err;
		unsigned int offset = ap->descs[i].offset;
		unsigned int count = min(nbytes, ap->descs[i].length);

		err = fuse_copy_page(cs, &ap->pages[i], offset, count, zeroing);
		if (err)
			return err;

		nbytes -= count;
	}
	return 0;
}

997
998/* Copy a single argument in the request to/from userspace buffer */
999static int fuse_copy_one(struct fuse_copy_state *cs, void *val, unsigned size)
1000{
1001 while (size) {
1729a16c
MS
1002 if (!cs->len) {
1003 int err = fuse_copy_fill(cs);
1004 if (err)
1005 return err;
1006 }
334f485d
MS
1007 fuse_copy_do(cs, &val, &size);
1008 }
1009 return 0;
1010}
1011
/* Copy request arguments to/from userspace buffer */
static int fuse_copy_args(struct fuse_copy_state *cs, unsigned numargs,
			  unsigned argpages, struct fuse_arg *args,
			  int zeroing)
{
	int err = 0;
	unsigned i;

	for (i = 0; !err && i < numargs; i++) {
		struct fuse_arg *arg = &args[i];
		if (i == numargs - 1 && argpages)
			err = fuse_copy_pages(cs, arg->size, zeroing);
		else
			err = fuse_copy_one(cs, arg->value, arg->size);
	}
	return err;
}

static int forget_pending(struct fuse_iqueue *fiq)
{
	return fiq->forget_list_head.next != NULL;
}

static int request_pending(struct fuse_iqueue *fiq)
{
	return !list_empty(&fiq->pending) || !list_empty(&fiq->interrupts) ||
		forget_pending(fiq);
}

/*
 * Transfer an interrupt request to userspace
 *
 * Unlike other requests this is assembled on demand, without a need
 * to allocate a separate fuse_req structure.
 *
 * Called with fiq->lock held, releases it
 */
static int fuse_read_interrupt(struct fuse_iqueue *fiq,
			       struct fuse_copy_state *cs,
			       size_t nbytes, struct fuse_req *req)
__releases(fiq->lock)
{
	struct fuse_in_header ih;
	struct fuse_interrupt_in arg;
	unsigned reqsize = sizeof(ih) + sizeof(arg);
	int err;

	list_del_init(&req->intr_entry);
	memset(&ih, 0, sizeof(ih));
	memset(&arg, 0, sizeof(arg));
	ih.len = reqsize;
	ih.opcode = FUSE_INTERRUPT;
	ih.unique = (req->in.h.unique | FUSE_INT_REQ_BIT);
	arg.unique = req->in.h.unique;

	spin_unlock(&fiq->lock);
	if (nbytes < reqsize)
		return -EINVAL;

	err = fuse_copy_one(cs, &ih, sizeof(ih));
	if (!err)
		err = fuse_copy_one(cs, &arg, sizeof(arg));
	fuse_copy_finish(cs);

	return err ? err : reqsize;
}

struct fuse_forget_link *fuse_dequeue_forget(struct fuse_iqueue *fiq,
					     unsigned int max,
					     unsigned int *countp)
{
	struct fuse_forget_link *head = fiq->forget_list_head.next;
	struct fuse_forget_link **newhead = &head;
	unsigned count;

	for (count = 0; *newhead != NULL && count < max; count++)
		newhead = &(*newhead)->next;

	fiq->forget_list_head.next = *newhead;
	*newhead = NULL;
	if (fiq->forget_list_head.next == NULL)
		fiq->forget_list_tail = &fiq->forget_list_head;

	if (countp != NULL)
		*countp = count;

	return head;
}
EXPORT_SYMBOL(fuse_dequeue_forget);

static int fuse_read_single_forget(struct fuse_iqueue *fiq,
				   struct fuse_copy_state *cs,
				   size_t nbytes)
__releases(fiq->lock)
{
	int err;
	struct fuse_forget_link *forget = fuse_dequeue_forget(fiq, 1, NULL);
	struct fuse_forget_in arg = {
		.nlookup = forget->forget_one.nlookup,
	};
	struct fuse_in_header ih = {
		.opcode = FUSE_FORGET,
		.nodeid = forget->forget_one.nodeid,
		.unique = fuse_get_unique(fiq),
		.len = sizeof(ih) + sizeof(arg),
	};

	spin_unlock(&fiq->lock);
	kfree(forget);
	if (nbytes < ih.len)
		return -EINVAL;

	err = fuse_copy_one(cs, &ih, sizeof(ih));
	if (!err)
		err = fuse_copy_one(cs, &arg, sizeof(arg));
	fuse_copy_finish(cs);

	if (err)
		return err;

	return ih.len;
}

static int fuse_read_batch_forget(struct fuse_iqueue *fiq,
				  struct fuse_copy_state *cs, size_t nbytes)
__releases(fiq->lock)
{
	int err;
	unsigned max_forgets;
	unsigned count;
	struct fuse_forget_link *head;
	struct fuse_batch_forget_in arg = { .count = 0 };
	struct fuse_in_header ih = {
		.opcode = FUSE_BATCH_FORGET,
		.unique = fuse_get_unique(fiq),
		.len = sizeof(ih) + sizeof(arg),
	};

	if (nbytes < ih.len) {
		spin_unlock(&fiq->lock);
		return -EINVAL;
	}

	max_forgets = (nbytes - ih.len) / sizeof(struct fuse_forget_one);
	head = fuse_dequeue_forget(fiq, max_forgets, &count);
	spin_unlock(&fiq->lock);

	arg.count = count;
	ih.len += count * sizeof(struct fuse_forget_one);
	err = fuse_copy_one(cs, &ih, sizeof(ih));
	if (!err)
		err = fuse_copy_one(cs, &arg, sizeof(arg));

	while (head) {
		struct fuse_forget_link *forget = head;

		if (!err) {
			err = fuse_copy_one(cs, &forget->forget_one,
					    sizeof(forget->forget_one));
		}
		head = forget->next;
		kfree(forget);
	}

	fuse_copy_finish(cs);

	if (err)
		return err;

	return ih.len;
}

static int fuse_read_forget(struct fuse_conn *fc, struct fuse_iqueue *fiq,
			    struct fuse_copy_state *cs,
			    size_t nbytes)
__releases(fiq->lock)
{
	if (fc->minor < 16 || fiq->forget_list_head.next->next == NULL)
		return fuse_read_single_forget(fiq, cs, nbytes);
	else
		return fuse_read_batch_forget(fiq, cs, nbytes);
}

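/*
 * Added note: protocol minor versions before 16 only understand the
 * one-per-message FUSE_FORGET, so the FUSE_BATCH_FORGET path above is
 * taken only when the connection is new enough and more than one forget
 * is queued.
 */
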
/*
 * Read a single request into the userspace filesystem's buffer.  This
 * function waits until a request is available, then removes it from
 * the pending list and copies request data to userspace buffer.  If
 * no reply is needed (FORGET) or request has been aborted or there
 * was an error during the copying then it's finished by calling
 * fuse_request_end().  Otherwise add it to the processing list, and set
 * the 'sent' flag.
 */
static ssize_t fuse_dev_do_read(struct fuse_dev *fud, struct file *file,
				struct fuse_copy_state *cs, size_t nbytes)
{
	ssize_t err;
	struct fuse_conn *fc = fud->fc;
	struct fuse_iqueue *fiq = &fc->iq;
	struct fuse_pqueue *fpq = &fud->pq;
	struct fuse_req *req;
	struct fuse_args *args;
	unsigned reqsize;
	unsigned int hash;

	/*
	 * Require sane minimum read buffer - that has capacity for fixed part
	 * of any request header + negotiated max_write room for data.
	 *
	 * Historically libfuse reserves 4K for fixed header room, but e.g.
	 * GlusterFS reserves only 80 bytes
	 *
	 *	= `sizeof(fuse_in_header) + sizeof(fuse_write_in)`
	 *
	 * which is the absolute minimum any sane filesystem should be using
	 * for header room.
	 */
	if (nbytes < max_t(size_t, FUSE_MIN_READ_BUFFER,
			   sizeof(struct fuse_in_header) +
			   sizeof(struct fuse_write_in) +
			   fc->max_write))
		return -EINVAL;

 restart:
	for (;;) {
		spin_lock(&fiq->lock);
		if (!fiq->connected || request_pending(fiq))
			break;
		spin_unlock(&fiq->lock);

		if (file->f_flags & O_NONBLOCK)
			return -EAGAIN;
		err = wait_event_interruptible_exclusive(fiq->waitq,
				!fiq->connected || request_pending(fiq));
		if (err)
			return err;
	}

	if (!fiq->connected) {
		err = fc->aborted ? -ECONNABORTED : -ENODEV;
		goto err_unlock;
	}

	if (!list_empty(&fiq->interrupts)) {
		req = list_entry(fiq->interrupts.next, struct fuse_req,
				 intr_entry);
		return fuse_read_interrupt(fiq, cs, nbytes, req);
	}

	if (forget_pending(fiq)) {
		if (list_empty(&fiq->pending) || fiq->forget_batch-- > 0)
			return fuse_read_forget(fc, fiq, cs, nbytes);

		if (fiq->forget_batch <= -8)
			fiq->forget_batch = 16;
	}

	req = list_entry(fiq->pending.next, struct fuse_req, list);
	clear_bit(FR_PENDING, &req->flags);
	list_del_init(&req->list);
	spin_unlock(&fiq->lock);

	args = req->args;
	reqsize = req->in.h.len;

	/* If request is too large, reply with an error and restart the read */
	if (nbytes < reqsize) {
		req->out.h.error = -EIO;
		/* SETXATTR is special, since it may contain too large data */
		if (args->opcode == FUSE_SETXATTR)
			req->out.h.error = -E2BIG;
		fuse_request_end(req);
		goto restart;
	}
	spin_lock(&fpq->lock);
	/*
	 * Must not put request on fpq->io queue after having been shut down by
	 * fuse_abort_conn()
	 */
	if (!fpq->connected) {
		req->out.h.error = err = -ECONNABORTED;
		goto out_end;
	}
	list_add(&req->list, &fpq->io);
	spin_unlock(&fpq->lock);
	cs->req = req;
	err = fuse_copy_one(cs, &req->in.h, sizeof(req->in.h));
	if (!err)
		err = fuse_copy_args(cs, args->in_numargs, args->in_pages,
				     (struct fuse_arg *) args->in_args, 0);
	fuse_copy_finish(cs);
	spin_lock(&fpq->lock);
	clear_bit(FR_LOCKED, &req->flags);
	if (!fpq->connected) {
		err = fc->aborted ? -ECONNABORTED : -ENODEV;
		goto out_end;
	}
	if (err) {
		req->out.h.error = -EIO;
		goto out_end;
	}
	if (!test_bit(FR_ISREPLY, &req->flags)) {
		err = reqsize;
		goto out_end;
	}
	hash = fuse_req_hash(req->in.h.unique);
	list_move_tail(&req->list, &fpq->processing[hash]);
	__fuse_get_request(req);
	set_bit(FR_SENT, &req->flags);
	spin_unlock(&fpq->lock);
	/* matches barrier in request_wait_answer() */
	smp_mb__after_atomic();
	if (test_bit(FR_INTERRUPTED, &req->flags))
		queue_interrupt(req);
	fuse_put_request(req);

	return reqsize;

out_end:
	if (!test_bit(FR_PRIVATE, &req->flags))
		list_del_init(&req->list);
	spin_unlock(&fpq->lock);
	fuse_request_end(req);
	return err;

 err_unlock:
	spin_unlock(&fiq->lock);
	return err;
}

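/*
 * Added sketch of what a successful fuse_dev_do_read() hands to the
 * daemon in a single read()/splice(), mirroring the fuse_copy_one() +
 * fuse_copy_args() calls above:
 *
 *	struct fuse_in_header	(len, opcode, unique, nodeid, uid/gid/pid)
 *	in_args[0..n-1]		opcode-specific structs; for requests with
 *				in_pages set, page data forms the last arg
 */
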
static int fuse_dev_open(struct inode *inode, struct file *file)
{
	/*
	 * The fuse device's file's private_data is used to hold
	 * the fuse_conn(ection) when it is mounted, and is used to
	 * keep track of whether the file has been mounted already.
	 */
	file->private_data = NULL;
	return 0;
}

static ssize_t fuse_dev_read(struct kiocb *iocb, struct iov_iter *to)
{
	struct fuse_copy_state cs;
	struct file *file = iocb->ki_filp;
	struct fuse_dev *fud = fuse_get_dev(file);

	if (!fud)
		return -EPERM;

	if (!user_backed_iter(to))
		return -EINVAL;

	fuse_copy_init(&cs, 1, to);

	return fuse_dev_do_read(fud, file, &cs, iov_iter_count(to));
}

static ssize_t fuse_dev_splice_read(struct file *in, loff_t *ppos,
				    struct pipe_inode_info *pipe,
				    size_t len, unsigned int flags)
{
	int total, ret;
	int page_nr = 0;
	struct pipe_buffer *bufs;
	struct fuse_copy_state cs;
	struct fuse_dev *fud = fuse_get_dev(in);

	if (!fud)
		return -EPERM;

	bufs = kvmalloc_array(pipe->max_usage, sizeof(struct pipe_buffer),
			      GFP_KERNEL);
	if (!bufs)
		return -ENOMEM;

	fuse_copy_init(&cs, 1, NULL);
	cs.pipebufs = bufs;
	cs.pipe = pipe;
	ret = fuse_dev_do_read(fud, in, &cs, len);
	if (ret < 0)
		goto out;

	if (pipe_occupancy(pipe->head, pipe->tail) + cs.nr_segs > pipe->max_usage) {
		ret = -EIO;
		goto out;
	}

	for (ret = total = 0; page_nr < cs.nr_segs; total += ret) {
		/*
		 * Need to be careful about this. Having buf->ops in module
		 * code can Oops if the buffer persists after module unload.
		 */
		bufs[page_nr].ops = &nosteal_pipe_buf_ops;
		bufs[page_nr].flags = 0;
		ret = add_to_pipe(pipe, &bufs[page_nr++]);
		if (unlikely(ret < 0))
			break;
	}
	if (total)
		ret = total;
out:
	for (; page_nr < cs.nr_segs; page_nr++)
		put_page(bufs[page_nr].page);

	kvfree(bufs);
	return ret;
}

static int fuse_notify_poll(struct fuse_conn *fc, unsigned int size,
			    struct fuse_copy_state *cs)
{
	struct fuse_notify_poll_wakeup_out outarg;
	int err = -EINVAL;

	if (size != sizeof(outarg))
		goto err;

	err = fuse_copy_one(cs, &outarg, sizeof(outarg));
	if (err)
		goto err;

	fuse_copy_finish(cs);
	return fuse_notify_poll_wakeup(fc, &outarg);

err:
	fuse_copy_finish(cs);
	return err;
}

static int fuse_notify_inval_inode(struct fuse_conn *fc, unsigned int size,
				   struct fuse_copy_state *cs)
{
	struct fuse_notify_inval_inode_out outarg;
	int err = -EINVAL;

	if (size != sizeof(outarg))
		goto err;

	err = fuse_copy_one(cs, &outarg, sizeof(outarg));
	if (err)
		goto err;
	fuse_copy_finish(cs);

	down_read(&fc->killsb);
	err = fuse_reverse_inval_inode(fc, outarg.ino,
				       outarg.off, outarg.len);
	up_read(&fc->killsb);
	return err;

err:
	fuse_copy_finish(cs);
	return err;
}

static int fuse_notify_inval_entry(struct fuse_conn *fc, unsigned int size,
				   struct fuse_copy_state *cs)
{
	struct fuse_notify_inval_entry_out outarg;
	int err = -ENOMEM;
	char *buf;
	struct qstr name;

	buf = kzalloc(FUSE_NAME_MAX + 1, GFP_KERNEL);
	if (!buf)
		goto err;

	err = -EINVAL;
	if (size < sizeof(outarg))
		goto err;

	err = fuse_copy_one(cs, &outarg, sizeof(outarg));
	if (err)
		goto err;

	err = -ENAMETOOLONG;
	if (outarg.namelen > FUSE_NAME_MAX)
		goto err;

	err = -EINVAL;
	if (size != sizeof(outarg) + outarg.namelen + 1)
		goto err;

	name.name = buf;
	name.len = outarg.namelen;
	err = fuse_copy_one(cs, buf, outarg.namelen + 1);
	if (err)
		goto err;
	fuse_copy_finish(cs);
	buf[outarg.namelen] = 0;

	down_read(&fc->killsb);
	err = fuse_reverse_inval_entry(fc, outarg.parent, 0, &name, outarg.flags);
	up_read(&fc->killsb);
	kfree(buf);
	return err;

err:
	kfree(buf);
	fuse_copy_finish(cs);
	return err;
}

static int fuse_notify_delete(struct fuse_conn *fc, unsigned int size,
			      struct fuse_copy_state *cs)
{
	struct fuse_notify_delete_out outarg;
	int err = -ENOMEM;
	char *buf;
	struct qstr name;

	buf = kzalloc(FUSE_NAME_MAX + 1, GFP_KERNEL);
	if (!buf)
		goto err;

	err = -EINVAL;
	if (size < sizeof(outarg))
		goto err;

	err = fuse_copy_one(cs, &outarg, sizeof(outarg));
	if (err)
		goto err;

	err = -ENAMETOOLONG;
	if (outarg.namelen > FUSE_NAME_MAX)
		goto err;

	err = -EINVAL;
	if (size != sizeof(outarg) + outarg.namelen + 1)
		goto err;

	name.name = buf;
	name.len = outarg.namelen;
	err = fuse_copy_one(cs, buf, outarg.namelen + 1);
	if (err)
		goto err;
	fuse_copy_finish(cs);
	buf[outarg.namelen] = 0;

	down_read(&fc->killsb);
	err = fuse_reverse_inval_entry(fc, outarg.parent, outarg.child, &name, 0);
	up_read(&fc->killsb);
	kfree(buf);
	return err;

err:
	kfree(buf);
	fuse_copy_finish(cs);
	return err;
}

static int fuse_notify_store(struct fuse_conn *fc, unsigned int size,
			     struct fuse_copy_state *cs)
{
	struct fuse_notify_store_out outarg;
	struct inode *inode;
	struct address_space *mapping;
	u64 nodeid;
	int err;
	pgoff_t index;
	unsigned int offset;
	unsigned int num;
	loff_t file_size;
	loff_t end;

	err = -EINVAL;
	if (size < sizeof(outarg))
		goto out_finish;

	err = fuse_copy_one(cs, &outarg, sizeof(outarg));
	if (err)
		goto out_finish;

	err = -EINVAL;
	if (size - sizeof(outarg) != outarg.size)
		goto out_finish;

	nodeid = outarg.nodeid;

	down_read(&fc->killsb);

	err = -ENOENT;
	inode = fuse_ilookup(fc, nodeid, NULL);
	if (!inode)
		goto out_up_killsb;

	mapping = inode->i_mapping;
	index = outarg.offset >> PAGE_SHIFT;
	offset = outarg.offset & ~PAGE_MASK;
	file_size = i_size_read(inode);
	end = outarg.offset + outarg.size;
	if (end > file_size) {
		file_size = end;
		fuse_write_update_attr(inode, file_size, outarg.size);
	}

	num = outarg.size;
	while (num) {
		struct page *page;
		unsigned int this_num;

		err = -ENOMEM;
		page = find_or_create_page(mapping, index,
					   mapping_gfp_mask(mapping));
		if (!page)
			goto out_iput;

		this_num = min_t(unsigned, num, PAGE_SIZE - offset);
		err = fuse_copy_page(cs, &page, offset, this_num, 0);
		if (!err && offset == 0 &&
		    (this_num == PAGE_SIZE || file_size == end))
			SetPageUptodate(page);
		unlock_page(page);
		put_page(page);

		if (err)
			goto out_iput;

		num -= this_num;
		offset = 0;
		index++;
	}

	err = 0;

out_iput:
	iput(inode);
out_up_killsb:
	up_read(&fc->killsb);
out_finish:
	fuse_copy_finish(cs);
	return err;
}

struct fuse_retrieve_args {
	struct fuse_args_pages ap;
	struct fuse_notify_retrieve_in inarg;
};

static void fuse_retrieve_end(struct fuse_mount *fm, struct fuse_args *args,
			      int error)
{
	struct fuse_retrieve_args *ra =
		container_of(args, typeof(*ra), ap.args);

	release_pages(ra->ap.pages, ra->ap.num_pages);
	kfree(ra);
}

static int fuse_retrieve(struct fuse_mount *fm, struct inode *inode,
			 struct fuse_notify_retrieve_out *outarg)
{
	int err;
	struct address_space *mapping = inode->i_mapping;
	pgoff_t index;
	loff_t file_size;
	unsigned int num;
	unsigned int offset;
	size_t total_len = 0;
	unsigned int num_pages;
	struct fuse_conn *fc = fm->fc;
	struct fuse_retrieve_args *ra;
	size_t args_size = sizeof(*ra);
	struct fuse_args_pages *ap;
	struct fuse_args *args;

	offset = outarg->offset & ~PAGE_MASK;
	file_size = i_size_read(inode);

	num = min(outarg->size, fc->max_write);
	if (outarg->offset > file_size)
		num = 0;
	else if (outarg->offset + num > file_size)
		num = file_size - outarg->offset;

	num_pages = (num + offset + PAGE_SIZE - 1) >> PAGE_SHIFT;
	num_pages = min(num_pages, fc->max_pages);

	args_size += num_pages * (sizeof(ap->pages[0]) + sizeof(ap->descs[0]));

	ra = kzalloc(args_size, GFP_KERNEL);
	if (!ra)
		return -ENOMEM;

	ap = &ra->ap;
	ap->pages = (void *) (ra + 1);
	ap->descs = (void *) (ap->pages + num_pages);

	args = &ap->args;
	args->nodeid = outarg->nodeid;
	args->opcode = FUSE_NOTIFY_REPLY;
	args->in_numargs = 2;
	args->in_pages = true;
	args->end = fuse_retrieve_end;

	index = outarg->offset >> PAGE_SHIFT;

	while (num && ap->num_pages < num_pages) {
		struct page *page;
		unsigned int this_num;

		page = find_get_page(mapping, index);
		if (!page)
			break;

		this_num = min_t(unsigned, num, PAGE_SIZE - offset);
		ap->pages[ap->num_pages] = page;
		ap->descs[ap->num_pages].offset = offset;
		ap->descs[ap->num_pages].length = this_num;
		ap->num_pages++;

		offset = 0;
		num -= this_num;
		total_len += this_num;
		index++;
	}
	ra->inarg.offset = outarg->offset;
	ra->inarg.size = total_len;
	args->in_args[0].size = sizeof(ra->inarg);
	args->in_args[0].value = &ra->inarg;
	args->in_args[1].size = total_len;

	err = fuse_simple_notify_reply(fm, args, outarg->notify_unique);
	if (err)
		fuse_retrieve_end(fm, args, err);

	return err;
}

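/*
 * Added note: FUSE_NOTIFY_RETRIEVE is the daemon asking for cached data
 * back.  The kernel answers with a FUSE_NOTIFY_REPLY request that echoes
 * outarg->notify_unique so the daemon can match it, carrying at most
 * fc->max_write bytes / fc->max_pages pages from the page cache.
 */
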
static int fuse_notify_retrieve(struct fuse_conn *fc, unsigned int size,
				struct fuse_copy_state *cs)
{
	struct fuse_notify_retrieve_out outarg;
	struct fuse_mount *fm;
	struct inode *inode;
	u64 nodeid;
	int err;

	err = -EINVAL;
	if (size != sizeof(outarg))
		goto copy_finish;

	err = fuse_copy_one(cs, &outarg, sizeof(outarg));
	if (err)
		goto copy_finish;

	fuse_copy_finish(cs);

	down_read(&fc->killsb);
	err = -ENOENT;
	nodeid = outarg.nodeid;

	inode = fuse_ilookup(fc, nodeid, &fm);
	if (inode) {
		err = fuse_retrieve(fm, inode, &outarg);
		iput(inode);
	}
	up_read(&fc->killsb);

	return err;

copy_finish:
	fuse_copy_finish(cs);
	return err;
}

static int fuse_notify(struct fuse_conn *fc, enum fuse_notify_code code,
		       unsigned int size, struct fuse_copy_state *cs)
{
	/* Don't try to move pages (yet) */
	cs->move_pages = 0;

	switch (code) {
	case FUSE_NOTIFY_POLL:
		return fuse_notify_poll(fc, size, cs);

	case FUSE_NOTIFY_INVAL_INODE:
		return fuse_notify_inval_inode(fc, size, cs);

	case FUSE_NOTIFY_INVAL_ENTRY:
		return fuse_notify_inval_entry(fc, size, cs);

	case FUSE_NOTIFY_STORE:
		return fuse_notify_store(fc, size, cs);

	case FUSE_NOTIFY_RETRIEVE:
		return fuse_notify_retrieve(fc, size, cs);

	case FUSE_NOTIFY_DELETE:
		return fuse_notify_delete(fc, size, cs);

	default:
		fuse_copy_finish(cs);
		return -EINVAL;
	}
}

/* Look up request on processing list by unique ID */
static struct fuse_req *request_find(struct fuse_pqueue *fpq, u64 unique)
{
	unsigned int hash = fuse_req_hash(unique);
	struct fuse_req *req;

	list_for_each_entry(req, &fpq->processing[hash], list) {
		if (req->in.h.unique == unique)
			return req;
	}
	return NULL;
}

static int copy_out_args(struct fuse_copy_state *cs, struct fuse_args *args,
			 unsigned nbytes)
{
	unsigned reqsize = sizeof(struct fuse_out_header);

	reqsize += fuse_len_args(args->out_numargs, args->out_args);

	if (reqsize < nbytes || (reqsize > nbytes && !args->out_argvar))
		return -EINVAL;
	else if (reqsize > nbytes) {
		struct fuse_arg *lastarg = &args->out_args[args->out_numargs-1];
		unsigned diffsize = reqsize - nbytes;

		if (diffsize > lastarg->size)
			return -EINVAL;
		lastarg->size -= diffsize;
	}
	return fuse_copy_args(cs, args->out_numargs, args->out_pages,
			      args->out_args, args->page_zeroing);
}

1843/*
1844 * Write a single reply to a request. First the header is copied from
1845 * the write buffer. The request is then searched on the processing
1846 * list by the unique ID found in the header. If found, then remove
1847 * it from the list and copy the rest of the buffer to the request.
04ec5af0 1848 * The request is finished by calling fuse_request_end().
334f485d 1849 */
c3696046 1850static ssize_t fuse_dev_do_write(struct fuse_dev *fud,
dd3bb14f 1851 struct fuse_copy_state *cs, size_t nbytes)
334f485d
MS
1852{
1853 int err;
c3696046
MS
1854 struct fuse_conn *fc = fud->fc;
1855 struct fuse_pqueue *fpq = &fud->pq;
334f485d
MS
1856 struct fuse_req *req;
1857 struct fuse_out_header oh;
334f485d 1858
7407a10d 1859 err = -EINVAL;
334f485d 1860 if (nbytes < sizeof(struct fuse_out_header))
7407a10d 1861 goto out;
334f485d 1862
dd3bb14f 1863 err = fuse_copy_one(cs, &oh, sizeof(oh));
334f485d 1864 if (err)
7407a10d 1865 goto copy_finish;
8599396b
TH
1866
1867 err = -EINVAL;
1868 if (oh.len != nbytes)
7407a10d 1869 goto copy_finish;
8599396b
TH
1870
1871 /*
1872 * Zero oh.unique indicates unsolicited notification message
1873 * and error contains notification code.
1874 */
1875 if (!oh.unique) {
dd3bb14f 1876 err = fuse_notify(fc, oh.error, nbytes - sizeof(oh), cs);
7407a10d 1877 goto out;
8599396b
TH
1878 }
1879
334f485d 1880 err = -EINVAL;
49221cf8 1881 if (oh.error <= -512 || oh.error > 0)
7407a10d 1882 goto copy_finish;
334f485d 1883
45a91cb1 1884 spin_lock(&fpq->lock);
7407a10d
KT
1885 req = NULL;
1886 if (fpq->connected)
1887 req = request_find(fpq, oh.unique & ~FUSE_INT_REQ_BIT);
69a53bf2 1888
7407a10d
KT
1889 err = -ENOENT;
1890 if (!req) {
1891 spin_unlock(&fpq->lock);
1892 goto copy_finish;
1893 }
334f485d 1894
3a5358d1
KT
1895 /* Is it an interrupt reply ID? */
1896 if (oh.unique & FUSE_INT_REQ_BIT) {
d2d2d4fb 1897 __fuse_get_request(req);
45a91cb1
MS
1898 spin_unlock(&fpq->lock);
1899
7407a10d
KT
1900 err = 0;
1901 if (nbytes != sizeof(struct fuse_out_header))
1902 err = -EINVAL;
1903 else if (oh.error == -ENOSYS)
a4d27e75
MS
1904 fc->no_interrupt = 1;
1905 else if (oh.error == -EAGAIN)
8f622e94 1906 err = queue_interrupt(req);
7407a10d 1907
8f622e94 1908 fuse_put_request(req);
a4d27e75 1909
7407a10d 1910 goto copy_finish;
a4d27e75
MS
1911 }
1912
33e14b4d 1913 clear_bit(FR_SENT, &req->flags);
3a2b5b9c 1914 list_move(&req->list, &fpq->io);
334f485d 1915 req->out.h = oh;
825d6d33 1916 set_bit(FR_LOCKED, &req->flags);
45a91cb1 1917 spin_unlock(&fpq->lock);
dd3bb14f 1918 cs->req = req;
d4993774 1919 if (!req->args->page_replace)
ce534fb0 1920 cs->move_pages = 0;
334f485d 1921
d4993774
MS
1922 if (oh.error)
1923 err = nbytes != sizeof(oh) ? -EINVAL : 0;
1924 else
1925 err = copy_out_args(cs, req->args, nbytes);
dd3bb14f 1926 fuse_copy_finish(cs);
334f485d 1927
45a91cb1 1928 spin_lock(&fpq->lock);
825d6d33 1929 clear_bit(FR_LOCKED, &req->flags);
e96edd94 1930 if (!fpq->connected)
0d8e84b0
MS
1931 err = -ENOENT;
1932 else if (err)
334f485d 1933 req->out.h.error = -EIO;
77cd9d48
MS
1934 if (!test_bit(FR_PRIVATE, &req->flags))
1935 list_del_init(&req->list);
45a91cb1 1936 spin_unlock(&fpq->lock);
46c34a34 1937
8f622e94 1938 fuse_request_end(req);
7407a10d 1939out:
334f485d
MS
1940 return err ? err : nbytes;
1941
7407a10d 1942copy_finish:
dd3bb14f 1943 fuse_copy_finish(cs);
7407a10d 1944 goto out;
334f485d
MS
1945}
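
/*
 * Example (illustrative only, not part of this file): the userspace side
 * of the reply path handled by fuse_dev_do_write() above.  A minimal
 * sketch, compile-guarded so it cannot affect this file; the helper name
 * and the choice of fuse_attr_out as payload are assumptions made for
 * illustration.
 */
#if 0
#include <linux/fuse.h>
#include <sys/uio.h>
#include <unistd.h>
#include <stdint.h>

static int reply_attr(int devfd, uint64_t unique,
		      const struct fuse_attr_out *arg)
{
	struct fuse_out_header oh = {
		.len = sizeof(oh) + sizeof(*arg),
		.error = 0,		/* zero or a negative errno */
		.unique = unique,	/* echoes the request; see request_find() */
	};
	struct iovec iov[2] = {
		{ .iov_base = &oh,         .iov_len = sizeof(oh)   },
		{ .iov_base = (void *)arg, .iov_len = sizeof(*arg) },
	};

	/* oh.len must equal the number of bytes written, or the kernel
	 * rejects the reply with -EINVAL */
	return writev(devfd, iov, 2) < 0 ? -1 : 0;
}
#endif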

static ssize_t fuse_dev_write(struct kiocb *iocb, struct iov_iter *from)
{
	struct fuse_copy_state cs;
	struct fuse_dev *fud = fuse_get_dev(iocb->ki_filp);

	if (!fud)
		return -EPERM;

	if (!user_backed_iter(from))
		return -EINVAL;

	fuse_copy_init(&cs, 0, from);

	return fuse_dev_do_write(fud, &cs, iov_iter_count(from));
}

static ssize_t fuse_dev_splice_write(struct pipe_inode_info *pipe,
				     struct file *out, loff_t *ppos,
				     size_t len, unsigned int flags)
{
	unsigned int head, tail, mask, count;
	unsigned nbuf;
	unsigned idx;
	struct pipe_buffer *bufs;
	struct fuse_copy_state cs;
	struct fuse_dev *fud;
	size_t rem;
	ssize_t ret;

	fud = fuse_get_dev(out);
	if (!fud)
		return -EPERM;

	pipe_lock(pipe);

	head = pipe->head;
	tail = pipe->tail;
	mask = pipe->ring_size - 1;
	count = head - tail;

	bufs = kvmalloc_array(count, sizeof(struct pipe_buffer), GFP_KERNEL);
	if (!bufs) {
		pipe_unlock(pipe);
		return -ENOMEM;
	}

	nbuf = 0;
	rem = 0;
	for (idx = tail; idx != head && rem < len; idx++)
		rem += pipe->bufs[idx & mask].len;

	ret = -EINVAL;
	if (rem < len)
		goto out_free;

	rem = len;
	while (rem) {
		struct pipe_buffer *ibuf;
		struct pipe_buffer *obuf;

		if (WARN_ON(nbuf >= count || tail == head))
			goto out_free;

		ibuf = &pipe->bufs[tail & mask];
		obuf = &bufs[nbuf];

		if (rem >= ibuf->len) {
			*obuf = *ibuf;
			ibuf->ops = NULL;
			tail++;
			pipe->tail = tail;
		} else {
			if (!pipe_buf_get(pipe, ibuf))
				goto out_free;

			*obuf = *ibuf;
			obuf->flags &= ~PIPE_BUF_FLAG_GIFT;
			obuf->len = rem;
			ibuf->offset += obuf->len;
			ibuf->len -= obuf->len;
		}
		nbuf++;
		rem -= obuf->len;
	}
	pipe_unlock(pipe);

	fuse_copy_init(&cs, 0, NULL);
	cs.pipebufs = bufs;
	cs.nr_segs = nbuf;
	cs.pipe = pipe;

	if (flags & SPLICE_F_MOVE)
		cs.move_pages = 1;

	ret = fuse_dev_do_write(fud, &cs, len);

	pipe_lock(pipe);
out_free:
	for (idx = 0; idx < nbuf; idx++) {
		struct pipe_buffer *buf = &bufs[idx];

		if (buf->ops)
			pipe_buf_release(pipe, buf);
	}
	pipe_unlock(pipe);

	kvfree(bufs);
	return ret;
}

static __poll_t fuse_dev_poll(struct file *file, poll_table *wait)
{
	__poll_t mask = EPOLLOUT | EPOLLWRNORM;
	struct fuse_iqueue *fiq;
	struct fuse_dev *fud = fuse_get_dev(file);

	if (!fud)
		return EPOLLERR;

	fiq = &fud->fc->iq;
	poll_wait(file, &fiq->waitq, wait);

	spin_lock(&fiq->lock);
	if (!fiq->connected)
		mask = EPOLLERR;
	else if (request_pending(fiq))
		mask |= EPOLLIN | EPOLLRDNORM;
	spin_unlock(&fiq->lock);

	return mask;
}
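
/*
 * Example (illustrative only, not part of this file): a daemon's event
 * loop can poll the device fd instead of blocking in read(); EPOLLIN
 * above surfaces as POLLIN in userspace.  A minimal sketch,
 * compile-guarded so it cannot affect this file; the helper name is an
 * assumption made for illustration.
 */
#if 0
#include <poll.h>

static int wait_for_request(int devfd, int timeout_ms)
{
	struct pollfd pfd = { .fd = devfd, .events = POLLIN };

	/* >0 means a request is pending, 0 is a timeout, <0 an error */
	return poll(&pfd, 1, timeout_ms);
}
#endif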

/* Abort all requests on the given list (pending or processing) */
static void end_requests(struct list_head *head)
{
	while (!list_empty(head)) {
		struct fuse_req *req;
		req = list_entry(head->next, struct fuse_req, list);
		req->out.h.error = -ECONNABORTED;
		clear_bit(FR_SENT, &req->flags);
		list_del_init(&req->list);
		fuse_request_end(req);
	}
}

static void end_polls(struct fuse_conn *fc)
{
	struct rb_node *p;

	p = rb_first(&fc->polled_files);

	while (p) {
		struct fuse_file *ff;
		ff = rb_entry(p, struct fuse_file, polled_node);
		wake_up_interruptible_all(&ff->poll_wait);

		p = rb_next(p);
	}
}

/*
 * Abort all requests.
 *
 * Emergency exit in case of a malicious or accidental deadlock, or just a hung
 * filesystem.
 *
 * The same effect is usually achievable by killing the filesystem daemon and
 * all users of the filesystem.  The exception is the combination of an
 * asynchronous request and the tricky deadlock (see
 * Documentation/filesystems/fuse.rst).
 *
 * Aborting requests under I/O goes as follows: 1: Separate out unlocked
 * requests; they should be finished off immediately.  Locked requests will be
 * finished after unlock; see unlock_request().  2: Finish off the unlocked
 * requests.  It is possible that some request will finish before we can; this
 * is OK, as that request will have been removed from the list before we touch
 * it.
 */
void fuse_abort_conn(struct fuse_conn *fc)
{
	struct fuse_iqueue *fiq = &fc->iq;

	spin_lock(&fc->lock);
	if (fc->connected) {
		struct fuse_dev *fud;
		struct fuse_req *req, *next;
		LIST_HEAD(to_end);
		unsigned int i;

		/* Background queuing checks fc->connected under bg_lock */
		spin_lock(&fc->bg_lock);
		fc->connected = 0;
		spin_unlock(&fc->bg_lock);

		fuse_set_initialized(fc);
		list_for_each_entry(fud, &fc->devices, entry) {
			struct fuse_pqueue *fpq = &fud->pq;

			spin_lock(&fpq->lock);
			fpq->connected = 0;
			list_for_each_entry_safe(req, next, &fpq->io, list) {
				req->out.h.error = -ECONNABORTED;
				spin_lock(&req->waitq.lock);
				set_bit(FR_ABORTED, &req->flags);
				if (!test_bit(FR_LOCKED, &req->flags)) {
					set_bit(FR_PRIVATE, &req->flags);
					__fuse_get_request(req);
					list_move(&req->list, &to_end);
				}
				spin_unlock(&req->waitq.lock);
			}
			for (i = 0; i < FUSE_PQ_HASH_SIZE; i++)
				list_splice_tail_init(&fpq->processing[i],
						      &to_end);
			spin_unlock(&fpq->lock);
		}
		spin_lock(&fc->bg_lock);
		fc->blocked = 0;
		fc->max_background = UINT_MAX;
		flush_bg_queue(fc);
		spin_unlock(&fc->bg_lock);

		spin_lock(&fiq->lock);
		fiq->connected = 0;
		list_for_each_entry(req, &fiq->pending, list)
			clear_bit(FR_PENDING, &req->flags);
		list_splice_tail_init(&fiq->pending, &to_end);
		while (forget_pending(fiq))
			kfree(fuse_dequeue_forget(fiq, 1, NULL));
		wake_up_all(&fiq->waitq);
		spin_unlock(&fiq->lock);
		kill_fasync(&fiq->fasync, SIGIO, POLL_IN);
		end_polls(fc);
		wake_up_all(&fc->blocked_waitq);
		spin_unlock(&fc->lock);

		end_requests(&to_end);
	} else {
		spin_unlock(&fc->lock);
	}
}
EXPORT_SYMBOL_GPL(fuse_abort_conn);
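
/*
 * Example (illustrative only, not part of this file): besides device
 * release, fuse_abort_conn() can be reached from userspace through the
 * per-connection "abort" file described in
 * Documentation/filesystems/fuse.rst.  A minimal sketch, compile-guarded
 * so it cannot affect this file; the helper name is an assumption made
 * for illustration.
 */
#if 0
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

static int abort_fuse_conn(unsigned int conn_id)
{
	char path[64];
	int fd;

	snprintf(path, sizeof(path),
		 "/sys/fs/fuse/connections/%u/abort", conn_id);
	fd = open(path, O_WRONLY);
	if (fd < 0)
		return -1;
	/* any write triggers the abort */
	if (write(fd, "1", 1) < 0) {
		close(fd);
		return -1;
	}
	close(fd);
	return 0;
}
#endif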

void fuse_wait_aborted(struct fuse_conn *fc)
{
	/* matches implicit memory barrier in fuse_drop_waiting() */
	smp_mb();
	wait_event(fc->blocked_waitq, atomic_read(&fc->num_waiting) == 0);
}

int fuse_dev_release(struct inode *inode, struct file *file)
{
	struct fuse_dev *fud = fuse_get_dev(file);

	if (fud) {
		struct fuse_conn *fc = fud->fc;
		struct fuse_pqueue *fpq = &fud->pq;
		LIST_HEAD(to_end);
		unsigned int i;

		spin_lock(&fpq->lock);
		WARN_ON(!list_empty(&fpq->io));
		for (i = 0; i < FUSE_PQ_HASH_SIZE; i++)
			list_splice_init(&fpq->processing[i], &to_end);
		spin_unlock(&fpq->lock);

		end_requests(&to_end);

		/* Are we the last open device? */
		if (atomic_dec_and_test(&fc->dev_count)) {
			WARN_ON(fc->iq.fasync != NULL);
			fuse_abort_conn(fc);
		}
		fuse_dev_free(fud);
	}
	return 0;
}
EXPORT_SYMBOL_GPL(fuse_dev_release);

static int fuse_dev_fasync(int fd, struct file *file, int on)
{
	struct fuse_dev *fud = fuse_get_dev(file);

	if (!fud)
		return -EPERM;

	/* No locking - fasync_helper does its own locking */
	return fasync_helper(fd, file, on, &fud->fc->iq.fasync);
}

static int fuse_device_clone(struct fuse_conn *fc, struct file *new)
{
	struct fuse_dev *fud;

	if (new->private_data)
		return -EINVAL;

	fud = fuse_dev_alloc_install(fc);
	if (!fud)
		return -ENOMEM;

	new->private_data = fud;
	atomic_inc(&fc->dev_count);

	return 0;
}

static long fuse_dev_ioctl(struct file *file, unsigned int cmd,
			   unsigned long arg)
{
	int res;
	int oldfd;
	struct fuse_dev *fud = NULL;
	struct fd f;

	switch (cmd) {
	case FUSE_DEV_IOC_CLONE:
		if (get_user(oldfd, (__u32 __user *)arg))
			return -EFAULT;

		f = fdget(oldfd);
		if (!f.file)
			return -EINVAL;

		/*
		 * Check against file->f_op because CUSE
		 * uses the same ioctl handler.
		 */
		if (f.file->f_op == file->f_op)
			fud = fuse_get_dev(f.file);

		res = -EINVAL;
		if (fud) {
			mutex_lock(&fuse_mutex);
			res = fuse_device_clone(fud->fc, file);
			mutex_unlock(&fuse_mutex);
		}
		fdput(f);
		break;
	default:
		res = -ENOTTY;
		break;
	}
	return res;
}
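
/*
 * Example (illustrative only, not part of this file): the userspace side
 * of FUSE_DEV_IOC_CLONE.  Each worker thread can clone the session onto
 * its own /dev/fuse fd and service requests from its own queue.  A
 * minimal sketch, compile-guarded so it cannot affect this file; the
 * helper name is an assumption made for illustration.
 */
#if 0
#include <linux/fuse.h>
#include <sys/ioctl.h>
#include <fcntl.h>
#include <stdint.h>
#include <unistd.h>

static int clone_fuse_fd(int session_fd)
{
	uint32_t oldfd = session_fd;
	int newfd = open("/dev/fuse", O_RDWR | O_CLOEXEC);

	if (newfd < 0)
		return -1;
	/* the ioctl argument is a pointer to the fd being cloned */
	if (ioctl(newfd, FUSE_DEV_IOC_CLONE, &oldfd) == -1) {
		close(newfd);
		return -1;
	}
	return newfd;
}
#endif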

const struct file_operations fuse_dev_operations = {
	.owner		= THIS_MODULE,
	.open		= fuse_dev_open,
	.llseek		= no_llseek,
	.read_iter	= fuse_dev_read,
	.splice_read	= fuse_dev_splice_read,
	.write_iter	= fuse_dev_write,
	.splice_write	= fuse_dev_splice_write,
	.poll		= fuse_dev_poll,
	.release	= fuse_dev_release,
	.fasync		= fuse_dev_fasync,
	.unlocked_ioctl = fuse_dev_ioctl,
	.compat_ioctl   = compat_ptr_ioctl,
};
EXPORT_SYMBOL_GPL(fuse_dev_operations);

static struct miscdevice fuse_miscdevice = {
	.minor = FUSE_MINOR,
	.name  = "fuse",
	.fops = &fuse_dev_operations,
};

int __init fuse_dev_init(void)
{
	int err = -ENOMEM;
	fuse_req_cachep = kmem_cache_create("fuse_request",
					    sizeof(struct fuse_req),
					    0, 0, NULL);
	if (!fuse_req_cachep)
		goto out;

	err = misc_register(&fuse_miscdevice);
	if (err)
		goto out_cache_clean;

	return 0;

 out_cache_clean:
	kmem_cache_destroy(fuse_req_cachep);
 out:
	return err;
}

void fuse_dev_cleanup(void)
{
	misc_deregister(&fuse_miscdevice);
	kmem_cache_destroy(fuse_req_cachep);
}