fuse: fix sparse warning in ioctl
[linux-2.6-block.git] / fs / fuse / dev.c
/*
  FUSE: Filesystem in Userspace
  Copyright (C) 2001-2008 Miklos Szeredi <miklos@szeredi.hu>

  This program can be distributed under the terms of the GNU GPL.
  See the file COPYING.
*/

#include "fuse_i.h"

#include <linux/init.h>
#include <linux/module.h>
#include <linux/poll.h>
#include <linux/uio.h>
#include <linux/miscdevice.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/slab.h>

MODULE_ALIAS_MISCDEV(FUSE_MINOR);

static struct kmem_cache *fuse_req_cachep;

static struct fuse_conn *fuse_get_conn(struct file *file)
{
	/*
	 * Lockless access is OK, because file->private_data is set
	 * once during mount and is valid until the file is released.
	 */
	return file->private_data;
}

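/* Reset a request to a freshly allocated state, holding a single reference. */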
static void fuse_request_init(struct fuse_req *req)
{
	memset(req, 0, sizeof(*req));
	INIT_LIST_HEAD(&req->list);
	INIT_LIST_HEAD(&req->intr_entry);
	init_waitqueue_head(&req->waitq);
	atomic_set(&req->count, 1);
}

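/* Allocate and initialize a request; may sleep (GFP_KERNEL). */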
struct fuse_req *fuse_request_alloc(void)
{
	struct fuse_req *req = kmem_cache_alloc(fuse_req_cachep, GFP_KERNEL);
	if (req)
		fuse_request_init(req);
	return req;
}

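/*
 * Same as fuse_request_alloc(), but with GFP_NOFS so it is safe to
 * call from memory reclaim / writeback paths without re-entering the
 * filesystem.
 */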
struct fuse_req *fuse_request_alloc_nofs(void)
{
	struct fuse_req *req = kmem_cache_alloc(fuse_req_cachep, GFP_NOFS);
	if (req)
		fuse_request_init(req);
	return req;
}

void fuse_request_free(struct fuse_req *req)
{
	kmem_cache_free(fuse_req_cachep, req);
}

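/* Block all signals except SIGKILL, saving the old mask in *oldset. */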
static void block_sigs(sigset_t *oldset)
{
	sigset_t mask;

	siginitsetinv(&mask, sigmask(SIGKILL));
	sigprocmask(SIG_BLOCK, &mask, oldset);
}

static void restore_sigs(sigset_t *oldset)
{
	sigprocmask(SIG_SETMASK, oldset, NULL);
}

static void __fuse_get_request(struct fuse_req *req)
{
	atomic_inc(&req->count);
}

/* Must be called with > 1 refcount */
static void __fuse_put_request(struct fuse_req *req)
{
	BUG_ON(atomic_read(&req->count) < 2);
	atomic_dec(&req->count);
}

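/* Fill in the request header with the caller's credentials and pid. */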
static void fuse_req_init_context(struct fuse_req *req)
{
	req->in.h.uid = current->fsuid;
	req->in.h.gid = current->fsgid;
	req->in.h.pid = current->pid;
}

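/*
 * Get a request for sending to userspace: wait until the connection
 * is unblocked, then allocate.  Returns ERR_PTR(-EINTR) if
 * interrupted by a signal, -ENOTCONN if the connection is gone, or
 * -ENOMEM if allocation fails.
 */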
struct fuse_req *fuse_get_req(struct fuse_conn *fc)
{
	struct fuse_req *req;
	sigset_t oldset;
	int intr;
	int err;

	atomic_inc(&fc->num_waiting);
	block_sigs(&oldset);
	intr = wait_event_interruptible(fc->blocked_waitq, !fc->blocked);
	restore_sigs(&oldset);
	err = -EINTR;
	if (intr)
		goto out;

	err = -ENOTCONN;
	if (!fc->connected)
		goto out;

	req = fuse_request_alloc();
	err = -ENOMEM;
	if (!req)
		goto out;

	fuse_req_init_context(req);
	req->waiting = 1;
	return req;

 out:
	atomic_dec(&fc->num_waiting);
	return ERR_PTR(err);
}

/*
 * Return request in fuse_file->reserved_req.  However that may
 * currently be in use.  If that is the case, wait for it to become
 * available.
 */
static struct fuse_req *get_reserved_req(struct fuse_conn *fc,
					 struct file *file)
{
	struct fuse_req *req = NULL;
	struct fuse_file *ff = file->private_data;

	do {
		wait_event(fc->reserved_req_waitq, ff->reserved_req);
		spin_lock(&fc->lock);
		if (ff->reserved_req) {
			req = ff->reserved_req;
			ff->reserved_req = NULL;
			get_file(file);
			req->stolen_file = file;
		}
		spin_unlock(&fc->lock);
	} while (!req);

	return req;
}

/*
 * Put stolen request back into fuse_file->reserved_req
 */
static void put_reserved_req(struct fuse_conn *fc, struct fuse_req *req)
{
	struct file *file = req->stolen_file;
	struct fuse_file *ff = file->private_data;

	spin_lock(&fc->lock);
	fuse_request_init(req);
	BUG_ON(ff->reserved_req);
	ff->reserved_req = req;
	wake_up_all(&fc->reserved_req_waitq);
	spin_unlock(&fc->lock);
	fput(file);
}

/*
 * Gets a request for a file operation, always succeeds
 *
 * This is used for sending the FLUSH request, which must get to
 * userspace, due to POSIX locks which may need to be unlocked.
 *
 * If allocation fails due to OOM, use the reserved request in
 * fuse_file.
 *
 * This is very unlikely to deadlock accidentally, since the
 * filesystem should not have its own file open.  If deadlock is
 * intentional, it can still be broken by "aborting" the filesystem.
 */
struct fuse_req *fuse_get_req_nofail(struct fuse_conn *fc, struct file *file)
{
	struct fuse_req *req;

	atomic_inc(&fc->num_waiting);
	wait_event(fc->blocked_waitq, !fc->blocked);
	req = fuse_request_alloc();
	if (!req)
		req = get_reserved_req(fc, file);

	fuse_req_init_context(req);
	req->waiting = 1;
	return req;
}

void fuse_put_request(struct fuse_conn *fc, struct fuse_req *req)
{
	if (atomic_dec_and_test(&req->count)) {
		if (req->waiting)
			atomic_dec(&fc->num_waiting);

		if (req->stolen_file)
			put_reserved_req(fc, req);
		else
			fuse_request_free(req);
	}
}

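/* Total size in bytes of an array of request arguments. */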
static unsigned len_args(unsigned numargs, struct fuse_arg *args)
{
	unsigned nbytes = 0;
	unsigned i;

	for (i = 0; i < numargs; i++)
		nbytes += args[i].size;

	return nbytes;
}

static u64 fuse_get_unique(struct fuse_conn *fc)
{
	fc->reqctr++;
	/* zero is special */
	if (fc->reqctr == 0)
		fc->reqctr = 1;

	return fc->reqctr;
}

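/*
 * Assign a unique ID, fill in the total request length and put the
 * request on the pending list, waking up any reader.  Called with
 * fc->lock held.
 */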
static void queue_request(struct fuse_conn *fc, struct fuse_req *req)
{
	req->in.h.unique = fuse_get_unique(fc);
	req->in.h.len = sizeof(struct fuse_in_header) +
		len_args(req->in.numargs, (struct fuse_arg *) req->in.args);
	list_add_tail(&req->list, &fc->pending);
	req->state = FUSE_REQ_PENDING;
	if (!req->waiting) {
		req->waiting = 1;
		atomic_inc(&fc->num_waiting);
	}
	wake_up(&fc->waitq);
	kill_fasync(&fc->fasync, SIGIO, POLL_IN);
}

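/*
 * Move queued background requests to the pending list while fewer
 * than FUSE_MAX_BACKGROUND of them are active.  Called with fc->lock
 * held.
 */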
static void flush_bg_queue(struct fuse_conn *fc)
{
	while (fc->active_background < FUSE_MAX_BACKGROUND &&
	       !list_empty(&fc->bg_queue)) {
		struct fuse_req *req;

		req = list_entry(fc->bg_queue.next, struct fuse_req, list);
		list_del(&req->list);
		fc->active_background++;
		queue_request(fc, req);
	}
}

/*
 * This function is called when a request is finished.  Either a reply
 * has arrived or it was aborted (and not yet sent) or some error
 * occurred during communication with userspace, or the device file
 * was closed.  The requester thread is woken up (if still waiting),
 * the 'end' callback is called if given, else the reference to the
 * request is released
 *
 * Called with fc->lock, unlocks it
 */
static void request_end(struct fuse_conn *fc, struct fuse_req *req)
	__releases(fc->lock)
{
	void (*end) (struct fuse_conn *, struct fuse_req *) = req->end;
	req->end = NULL;
	list_del(&req->list);
	list_del(&req->intr_entry);
	req->state = FUSE_REQ_FINISHED;
	if (req->background) {
		if (fc->num_background == FUSE_MAX_BACKGROUND) {
			fc->blocked = 0;
			wake_up_all(&fc->blocked_waitq);
		}
		if (fc->num_background == FUSE_CONGESTION_THRESHOLD) {
			clear_bdi_congested(&fc->bdi, READ);
			clear_bdi_congested(&fc->bdi, WRITE);
		}
		fc->num_background--;
		fc->active_background--;
		flush_bg_queue(fc);
	}
	spin_unlock(&fc->lock);
	wake_up(&req->waitq);
	if (end)
		end(fc, req);
	fuse_put_request(fc, req);
}

static void wait_answer_interruptible(struct fuse_conn *fc,
				      struct fuse_req *req)
	__releases(fc->lock) __acquires(fc->lock)
{
	if (signal_pending(current))
		return;

	spin_unlock(&fc->lock);
	wait_event_interruptible(req->waitq, req->state == FUSE_REQ_FINISHED);
	spin_lock(&fc->lock);
}

static void queue_interrupt(struct fuse_conn *fc, struct fuse_req *req)
{
	list_add_tail(&req->intr_entry, &fc->interrupts);
	wake_up(&fc->waitq);
	kill_fasync(&fc->fasync, SIGIO, POLL_IN);
}

static void request_wait_answer(struct fuse_conn *fc, struct fuse_req *req)
	__releases(fc->lock) __acquires(fc->lock)
{
	if (!fc->no_interrupt) {
		/* Any signal may interrupt this */
		wait_answer_interruptible(fc, req);

		if (req->aborted)
			goto aborted;
		if (req->state == FUSE_REQ_FINISHED)
			return;

		req->interrupted = 1;
		if (req->state == FUSE_REQ_SENT)
			queue_interrupt(fc, req);
	}

	if (!req->force) {
		sigset_t oldset;

		/* Only fatal signals may interrupt this */
		block_sigs(&oldset);
		wait_answer_interruptible(fc, req);
		restore_sigs(&oldset);

		if (req->aborted)
			goto aborted;
		if (req->state == FUSE_REQ_FINISHED)
			return;

		/* Request is not yet in userspace, bail out */
		if (req->state == FUSE_REQ_PENDING) {
			list_del(&req->list);
			__fuse_put_request(req);
			req->out.h.error = -EINTR;
			return;
		}
	}

	/*
	 * Either request is already in userspace, or it was forced.
	 * Wait it out.
	 */
	spin_unlock(&fc->lock);
	wait_event(req->waitq, req->state == FUSE_REQ_FINISHED);
	spin_lock(&fc->lock);

	if (!req->aborted)
		return;

 aborted:
	BUG_ON(req->state != FUSE_REQ_FINISHED);
	if (req->locked) {
		/* This is uninterruptible sleep, because data is
		   being copied to/from the buffers of req.  During
		   locked state, there mustn't be any filesystem
		   operation (e.g. page fault), since that could lead
		   to deadlock */
		spin_unlock(&fc->lock);
		wait_event(req->waitq, !req->locked);
		spin_lock(&fc->lock);
	}
}

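/*
 * Send a request and wait for the answer; the result ends up in
 * req->out.h.error.  An illustrative caller sketch (not taken from
 * this file):
 *
 *	req = fuse_get_req(fc);
 *	if (IS_ERR(req))
 *		return PTR_ERR(req);
 *	... fill in req->in ...
 *	fuse_request_send(fc, req);
 *	err = req->out.h.error;
 *	fuse_put_request(fc, req);
 */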
void fuse_request_send(struct fuse_conn *fc, struct fuse_req *req)
{
	req->isreply = 1;
	spin_lock(&fc->lock);
	if (!fc->connected)
		req->out.h.error = -ENOTCONN;
	else if (fc->conn_error)
		req->out.h.error = -ECONNREFUSED;
	else {
		queue_request(fc, req);
		/* acquire extra reference, since request is still needed
		   after request_end() */
		__fuse_get_request(req);

		request_wait_answer(fc, req);
	}
	spin_unlock(&fc->lock);
}

static void fuse_request_send_nowait_locked(struct fuse_conn *fc,
					    struct fuse_req *req)
{
	req->background = 1;
	fc->num_background++;
	if (fc->num_background == FUSE_MAX_BACKGROUND)
		fc->blocked = 1;
	if (fc->num_background == FUSE_CONGESTION_THRESHOLD) {
		set_bdi_congested(&fc->bdi, READ);
		set_bdi_congested(&fc->bdi, WRITE);
	}
	list_add_tail(&req->list, &fc->bg_queue);
	flush_bg_queue(fc);
}

static void fuse_request_send_nowait(struct fuse_conn *fc, struct fuse_req *req)
{
	spin_lock(&fc->lock);
	if (fc->connected) {
		fuse_request_send_nowait_locked(fc, req);
		spin_unlock(&fc->lock);
	} else {
		req->out.h.error = -ENOTCONN;
		request_end(fc, req);
	}
}

void fuse_request_send_noreply(struct fuse_conn *fc, struct fuse_req *req)
{
	req->isreply = 0;
	fuse_request_send_nowait(fc, req);
}

void fuse_request_send_background(struct fuse_conn *fc, struct fuse_req *req)
{
	req->isreply = 1;
	fuse_request_send_nowait(fc, req);
}

/*
 * Called under fc->lock
 *
 * fc->connected must have been checked previously
 */
void fuse_request_send_background_locked(struct fuse_conn *fc,
					 struct fuse_req *req)
{
	req->isreply = 1;
	fuse_request_send_nowait_locked(fc, req);
}

/*
 * Lock the request.  Up to the next unlock_request() there mustn't be
 * anything that could cause a page-fault.  If the request was already
 * aborted bail out.
 */
static int lock_request(struct fuse_conn *fc, struct fuse_req *req)
{
	int err = 0;
	if (req) {
		spin_lock(&fc->lock);
		if (req->aborted)
			err = -ENOENT;
		else
			req->locked = 1;
		spin_unlock(&fc->lock);
	}
	return err;
}

/*
 * Unlock request.  If it was aborted during being locked, the
 * requester thread is currently waiting for it to be unlocked, so
 * wake it up.
 */
static void unlock_request(struct fuse_conn *fc, struct fuse_req *req)
{
	if (req) {
		spin_lock(&fc->lock);
		req->locked = 0;
		if (req->aborted)
			wake_up(&req->waitq);
		spin_unlock(&fc->lock);
	}
}

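/*
 * State kept while copying between the kernel request and the
 * userspace buffer, one mapped page at a time.
 */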
struct fuse_copy_state {
	struct fuse_conn *fc;
	int write;
	struct fuse_req *req;
	const struct iovec *iov;
	unsigned long nr_segs;
	unsigned long seglen;
	unsigned long addr;
	struct page *pg;
	void *mapaddr;
	void *buf;
	unsigned len;
};

static void fuse_copy_init(struct fuse_copy_state *cs, struct fuse_conn *fc,
			   int write, struct fuse_req *req,
			   const struct iovec *iov, unsigned long nr_segs)
{
	memset(cs, 0, sizeof(*cs));
	cs->fc = fc;
	cs->write = write;
	cs->req = req;
	cs->iov = iov;
	cs->nr_segs = nr_segs;
}

/* Unmap and put previous page of userspace buffer */
static void fuse_copy_finish(struct fuse_copy_state *cs)
{
	if (cs->mapaddr) {
		kunmap_atomic(cs->mapaddr, KM_USER0);
		if (cs->write) {
			flush_dcache_page(cs->pg);
			set_page_dirty_lock(cs->pg);
		}
		put_page(cs->pg);
		cs->mapaddr = NULL;
	}
}

/*
 * Get another pagefull of userspace buffer, and map it to kernel
 * address space, and lock request
 */
static int fuse_copy_fill(struct fuse_copy_state *cs)
{
	unsigned long offset;
	int err;

	unlock_request(cs->fc, cs->req);
	fuse_copy_finish(cs);
	if (!cs->seglen) {
		BUG_ON(!cs->nr_segs);
		cs->seglen = cs->iov[0].iov_len;
		cs->addr = (unsigned long) cs->iov[0].iov_base;
		cs->iov++;
		cs->nr_segs--;
	}
	down_read(&current->mm->mmap_sem);
	err = get_user_pages(current, current->mm, cs->addr, 1, cs->write, 0,
			     &cs->pg, NULL);
	up_read(&current->mm->mmap_sem);
	if (err < 0)
		return err;
	BUG_ON(err != 1);
	offset = cs->addr % PAGE_SIZE;
	cs->mapaddr = kmap_atomic(cs->pg, KM_USER0);
	cs->buf = cs->mapaddr + offset;
	cs->len = min(PAGE_SIZE - offset, cs->seglen);
	cs->seglen -= cs->len;
	cs->addr += cs->len;

	return lock_request(cs->fc, cs->req);
}

/* Do as much copy to/from userspace buffer as we can */
static int fuse_copy_do(struct fuse_copy_state *cs, void **val, unsigned *size)
{
	unsigned ncpy = min(*size, cs->len);
	if (val) {
		if (cs->write)
			memcpy(cs->buf, *val, ncpy);
		else
			memcpy(*val, cs->buf, ncpy);
		*val += ncpy;
	}
	*size -= ncpy;
	cs->len -= ncpy;
	cs->buf += ncpy;
	return ncpy;
}

/*
 * Copy a page in the request to/from the userspace buffer.  Must be
 * done atomically
 */
static int fuse_copy_page(struct fuse_copy_state *cs, struct page *page,
			  unsigned offset, unsigned count, int zeroing)
{
	if (page && zeroing && count < PAGE_SIZE) {
		void *mapaddr = kmap_atomic(page, KM_USER1);
		memset(mapaddr, 0, PAGE_SIZE);
		kunmap_atomic(mapaddr, KM_USER1);
	}
	while (count) {
		if (!cs->len) {
			int err = fuse_copy_fill(cs);
			if (err)
				return err;
		}
		if (page) {
			void *mapaddr = kmap_atomic(page, KM_USER1);
			void *buf = mapaddr + offset;
			offset += fuse_copy_do(cs, &buf, &count);
			kunmap_atomic(mapaddr, KM_USER1);
		} else
			offset += fuse_copy_do(cs, NULL, &count);
	}
	if (page && !cs->write)
		flush_dcache_page(page);
	return 0;
}

/* Copy pages in the request to/from userspace buffer */
static int fuse_copy_pages(struct fuse_copy_state *cs, unsigned nbytes,
			   int zeroing)
{
	unsigned i;
	struct fuse_req *req = cs->req;
	unsigned offset = req->page_offset;
	unsigned count = min(nbytes, (unsigned) PAGE_SIZE - offset);

	for (i = 0; i < req->num_pages && (nbytes || zeroing); i++) {
		struct page *page = req->pages[i];
		int err = fuse_copy_page(cs, page, offset, count, zeroing);
		if (err)
			return err;

		nbytes -= count;
		count = min(nbytes, (unsigned) PAGE_SIZE);
		offset = 0;
	}
	return 0;
}

/* Copy a single argument in the request to/from userspace buffer */
static int fuse_copy_one(struct fuse_copy_state *cs, void *val, unsigned size)
{
	while (size) {
		if (!cs->len) {
			int err = fuse_copy_fill(cs);
			if (err)
				return err;
		}
		fuse_copy_do(cs, &val, &size);
	}
	return 0;
}

/* Copy request arguments to/from userspace buffer */
static int fuse_copy_args(struct fuse_copy_state *cs, unsigned numargs,
			  unsigned argpages, struct fuse_arg *args,
			  int zeroing)
{
	int err = 0;
	unsigned i;

	for (i = 0; !err && i < numargs; i++) {
		struct fuse_arg *arg = &args[i];
		if (i == numargs - 1 && argpages)
			err = fuse_copy_pages(cs, arg->size, zeroing);
		else
			err = fuse_copy_one(cs, arg->value, arg->size);
	}
	return err;
}

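/*
 * A request is available if either the pending list or the interrupts
 * list is non-empty.  Called with fc->lock held.
 */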
static int request_pending(struct fuse_conn *fc)
{
	return !list_empty(&fc->pending) || !list_empty(&fc->interrupts);
}

/* Wait until a request is available on the pending list */
static void request_wait(struct fuse_conn *fc)
{
	DECLARE_WAITQUEUE(wait, current);

	add_wait_queue_exclusive(&fc->waitq, &wait);
	while (fc->connected && !request_pending(fc)) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (signal_pending(current))
			break;

		spin_unlock(&fc->lock);
		schedule();
		spin_lock(&fc->lock);
	}
	set_current_state(TASK_RUNNING);
	remove_wait_queue(&fc->waitq, &wait);
}

/*
 * Transfer an interrupt request to userspace
 *
 * Unlike other requests this is assembled on demand, without a need
 * to allocate a separate fuse_req structure.
 *
 * Called with fc->lock held, releases it
 */
static int fuse_read_interrupt(struct fuse_conn *fc, struct fuse_req *req,
			       const struct iovec *iov, unsigned long nr_segs)
	__releases(fc->lock)
{
	struct fuse_copy_state cs;
	struct fuse_in_header ih;
	struct fuse_interrupt_in arg;
	unsigned reqsize = sizeof(ih) + sizeof(arg);
	int err;

	list_del_init(&req->intr_entry);
	req->intr_unique = fuse_get_unique(fc);
	memset(&ih, 0, sizeof(ih));
	memset(&arg, 0, sizeof(arg));
	ih.len = reqsize;
	ih.opcode = FUSE_INTERRUPT;
	ih.unique = req->intr_unique;
	arg.unique = req->in.h.unique;

	spin_unlock(&fc->lock);
	if (iov_length(iov, nr_segs) < reqsize)
		return -EINVAL;

	fuse_copy_init(&cs, fc, 1, NULL, iov, nr_segs);
	err = fuse_copy_one(&cs, &ih, sizeof(ih));
	if (!err)
		err = fuse_copy_one(&cs, &arg, sizeof(arg));
	fuse_copy_finish(&cs);

	return err ? err : reqsize;
}

/*
 * Read a single request into the userspace filesystem's buffer.  This
 * function waits until a request is available, then removes it from
 * the pending list and copies request data to userspace buffer.  If
 * no reply is needed (FORGET) or request has been aborted or there
 * was an error during the copying then it's finished by calling
 * request_end().  Otherwise add it to the processing list, and set
 * the 'sent' flag.
 */
static ssize_t fuse_dev_read(struct kiocb *iocb, const struct iovec *iov,
			     unsigned long nr_segs, loff_t pos)
{
	int err;
	struct fuse_req *req;
	struct fuse_in *in;
	struct fuse_copy_state cs;
	unsigned reqsize;
	struct file *file = iocb->ki_filp;
	struct fuse_conn *fc = fuse_get_conn(file);
	if (!fc)
		return -EPERM;

 restart:
	spin_lock(&fc->lock);
	err = -EAGAIN;
	if ((file->f_flags & O_NONBLOCK) && fc->connected &&
	    !request_pending(fc))
		goto err_unlock;

	request_wait(fc);
	err = -ENODEV;
	if (!fc->connected)
		goto err_unlock;
	err = -ERESTARTSYS;
	if (!request_pending(fc))
		goto err_unlock;

	if (!list_empty(&fc->interrupts)) {
		req = list_entry(fc->interrupts.next, struct fuse_req,
				 intr_entry);
		return fuse_read_interrupt(fc, req, iov, nr_segs);
	}

	req = list_entry(fc->pending.next, struct fuse_req, list);
	req->state = FUSE_REQ_READING;
	list_move(&req->list, &fc->io);

	in = &req->in;
	reqsize = in->h.len;
	/* If request is too large, reply with an error and restart the read */
	if (iov_length(iov, nr_segs) < reqsize) {
		req->out.h.error = -EIO;
		/* SETXATTR is special, since it may contain too large data */
		if (in->h.opcode == FUSE_SETXATTR)
			req->out.h.error = -E2BIG;
		request_end(fc, req);
		goto restart;
	}
	spin_unlock(&fc->lock);
	fuse_copy_init(&cs, fc, 1, req, iov, nr_segs);
	err = fuse_copy_one(&cs, &in->h, sizeof(in->h));
	if (!err)
		err = fuse_copy_args(&cs, in->numargs, in->argpages,
				     (struct fuse_arg *) in->args, 0);
	fuse_copy_finish(&cs);
	spin_lock(&fc->lock);
	req->locked = 0;
	if (req->aborted) {
		request_end(fc, req);
		return -ENODEV;
	}
	if (err) {
		req->out.h.error = -EIO;
		request_end(fc, req);
		return err;
	}
	if (!req->isreply)
		request_end(fc, req);
	else {
		req->state = FUSE_REQ_SENT;
		list_move_tail(&req->list, &fc->processing);
		if (req->interrupted)
			queue_interrupt(fc, req);
		spin_unlock(&fc->lock);
	}
	return reqsize;

 err_unlock:
	spin_unlock(&fc->lock);
	return err;
}

static int fuse_notify_poll(struct fuse_conn *fc, unsigned int size,
			    struct fuse_copy_state *cs)
{
	struct fuse_notify_poll_wakeup_out outarg;
	int err;

	if (size != sizeof(outarg))
		return -EINVAL;

	err = fuse_copy_one(cs, &outarg, sizeof(outarg));
	if (err)
		return err;

	return fuse_notify_poll_wakeup(fc, &outarg);
}

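/* Dispatch an unsolicited notification from userspace by its code. */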
static int fuse_notify(struct fuse_conn *fc, enum fuse_notify_code code,
		       unsigned int size, struct fuse_copy_state *cs)
{
	switch (code) {
	case FUSE_NOTIFY_POLL:
		return fuse_notify_poll(fc, size, cs);

	default:
		return -EINVAL;
	}
}

/* Look up request on processing list by unique ID */
static struct fuse_req *request_find(struct fuse_conn *fc, u64 unique)
{
	struct list_head *entry;

	list_for_each(entry, &fc->processing) {
		struct fuse_req *req;
		req = list_entry(entry, struct fuse_req, list);
		if (req->in.h.unique == unique || req->intr_unique == unique)
			return req;
	}
	return NULL;
}

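/*
 * Copy the reply arguments into the request, validating the size
 * against the reply header.  A reply shorter than expected is
 * accepted only if out->argvar is set, in which case the last
 * argument is truncated to fit.
 */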
static int copy_out_args(struct fuse_copy_state *cs, struct fuse_out *out,
			 unsigned nbytes)
{
	unsigned reqsize = sizeof(struct fuse_out_header);

	if (out->h.error)
		return nbytes != reqsize ? -EINVAL : 0;

	reqsize += len_args(out->numargs, out->args);

	if (reqsize < nbytes || (reqsize > nbytes && !out->argvar))
		return -EINVAL;
	else if (reqsize > nbytes) {
		struct fuse_arg *lastarg = &out->args[out->numargs-1];
		unsigned diffsize = reqsize - nbytes;
		if (diffsize > lastarg->size)
			return -EINVAL;
		lastarg->size -= diffsize;
	}
	return fuse_copy_args(cs, out->numargs, out->argpages, out->args,
			      out->page_zeroing);
}

/*
 * Write a single reply to a request.  First the header is copied from
 * the write buffer.  The request is then searched on the processing
 * list by the unique ID found in the header.  If found, then remove
 * it from the list and copy the rest of the buffer to the request.
 * The request is finished by calling request_end()
 */
static ssize_t fuse_dev_write(struct kiocb *iocb, const struct iovec *iov,
			      unsigned long nr_segs, loff_t pos)
{
	int err;
	unsigned nbytes = iov_length(iov, nr_segs);
	struct fuse_req *req;
	struct fuse_out_header oh;
	struct fuse_copy_state cs;
	struct fuse_conn *fc = fuse_get_conn(iocb->ki_filp);
	if (!fc)
		return -EPERM;

	fuse_copy_init(&cs, fc, 0, NULL, iov, nr_segs);
	if (nbytes < sizeof(struct fuse_out_header))
		return -EINVAL;

	err = fuse_copy_one(&cs, &oh, sizeof(oh));
	if (err)
		goto err_finish;

	err = -EINVAL;
	if (oh.len != nbytes)
		goto err_finish;

	/*
	 * Zero oh.unique indicates unsolicited notification message
	 * and error contains notification code.
	 */
	if (!oh.unique) {
		err = fuse_notify(fc, oh.error, nbytes - sizeof(oh), &cs);
		fuse_copy_finish(&cs);
		return err ? err : nbytes;
	}

	err = -EINVAL;
	if (oh.error <= -1000 || oh.error > 0)
		goto err_finish;

	spin_lock(&fc->lock);
	err = -ENOENT;
	if (!fc->connected)
		goto err_unlock;

	req = request_find(fc, oh.unique);
	if (!req)
		goto err_unlock;

	if (req->aborted) {
		spin_unlock(&fc->lock);
		fuse_copy_finish(&cs);
		spin_lock(&fc->lock);
		request_end(fc, req);
		return -ENOENT;
	}
	/* Is it an interrupt reply? */
	if (req->intr_unique == oh.unique) {
		err = -EINVAL;
		if (nbytes != sizeof(struct fuse_out_header))
			goto err_unlock;

		if (oh.error == -ENOSYS)
			fc->no_interrupt = 1;
		else if (oh.error == -EAGAIN)
			queue_interrupt(fc, req);

		spin_unlock(&fc->lock);
		fuse_copy_finish(&cs);
		return nbytes;
	}

	req->state = FUSE_REQ_WRITING;
	list_move(&req->list, &fc->io);
	req->out.h = oh;
	req->locked = 1;
	cs.req = req;
	spin_unlock(&fc->lock);

	err = copy_out_args(&cs, &req->out, nbytes);
	fuse_copy_finish(&cs);

	spin_lock(&fc->lock);
	req->locked = 0;
	if (!err) {
		if (req->aborted)
			err = -ENOENT;
	} else if (!req->aborted)
		req->out.h.error = -EIO;
	request_end(fc, req);

	return err ? err : nbytes;

 err_unlock:
	spin_unlock(&fc->lock);
 err_finish:
	fuse_copy_finish(&cs);
	return err;
}

static unsigned fuse_dev_poll(struct file *file, poll_table *wait)
{
	unsigned mask = POLLOUT | POLLWRNORM;
	struct fuse_conn *fc = fuse_get_conn(file);
	if (!fc)
		return POLLERR;

	poll_wait(file, &fc->waitq, wait);

	spin_lock(&fc->lock);
	if (!fc->connected)
		mask = POLLERR;
	else if (request_pending(fc))
		mask |= POLLIN | POLLRDNORM;
	spin_unlock(&fc->lock);

	return mask;
}

/*
 * Abort all requests on the given list (pending or processing)
 *
 * This function releases and reacquires fc->lock
 */
static void end_requests(struct fuse_conn *fc, struct list_head *head)
{
	while (!list_empty(head)) {
		struct fuse_req *req;
		req = list_entry(head->next, struct fuse_req, list);
		req->out.h.error = -ECONNABORTED;
		request_end(fc, req);
		spin_lock(&fc->lock);
	}
}

/*
 * Abort requests under I/O
 *
 * The requests are set to aborted and finished, and the request
 * waiter is woken up.  This will make request_wait_answer() wait
 * until the request is unlocked and then return.
 *
 * If the request is asynchronous, then the end function needs to be
 * called after waiting for the request to be unlocked (if it was
 * locked).
 */
static void end_io_requests(struct fuse_conn *fc)
	__releases(fc->lock) __acquires(fc->lock)
{
	while (!list_empty(&fc->io)) {
		struct fuse_req *req =
			list_entry(fc->io.next, struct fuse_req, list);
		void (*end) (struct fuse_conn *, struct fuse_req *) = req->end;

		req->aborted = 1;
		req->out.h.error = -ECONNABORTED;
		req->state = FUSE_REQ_FINISHED;
		list_del_init(&req->list);
		wake_up(&req->waitq);
		if (end) {
			req->end = NULL;
			__fuse_get_request(req);
			spin_unlock(&fc->lock);
			wait_event(req->waitq, !req->locked);
			end(fc, req);
			fuse_put_request(fc, req);
			spin_lock(&fc->lock);
		}
	}
}

/*
 * Abort all requests.
 *
 * Emergency exit in case of a malicious or accidental deadlock, or
 * just a hung filesystem.
 *
 * The same effect is usually achievable through killing the
 * filesystem daemon and all users of the filesystem.  The exception
 * is the combination of an asynchronous request and the tricky
 * deadlock (see Documentation/filesystems/fuse.txt).
 *
 * During the aborting, progression of requests from the pending and
 * processing lists onto the io list, and progression of new requests
 * onto the pending list is prevented by req->connected being false.
 *
 * Progression of requests under I/O to the processing list is
 * prevented by the req->aborted flag being true for these requests.
 * For this reason requests on the io list must be aborted first.
 */
void fuse_abort_conn(struct fuse_conn *fc)
{
	spin_lock(&fc->lock);
	if (fc->connected) {
		fc->connected = 0;
		fc->blocked = 0;
		end_io_requests(fc);
		end_requests(fc, &fc->pending);
		end_requests(fc, &fc->processing);
		wake_up_all(&fc->waitq);
		wake_up_all(&fc->blocked_waitq);
		kill_fasync(&fc->fasync, SIGIO, POLL_IN);
	}
	spin_unlock(&fc->lock);
}

static int fuse_dev_release(struct inode *inode, struct file *file)
{
	struct fuse_conn *fc = fuse_get_conn(file);
	if (fc) {
		spin_lock(&fc->lock);
		fc->connected = 0;
		end_requests(fc, &fc->pending);
		end_requests(fc, &fc->processing);
		spin_unlock(&fc->lock);
		fuse_conn_put(fc);
	}

	return 0;
}

static int fuse_dev_fasync(int fd, struct file *file, int on)
{
	struct fuse_conn *fc = fuse_get_conn(file);
	if (!fc)
		return -EPERM;

	/* No locking - fasync_helper does its own locking */
	return fasync_helper(fd, file, on, &fc->fasync);
}

const struct file_operations fuse_dev_operations = {
	.owner		= THIS_MODULE,
	.llseek		= no_llseek,
	.read		= do_sync_read,
	.aio_read	= fuse_dev_read,
	.write		= do_sync_write,
	.aio_write	= fuse_dev_write,
	.poll		= fuse_dev_poll,
	.release	= fuse_dev_release,
	.fasync		= fuse_dev_fasync,
};

static struct miscdevice fuse_miscdevice = {
	.minor = FUSE_MINOR,
	.name  = "fuse",
	.fops = &fuse_dev_operations,
};

int __init fuse_dev_init(void)
{
	int err = -ENOMEM;
	fuse_req_cachep = kmem_cache_create("fuse_request",
					    sizeof(struct fuse_req),
					    0, 0, NULL);
	if (!fuse_req_cachep)
		goto out;

	err = misc_register(&fuse_miscdevice);
	if (err)
		goto out_cache_clean;

	return 0;

 out_cache_clean:
	kmem_cache_destroy(fuse_req_cachep);
 out:
	return err;
}

void fuse_dev_cleanup(void)
{
	misc_deregister(&fuse_miscdevice);
	kmem_cache_destroy(fuse_req_cachep);
}