fs/fuse/file.c
1/*
2 FUSE: Filesystem in Userspace
1729a16c 3 Copyright (C) 2001-2008 Miklos Szeredi <miklos@szeredi.hu>
4
5 This program can be distributed under the terms of the GNU GPL.
6 See the file COPYING.
7*/
8
9#include "fuse_i.h"
10
11#include <linux/pagemap.h>
12#include <linux/slab.h>
13#include <linux/kernel.h>
e8edc6e0 14#include <linux/sched.h>
08cbf542 15#include <linux/module.h>
b6aeaded 16
4b6f5d20 17static const struct file_operations fuse_direct_io_file_operations;
45323fb7 18
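/* Send FUSE_OPEN or FUSE_OPENDIR for the given nodeid and return the server's reply in outargp */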
19static int fuse_send_open(struct fuse_conn *fc, u64 nodeid, struct file *file,
20 int opcode, struct fuse_open_out *outargp)
b6aeaded 21{
b6aeaded 22 struct fuse_open_in inarg;
23 struct fuse_req *req;
24 int err;
25
26 req = fuse_get_req(fc);
27 if (IS_ERR(req))
28 return PTR_ERR(req);
29
30 memset(&inarg, 0, sizeof(inarg));
31 inarg.flags = file->f_flags & ~(O_CREAT | O_EXCL | O_NOCTTY);
32 if (!fc->atomic_o_trunc)
33 inarg.flags &= ~O_TRUNC;
34 req->in.h.opcode = opcode;
35 req->in.h.nodeid = nodeid;
36 req->in.numargs = 1;
37 req->in.args[0].size = sizeof(inarg);
38 req->in.args[0].value = &inarg;
39 req->out.numargs = 1;
40 req->out.args[0].size = sizeof(*outargp);
41 req->out.args[0].value = outargp;
b93f858a 42 fuse_request_send(fc, req);
43 err = req->out.h.error;
44 fuse_put_request(fc, req);
45
46 return err;
47}
48
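/* Allocate a fuse_file together with the request reserved for its eventual release; returns NULL on failure */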
acf99433 49struct fuse_file *fuse_file_alloc(struct fuse_conn *fc)
50{
51 struct fuse_file *ff;
6b2db28a 52
fd72faac 53 ff = kmalloc(sizeof(struct fuse_file), GFP_KERNEL);
54 if (unlikely(!ff))
55 return NULL;
56
da5e4714 57 ff->fc = fc;
58 ff->reserved_req = fuse_request_alloc();
59 if (unlikely(!ff->reserved_req)) {
60 kfree(ff);
61 return NULL;
fd72faac 62 }
63
64 INIT_LIST_HEAD(&ff->write_entry);
65 atomic_set(&ff->count, 0);
66 RB_CLEAR_NODE(&ff->polled_node);
67 init_waitqueue_head(&ff->poll_wait);
68
69 spin_lock(&fc->lock);
70 ff->kh = ++fc->khctr;
71 spin_unlock(&fc->lock);
72
73 return ff;
74}
75
76void fuse_file_free(struct fuse_file *ff)
77{
33649c91 78 fuse_request_free(ff->reserved_req);
79 kfree(ff);
80}
81
c7b7143c 82struct fuse_file *fuse_file_get(struct fuse_file *ff)
83{
84 atomic_inc(&ff->count);
85 return ff;
86}
87
88static void fuse_release_end(struct fuse_conn *fc, struct fuse_req *req)
89{
b0be46eb 90 path_put(&req->misc.release.path);
91}
92
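/* Drop a reference; the final put sends the prepared RELEASE request in the background and frees the fuse_file */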
93static void fuse_file_put(struct fuse_file *ff)
94{
95 if (atomic_dec_and_test(&ff->count)) {
96 struct fuse_req *req = ff->reserved_req;
8b0797a4 97
819c4b3b 98 req->end = fuse_release_end;
8b0797a4 99 fuse_request_send_background(ff->fc, req);
100 kfree(ff);
101 }
102}
103
104int fuse_do_open(struct fuse_conn *fc, u64 nodeid, struct file *file,
105 bool isdir)
106{
107 struct fuse_open_out outarg;
108 struct fuse_file *ff;
109 int err;
110 int opcode = isdir ? FUSE_OPENDIR : FUSE_OPEN;
111
112 ff = fuse_file_alloc(fc);
113 if (!ff)
114 return -ENOMEM;
115
116 err = fuse_send_open(fc, nodeid, file, opcode, &outarg);
117 if (err) {
118 fuse_file_free(ff);
119 return err;
120 }
121
122 if (isdir)
123 outarg.open_flags &= ~FOPEN_DIRECT_IO;
124
125 ff->fh = outarg.fh;
126 ff->nodeid = nodeid;
127 ff->open_flags = outarg.open_flags;
128 file->private_data = fuse_file_get(ff);
129
130 return 0;
131}
08cbf542 132EXPORT_SYMBOL_GPL(fuse_do_open);
91fe96b4 133
c7b7143c 134void fuse_finish_open(struct inode *inode, struct file *file)
fd72faac 135{
136 struct fuse_file *ff = file->private_data;
137
138 if (ff->open_flags & FOPEN_DIRECT_IO)
fd72faac 139 file->f_op = &fuse_direct_io_file_operations;
c7b7143c 140 if (!(ff->open_flags & FOPEN_KEEP_CACHE))
b1009979 141 invalidate_inode_pages2(inode->i_mapping);
c7b7143c 142 if (ff->open_flags & FOPEN_NONSEEKABLE)
a7c1b990 143 nonseekable_open(inode, file);
144}
145
91fe96b4 146int fuse_open_common(struct inode *inode, struct file *file, bool isdir)
fd72faac 147{
acf99433 148 struct fuse_conn *fc = get_fuse_conn(inode);
b6aeaded 149 int err;
b6aeaded 150
151 /* VFS checks this, but only _after_ ->open() */
152 if (file->f_flags & O_DIRECT)
153 return -EINVAL;
154
155 err = generic_file_open(inode, file);
156 if (err)
157 return err;
158
91fe96b4 159 err = fuse_do_open(fc, get_node_id(inode), file, isdir);
fd72faac 160 if (err)
91fe96b4 161 return err;
b6aeaded 162
163 fuse_finish_open(inode, file);
164
165 return 0;
166}
167
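/* Detach the file from the connection's write and poll bookkeeping and fill in the release request */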
8b0797a4 168static void fuse_prepare_release(struct fuse_file *ff, int flags, int opcode)
64c6d8ed 169{
8b0797a4 170 struct fuse_conn *fc = ff->fc;
33649c91 171 struct fuse_req *req = ff->reserved_req;
b57d4264 172 struct fuse_release_in *inarg = &req->misc.release.in;
b6aeaded 173
174 spin_lock(&fc->lock);
175 list_del(&ff->write_entry);
176 if (!RB_EMPTY_NODE(&ff->polled_node))
177 rb_erase(&ff->polled_node, &fc->polled_files);
178 spin_unlock(&fc->lock);
179
180 wake_up_interruptible_sync(&ff->poll_wait);
181
b6aeaded 182 inarg->fh = ff->fh;
fd72faac 183 inarg->flags = flags;
51eb01e7 184 req->in.h.opcode = opcode;
c7b7143c 185 req->in.h.nodeid = ff->nodeid;
186 req->in.numargs = 1;
187 req->in.args[0].size = sizeof(struct fuse_release_in);
188 req->in.args[0].value = inarg;
189}
190
8b0797a4 191void fuse_release_common(struct file *file, int opcode)
fd72faac 192{
193 struct fuse_file *ff;
194 struct fuse_req *req;
b6aeaded 195
196 ff = file->private_data;
197 if (unlikely(!ff))
8b0797a4 198 return;
6b2db28a 199
6b2db28a 200 req = ff->reserved_req;
8b0797a4 201 fuse_prepare_release(ff, file->f_flags, opcode);
202
203 /* Hold vfsmount and dentry until release is finished */
204 path_get(&file->f_path);
205 req->misc.release.path = file->f_path;
6b2db28a 206
207 /*
208 * Normally this will send the RELEASE request, however if
209 * some asynchronous READ or WRITE requests are outstanding,
210 * the sending will be delayed.
211 */
212 fuse_file_put(ff);
213}
214
215static int fuse_open(struct inode *inode, struct file *file)
216{
91fe96b4 217 return fuse_open_common(inode, file, false);
218}
219
220static int fuse_release(struct inode *inode, struct file *file)
221{
222 fuse_release_common(file, FUSE_RELEASE);
223
224 /* return value is ignored by VFS */
225 return 0;
226}
227
228void fuse_sync_release(struct fuse_file *ff, int flags)
229{
230 WARN_ON(atomic_read(&ff->count) > 1);
231 fuse_prepare_release(ff, flags, FUSE_RELEASE);
232 ff->reserved_req->force = 1;
233 fuse_request_send(ff->fc, ff->reserved_req);
234 fuse_put_request(ff->fc, ff->reserved_req);
235 kfree(ff);
04730fef 236}
08cbf542 237EXPORT_SYMBOL_GPL(fuse_sync_release);
04730fef 238
71421259 239/*
240 * Scramble the ID space with XTEA, so that the value of the files_struct
241 * pointer is not exposed to userspace.
71421259 242 */
f3332114 243u64 fuse_lock_owner_id(struct fuse_conn *fc, fl_owner_t id)
71421259 244{
245 u32 *k = fc->scramble_key;
246 u64 v = (unsigned long) id;
247 u32 v0 = v;
248 u32 v1 = v >> 32;
249 u32 sum = 0;
250 int i;
251
252 for (i = 0; i < 32; i++) {
253 v0 += ((v1 << 4 ^ v1 >> 5) + v1) ^ (sum + k[sum & 3]);
254 sum += 0x9E3779B9;
255 v1 += ((v0 << 4 ^ v0 >> 5) + v0) ^ (sum + k[sum>>11 & 3]);
256 }
257
258 return (u64) v0 + ((u64) v1 << 32);
259}
260
261/*
262 * Check if page is under writeback
263 *
264 * This is currently done by walking the list of writepage requests
265 * for the inode, which can be pretty inefficient.
266 */
267static bool fuse_page_is_writeback(struct inode *inode, pgoff_t index)
268{
269 struct fuse_conn *fc = get_fuse_conn(inode);
270 struct fuse_inode *fi = get_fuse_inode(inode);
271 struct fuse_req *req;
272 bool found = false;
273
274 spin_lock(&fc->lock);
275 list_for_each_entry(req, &fi->writepages, writepages_entry) {
276 pgoff_t curr_index;
277
278 BUG_ON(req->inode != inode);
279 curr_index = req->misc.write.in.offset >> PAGE_CACHE_SHIFT;
280 if (curr_index == index) {
281 found = true;
282 break;
283 }
284 }
285 spin_unlock(&fc->lock);
286
287 return found;
288}
289
290/*
291 * Wait for page writeback to be completed.
292 *
293 * Since fuse doesn't rely on the VM writeback tracking, this has to
294 * use some other means.
295 */
296static int fuse_wait_on_page_writeback(struct inode *inode, pgoff_t index)
297{
298 struct fuse_inode *fi = get_fuse_inode(inode);
299
300 wait_event(fi->page_waitq, !fuse_page_is_writeback(inode, index));
301 return 0;
302}
303
75e1fcc0 304static int fuse_flush(struct file *file, fl_owner_t id)
b6aeaded 305{
7706a9d6 306 struct inode *inode = file->f_path.dentry->d_inode;
307 struct fuse_conn *fc = get_fuse_conn(inode);
308 struct fuse_file *ff = file->private_data;
309 struct fuse_req *req;
310 struct fuse_flush_in inarg;
311 int err;
312
313 if (is_bad_inode(inode))
314 return -EIO;
315
316 if (fc->no_flush)
317 return 0;
318
33649c91 319 req = fuse_get_req_nofail(fc, file);
320 memset(&inarg, 0, sizeof(inarg));
321 inarg.fh = ff->fh;
9c8ef561 322 inarg.lock_owner = fuse_lock_owner_id(fc, id);
323 req->in.h.opcode = FUSE_FLUSH;
324 req->in.h.nodeid = get_node_id(inode);
325 req->in.numargs = 1;
326 req->in.args[0].size = sizeof(inarg);
327 req->in.args[0].value = &inarg;
71421259 328 req->force = 1;
b93f858a 329 fuse_request_send(fc, req);
330 err = req->out.h.error;
331 fuse_put_request(fc, req);
332 if (err == -ENOSYS) {
333 fc->no_flush = 1;
334 err = 0;
335 }
336 return err;
337}
338
339/*
340 * Wait for all pending writepages on the inode to finish.
341 *
342 * This is currently done by blocking further writes with FUSE_NOWRITE
343 * and waiting for all sent writes to complete.
344 *
345 * This must be called under i_mutex, otherwise the FUSE_NOWRITE usage
346 * could conflict with truncation.
347 */
348static void fuse_sync_writes(struct inode *inode)
349{
350 fuse_set_nowrite(inode);
351 fuse_release_nowrite(inode);
352}
353
354int fuse_fsync_common(struct file *file, struct dentry *de, int datasync,
355 int isdir)
356{
357 struct inode *inode = de->d_inode;
358 struct fuse_conn *fc = get_fuse_conn(inode);
359 struct fuse_file *ff = file->private_data;
360 struct fuse_req *req;
361 struct fuse_fsync_in inarg;
362 int err;
363
364 if (is_bad_inode(inode))
365 return -EIO;
366
82547981 367 if ((!isdir && fc->no_fsync) || (isdir && fc->no_fsyncdir))
368 return 0;
369
370 /*
371 * Start writeback against all dirty pages of the inode, then
372 * wait for all outstanding writes, before sending the FSYNC
373 * request.
374 */
375 err = write_inode_now(inode, 0);
376 if (err)
377 return err;
378
379 fuse_sync_writes(inode);
380
381 req = fuse_get_req(fc);
382 if (IS_ERR(req))
383 return PTR_ERR(req);
384
385 memset(&inarg, 0, sizeof(inarg));
386 inarg.fh = ff->fh;
387 inarg.fsync_flags = datasync ? 1 : 0;
82547981 388 req->in.h.opcode = isdir ? FUSE_FSYNCDIR : FUSE_FSYNC;
b6aeaded 389 req->in.h.nodeid = get_node_id(inode);
390 req->in.numargs = 1;
391 req->in.args[0].size = sizeof(inarg);
392 req->in.args[0].value = &inarg;
b93f858a 393 fuse_request_send(fc, req);
394 err = req->out.h.error;
395 fuse_put_request(fc, req);
396 if (err == -ENOSYS) {
397 if (isdir)
398 fc->no_fsyncdir = 1;
399 else
400 fc->no_fsync = 1;
401 err = 0;
402 }
403 return err;
404}
405
406static int fuse_fsync(struct file *file, struct dentry *de, int datasync)
407{
408 return fuse_fsync_common(file, de, datasync, 0);
409}
410
411void fuse_read_fill(struct fuse_req *req, struct file *file, loff_t pos,
412 size_t count, int opcode)
b6aeaded 413{
5c5c5e51 414 struct fuse_read_in *inarg = &req->misc.read.in;
a6643094 415 struct fuse_file *ff = file->private_data;
b6aeaded 416
417 inarg->fh = ff->fh;
418 inarg->offset = pos;
419 inarg->size = count;
a6643094 420 inarg->flags = file->f_flags;
361b1eb5 421 req->in.h.opcode = opcode;
2106cb18 422 req->in.h.nodeid = ff->nodeid;
423 req->in.numargs = 1;
424 req->in.args[0].size = sizeof(struct fuse_read_in);
c1aa96a5 425 req->in.args[0].value = inarg;
426 req->out.argvar = 1;
427 req->out.numargs = 1;
428 req->out.args[0].size = count;
429}
430
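/* Send a synchronous FUSE_READ and return the number of bytes the server produced */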
8bfc016d 431static size_t fuse_send_read(struct fuse_req *req, struct file *file,
2106cb18 432 loff_t pos, size_t count, fl_owner_t owner)
04730fef 433{
434 struct fuse_file *ff = file->private_data;
435 struct fuse_conn *fc = ff->fc;
f3332114 436
2106cb18 437 fuse_read_fill(req, file, pos, count, FUSE_READ);
f3332114 438 if (owner != NULL) {
5c5c5e51 439 struct fuse_read_in *inarg = &req->misc.read.in;
440
441 inarg->read_flags |= FUSE_READ_LOCKOWNER;
442 inarg->lock_owner = fuse_lock_owner_id(fc, owner);
443 }
b93f858a 444 fuse_request_send(fc, req);
361b1eb5 445 return req->out.args[0].size;
446}
447
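/* A short read means EOF: shrink the cached i_size if the attributes are still current */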
448static void fuse_read_update_size(struct inode *inode, loff_t size,
449 u64 attr_ver)
450{
451 struct fuse_conn *fc = get_fuse_conn(inode);
452 struct fuse_inode *fi = get_fuse_inode(inode);
453
454 spin_lock(&fc->lock);
455 if (attr_ver == fi->attr_version && size < inode->i_size) {
456 fi->attr_version = ++fc->attr_version;
457 i_size_write(inode, size);
458 }
459 spin_unlock(&fc->lock);
460}
461
462static int fuse_readpage(struct file *file, struct page *page)
463{
464 struct inode *inode = page->mapping->host;
465 struct fuse_conn *fc = get_fuse_conn(inode);
248d86e8 466 struct fuse_req *req;
467 size_t num_read;
468 loff_t pos = page_offset(page);
469 size_t count = PAGE_CACHE_SIZE;
470 u64 attr_ver;
471 int err;
472
473 err = -EIO;
474 if (is_bad_inode(inode))
475 goto out;
476
477 /*
478 * Page writeback can extend beyond the lifetime of the
479 * page-cache page, so make sure we read a properly synced
480 * page.
481 */
482 fuse_wait_on_page_writeback(inode, page->index);
483
484 req = fuse_get_req(fc);
485 err = PTR_ERR(req);
486 if (IS_ERR(req))
487 goto out;
488
489 attr_ver = fuse_get_attr_version(fc);
490
b6aeaded 491 req->out.page_zeroing = 1;
f4975c67 492 req->out.argpages = 1;
493 req->num_pages = 1;
494 req->pages[0] = page;
2106cb18 495 num_read = fuse_send_read(req, file, pos, count, NULL);
496 err = req->out.h.error;
497 fuse_put_request(fc, req);
498
499 if (!err) {
500 /*
501 * Short read means EOF. If file size is larger, truncate it
502 */
503 if (num_read < count)
504 fuse_read_update_size(inode, pos + num_read, attr_ver);
505
b6aeaded 506 SetPageUptodate(page);
507 }
508
b36c31ba 509 fuse_invalidate_attr(inode); /* atime changed */
510 out:
511 unlock_page(page);
512 return err;
513}
514
c1aa96a5 515static void fuse_readpages_end(struct fuse_conn *fc, struct fuse_req *req)
db50b96c 516{
c1aa96a5 517 int i;
518 size_t count = req->misc.read.in.size;
519 size_t num_read = req->out.args[0].size;
520 struct inode *inode = req->pages[0]->mapping->host;
c1aa96a5 521
522 /*
523 * Short read means EOF. If file size is larger, truncate it
524 */
525 if (!req->out.h.error && num_read < count) {
526 loff_t pos = page_offset(req->pages[0]) + num_read;
527 fuse_read_update_size(inode, pos, req->misc.read.attr_ver);
528 }
529
530 fuse_invalidate_attr(inode); /* atime changed */
c1aa96a5 531
532 for (i = 0; i < req->num_pages; i++) {
533 struct page *page = req->pages[i];
534 if (!req->out.h.error)
535 SetPageUptodate(page);
536 else
537 SetPageError(page);
db50b96c 538 unlock_page(page);
b5dd3285 539 page_cache_release(page);
db50b96c 540 }
541 if (req->ff)
542 fuse_file_put(req->ff);
543}
544
2106cb18 545static void fuse_send_readpages(struct fuse_req *req, struct file *file)
c1aa96a5 546{
547 struct fuse_file *ff = file->private_data;
548 struct fuse_conn *fc = ff->fc;
549 loff_t pos = page_offset(req->pages[0]);
550 size_t count = req->num_pages << PAGE_CACHE_SHIFT;
551
552 req->out.argpages = 1;
c1aa96a5 553 req->out.page_zeroing = 1;
2106cb18 554 fuse_read_fill(req, file, pos, count, FUSE_READ);
5c5c5e51 555 req->misc.read.attr_ver = fuse_get_attr_version(fc);
9cd68455 556 if (fc->async_read) {
c756e0a4 557 req->ff = fuse_file_get(ff);
9cd68455 558 req->end = fuse_readpages_end;
b93f858a 559 fuse_request_send_background(fc, req);
9cd68455 560 } else {
b93f858a 561 fuse_request_send(fc, req);
9cd68455 562 fuse_readpages_end(fc, req);
e9bb09dd 563 fuse_put_request(fc, req);
9cd68455 564 }
565}
566
c756e0a4 567struct fuse_fill_data {
db50b96c 568 struct fuse_req *req;
a6643094 569 struct file *file;
570 struct inode *inode;
571};
572
573static int fuse_readpages_fill(void *_data, struct page *page)
574{
c756e0a4 575 struct fuse_fill_data *data = _data;
576 struct fuse_req *req = data->req;
577 struct inode *inode = data->inode;
578 struct fuse_conn *fc = get_fuse_conn(inode);
579
580 fuse_wait_on_page_writeback(inode, page->index);
581
582 if (req->num_pages &&
583 (req->num_pages == FUSE_MAX_PAGES_PER_REQ ||
584 (req->num_pages + 1) * PAGE_CACHE_SIZE > fc->max_read ||
585 req->pages[req->num_pages - 1]->index + 1 != page->index)) {
2106cb18 586 fuse_send_readpages(req, data->file);
587 data->req = req = fuse_get_req(fc);
588 if (IS_ERR(req)) {
db50b96c 589 unlock_page(page);
ce1d5a49 590 return PTR_ERR(req);
db50b96c 591 }
db50b96c 592 }
b5dd3285 593 page_cache_get(page);
db50b96c 594 req->pages[req->num_pages] = page;
1729a16c 595 req->num_pages++;
596 return 0;
597}
598
599static int fuse_readpages(struct file *file, struct address_space *mapping,
600 struct list_head *pages, unsigned nr_pages)
601{
602 struct inode *inode = mapping->host;
603 struct fuse_conn *fc = get_fuse_conn(inode);
c756e0a4 604 struct fuse_fill_data data;
db50b96c 605 int err;
248d86e8 606
1d7ea732 607 err = -EIO;
248d86e8 608 if (is_bad_inode(inode))
2e990021 609 goto out;
248d86e8 610
a6643094 611 data.file = file;
db50b96c 612 data.inode = inode;
ce1d5a49 613 data.req = fuse_get_req(fc);
1d7ea732 614 err = PTR_ERR(data.req);
ce1d5a49 615 if (IS_ERR(data.req))
2e990021 616 goto out;
617
618 err = read_cache_pages(mapping, pages, fuse_readpages_fill, &data);
619 if (!err) {
620 if (data.req->num_pages)
2106cb18 621 fuse_send_readpages(data.req, file);
622 else
623 fuse_put_request(fc, data.req);
624 }
2e990021 625out:
1d7ea732 626 return err;
627}
628
629static ssize_t fuse_file_aio_read(struct kiocb *iocb, const struct iovec *iov,
630 unsigned long nr_segs, loff_t pos)
631{
632 struct inode *inode = iocb->ki_filp->f_mapping->host;
633
634 if (pos + iov_length(iov, nr_segs) > i_size_read(inode)) {
635 int err;
636 /*
637 * If trying to read past EOF, make sure the i_size
638 * attribute is up-to-date.
639 */
640 err = fuse_update_attributes(inode, NULL, iocb->ki_filp, NULL);
641 if (err)
642 return err;
643 }
644
645 return generic_file_aio_read(iocb, iov, nr_segs, pos);
646}
647
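/* Fill in the common fields of a FUSE_WRITE request */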
2d698b07 648static void fuse_write_fill(struct fuse_req *req, struct fuse_file *ff,
2106cb18 649 loff_t pos, size_t count)
b6aeaded 650{
651 struct fuse_write_in *inarg = &req->misc.write.in;
652 struct fuse_write_out *outarg = &req->misc.write.out;
b6aeaded 653
654 inarg->fh = ff->fh;
655 inarg->offset = pos;
656 inarg->size = count;
b6aeaded 657 req->in.h.opcode = FUSE_WRITE;
2106cb18 658 req->in.h.nodeid = ff->nodeid;
b6aeaded 659 req->in.numargs = 2;
2106cb18 660 if (ff->fc->minor < 9)
661 req->in.args[0].size = FUSE_COMPAT_WRITE_IN_SIZE;
662 else
663 req->in.args[0].size = sizeof(struct fuse_write_in);
b25e82e5 664 req->in.args[0].value = inarg;
665 req->in.args[1].size = count;
666 req->out.numargs = 1;
667 req->out.args[0].size = sizeof(struct fuse_write_out);
668 req->out.args[0].value = outarg;
669}
670
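/* Send a synchronous FUSE_WRITE and return the number of bytes the server accepted */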
671static size_t fuse_send_write(struct fuse_req *req, struct file *file,
2106cb18 672 loff_t pos, size_t count, fl_owner_t owner)
b25e82e5 673{
674 struct fuse_file *ff = file->private_data;
675 struct fuse_conn *fc = ff->fc;
676 struct fuse_write_in *inarg = &req->misc.write.in;
677
2106cb18 678 fuse_write_fill(req, ff, pos, count);
2d698b07 679 inarg->flags = file->f_flags;
f3332114 680 if (owner != NULL) {
681 inarg->write_flags |= FUSE_WRITE_LOCKOWNER;
682 inarg->lock_owner = fuse_lock_owner_id(fc, owner);
683 }
b93f858a 684 fuse_request_send(fc, req);
b25e82e5 685 return req->misc.write.out.size;
686}
687
688static int fuse_write_begin(struct file *file, struct address_space *mapping,
689 loff_t pos, unsigned len, unsigned flags,
690 struct page **pagep, void **fsdata)
b6aeaded 691{
692 pgoff_t index = pos >> PAGE_CACHE_SHIFT;
693
54566b2c 694 *pagep = grab_cache_page_write_begin(mapping, index, flags);
695 if (!*pagep)
696 return -ENOMEM;
697 return 0;
698}
699
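/* Grow the cached i_size after a write that extended the file */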
700static void fuse_write_update_size(struct inode *inode, loff_t pos)
701{
702 struct fuse_conn *fc = get_fuse_conn(inode);
703 struct fuse_inode *fi = get_fuse_inode(inode);
704
705 spin_lock(&fc->lock);
706 fi->attr_version = ++fc->attr_version;
707 if (pos > inode->i_size)
708 i_size_write(inode, pos);
709 spin_unlock(&fc->lock);
710}
711
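/* Write a single page synchronously on behalf of ->write_end() */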
712static int fuse_buffered_write(struct file *file, struct inode *inode,
713 loff_t pos, unsigned count, struct page *page)
714{
715 int err;
04730fef 716 size_t nres;
b6aeaded 717 struct fuse_conn *fc = get_fuse_conn(inode);
5e6f58a1 718 unsigned offset = pos & (PAGE_CACHE_SIZE - 1);
719 struct fuse_req *req;
720
721 if (is_bad_inode(inode))
722 return -EIO;
723
724 /*
725 * Make sure writepages on the same page are not mixed up with
726 * plain writes.
727 */
728 fuse_wait_on_page_writeback(inode, page->index);
729
730 req = fuse_get_req(fc);
731 if (IS_ERR(req))
732 return PTR_ERR(req);
b6aeaded 733
f4975c67 734 req->in.argpages = 1;
735 req->num_pages = 1;
736 req->pages[0] = page;
737 req->page_offset = offset;
2106cb18 738 nres = fuse_send_write(req, file, pos, count, NULL);
739 err = req->out.h.error;
740 fuse_put_request(fc, req);
5e6f58a1 741 if (!err && !nres)
742 err = -EIO;
743 if (!err) {
5e6f58a1 744 pos += nres;
854512ec 745 fuse_write_update_size(inode, pos);
5e6f58a1 746 if (count == PAGE_CACHE_SIZE)
b6aeaded 747 SetPageUptodate(page);
748 }
749 fuse_invalidate_attr(inode);
750 return err ? err : nres;
751}
752
753static int fuse_write_end(struct file *file, struct address_space *mapping,
754 loff_t pos, unsigned len, unsigned copied,
755 struct page *page, void *fsdata)
756{
757 struct inode *inode = mapping->host;
758 int res = 0;
759
760 if (copied)
761 res = fuse_buffered_write(file, inode, pos, copied, page);
762
763 unlock_page(page);
764 page_cache_release(page);
765 return res;
766}
767
768static size_t fuse_send_write_pages(struct fuse_req *req, struct file *file,
769 struct inode *inode, loff_t pos,
770 size_t count)
771{
772 size_t res;
773 unsigned offset;
774 unsigned i;
775
776 for (i = 0; i < req->num_pages; i++)
777 fuse_wait_on_page_writeback(inode, req->pages[i]->index);
778
2106cb18 779 res = fuse_send_write(req, file, pos, count, NULL);
780
781 offset = req->page_offset;
782 count = res;
783 for (i = 0; i < req->num_pages; i++) {
784 struct page *page = req->pages[i];
785
786 if (!req->out.h.error && !offset && count >= PAGE_CACHE_SIZE)
787 SetPageUptodate(page);
788
789 if (count > PAGE_CACHE_SIZE - offset)
790 count -= PAGE_CACHE_SIZE - offset;
791 else
792 count = 0;
793 offset = 0;
794
795 unlock_page(page);
796 page_cache_release(page);
797 }
798
799 return res;
800}
801
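/* Copy data from the iov_iter into page-cache pages attached to the request; without big_writes at most one page is filled */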
802static ssize_t fuse_fill_write_pages(struct fuse_req *req,
803 struct address_space *mapping,
804 struct iov_iter *ii, loff_t pos)
805{
806 struct fuse_conn *fc = get_fuse_conn(mapping->host);
807 unsigned offset = pos & (PAGE_CACHE_SIZE - 1);
808 size_t count = 0;
809 int err;
810
f4975c67 811 req->in.argpages = 1;
812 req->page_offset = offset;
813
814 do {
815 size_t tmp;
816 struct page *page;
817 pgoff_t index = pos >> PAGE_CACHE_SHIFT;
818 size_t bytes = min_t(size_t, PAGE_CACHE_SIZE - offset,
819 iov_iter_count(ii));
820
821 bytes = min_t(size_t, bytes, fc->max_write - count);
822
823 again:
824 err = -EFAULT;
825 if (iov_iter_fault_in_readable(ii, bytes))
826 break;
827
828 err = -ENOMEM;
54566b2c 829 page = grab_cache_page_write_begin(mapping, index, 0);
830 if (!page)
831 break;
832
931e80e4 833 if (mapping_writably_mapped(mapping))
834 flush_dcache_page(page);
835
836 pagefault_disable();
837 tmp = iov_iter_copy_from_user_atomic(page, ii, offset, bytes);
838 pagefault_enable();
839 flush_dcache_page(page);
840
841 if (!tmp) {
842 unlock_page(page);
843 page_cache_release(page);
844 bytes = min(bytes, iov_iter_single_seg_count(ii));
845 goto again;
846 }
847
848 err = 0;
849 req->pages[req->num_pages] = page;
850 req->num_pages++;
851
852 iov_iter_advance(ii, tmp);
853 count += tmp;
854 pos += tmp;
855 offset += tmp;
856 if (offset == PAGE_CACHE_SIZE)
857 offset = 0;
858
859 if (!fc->big_writes)
860 break;
861 } while (iov_iter_count(ii) && count < fc->max_write &&
862 req->num_pages < FUSE_MAX_PAGES_PER_REQ && offset == 0);
863
864 return count > 0 ? count : err;
865}
866
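/* Cached write: repeatedly fill a request with pages and send it until the iov_iter is drained or an error occurs */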
867static ssize_t fuse_perform_write(struct file *file,
868 struct address_space *mapping,
869 struct iov_iter *ii, loff_t pos)
870{
871 struct inode *inode = mapping->host;
872 struct fuse_conn *fc = get_fuse_conn(inode);
873 int err = 0;
874 ssize_t res = 0;
875
876 if (is_bad_inode(inode))
877 return -EIO;
878
879 do {
880 struct fuse_req *req;
881 ssize_t count;
882
883 req = fuse_get_req(fc);
884 if (IS_ERR(req)) {
885 err = PTR_ERR(req);
886 break;
887 }
888
889 count = fuse_fill_write_pages(req, mapping, ii, pos);
890 if (count <= 0) {
891 err = count;
892 } else {
893 size_t num_written;
894
895 num_written = fuse_send_write_pages(req, file, inode,
896 pos, count);
897 err = req->out.h.error;
898 if (!err) {
899 res += num_written;
900 pos += num_written;
901
902 /* break out of the loop on short write */
903 if (num_written != count)
904 err = -EIO;
905 }
906 }
907 fuse_put_request(fc, req);
908 } while (!err && iov_iter_count(ii));
909
910 if (res > 0)
911 fuse_write_update_size(inode, pos);
912
913 fuse_invalidate_attr(inode);
914
915 return res > 0 ? res : err;
916}
917
918static ssize_t fuse_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
919 unsigned long nr_segs, loff_t pos)
920{
921 struct file *file = iocb->ki_filp;
922 struct address_space *mapping = file->f_mapping;
923 size_t count = 0;
924 ssize_t written = 0;
925 struct inode *inode = mapping->host;
926 ssize_t err;
927 struct iov_iter i;
928
929 WARN_ON(iocb->ki_pos != pos);
930
931 err = generic_segment_checks(iov, &nr_segs, &count, VERIFY_READ);
932 if (err)
933 return err;
934
935 mutex_lock(&inode->i_mutex);
936 vfs_check_frozen(inode->i_sb, SB_FREEZE_WRITE);
937
938 /* We can write back this queue in page reclaim */
939 current->backing_dev_info = mapping->backing_dev_info;
940
941 err = generic_write_checks(file, &pos, &count, S_ISBLK(inode->i_mode));
942 if (err)
943 goto out;
944
945 if (count == 0)
946 goto out;
947
2f1936b8 948 err = file_remove_suid(file);
949 if (err)
950 goto out;
951
952 file_update_time(file);
953
954 iov_iter_init(&i, iov, nr_segs, count, 0);
955 written = fuse_perform_write(file, mapping, &i, pos);
956 if (written >= 0)
957 iocb->ki_pos = pos + written;
958
959out:
960 current->backing_dev_info = NULL;
961 mutex_unlock(&inode->i_mutex);
962
963 return written ? written : err;
964}
965
966static void fuse_release_user_pages(struct fuse_req *req, int write)
967{
968 unsigned i;
969
970 for (i = 0; i < req->num_pages; i++) {
971 struct page *page = req->pages[i];
972 if (write)
973 set_page_dirty_lock(page);
974 put_page(page);
975 }
976}
977
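/* Map the user buffer into the request for direct I/O; kernel-space (KERNEL_DS) buffers are passed through without pinning pages */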
978static int fuse_get_user_pages(struct fuse_req *req, const char __user *buf,
ce60a2f1 979 size_t *nbytesp, int write)
413ef8cb 980{
ce60a2f1 981 size_t nbytes = *nbytesp;
982 unsigned long user_addr = (unsigned long) buf;
983 unsigned offset = user_addr & ~PAGE_MASK;
984 int npages;
985
986 /* Special case for kernel I/O: can copy directly into the buffer */
987 if (segment_eq(get_fs(), KERNEL_DS)) {
988 if (write)
989 req->in.args[1].value = (void *) user_addr;
990 else
991 req->out.args[0].value = (void *) user_addr;
992
993 return 0;
994 }
413ef8cb 995
ce60a2f1 996 nbytes = min_t(size_t, nbytes, FUSE_MAX_PAGES_PER_REQ << PAGE_SHIFT);
413ef8cb 997 npages = (nbytes + offset + PAGE_SIZE - 1) >> PAGE_SHIFT;
bd730967 998 npages = clamp(npages, 1, FUSE_MAX_PAGES_PER_REQ);
1bf94ca7 999 npages = get_user_pages_fast(user_addr, npages, !write, req->pages);
1000 if (npages < 0)
1001 return npages;
1002
1003 req->num_pages = npages;
1004 req->page_offset = offset;
1005
1006 if (write)
1007 req->in.argpages = 1;
1008 else
1009 req->out.argpages = 1;
1010
1011 nbytes = (req->num_pages << PAGE_SHIFT) - req->page_offset;
1012 *nbytesp = min(*nbytesp, nbytes);
1013
1014 return 0;
1015}
1016
1017ssize_t fuse_direct_io(struct file *file, const char __user *buf,
1018 size_t count, loff_t *ppos, int write)
413ef8cb 1019{
1020 struct fuse_file *ff = file->private_data;
1021 struct fuse_conn *fc = ff->fc;
1022 size_t nmax = write ? fc->max_write : fc->max_read;
1023 loff_t pos = *ppos;
1024 ssize_t res = 0;
1025 struct fuse_req *req;
1026
1027 req = fuse_get_req(fc);
1028 if (IS_ERR(req))
1029 return PTR_ERR(req);
1030
1031 while (count) {
413ef8cb 1032 size_t nres;
2106cb18 1033 fl_owner_t owner = current->files;
1034 size_t nbytes = min(count, nmax);
1035 int err = fuse_get_user_pages(req, buf, &nbytes, write);
1036 if (err) {
1037 res = err;
1038 break;
1039 }
f4975c67 1040
413ef8cb 1041 if (write)
2106cb18 1042 nres = fuse_send_write(req, file, pos, nbytes, owner);
413ef8cb 1043 else
1044 nres = fuse_send_read(req, file, pos, nbytes, owner);
1045
1046 fuse_release_user_pages(req, !write);
1047 if (req->out.h.error) {
1048 if (!res)
1049 res = req->out.h.error;
1050 break;
1051 } else if (nres > nbytes) {
1052 res = -EIO;
1053 break;
1054 }
1055 count -= nres;
1056 res += nres;
1057 pos += nres;
1058 buf += nres;
1059 if (nres != nbytes)
1060 break;
1061 if (count) {
1062 fuse_put_request(fc, req);
1063 req = fuse_get_req(fc);
1064 if (IS_ERR(req))
1065 break;
1066 }
413ef8cb 1067 }
1068 if (!IS_ERR(req))
1069 fuse_put_request(fc, req);
d09cb9d7 1070 if (res > 0)
413ef8cb 1071 *ppos = pos;
1072
1073 return res;
1074}
08cbf542 1075EXPORT_SYMBOL_GPL(fuse_direct_io);
1076
1077static ssize_t fuse_direct_read(struct file *file, char __user *buf,
1078 size_t count, loff_t *ppos)
1079{
1080 ssize_t res;
1081 struct inode *inode = file->f_path.dentry->d_inode;
1082
1083 if (is_bad_inode(inode))
1084 return -EIO;
1085
1086 res = fuse_direct_io(file, buf, count, ppos, 0);
1087
1088 fuse_invalidate_attr(inode);
1089
1090 return res;
1091}
1092
1093static ssize_t fuse_direct_write(struct file *file, const char __user *buf,
1094 size_t count, loff_t *ppos)
1095{
7706a9d6 1096 struct inode *inode = file->f_path.dentry->d_inode;
413ef8cb 1097 ssize_t res;
1098
1099 if (is_bad_inode(inode))
1100 return -EIO;
1101
413ef8cb 1102 /* Don't allow parallel writes to the same file */
1b1dcc1b 1103 mutex_lock(&inode->i_mutex);
889f7848 1104 res = generic_write_checks(file, ppos, &count, 0);
d09cb9d7 1105 if (!res) {
889f7848 1106 res = fuse_direct_io(file, buf, count, ppos, 1);
1107 if (res > 0)
1108 fuse_write_update_size(inode, *ppos);
1109 }
1b1dcc1b 1110 mutex_unlock(&inode->i_mutex);
1111
1112 fuse_invalidate_attr(inode);
1113
1114 return res;
1115}
1116
3be5a52b 1117static void fuse_writepage_free(struct fuse_conn *fc, struct fuse_req *req)
b6aeaded 1118{
1119 __free_page(req->pages[0]);
1120 fuse_file_put(req->ff);
1121}
1122
1123static void fuse_writepage_finish(struct fuse_conn *fc, struct fuse_req *req)
1124{
1125 struct inode *inode = req->inode;
1126 struct fuse_inode *fi = get_fuse_inode(inode);
1127 struct backing_dev_info *bdi = inode->i_mapping->backing_dev_info;
1128
1129 list_del(&req->writepages_entry);
1130 dec_bdi_stat(bdi, BDI_WRITEBACK);
1131 dec_zone_page_state(req->pages[0], NR_WRITEBACK_TEMP);
1132 bdi_writeout_inc(bdi);
1133 wake_up(&fi->page_waitq);
1134}
1135
1136/* Called under fc->lock, may release and reacquire it */
1137static void fuse_send_writepage(struct fuse_conn *fc, struct fuse_req *req)
1138__releases(&fc->lock)
1139__acquires(&fc->lock)
1140{
1141 struct fuse_inode *fi = get_fuse_inode(req->inode);
1142 loff_t size = i_size_read(req->inode);
1143 struct fuse_write_in *inarg = &req->misc.write.in;
1144
1145 if (!fc->connected)
1146 goto out_free;
1147
1148 if (inarg->offset + PAGE_CACHE_SIZE <= size) {
1149 inarg->size = PAGE_CACHE_SIZE;
1150 } else if (inarg->offset < size) {
1151 inarg->size = size & (PAGE_CACHE_SIZE - 1);
1152 } else {
1153 /* Got truncated off completely */
1154 goto out_free;
b6aeaded 1155 }
1156
1157 req->in.args[1].size = inarg->size;
1158 fi->writectr++;
b93f858a 1159 fuse_request_send_background_locked(fc, req);
1160 return;
1161
1162 out_free:
1163 fuse_writepage_finish(fc, req);
1164 spin_unlock(&fc->lock);
1165 fuse_writepage_free(fc, req);
e9bb09dd 1166 fuse_put_request(fc, req);
3be5a52b 1167 spin_lock(&fc->lock);
1168}
1169
1170/*
1171 * If fi->writectr is positive (no truncate or fsync going on) send
1172 * all queued writepage requests.
1173 *
1174 * Called with fc->lock
1175 */
1176void fuse_flush_writepages(struct inode *inode)
1177__releases(&fc->lock)
1178__acquires(&fc->lock)
b6aeaded 1179{
1180 struct fuse_conn *fc = get_fuse_conn(inode);
1181 struct fuse_inode *fi = get_fuse_inode(inode);
1182 struct fuse_req *req;
1183
1184 while (fi->writectr >= 0 && !list_empty(&fi->queued_writes)) {
1185 req = list_entry(fi->queued_writes.next, struct fuse_req, list);
1186 list_del_init(&req->list);
1187 fuse_send_writepage(fc, req);
1188 }
1189}
1190
1191static void fuse_writepage_end(struct fuse_conn *fc, struct fuse_req *req)
1192{
1193 struct inode *inode = req->inode;
1194 struct fuse_inode *fi = get_fuse_inode(inode);
1195
1196 mapping_set_error(inode->i_mapping, req->out.h.error);
1197 spin_lock(&fc->lock);
1198 fi->writectr--;
1199 fuse_writepage_finish(fc, req);
1200 spin_unlock(&fc->lock);
1201 fuse_writepage_free(fc, req);
1202}
1203
1204static int fuse_writepage_locked(struct page *page)
1205{
1206 struct address_space *mapping = page->mapping;
1207 struct inode *inode = mapping->host;
1208 struct fuse_conn *fc = get_fuse_conn(inode);
1209 struct fuse_inode *fi = get_fuse_inode(inode);
1210 struct fuse_req *req;
1211 struct fuse_file *ff;
1212 struct page *tmp_page;
1213
1214 set_page_writeback(page);
1215
1216 req = fuse_request_alloc_nofs();
1217 if (!req)
1218 goto err;
1219
1220 tmp_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
1221 if (!tmp_page)
1222 goto err_free;
1223
1224 spin_lock(&fc->lock);
1225 BUG_ON(list_empty(&fi->write_files));
1226 ff = list_entry(fi->write_files.next, struct fuse_file, write_entry);
1227 req->ff = fuse_file_get(ff);
1228 spin_unlock(&fc->lock);
1229
2106cb18 1230 fuse_write_fill(req, ff, page_offset(page), 0);
1231
1232 copy_highpage(tmp_page, page);
2d698b07 1233 req->misc.write.in.write_flags |= FUSE_WRITE_CACHE;
f4975c67 1234 req->in.argpages = 1;
1235 req->num_pages = 1;
1236 req->pages[0] = tmp_page;
1237 req->page_offset = 0;
1238 req->end = fuse_writepage_end;
1239 req->inode = inode;
1240
1241 inc_bdi_stat(mapping->backing_dev_info, BDI_WRITEBACK);
1242 inc_zone_page_state(tmp_page, NR_WRITEBACK_TEMP);
1243 end_page_writeback(page);
1244
1245 spin_lock(&fc->lock);
1246 list_add(&req->writepages_entry, &fi->writepages);
1247 list_add_tail(&req->list, &fi->queued_writes);
1248 fuse_flush_writepages(inode);
1249 spin_unlock(&fc->lock);
1250
1251 return 0;
1252
1253err_free:
1254 fuse_request_free(req);
1255err:
1256 end_page_writeback(page);
1257 return -ENOMEM;
1258}
1259
1260static int fuse_writepage(struct page *page, struct writeback_control *wbc)
1261{
1262 int err;
1263
1264 err = fuse_writepage_locked(page);
1265 unlock_page(page);
1266
1267 return err;
1268}
1269
1270static int fuse_launder_page(struct page *page)
1271{
1272 int err = 0;
1273 if (clear_page_dirty_for_io(page)) {
1274 struct inode *inode = page->mapping->host;
1275 err = fuse_writepage_locked(page);
1276 if (!err)
1277 fuse_wait_on_page_writeback(inode, page->index);
1278 }
1279 return err;
1280}
1281
1282/*
1283 * Write back dirty pages now, because there may not be any suitable
1284 * open files later
1285 */
1286static void fuse_vma_close(struct vm_area_struct *vma)
1287{
1288 filemap_write_and_wait(vma->vm_file->f_mapping);
1289}
1290
1291/*
1292 * Wait for writeback against this page to complete before allowing it
1293 * to be marked dirty again, and hence written back again, possibly
1294 * before the previous writepage completed.
1295 *
1296 * Block here, instead of in ->writepage(), so that the userspace fs
1297 * can only block processes actually operating on the filesystem.
1298 *
1299 * Otherwise unprivileged userspace fs would be able to block
1300 * unrelated:
1301 *
1302 * - page migration
1303 * - sync(2)
1304 * - try_to_free_pages() with order > PAGE_ALLOC_COSTLY_ORDER
1305 */
c2ec175c 1306static int fuse_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
3be5a52b 1307{
c2ec175c 1308 struct page *page = vmf->page;
1309 /*
1310 * Don't use page->mapping as it may become NULL from a
1311 * concurrent truncate.
1312 */
1313 struct inode *inode = vma->vm_file->f_mapping->host;
1314
1315 fuse_wait_on_page_writeback(inode, page->index);
1316 return 0;
1317}
1318
f0f37e2f 1319static const struct vm_operations_struct fuse_file_vm_ops = {
1320 .close = fuse_vma_close,
1321 .fault = filemap_fault,
1322 .page_mkwrite = fuse_page_mkwrite,
1323};
1324
1325static int fuse_file_mmap(struct file *file, struct vm_area_struct *vma)
1326{
1327 if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_MAYWRITE)) {
1328 struct inode *inode = file->f_dentry->d_inode;
1329 struct fuse_conn *fc = get_fuse_conn(inode);
1330 struct fuse_inode *fi = get_fuse_inode(inode);
1331 struct fuse_file *ff = file->private_data;
1332 /*
1333 * file may be written through mmap, so chain it onto the
1334 * inode's write_files list
1335 */
1336 spin_lock(&fc->lock);
1337 if (list_empty(&ff->write_entry))
1338 list_add(&ff->write_entry, &fi->write_files);
1339 spin_unlock(&fc->lock);
1340 }
1341 file_accessed(file);
1342 vma->vm_ops = &fuse_file_vm_ops;
1343 return 0;
1344}
1345
1346static int fuse_direct_mmap(struct file *file, struct vm_area_struct *vma)
1347{
1348 /* Can't provide the coherency needed for MAP_SHARED */
1349 if (vma->vm_flags & VM_MAYSHARE)
1350 return -ENODEV;
1351
1352 invalidate_inode_pages2(file->f_mapping);
1353
1354 return generic_file_mmap(file, vma);
1355}
1356
1357static int convert_fuse_file_lock(const struct fuse_file_lock *ffl,
1358 struct file_lock *fl)
1359{
1360 switch (ffl->type) {
1361 case F_UNLCK:
1362 break;
1363
1364 case F_RDLCK:
1365 case F_WRLCK:
1366 if (ffl->start > OFFSET_MAX || ffl->end > OFFSET_MAX ||
1367 ffl->end < ffl->start)
1368 return -EIO;
1369
1370 fl->fl_start = ffl->start;
1371 fl->fl_end = ffl->end;
1372 fl->fl_pid = ffl->pid;
1373 break;
1374
1375 default:
1376 return -EIO;
1377 }
1378 fl->fl_type = ffl->type;
1379 return 0;
1380}
1381
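/* Fill a FUSE_GETLK/FUSE_SETLK/FUSE_SETLKW request from a struct file_lock */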
1382static void fuse_lk_fill(struct fuse_req *req, struct file *file,
1383 const struct file_lock *fl, int opcode, pid_t pid,
1384 int flock)
71421259 1385{
7706a9d6 1386 struct inode *inode = file->f_path.dentry->d_inode;
9c8ef561 1387 struct fuse_conn *fc = get_fuse_conn(inode);
1388 struct fuse_file *ff = file->private_data;
1389 struct fuse_lk_in *arg = &req->misc.lk_in;
1390
1391 arg->fh = ff->fh;
9c8ef561 1392 arg->owner = fuse_lock_owner_id(fc, fl->fl_owner);
1393 arg->lk.start = fl->fl_start;
1394 arg->lk.end = fl->fl_end;
1395 arg->lk.type = fl->fl_type;
1396 arg->lk.pid = pid;
1397 if (flock)
1398 arg->lk_flags |= FUSE_LK_FLOCK;
1399 req->in.h.opcode = opcode;
1400 req->in.h.nodeid = get_node_id(inode);
1401 req->in.numargs = 1;
1402 req->in.args[0].size = sizeof(*arg);
1403 req->in.args[0].value = arg;
1404}
1405
1406static int fuse_getlk(struct file *file, struct file_lock *fl)
1407{
7706a9d6 1408 struct inode *inode = file->f_path.dentry->d_inode;
1409 struct fuse_conn *fc = get_fuse_conn(inode);
1410 struct fuse_req *req;
1411 struct fuse_lk_out outarg;
1412 int err;
1413
1414 req = fuse_get_req(fc);
1415 if (IS_ERR(req))
1416 return PTR_ERR(req);
1417
a9ff4f87 1418 fuse_lk_fill(req, file, fl, FUSE_GETLK, 0, 0);
1419 req->out.numargs = 1;
1420 req->out.args[0].size = sizeof(outarg);
1421 req->out.args[0].value = &outarg;
b93f858a 1422 fuse_request_send(fc, req);
1423 err = req->out.h.error;
1424 fuse_put_request(fc, req);
1425 if (!err)
1426 err = convert_fuse_file_lock(&outarg.lk, fl);
1427
1428 return err;
1429}
1430
a9ff4f87 1431static int fuse_setlk(struct file *file, struct file_lock *fl, int flock)
71421259 1432{
7706a9d6 1433 struct inode *inode = file->f_path.dentry->d_inode;
1434 struct fuse_conn *fc = get_fuse_conn(inode);
1435 struct fuse_req *req;
1436 int opcode = (fl->fl_flags & FL_SLEEP) ? FUSE_SETLKW : FUSE_SETLK;
1437 pid_t pid = fl->fl_type != F_UNLCK ? current->tgid : 0;
1438 int err;
1439
1440 if (fl->fl_lmops && fl->fl_lmops->fl_grant) {
1441 /* NLM needs asynchronous locks, which we don't support yet */
1442 return -ENOLCK;
1443 }
1444
1445 /* Unlock on close is handled by the flush method */
1446 if (fl->fl_flags & FL_CLOSE)
1447 return 0;
1448
1449 req = fuse_get_req(fc);
1450 if (IS_ERR(req))
1451 return PTR_ERR(req);
1452
a9ff4f87 1453 fuse_lk_fill(req, file, fl, opcode, pid, flock);
b93f858a 1454 fuse_request_send(fc, req);
71421259 1455 err = req->out.h.error;
1456 /* locking is restartable */
1457 if (err == -EINTR)
1458 err = -ERESTARTSYS;
1459 fuse_put_request(fc, req);
1460 return err;
1461}
1462
1463static int fuse_file_lock(struct file *file, int cmd, struct file_lock *fl)
1464{
7706a9d6 1465 struct inode *inode = file->f_path.dentry->d_inode;
1466 struct fuse_conn *fc = get_fuse_conn(inode);
1467 int err;
1468
1469 if (cmd == F_CANCELLK) {
1470 err = 0;
1471 } else if (cmd == F_GETLK) {
71421259 1472 if (fc->no_lock) {
9d6a8c5c 1473 posix_test_lock(file, fl);
1474 err = 0;
1475 } else
1476 err = fuse_getlk(file, fl);
1477 } else {
1478 if (fc->no_lock)
48e90761 1479 err = posix_lock_file(file, fl, NULL);
71421259 1480 else
a9ff4f87 1481 err = fuse_setlk(file, fl, 0);
1482 }
1483 return err;
1484}
1485
1486static int fuse_file_flock(struct file *file, int cmd, struct file_lock *fl)
1487{
1488 struct inode *inode = file->f_path.dentry->d_inode;
1489 struct fuse_conn *fc = get_fuse_conn(inode);
1490 int err;
1491
1492 if (fc->no_lock) {
1493 err = flock_lock_file_wait(file, fl);
1494 } else {
1495 /* emulate flock with POSIX locks */
1496 fl->fl_owner = (fl_owner_t) file;
1497 err = fuse_setlk(file, fl, 1);
1498 }
1499
1500 return err;
1501}
1502
1503static sector_t fuse_bmap(struct address_space *mapping, sector_t block)
1504{
1505 struct inode *inode = mapping->host;
1506 struct fuse_conn *fc = get_fuse_conn(inode);
1507 struct fuse_req *req;
1508 struct fuse_bmap_in inarg;
1509 struct fuse_bmap_out outarg;
1510 int err;
1511
1512 if (!inode->i_sb->s_bdev || fc->no_bmap)
1513 return 0;
1514
1515 req = fuse_get_req(fc);
1516 if (IS_ERR(req))
1517 return 0;
1518
1519 memset(&inarg, 0, sizeof(inarg));
1520 inarg.block = block;
1521 inarg.blocksize = inode->i_sb->s_blocksize;
1522 req->in.h.opcode = FUSE_BMAP;
1523 req->in.h.nodeid = get_node_id(inode);
1524 req->in.numargs = 1;
1525 req->in.args[0].size = sizeof(inarg);
1526 req->in.args[0].value = &inarg;
1527 req->out.numargs = 1;
1528 req->out.args[0].size = sizeof(outarg);
1529 req->out.args[0].value = &outarg;
b93f858a 1530 fuse_request_send(fc, req);
1531 err = req->out.h.error;
1532 fuse_put_request(fc, req);
1533 if (err == -ENOSYS)
1534 fc->no_bmap = 1;
1535
1536 return err ? 0 : outarg.block;
1537}
1538
1539static loff_t fuse_file_llseek(struct file *file, loff_t offset, int origin)
1540{
1541 loff_t retval;
1542 struct inode *inode = file->f_path.dentry->d_inode;
1543
1544 mutex_lock(&inode->i_mutex);
1545 switch (origin) {
1546 case SEEK_END:
1547 retval = fuse_update_attributes(inode, NULL, file, NULL);
1548 if (retval)
5291658d 1549 goto exit;
1550 offset += i_size_read(inode);
1551 break;
1552 case SEEK_CUR:
1553 offset += file->f_pos;
1554 }
1555 retval = -EINVAL;
1556 if (offset >= 0 && offset <= inode->i_sb->s_maxbytes) {
1557 if (offset != file->f_pos) {
1558 file->f_pos = offset;
1559 file->f_version = 0;
1560 }
1561 retval = offset;
1562 }
5291658d 1563exit:
1564 mutex_unlock(&inode->i_mutex);
1565 return retval;
1566}
1567
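/* Copy ioctl payload between the request's scratch pages and the user-supplied iovecs, in either direction */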
1568static int fuse_ioctl_copy_user(struct page **pages, struct iovec *iov,
1569 unsigned int nr_segs, size_t bytes, bool to_user)
1570{
1571 struct iov_iter ii;
1572 int page_idx = 0;
1573
1574 if (!bytes)
1575 return 0;
1576
1577 iov_iter_init(&ii, iov, nr_segs, bytes, 0);
1578
1579 while (iov_iter_count(&ii)) {
1580 struct page *page = pages[page_idx++];
1581 size_t todo = min_t(size_t, PAGE_SIZE, iov_iter_count(&ii));
4aa0edd2 1582 void *kaddr;
59efec7b 1583
4aa0edd2 1584 kaddr = kmap(page);
1585
1586 while (todo) {
1587 char __user *uaddr = ii.iov->iov_base + ii.iov_offset;
1588 size_t iov_len = ii.iov->iov_len - ii.iov_offset;
1589 size_t copy = min(todo, iov_len);
1590 size_t left;
1591
1592 if (!to_user)
1593 left = copy_from_user(kaddr, uaddr, copy);
1594 else
1595 left = copy_to_user(uaddr, kaddr, copy);
1596
1597 if (unlikely(left))
1598 return -EFAULT;
1599
1600 iov_iter_advance(&ii, copy);
1601 todo -= copy;
1602 kaddr += copy;
1603 }
1604
0bd87182 1605 kunmap(page);
1606 }
1607
1608 return 0;
1609}
1610
1611/*
1612 * For ioctls, there is no generic way to determine how much memory
1613 * needs to be read and/or written. Furthermore, ioctls are allowed
1614 * to dereference the passed pointer, so the parameter requires deep
1615 * copying but FUSE has no idea whatsoever about what to copy in or
1616 * out.
1617 *
1618 * This is solved by allowing FUSE server to retry ioctl with
1619 * necessary in/out iovecs. Let's assume the ioctl implementation
1620 * needs to read in the following structure.
1621 *
1622 * struct a {
1623 * char *buf;
1624 * size_t buflen;
1625 * }
1626 *
1627 * On the first callout to FUSE server, inarg->in_size and
1628 * inarg->out_size will be NULL; then, the server completes the ioctl
1629 * with FUSE_IOCTL_RETRY set in out->flags, out->in_iovs set to 1 and
1630 * the actual iov array to
1631 *
1632 * { { .iov_base = inarg.arg, .iov_len = sizeof(struct a) } }
1633 *
1634 * which tells FUSE to copy in the requested area and retry the ioctl.
1635 * On the second round, the server has access to the structure and
1636 * from that it can tell what to look for next, so on the invocation,
1637 * it sets FUSE_IOCTL_RETRY, out->in_iovs to 2 and iov array to
1638 *
1639 * { { .iov_base = inarg.arg, .iov_len = sizeof(struct a) },
1640 * { .iov_base = a.buf, .iov_len = a.buflen } }
1641 *
1642 * FUSE will copy both struct a and the pointed buffer from the
1643 * process doing the ioctl and retry ioctl with both struct a and the
1644 * buffer.
1645 *
1646 * This time, FUSE server has everything it needs and completes ioctl
1647 * without FUSE_IOCTL_RETRY which finishes the ioctl call.
1648 *
1649 * Copying data out works the same way.
1650 *
1651 * Note that if FUSE_IOCTL_UNRESTRICTED is clear, the kernel
1652 * automatically initializes in and out iovs by decoding @cmd with
1653 * _IOC_* macros and the server is not allowed to request RETRY. This
1654 * limits ioctl data transfers to well-formed ioctls and is the forced
1655 * behavior for all FUSE servers.
1656 */
1657long fuse_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg,
1658 unsigned int flags)
59efec7b 1659{
59efec7b 1660 struct fuse_file *ff = file->private_data;
d36f2487 1661 struct fuse_conn *fc = ff->fc;
59efec7b
TH
1662 struct fuse_ioctl_in inarg = {
1663 .fh = ff->fh,
1664 .cmd = cmd,
1665 .arg = arg,
1666 .flags = flags
1667 };
1668 struct fuse_ioctl_out outarg;
1669 struct fuse_req *req = NULL;
1670 struct page **pages = NULL;
1671 struct page *iov_page = NULL;
1672 struct iovec *in_iov = NULL, *out_iov = NULL;
1673 unsigned int in_iovs = 0, out_iovs = 0, num_pages = 0, max_pages;
1674 size_t in_size, out_size, transferred;
1675 int err;
1676
1677 /* assume all the iovs returned by client always fits in a page */
1678 BUILD_BUG_ON(sizeof(struct iovec) * FUSE_IOCTL_MAX_IOV > PAGE_SIZE);
1679
1680 err = -ENOMEM;
1681 pages = kzalloc(sizeof(pages[0]) * FUSE_MAX_PAGES_PER_REQ, GFP_KERNEL);
1682 iov_page = alloc_page(GFP_KERNEL);
1683 if (!pages || !iov_page)
1684 goto out;
1685
1686 /*
1687 * If restricted, initialize IO parameters as encoded in @cmd.
1688 * RETRY from server is not allowed.
1689 */
1690 if (!(flags & FUSE_IOCTL_UNRESTRICTED)) {
1691 struct iovec *iov = page_address(iov_page);
1692
c9f0523d 1693 iov->iov_base = (void __user *)arg;
1694 iov->iov_len = _IOC_SIZE(cmd);
1695
1696 if (_IOC_DIR(cmd) & _IOC_WRITE) {
1697 in_iov = iov;
1698 in_iovs = 1;
1699 }
1700
1701 if (_IOC_DIR(cmd) & _IOC_READ) {
1702 out_iov = iov;
1703 out_iovs = 1;
1704 }
1705 }
1706
1707 retry:
1708 inarg.in_size = in_size = iov_length(in_iov, in_iovs);
1709 inarg.out_size = out_size = iov_length(out_iov, out_iovs);
1710
1711 /*
1712 * Out data can be used either for actual out data or iovs,
1713 * make sure there always is at least one page.
1714 */
1715 out_size = max_t(size_t, out_size, PAGE_SIZE);
1716 max_pages = DIV_ROUND_UP(max(in_size, out_size), PAGE_SIZE);
1717
1718 /* make sure there are enough buffer pages and init request with them */
1719 err = -ENOMEM;
1720 if (max_pages > FUSE_MAX_PAGES_PER_REQ)
1721 goto out;
1722 while (num_pages < max_pages) {
1723 pages[num_pages] = alloc_page(GFP_KERNEL | __GFP_HIGHMEM);
1724 if (!pages[num_pages])
1725 goto out;
1726 num_pages++;
1727 }
1728
1729 req = fuse_get_req(fc);
1730 if (IS_ERR(req)) {
1731 err = PTR_ERR(req);
1732 req = NULL;
1733 goto out;
1734 }
1735 memcpy(req->pages, pages, sizeof(req->pages[0]) * num_pages);
1736 req->num_pages = num_pages;
1737
1738 /* okay, let's send it to the client */
1739 req->in.h.opcode = FUSE_IOCTL;
d36f2487 1740 req->in.h.nodeid = ff->nodeid;
1741 req->in.numargs = 1;
1742 req->in.args[0].size = sizeof(inarg);
1743 req->in.args[0].value = &inarg;
1744 if (in_size) {
1745 req->in.numargs++;
1746 req->in.args[1].size = in_size;
1747 req->in.argpages = 1;
1748
1749 err = fuse_ioctl_copy_user(pages, in_iov, in_iovs, in_size,
1750 false);
1751 if (err)
1752 goto out;
1753 }
1754
1755 req->out.numargs = 2;
1756 req->out.args[0].size = sizeof(outarg);
1757 req->out.args[0].value = &outarg;
1758 req->out.args[1].size = out_size;
1759 req->out.argpages = 1;
1760 req->out.argvar = 1;
1761
b93f858a 1762 fuse_request_send(fc, req);
1763 err = req->out.h.error;
1764 transferred = req->out.args[1].size;
1765 fuse_put_request(fc, req);
1766 req = NULL;
1767 if (err)
1768 goto out;
1769
1770 /* did it ask for retry? */
1771 if (outarg.flags & FUSE_IOCTL_RETRY) {
1772 char *vaddr;
1773
1774 /* no retry if in restricted mode */
1775 err = -EIO;
1776 if (!(flags & FUSE_IOCTL_UNRESTRICTED))
1777 goto out;
1778
1779 in_iovs = outarg.in_iovs;
1780 out_iovs = outarg.out_iovs;
1781
1782 /*
1783 * Make sure things are in boundary, separate checks
1784 * are to protect against overflow.
1785 */
1786 err = -ENOMEM;
1787 if (in_iovs > FUSE_IOCTL_MAX_IOV ||
1788 out_iovs > FUSE_IOCTL_MAX_IOV ||
1789 in_iovs + out_iovs > FUSE_IOCTL_MAX_IOV)
1790 goto out;
1791
1792 err = -EIO;
1793 if ((in_iovs + out_iovs) * sizeof(struct iovec) != transferred)
1794 goto out;
1795
1796 /* okay, copy in iovs and retry */
1797 vaddr = kmap_atomic(pages[0], KM_USER0);
1798 memcpy(page_address(iov_page), vaddr, transferred);
1799 kunmap_atomic(vaddr, KM_USER0);
1800
1801 in_iov = page_address(iov_page);
1802 out_iov = in_iov + in_iovs;
1803
1804 goto retry;
1805 }
1806
1807 err = -EIO;
1808 if (transferred > inarg.out_size)
1809 goto out;
1810
1811 err = fuse_ioctl_copy_user(pages, out_iov, out_iovs, transferred, true);
1812 out:
1813 if (req)
1814 fuse_put_request(fc, req);
1815 if (iov_page)
1816 __free_page(iov_page);
1817 while (num_pages)
1818 __free_page(pages[--num_pages]);
1819 kfree(pages);
1820
1821 return err ? err : outarg.result;
1822}
08cbf542 1823EXPORT_SYMBOL_GPL(fuse_do_ioctl);
59efec7b 1824
1825static long fuse_file_ioctl_common(struct file *file, unsigned int cmd,
1826 unsigned long arg, unsigned int flags)
1827{
1828 struct inode *inode = file->f_dentry->d_inode;
1829 struct fuse_conn *fc = get_fuse_conn(inode);
1830
1831 if (!fuse_allow_task(fc, current))
1832 return -EACCES;
1833
1834 if (is_bad_inode(inode))
1835 return -EIO;
1836
1837 return fuse_do_ioctl(file, cmd, arg, flags);
1838}
1839
1840static long fuse_file_ioctl(struct file *file, unsigned int cmd,
1841 unsigned long arg)
1842{
d36f2487 1843 return fuse_file_ioctl_common(file, cmd, arg, 0);
1844}
1845
1846static long fuse_file_compat_ioctl(struct file *file, unsigned int cmd,
1847 unsigned long arg)
1848{
d36f2487 1849 return fuse_file_ioctl_common(file, cmd, arg, FUSE_IOCTL_COMPAT);
1850}
1851
1852/*
1853 * All files which have been polled are linked to RB tree
1854 * fuse_conn->polled_files which is indexed by kh. Walk the tree and
1855 * find the matching one.
1856 */
1857static struct rb_node **fuse_find_polled_node(struct fuse_conn *fc, u64 kh,
1858 struct rb_node **parent_out)
1859{
1860 struct rb_node **link = &fc->polled_files.rb_node;
1861 struct rb_node *last = NULL;
1862
1863 while (*link) {
1864 struct fuse_file *ff;
1865
1866 last = *link;
1867 ff = rb_entry(last, struct fuse_file, polled_node);
1868
1869 if (kh < ff->kh)
1870 link = &last->rb_left;
1871 else if (kh > ff->kh)
1872 link = &last->rb_right;
1873 else
1874 return link;
1875 }
1876
1877 if (parent_out)
1878 *parent_out = last;
1879 return link;
1880}
1881
1882/*
1883 * The file is about to be polled. Make sure it's on the polled_files
1884 * RB tree. Note that files once added to the polled_files tree are
1885 * not removed before the file is released. This is because a file
1886 * polled once is likely to be polled again.
1887 */
1888static void fuse_register_polled_file(struct fuse_conn *fc,
1889 struct fuse_file *ff)
1890{
1891 spin_lock(&fc->lock);
1892 if (RB_EMPTY_NODE(&ff->polled_node)) {
1893 struct rb_node **link, *parent;
1894
1895 link = fuse_find_polled_node(fc, ff->kh, &parent);
1896 BUG_ON(*link);
1897 rb_link_node(&ff->polled_node, parent, link);
1898 rb_insert_color(&ff->polled_node, &fc->polled_files);
1899 }
1900 spin_unlock(&fc->lock);
1901}
1902
08cbf542 1903unsigned fuse_file_poll(struct file *file, poll_table *wait)
95668a69 1904{
95668a69 1905 struct fuse_file *ff = file->private_data;
797759aa 1906 struct fuse_conn *fc = ff->fc;
1907 struct fuse_poll_in inarg = { .fh = ff->fh, .kh = ff->kh };
1908 struct fuse_poll_out outarg;
1909 struct fuse_req *req;
1910 int err;
1911
1912 if (fc->no_poll)
1913 return DEFAULT_POLLMASK;
1914
1915 poll_wait(file, &ff->poll_wait, wait);
1916
1917 /*
1918 * Ask for notification iff there's someone waiting for it.
1919 * The client may ignore the flag and always notify.
1920 */
1921 if (waitqueue_active(&ff->poll_wait)) {
1922 inarg.flags |= FUSE_POLL_SCHEDULE_NOTIFY;
1923 fuse_register_polled_file(fc, ff);
1924 }
1925
1926 req = fuse_get_req(fc);
1927 if (IS_ERR(req))
201fa69a 1928 return POLLERR;
1929
1930 req->in.h.opcode = FUSE_POLL;
797759aa 1931 req->in.h.nodeid = ff->nodeid;
1932 req->in.numargs = 1;
1933 req->in.args[0].size = sizeof(inarg);
1934 req->in.args[0].value = &inarg;
1935 req->out.numargs = 1;
1936 req->out.args[0].size = sizeof(outarg);
1937 req->out.args[0].value = &outarg;
b93f858a 1938 fuse_request_send(fc, req);
1939 err = req->out.h.error;
1940 fuse_put_request(fc, req);
1941
1942 if (!err)
1943 return outarg.revents;
1944 if (err == -ENOSYS) {
1945 fc->no_poll = 1;
1946 return DEFAULT_POLLMASK;
1947 }
1948 return POLLERR;
1949}
08cbf542 1950EXPORT_SYMBOL_GPL(fuse_file_poll);
1951
1952/*
1953 * This is called from fuse_handle_notify() on FUSE_NOTIFY_POLL and
1954 * wakes up the poll waiters.
1955 */
1956int fuse_notify_poll_wakeup(struct fuse_conn *fc,
1957 struct fuse_notify_poll_wakeup_out *outarg)
1958{
1959 u64 kh = outarg->kh;
1960 struct rb_node **link;
1961
1962 spin_lock(&fc->lock);
1963
1964 link = fuse_find_polled_node(fc, kh, NULL);
1965 if (*link) {
1966 struct fuse_file *ff;
1967
1968 ff = rb_entry(*link, struct fuse_file, polled_node);
1969 wake_up_interruptible_sync(&ff->poll_wait);
1970 }
1971
1972 spin_unlock(&fc->lock);
1973 return 0;
1974}
1975
4b6f5d20 1976static const struct file_operations fuse_file_operations = {
5559b8f4 1977 .llseek = fuse_file_llseek,
543ade1f 1978 .read = do_sync_read,
bcb4be80 1979 .aio_read = fuse_file_aio_read,
543ade1f 1980 .write = do_sync_write,
ea9b9907 1981 .aio_write = fuse_file_aio_write,
1982 .mmap = fuse_file_mmap,
1983 .open = fuse_open,
1984 .flush = fuse_flush,
1985 .release = fuse_release,
1986 .fsync = fuse_fsync,
71421259 1987 .lock = fuse_file_lock,
a9ff4f87 1988 .flock = fuse_file_flock,
5ffc4ef4 1989 .splice_read = generic_file_splice_read,
1990 .unlocked_ioctl = fuse_file_ioctl,
1991 .compat_ioctl = fuse_file_compat_ioctl,
95668a69 1992 .poll = fuse_file_poll,
1993};
1994
4b6f5d20 1995static const struct file_operations fuse_direct_io_file_operations = {
5559b8f4 1996 .llseek = fuse_file_llseek,
1997 .read = fuse_direct_read,
1998 .write = fuse_direct_write,
fc280c96 1999 .mmap = fuse_direct_mmap,
2000 .open = fuse_open,
2001 .flush = fuse_flush,
2002 .release = fuse_release,
2003 .fsync = fuse_fsync,
71421259 2004 .lock = fuse_file_lock,
a9ff4f87 2005 .flock = fuse_file_flock,
2006 .unlocked_ioctl = fuse_file_ioctl,
2007 .compat_ioctl = fuse_file_compat_ioctl,
95668a69 2008 .poll = fuse_file_poll,
fc280c96 2009 /* no splice_read */
2010};
2011
f5e54d6e 2012static const struct address_space_operations fuse_file_aops = {
b6aeaded 2013 .readpage = fuse_readpage,
2014 .writepage = fuse_writepage,
2015 .launder_page = fuse_launder_page,
2016 .write_begin = fuse_write_begin,
2017 .write_end = fuse_write_end,
db50b96c 2018 .readpages = fuse_readpages,
3be5a52b 2019 .set_page_dirty = __set_page_dirty_nobuffers,
b2d2272f 2020 .bmap = fuse_bmap,
2021};
2022
2023void fuse_init_file_inode(struct inode *inode)
2024{
2025 inode->i_fop = &fuse_file_operations;
2026 inode->i_data.a_ops = &fuse_file_aops;
b6aeaded 2027}