2 * linux/fs/9p/vfs_file.c
4 * This file contains vfs file ops for 9P2000.
6 * Copyright (C) 2004 by Eric Van Hensbergen <ericvh@gmail.com>
7 * Copyright (C) 2002 by Ron Minnich <rminnich@lanl.gov>
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License version 2
11 * as published by the Free Software Foundation.
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to:
20 * Free Software Foundation
21 * 51 Franklin Street, Fifth Floor
22 * Boston, MA 02111-1301 USA
26 #include <linux/module.h>
27 #include <linux/errno.h>
29 #include <linux/sched.h>
30 #include <linux/file.h>
31 #include <linux/stat.h>
32 #include <linux/string.h>
33 #include <linux/inet.h>
34 #include <linux/list.h>
35 #include <linux/pagemap.h>
36 #include <linux/utsname.h>
37 #include <asm/uaccess.h>
38 #include <linux/idr.h>
39 #include <net/9p/9p.h>
40 #include <net/9p/client.h>
/* 9p vm_operations for mmap'd files; the table itself is defined below. */
47 static const struct vm_operations_struct v9fs_file_vm_ops;
50 * v9fs_file_open - open a file (or directory)
51 * @inode: inode to be opened
52 * @file: file being opened
 *
 * Opens a 9P fid for @file, handles O_TRUNC/O_APPEND, and for cached
 * writable opens also installs a per-inode writeback fid.
 *
 * NOTE(review): this excerpt elides several lines of the original
 * function (braces, declarations, error checks); comments describe
 * only the visible statements.
56 int v9fs_file_open(struct inode *inode, struct file *file)
59 struct v9fs_inode *v9inode;
60 struct v9fs_session_info *v9ses;
64 P9_DPRINTK(P9_DEBUG_VFS, "inode: %p file: %p\n", inode, file);
65 v9inode = V9FS_I(inode);
66 v9ses = v9fs_inode2v9ses(inode);
 /* 9P2000.L takes the open flags verbatim; legacy/.u maps them */
67 if (v9fs_proto_dotl(v9ses))
68 omode = file->f_flags;
70 omode = v9fs_uflags2omode(file->f_flags,
71 v9fs_proto_dotu(v9ses));
72 fid = file->private_data;
 /* no fid attached yet: clone one from the dentry and open it */
74 fid = v9fs_fid_clone(file->f_path.dentry);
78 err = p9_client_open(fid, omode);
 /* O_TRUNC: reflect the truncation in the local inode size */
83 if (file->f_flags & O_TRUNC) {
84 i_size_write(inode, 0);
 /* legacy 9P2000 has no append mode; emulate with a seek to EOF */
87 if ((file->f_flags & O_APPEND) &&
88 (!v9fs_proto_dotu(v9ses) && !v9fs_proto_dotl(v9ses)))
89 generic_file_llseek(file, 0, SEEK_END);
92 file->private_data = fid;
 /* v_mutex serializes writeback_fid setup against concurrent opens */
93 mutex_lock(&v9inode->v_mutex);
94 if (v9ses->cache && !v9inode->writeback_fid &&
95 ((file->f_flags & O_ACCMODE) != O_RDONLY)) {
97 * clone a fid and add it to writeback_fid
98 * we do it during open time instead of
99 * page dirty time via write_begin/page_mkwrite
100 * because we want write after unlink usecase
103 fid = v9fs_writeback_fid(file->f_path.dentry);
106 mutex_unlock(&v9inode->v_mutex);
109 v9inode->writeback_fid = (void *) fid;
111 mutex_unlock(&v9inode->v_mutex);
112 #ifdef CONFIG_9P_FSCACHE
114 v9fs_cache_inode_set_cookie(inode, file);
 /* error path: release the opened fid and detach it from the file */
118 p9_client_clunk(file->private_data);
119 file->private_data = NULL;
124 * v9fs_file_lock - lock a file (or directory)
125 * @filp: file to be locked
 * @cmd: lock command (F_SETLK/F_SETLKW/F_GETLK)
127 * @fl: file lock structure
 *
129 * Bugs: this looks like a local only lock, we should extend into 9P
130 * by using open exclusive
 *
 * NOTE(review): some lines are elided in this excerpt.
133 static int v9fs_file_lock(struct file *filp, int cmd, struct file_lock *fl)
136 struct inode *inode = filp->f_path.dentry->d_inode;
138 P9_DPRINTK(P9_DEBUG_VFS, "filp: %p lock: %p\n", filp, fl);
140 /* No mandatory locks */
141 if (__mandatory_lock(inode) && fl->fl_type != F_UNLCK)
 /* flush dirty pages and drop cached data before taking a lock */
144 if ((IS_SETLK(cmd) || IS_SETLKW(cmd)) && fl->fl_type != F_UNLCK) {
145 filemap_write_and_wait(inode->i_mapping);
146 invalidate_mapping_pages(&inode->i_data, 0, -1);
/*
 * v9fs_file_do_lock - take/release a POSIX lock locally, then mirror
 * it to the server via TLOCK (9P2000.L); on server error the local
 * lock is reverted.
 * NOTE(review): some lines are elided in this excerpt.
 */
152 static int v9fs_file_do_lock(struct file *filp, int cmd, struct file_lock *fl)
154 struct p9_flock flock;
158 unsigned char fl_type;
160 fid = filp->private_data;
 /* only POSIX (fcntl-style) locks are handled here */
163 if ((fl->fl_flags & FL_POSIX) != FL_POSIX)
 /* take (or clear) the lock locally first */
166 res = posix_lock_file_wait(filp, fl);
170 /* convert posix lock to p9 tlock args */
171 memset(&flock, 0, sizeof(flock));
172 flock.type = fl->fl_type;
173 flock.start = fl->fl_start;
 /* fl_end == OFFSET_MAX means "to end of file" (length stays 0) */
174 if (fl->fl_end == OFFSET_MAX)
177 flock.length = fl->fl_end - fl->fl_start + 1;
178 flock.proc_id = fl->fl_pid;
179 flock.client_id = utsname()->nodename;
 /* blocking request (F_SETLKW): ask the server to block as well */
181 flock.flags = P9_LOCK_FLAGS_BLOCK;
184 * if its a blocked request and we get P9_LOCK_BLOCKED as the status
185 * for lock request, keep on trying
188 res = p9_client_lock_dotl(fid, &flock, &status);
192 if (status != P9_LOCK_BLOCKED)
194 if (status == P9_LOCK_BLOCKED && !IS_SETLKW(cmd))
 /* back off before retrying a still-blocked request */
196 schedule_timeout_interruptible(P9_LOCK_TIMEOUT);
199 /* map 9p status to VFS status */
201 case P9_LOCK_SUCCESS:
204 case P9_LOCK_BLOCKED:
216 * incase server returned error for lock request, revert
 * the local lock taken above.
219 if (res < 0 && fl->fl_type != F_UNLCK) {
220 fl_type = fl->fl_type;
221 fl->fl_type = F_UNLCK;
222 res = posix_lock_file_wait(filp, fl);
223 fl->fl_type = fl_type;
/*
 * v9fs_file_getlock - test for a conflicting lock (F_GETLK): check
 * locally first, then query the server via TGETLOCK (9P2000.L).
 * NOTE(review): some lines are elided in this excerpt.
 */
229 static int v9fs_file_getlock(struct file *filp, struct file_lock *fl)
231 struct p9_getlock glock;
235 fid = filp->private_data;
 /* check for a conflicting local lock first */
238 posix_test_lock(filp, fl);
240 * if we have a conflicting lock locally, no need to validate
 * against the server.
243 if (fl->fl_type != F_UNLCK)
246 /* convert posix lock to p9 tgetlock args */
247 memset(&glock, 0, sizeof(glock));
248 glock.type = fl->fl_type;
249 glock.start = fl->fl_start;
 /* fl_end == OFFSET_MAX means "to end of file" (length stays 0) */
250 if (fl->fl_end == OFFSET_MAX)
253 glock.length = fl->fl_end - fl->fl_start + 1;
254 glock.proc_id = fl->fl_pid;
255 glock.client_id = utsname()->nodename;
257 res = p9_client_getlock_dotl(fid, &glock);
 /* server reported a conflicting lock: copy it back into *fl */
260 if (glock.type != F_UNLCK) {
261 fl->fl_type = glock.type;
262 fl->fl_start = glock.start;
263 if (glock.length == 0)
264 fl->fl_end = OFFSET_MAX;
266 fl->fl_end = glock.start + glock.length - 1;
267 fl->fl_pid = glock.proc_id;
 /* no conflict locally or on the server */
269 fl->fl_type = F_UNLCK;
275 * v9fs_file_lock_dotl - lock a file (or directory)
276 * @filp: file to be locked
 * @cmd: lock command (F_SETLK/F_SETLKW/F_GETLK)
278 * @fl: file lock structure
 *
 * NOTE(review): some lines are elided in this excerpt.
282 static int v9fs_file_lock_dotl(struct file *filp, int cmd, struct file_lock *fl)
284 struct inode *inode = filp->f_path.dentry->d_inode;
287 P9_DPRINTK(P9_DEBUG_VFS, "filp: %p cmd:%d lock: %p name: %s\n", filp,
288 cmd, fl, filp->f_path.dentry->d_name.name);
290 /* No mandatory locks */
291 if (__mandatory_lock(inode) && fl->fl_type != F_UNLCK)
 /* flush dirty pages and drop cached data before taking a lock */
294 if ((IS_SETLK(cmd) || IS_SETLKW(cmd)) && fl->fl_type != F_UNLCK) {
295 filemap_write_and_wait(inode->i_mapping);
296 invalidate_mapping_pages(&inode->i_data, 0, -1);
 /* dispatch: set/clear a lock vs. query an existing one */
299 if (IS_SETLK(cmd) || IS_SETLKW(cmd))
300 ret = v9fs_file_do_lock(filp, cmd, fl);
301 else if (IS_GETLK(cmd))
302 ret = v9fs_file_getlock(filp, fl);
310 * v9fs_file_flock_dotl - lock a file
311 * @filp: file to be locked
313 * @fl: file lock structure
317 static int v9fs_file_flock_dotl(struct file *filp, int cmd,
318 struct file_lock *fl)
320 struct inode *inode = filp->f_path.dentry->d_inode;
323 P9_DPRINTK(P9_DEBUG_VFS, "filp: %p cmd:%d lock: %p name: %s\n", filp,
324 cmd, fl, filp->f_path.dentry->d_name.name);
326 /* No mandatory locks */
327 if (__mandatory_lock(inode) && fl->fl_type != F_UNLCK)
330 if (!(fl->fl_flags & FL_FLOCK))
333 if ((IS_SETLK(cmd) || IS_SETLKW(cmd)) && fl->fl_type != F_UNLCK) {
334 filemap_write_and_wait(inode->i_mapping);
335 invalidate_mapping_pages(&inode->i_data, 0, -1);
337 /* Convert flock to posix lock */
338 fl->fl_owner = (fl_owner_t)filp;
340 fl->fl_end = OFFSET_MAX;
341 fl->fl_flags |= FL_POSIX;
342 fl->fl_flags ^= FL_FLOCK;
344 if (IS_SETLK(cmd) | IS_SETLKW(cmd))
345 ret = v9fs_file_do_lock(filp, cmd, fl);
353 * v9fs_fid_readn - read from a fid
 * @fid: fid to read from
355 * @data: data buffer to read data into
356 * @udata: user data buffer to read data into
357 * @count: size of buffer
358 * @offset: offset at which to read data
 *
 * Issues p9_client_read() in per-request-size chunks until @count is
 * satisfied or a short read occurs.
 * NOTE(review): some lines are elided in this excerpt.
362 v9fs_fid_readn(struct p9_fid *fid, char *data, char __user *udata, u32 count,
367 P9_DPRINTK(P9_DEBUG_VFS, "fid %d offset %llu count %d\n", fid->fid,
368 (long long unsigned) offset, count);
 /* max per-request payload: fid's iounit, else msize minus header */
371 size = fid->iounit ? fid->iounit : fid->clnt->msize - P9_IOHDRSZ;
373 n = p9_client_read(fid, data, udata, offset, count);
 /* a short read (n < size) means EOF or error: stop looping */
385 } while (count > 0 && n == size);
394 * v9fs_file_readn - read from a file
395 * @filp: file pointer to read
396 * @data: data buffer to read data into
397 * @udata: user data buffer to read data into
398 * @count: size of buffer
399 * @offset: offset at which to read data
 *
 * Thin wrapper: delegates to v9fs_fid_readn() using the file's fid.
403 v9fs_file_readn(struct file *filp, char *data, char __user *udata, u32 count,
406 return v9fs_fid_readn(filp->private_data, data, udata, count, offset);
410 * v9fs_file_read - read from a file
411 * @filp: file pointer to read
412 * @udata: user data buffer to read data into
413 * @count: size of buffer
414 * @offset: offset at which to read data
 *
 * Requests that fit in one 9P message go straight to p9_client_read();
 * larger ones use the chunking loop in v9fs_file_readn().
 * NOTE(review): some lines are elided in this excerpt.
419 v9fs_file_read(struct file *filp, char __user *udata, size_t count,
426 P9_DPRINTK(P9_DEBUG_VFS, "count %zu offset %lld\n", count, *offset);
427 fid = filp->private_data;
 /* max single-request payload for this fid */
429 size = fid->iounit ? fid->iounit : fid->clnt->msize - P9_IOHDRSZ;
431 ret = v9fs_file_readn(filp, NULL, udata, count, *offset);
433 ret = p9_client_read(fid, NULL, udata, *offset, count);
/*
 * v9fs_file_write_internal - write @count bytes from user buffer @data
 * to @fid starting at *@offset; if @invalidate is set, drops the
 * written page-cache range and updates the inode size when the write
 * extends past EOF.
 * NOTE(review): some lines are elided in this excerpt.
 */
442 v9fs_file_write_internal(struct inode *inode, struct p9_fid *fid,
443 const char __user *data, size_t count,
444 loff_t *offset, int invalidate)
449 struct p9_client *clnt;
450 loff_t origin = *offset;
451 unsigned long pg_start, pg_end;
453 P9_DPRINTK(P9_DEBUG_VFS, "data %p count %d offset %x\n", data,
454 (int)count, (int)*offset);
 /* issue the next chunk at the running offset */
458 n = p9_client_write(fid, NULL, data+total, origin+total, count);
 /* drop now-stale cached pages covering the written range */
465 if (invalidate && (total > 0)) {
466 pg_start = origin >> PAGE_CACHE_SHIFT;
467 pg_end = (origin + total - 1) >> PAGE_CACHE_SHIFT;
468 if (inode->i_mapping && inode->i_mapping->nrpages)
469 invalidate_inode_pages2_range(inode->i_mapping,
 /* extend the local inode if the write went past the old EOF */
472 i_size = i_size_read(inode);
473 if (*offset > i_size) {
474 inode_add_bytes(inode, *offset - i_size);
475 i_size_write(inode, *offset);
485 * v9fs_file_write - write to a file
486 * @filp: file pointer to write
487 * @data: data buffer to write data from
488 * @count: size of buffer
489 * @offset: offset at which to write data
 *
 * NOTE(review): some lines are elided in this excerpt.
493 v9fs_file_write(struct file *filp, const char __user * data,
494 size_t count, loff_t *offset)
497 loff_t origin = *offset;
 /* validate offset/count against rlimits etc.; may shrink count */
500 retval = generic_write_checks(filp, &origin, &count, 0);
 /* reject counts that would overflow a ssize_t return */
505 if ((ssize_t) count < 0)
511 retval = v9fs_file_write_internal(filp->f_path.dentry->d_inode,
513 data, count, &origin, 1);
514 /* update offset on successful write */
/*
 * v9fs_file_fsync - legacy 9P fsync: flush dirty pages for the range,
 * then send a blank wstat, which asks the server to flush the file.
 * NOTE(review): some lines are elided in this excerpt.
 */
522 static int v9fs_file_fsync(struct file *filp, loff_t start, loff_t end,
526 struct inode *inode = filp->f_mapping->host;
527 struct p9_wstat wstat;
 /* write back the requested byte range first */
530 retval = filemap_write_and_wait_range(inode->i_mapping, start, end);
534 mutex_lock(&inode->i_mutex);
535 P9_DPRINTK(P9_DEBUG_VFS, "filp %p datasync %x\n", filp, datasync);
537 fid = filp->private_data;
 /* an all-"don't change" wstat is the 9P idiom for "sync this file" */
538 v9fs_blank_wstat(&wstat);
540 retval = p9_client_wstat(fid, &wstat);
541 mutex_unlock(&inode->i_mutex);
/*
 * v9fs_file_fsync_dotl - 9P2000.L fsync: flush dirty pages for the
 * range, then send an explicit fsync request to the server.
 * NOTE(review): some lines are elided in this excerpt.
 */
546 int v9fs_file_fsync_dotl(struct file *filp, loff_t start, loff_t end,
550 struct inode *inode = filp->f_mapping->host;
553 retval = filemap_write_and_wait_range(inode->i_mapping, start, end);
557 mutex_lock(&inode->i_mutex);
558 P9_DPRINTK(P9_DEBUG_VFS, "v9fs_file_fsync_dotl: filp %p datasync %x\n",
561 fid = filp->private_data;
563 retval = p9_client_fsync(fid, datasync);
564 mutex_unlock(&inode->i_mutex);
/*
 * v9fs_file_mmap - generic mmap, then install 9p-specific vm_ops so
 * page_mkwrite can run before a page becomes writable.
 * NOTE(review): some lines are elided in this excerpt.
 */
570 v9fs_file_mmap(struct file *file, struct vm_area_struct *vma)
574 retval = generic_file_mmap(file, vma);
576 vma->vm_ops = &v9fs_file_vm_ops;
/*
 * v9fs_vm_page_mkwrite - called when a mapped page is about to become
 * writable: wait for fscache to finish with the page, verify it is
 * still attached to this mapping, and return it locked.
 * NOTE(review): some lines are elided in this excerpt.
 */
582 v9fs_vm_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
584 struct v9fs_inode *v9inode;
585 struct page *page = vmf->page;
586 struct file *filp = vma->vm_file;
587 struct inode *inode = filp->f_path.dentry->d_inode;
590 P9_DPRINTK(P9_DEBUG_VFS, "page %p fid %lx\n",
591 page, (unsigned long)filp->private_data);
593 v9inode = V9FS_I(inode);
594 /* make sure the cache has finished storing the page */
595 v9fs_fscache_wait_on_page_write(inode, page);
 /* open-for-write should already have set up the writeback fid */
596 BUG_ON(!v9inode->writeback_fid);
 /* page was truncated/invalidated while we waited: redo the fault */
598 if (page->mapping != inode->i_mapping)
601 return VM_FAULT_LOCKED;
604 return VM_FAULT_NOPAGE;
/*
 * v9fs_direct_read - O_DIRECT read path: write back any dirty cached
 * pages for the range, then read through v9fs_file_read().
 * NOTE(review): some lines are elided in this excerpt.
 */
608 v9fs_direct_read(struct file *filp, char __user *udata, size_t count,
613 struct address_space *mapping;
616 mapping = filp->f_mapping;
617 inode = mapping->host;
620 size = i_size_read(inode);
 /* flush dirty pages so the uncached read sees current data */
622 filemap_write_and_wait_range(mapping, offset,
625 return v9fs_file_read(filp, udata, count, offsetp);
629 * v9fs_cached_file_read - read from a file
630 * @filp: file pointer to read
631 * @udata: user data buffer to read data into
632 * @count: size of buffer
633 * @offset: offset at which to read data
 *
 * O_DIRECT opens bypass the page cache; everything else goes through
 * the generic cached read path.
637 v9fs_cached_file_read(struct file *filp, char __user *data, size_t count,
640 if (filp->f_flags & O_DIRECT)
641 return v9fs_direct_read(filp, data, count, offset);
642 return do_sync_read(filp, data, count, offset);
/*
 * v9fs_direct_write - O_DIRECT write path: under i_mutex, flush and
 * invalidate the affected page-cache range, then write through
 * v9fs_file_write(); falls back to buffered write when invalidation
 * fails with -EBUSY.
 * NOTE(review): some lines are elided in this excerpt.
 */
646 v9fs_direct_write(struct file *filp, const char __user * data,
647 size_t count, loff_t *offsetp)
652 struct address_space *mapping;
655 mapping = filp->f_mapping;
656 inode = mapping->host;
 /* i_mutex serializes against other writers and truncate */
660 mutex_lock(&inode->i_mutex);
661 retval = filemap_write_and_wait_range(mapping, offset,
666 * After a write we want buffered reads to be sure to go to disk to get
667 * the new data. We invalidate clean cached page from the region we're
668 * about to write. We do this *before* the write so that if we fail
669 * here we fall back to buffered write
671 if (mapping->nrpages) {
672 pgoff_t pg_start = offset >> PAGE_CACHE_SHIFT;
673 pgoff_t pg_end = (offset + count - 1) >> PAGE_CACHE_SHIFT;
675 retval = invalidate_inode_pages2_range(mapping,
678 * If a page can not be invalidated, fall back
 * to the buffered write path below.
682 if (retval == -EBUSY)
687 retval = v9fs_file_write(filp, data, count, offsetp);
689 mutex_unlock(&inode->i_mutex);
 /* buffered fallback: release the lock and use the generic path */
693 mutex_unlock(&inode->i_mutex);
694 return do_sync_write(filp, data, count, offsetp);
698 * v9fs_cached_file_write - write to a file
699 * @filp: file pointer to write
700 * @data: data buffer to write data from
701 * @count: size of buffer
702 * @offset: offset at which to write data
 *
 * O_DIRECT opens bypass the page cache; everything else goes through
 * the generic cached write path.
706 v9fs_cached_file_write(struct file *filp, const char __user * data,
707 size_t count, loff_t *offset)
710 if (filp->f_flags & O_DIRECT)
711 return v9fs_direct_write(filp, data, count, offset);
712 return do_sync_write(filp, data, count, offset);
/* vm_ops for mmap'd 9p files: generic fault, 9p-aware page_mkwrite */
715 static const struct vm_operations_struct v9fs_file_vm_ops = {
716 .fault = filemap_fault,
717 .page_mkwrite = v9fs_vm_page_mkwrite,
/* file_operations for cached legacy-9P mounts (page cache enabled) */
721 const struct file_operations v9fs_cached_file_operations = {
722 .llseek = generic_file_llseek,
723 .read = v9fs_cached_file_read,
724 .write = v9fs_cached_file_write,
725 .aio_read = generic_file_aio_read,
726 .aio_write = generic_file_aio_write,
727 .open = v9fs_file_open,
728 .release = v9fs_dir_release,
729 .lock = v9fs_file_lock,
730 .mmap = v9fs_file_mmap,
731 .fsync = v9fs_file_fsync,
/* file_operations for cached 9P2000.L mounts (adds flock support) */
734 const struct file_operations v9fs_cached_file_operations_dotl = {
735 .llseek = generic_file_llseek,
736 .read = v9fs_cached_file_read,
737 .write = v9fs_cached_file_write,
738 .aio_read = generic_file_aio_read,
739 .aio_write = generic_file_aio_write,
740 .open = v9fs_file_open,
741 .release = v9fs_dir_release,
742 .lock = v9fs_file_lock_dotl,
743 .flock = v9fs_file_flock_dotl,
744 .mmap = v9fs_file_mmap,
745 .fsync = v9fs_file_fsync_dotl,
/* file_operations for uncached legacy-9P mounts (read-only mmap) */
748 const struct file_operations v9fs_file_operations = {
749 .llseek = generic_file_llseek,
750 .read = v9fs_file_read,
751 .write = v9fs_file_write,
752 .open = v9fs_file_open,
753 .release = v9fs_dir_release,
754 .lock = v9fs_file_lock,
755 .mmap = generic_file_readonly_mmap,
756 .fsync = v9fs_file_fsync,
/* file_operations for uncached 9P2000.L mounts (read-only mmap) */
759 const struct file_operations v9fs_file_operations_dotl = {
760 .llseek = generic_file_llseek,
761 .read = v9fs_file_read,
762 .write = v9fs_file_write,
763 .open = v9fs_file_open,
764 .release = v9fs_dir_release,
765 .lock = v9fs_file_lock_dotl,
766 .flock = v9fs_file_flock_dotl,
767 .mmap = generic_file_readonly_mmap,
768 .fsync = v9fs_file_fsync_dotl,