/*
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/stat.h>
#include <linux/buffer_head.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/falloc.h>
#include <linux/types.h>
#include <linux/compat.h>
#include <linux/uaccess.h>
#include <linux/mount.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include "xattr.h"
#include "acl.h"
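/*
 * Page fault handler for shared writable mappings: pre-allocate the data
 * block for the faulting page under the DATA_NEW lock, then mark the page
 * up to date, zeroing anything beyond i_size.
 */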
static int f2fs_vm_page_mkwrite(struct vm_area_struct *vma,
						struct vm_fault *vmf)
{
	struct page *page = vmf->page;
	struct inode *inode = file_inode(vma->vm_file);
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	block_t old_blk_addr;
	struct dnode_of_data dn;
	int err;

	sb_start_pagefault(inode->i_sb);
	mutex_lock_op(sbi, DATA_NEW);

	/* block allocation */
	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, page->index, ALLOC_NODE);
	if (err) {
		mutex_unlock_op(sbi, DATA_NEW);
		goto out;
	}

	old_blk_addr = dn.data_blkaddr;
	if (old_blk_addr == NULL_ADDR) {
		err = reserve_new_block(&dn);
		if (err) {
			f2fs_put_dnode(&dn);
			mutex_unlock_op(sbi, DATA_NEW);
			goto out;
		}
	}
	f2fs_put_dnode(&dn);
	mutex_unlock_op(sbi, DATA_NEW);

	lock_page(page);
	if (page->mapping != inode->i_mapping ||
			page_offset(page) >= i_size_read(inode) ||
			!PageUptodate(page)) {
		unlock_page(page);
		err = -EFAULT;
		goto out;
	}

	/* check to see if the page is mapped already (no holes) */
	if (PageMappedToDisk(page))
		goto out;

	/* fill the page */
	wait_on_page_writeback(page);

	/* page is wholly or partially inside EOF */
	if (((page->index + 1) << PAGE_CACHE_SHIFT) > i_size_read(inode)) {
		unsigned offset = i_size_read(inode) & ~PAGE_CACHE_MASK;
		zero_user_segment(page, offset, PAGE_CACHE_SIZE);
	}
	SetPageUptodate(page);

	file_update_time(vma->vm_file);
out:
	sb_end_pagefault(inode->i_sb);
	return block_page_mkwrite_return(err);
}

static const struct vm_operations_struct f2fs_file_vm_ops = {
	.fault		= filemap_fault,
	.page_mkwrite	= f2fs_vm_page_mkwrite,
	.remap_pages	= generic_file_remap_pages,
};

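/*
 * Return nonzero when the parent directory's node is not yet covered by the
 * last checkpoint; f2fs_sync_file() then forces a checkpoint so the dentry
 * remains recoverable after a crash.
 */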
static int need_to_sync_dir(struct f2fs_sb_info *sbi, struct inode *inode)
{
	struct dentry *dentry;
	nid_t pino;

	inode = igrab(inode);
	dentry = d_find_any_alias(inode);
	iput(inode);
	if (!dentry)
		return 0;
	pino = dentry->d_parent->d_inode->i_ino;
	dput(dentry);
	return !is_checkpointed_node(sbi, pino);
}

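/*
 * fsync either writes a full checkpoint (need_cp) or just flushes this
 * inode's node pages and issues a block-device flush, relying on
 * roll-forward recovery to rebuild the rest after power-off.
 */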
int f2fs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
{
	struct inode *inode = file->f_mapping->host;
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	unsigned long long cur_version;
	int ret = 0;
	bool need_cp = false;
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_ALL,
		.nr_to_write = LONG_MAX,
		.for_reclaim = 0,
	};

	if (inode->i_sb->s_flags & MS_RDONLY)
		return 0;

	ret = filemap_write_and_wait_range(inode->i_mapping, start, end);
	if (ret)
		return ret;

	/* guarantee free sections for fsync */
	f2fs_balance_fs(sbi);

	mutex_lock(&inode->i_mutex);

	if (datasync && !(inode->i_state & I_DIRTY_DATASYNC))
		goto out;

	mutex_lock(&sbi->cp_mutex);
	cur_version = le64_to_cpu(F2FS_CKPT(sbi)->checkpoint_ver);
	mutex_unlock(&sbi->cp_mutex);

	if (F2FS_I(inode)->data_version != cur_version &&
					!(inode->i_state & I_DIRTY))
		goto out;
	F2FS_I(inode)->data_version--;

	if (!S_ISREG(inode->i_mode) || inode->i_nlink != 1)
		need_cp = true;
	else if (is_inode_flag_set(F2FS_I(inode), FI_NEED_CP))
		need_cp = true;
	else if (!space_for_roll_forward(sbi))
		need_cp = true;
	else if (need_to_sync_dir(sbi, inode))
		need_cp = true;

	if (need_cp) {
		/* all the dirty node pages should be flushed for POR */
		ret = f2fs_sync_fs(inode->i_sb, 1);
		clear_inode_flag(F2FS_I(inode), FI_NEED_CP);
	} else {
		/* if there is no written node page, write its inode page */
		while (!sync_node_pages(sbi, inode->i_ino, &wbc)) {
			ret = f2fs_write_inode(inode, NULL);
			if (ret)
				goto out;
		}
		filemap_fdatawait_range(sbi->node_inode->i_mapping,
							0, LONG_MAX);
		ret = blkdev_issue_flush(inode->i_sb->s_bdev, GFP_KERNEL, NULL);
	}
out:
	mutex_unlock(&inode->i_mutex);
	return ret;
}

static int f2fs_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	file_accessed(file);
	vma->vm_ops = &f2fs_file_vm_ops;
	return 0;
}

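/*
 * Free up to @count block addresses recorded in the node page referenced by
 * @dn: invalidate each block, drop its extent cache entry and valid block
 * count, then mark the node page dirty.
 */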
static int truncate_data_blocks_range(struct dnode_of_data *dn, int count)
{
	int nr_free = 0, ofs = dn->ofs_in_node;
	struct f2fs_sb_info *sbi = F2FS_SB(dn->inode->i_sb);
	struct f2fs_node *raw_node;
	__le32 *addr;

	raw_node = page_address(dn->node_page);
	addr = blkaddr_in_node(raw_node) + ofs;

	for ( ; count > 0; count--, addr++, dn->ofs_in_node++) {
		block_t blkaddr = le32_to_cpu(*addr);
		if (blkaddr == NULL_ADDR)
			continue;

		update_extent_cache(NULL_ADDR, dn);
		invalidate_blocks(sbi, blkaddr);
		dec_valid_block_count(sbi, dn->inode, 1);
		nr_free++;
	}
	if (nr_free) {
		set_page_dirty(dn->node_page);
		sync_inode_page(dn);
	}
	dn->ofs_in_node = ofs;
	return nr_free;
}

void truncate_data_blocks(struct dnode_of_data *dn)
{
	truncate_data_blocks_range(dn, ADDRS_PER_BLOCK);
}

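/* Zero the tail of the last data page when the new size is not page aligned. */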
static void truncate_partial_data_page(struct inode *inode, u64 from)
{
	unsigned offset = from & (PAGE_CACHE_SIZE - 1);
	struct page *page;

	if (!offset)
		return;
	page = find_data_page(inode, from >> PAGE_CACHE_SHIFT);
	if (IS_ERR(page))
		return;
	lock_page(page);
	wait_on_page_writeback(page);
	zero_user(page, offset, PAGE_CACHE_SIZE - offset);
	set_page_dirty(page);
	f2fs_put_page(page, 1);
}

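/*
 * Truncate direct block addresses in the node page that holds @from, let
 * truncate_inode_blocks() free everything behind it, and finally zero the
 * partial last page.
 */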
static int truncate_blocks(struct inode *inode, u64 from)
{
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	unsigned int blocksize = inode->i_sb->s_blocksize;
	struct dnode_of_data dn;
	pgoff_t free_from;
	int count = 0, err;

	free_from = (pgoff_t)
			((from + blocksize - 1) >> (sbi->log_blocksize));

	mutex_lock_op(sbi, DATA_TRUNC);

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, free_from, LOOKUP_NODE);
	if (err) {
		if (err == -ENOENT)
			goto free_next;
		mutex_unlock_op(sbi, DATA_TRUNC);
		return err;
	}

	if (IS_INODE(dn.node_page))
		count = ADDRS_PER_INODE;
	else
		count = ADDRS_PER_BLOCK;

	count -= dn.ofs_in_node;
	if (dn.ofs_in_node || IS_INODE(dn.node_page)) {
		truncate_data_blocks_range(&dn, count);
		free_from += count;
	}
	f2fs_put_dnode(&dn);
free_next:
	err = truncate_inode_blocks(inode, free_from);
	mutex_unlock_op(sbi, DATA_TRUNC);

	/* lastly zero out the first data page */
	truncate_partial_data_page(inode, from);
	return err;
}

void f2fs_truncate(struct inode *inode)
{
	if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
				S_ISLNK(inode->i_mode)))
		return;

	if (!truncate_blocks(inode, i_size_read(inode))) {
		inode->i_mtime = inode->i_ctime = CURRENT_TIME;
		mark_inode_dirty(inode);
	}
}

static int f2fs_getattr(struct vfsmount *mnt,
			 struct dentry *dentry, struct kstat *stat)
{
	struct inode *inode = dentry->d_inode;
	generic_fillattr(inode, stat);
	stat->blocks <<= 3;
	return 0;
}

#ifdef CONFIG_F2FS_FS_POSIX_ACL
static void __setattr_copy(struct inode *inode, const struct iattr *attr)
{
	struct f2fs_inode_info *fi = F2FS_I(inode);
	unsigned int ia_valid = attr->ia_valid;

	if (ia_valid & ATTR_UID)
		inode->i_uid = attr->ia_uid;
	if (ia_valid & ATTR_GID)
		inode->i_gid = attr->ia_gid;
	if (ia_valid & ATTR_ATIME)
		inode->i_atime = timespec_trunc(attr->ia_atime,
						inode->i_sb->s_time_gran);
	if (ia_valid & ATTR_MTIME)
		inode->i_mtime = timespec_trunc(attr->ia_mtime,
						inode->i_sb->s_time_gran);
	if (ia_valid & ATTR_CTIME)
		inode->i_ctime = timespec_trunc(attr->ia_ctime,
						inode->i_sb->s_time_gran);
	if (ia_valid & ATTR_MODE) {
		umode_t mode = attr->ia_mode;

		if (!in_group_p(inode->i_gid) && !capable(CAP_FSETID))
			mode &= ~S_ISGID;
		set_acl_inode(fi, mode);
	}
}
#else
#define __setattr_copy setattr_copy
#endif

int f2fs_setattr(struct dentry *dentry, struct iattr *attr)
{
	struct inode *inode = dentry->d_inode;
	struct f2fs_inode_info *fi = F2FS_I(inode);
	int err;

	err = inode_change_ok(inode, attr);
	if (err)
		return err;

	if ((attr->ia_valid & ATTR_SIZE) &&
			attr->ia_size != i_size_read(inode)) {
		truncate_setsize(inode, attr->ia_size);
		f2fs_truncate(inode);
		f2fs_balance_fs(F2FS_SB(inode->i_sb));
	}

	__setattr_copy(inode, attr);

	if (attr->ia_valid & ATTR_MODE) {
		err = f2fs_acl_chmod(inode);
		if (err || is_inode_flag_set(fi, FI_ACL_MODE)) {
			inode->i_mode = fi->i_acl_mode;
			clear_inode_flag(fi, FI_ACL_MODE);
		}
	}

	mark_inode_dirty(inode);
	return err;
}

const struct inode_operations f2fs_file_inode_operations = {
	.getattr	= f2fs_getattr,
	.setattr	= f2fs_setattr,
	.get_acl	= f2fs_get_acl,
#ifdef CONFIG_F2FS_FS_XATTR
	.setxattr	= generic_setxattr,
	.getxattr	= generic_getxattr,
	.listxattr	= f2fs_listxattr,
	.removexattr	= generic_removexattr,
#endif
};

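/* Zero a byte range within one page, allocating the data page if needed. */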
static void fill_zero(struct inode *inode, pgoff_t index,
					loff_t start, loff_t len)
{
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	struct page *page;

	if (!len)
		return;

	f2fs_balance_fs(sbi);

	mutex_lock_op(sbi, DATA_NEW);
	page = get_new_data_page(inode, index, false);
	mutex_unlock_op(sbi, DATA_NEW);

	if (!IS_ERR(page)) {
		wait_on_page_writeback(page);
		zero_user(page, start, len);
		set_page_dirty(page);
		f2fs_put_page(page, 1);
	}
}

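/* Free any allocated data blocks for every page index in [pg_start, pg_end). */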
int truncate_hole(struct inode *inode, pgoff_t pg_start, pgoff_t pg_end)
{
	pgoff_t index;
	int err;

	for (index = pg_start; index < pg_end; index++) {
		struct dnode_of_data dn;
		struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);

		f2fs_balance_fs(sbi);
		mutex_lock_op(sbi, DATA_TRUNC);
		set_new_dnode(&dn, inode, NULL, NULL, 0);
		err = get_dnode_of_data(&dn, index, LOOKUP_NODE);
		if (err) {
			mutex_unlock_op(sbi, DATA_TRUNC);
			if (err == -ENOENT)
				continue;
			return err;
		}
		if (dn.data_blkaddr != NULL_ADDR)
			truncate_data_blocks_range(&dn, 1);
		f2fs_put_dnode(&dn);
		mutex_unlock_op(sbi, DATA_TRUNC);
	}
	return 0;
}

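/*
 * FALLOC_FL_PUNCH_HOLE: zero the partial head and tail pages and drop the
 * whole pages in between; without FALLOC_FL_KEEP_SIZE the file is shrunk to
 * @offset when the hole reaches EOF.
 */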
static int punch_hole(struct inode *inode, loff_t offset, loff_t len, int mode)
{
	pgoff_t pg_start, pg_end;
	loff_t off_start, off_end;
	int ret = 0;

	pg_start = ((unsigned long long) offset) >> PAGE_CACHE_SHIFT;
	pg_end = ((unsigned long long) offset + len) >> PAGE_CACHE_SHIFT;

	off_start = offset & (PAGE_CACHE_SIZE - 1);
	off_end = (offset + len) & (PAGE_CACHE_SIZE - 1);

	if (pg_start == pg_end) {
		fill_zero(inode, pg_start, off_start,
						off_end - off_start);
	} else {
		if (off_start)
			fill_zero(inode, pg_start++, off_start,
					PAGE_CACHE_SIZE - off_start);
		if (off_end)
			fill_zero(inode, pg_end, 0, off_end);

		if (pg_start < pg_end) {
			struct address_space *mapping = inode->i_mapping;
			loff_t blk_start, blk_end;

			blk_start = pg_start << PAGE_CACHE_SHIFT;
			blk_end = pg_end << PAGE_CACHE_SHIFT;
			truncate_inode_pages_range(mapping, blk_start,
					blk_end - 1);
			ret = truncate_hole(inode, pg_start, pg_end);
		}
	}

	if (!(mode & FALLOC_FL_KEEP_SIZE) &&
		i_size_read(inode) <= (offset + len)) {
		i_size_write(inode, offset);
		mark_inode_dirty(inode);
	}

	return ret;
}

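/*
 * Default fallocate: reserve a block for every page in the range and grow
 * i_size unless FALLOC_FL_KEEP_SIZE is set.
 */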
static int expand_inode_data(struct inode *inode, loff_t offset,
					loff_t len, int mode)
{
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	pgoff_t index, pg_start, pg_end;
	loff_t new_size = i_size_read(inode);
	loff_t off_start, off_end;
	int ret;

	ret = inode_newsize_ok(inode, (len + offset));
	if (ret)
		return ret;

	pg_start = ((unsigned long long) offset) >> PAGE_CACHE_SHIFT;
	pg_end = ((unsigned long long) offset + len) >> PAGE_CACHE_SHIFT;

	off_start = offset & (PAGE_CACHE_SIZE - 1);
	off_end = (offset + len) & (PAGE_CACHE_SIZE - 1);

	for (index = pg_start; index <= pg_end; index++) {
		struct dnode_of_data dn;

		mutex_lock_op(sbi, DATA_NEW);

		set_new_dnode(&dn, inode, NULL, NULL, 0);
		ret = get_dnode_of_data(&dn, index, ALLOC_NODE);
		if (ret) {
			mutex_unlock_op(sbi, DATA_NEW);
			break;
		}

		if (dn.data_blkaddr == NULL_ADDR) {
			ret = reserve_new_block(&dn);
			if (ret) {
				f2fs_put_dnode(&dn);
				mutex_unlock_op(sbi, DATA_NEW);
				break;
			}
		}
		f2fs_put_dnode(&dn);

		mutex_unlock_op(sbi, DATA_NEW);

		if (pg_start == pg_end)
			new_size = offset + len;
		else if (index == pg_start && off_start)
			new_size = (index + 1) << PAGE_CACHE_SHIFT;
		else if (index == pg_end)
			new_size = (index << PAGE_CACHE_SHIFT) + off_end;
		else
			new_size += PAGE_CACHE_SIZE;
	}

	if (!(mode & FALLOC_FL_KEEP_SIZE) &&
		i_size_read(inode) < new_size) {
		i_size_write(inode, new_size);
		mark_inode_dirty(inode);
	}

	return ret;
}

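/* Only FALLOC_FL_KEEP_SIZE and FALLOC_FL_PUNCH_HOLE are supported. */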
static long f2fs_fallocate(struct file *file, int mode,
				loff_t offset, loff_t len)
{
	struct inode *inode = file_inode(file);
	long ret;

	if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
		return -EOPNOTSUPP;
	if (mode & FALLOC_FL_PUNCH_HOLE)
		ret = punch_hole(inode, offset, len, mode);
	else
		ret = expand_inode_data(inode, offset, len, mode);
	if (!ret) {
		inode->i_mtime = inode->i_ctime = CURRENT_TIME;
		mark_inode_dirty(inode);
	}
	return ret;
}

#define F2FS_REG_FLMASK		(~(FS_DIRSYNC_FL | FS_TOPDIR_FL))
#define F2FS_OTHER_FLMASK	(FS_NODUMP_FL | FS_NOATIME_FL)

static inline __u32 f2fs_mask_flags(umode_t mode, __u32 flags)
{
	if (S_ISDIR(mode))
		return flags;
	else if (S_ISREG(mode))
		return flags & F2FS_REG_FLMASK;
	else
		return flags & F2FS_OTHER_FLMASK;
}

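/*
 * FS_IOC_GETFLAGS/FS_IOC_SETFLAGS: SETFLAGS preserves the bits the caller may
 * not modify and requires CAP_LINUX_IMMUTABLE to toggle the append-only or
 * immutable flags.
 */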
long f2fs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	unsigned int flags;
	int ret;

	switch (cmd) {
	case FS_IOC_GETFLAGS:
		flags = fi->i_flags & FS_FL_USER_VISIBLE;
		return put_user(flags, (int __user *) arg);
	case FS_IOC_SETFLAGS:
	{
		unsigned int oldflags;

		ret = mnt_want_write(filp->f_path.mnt);
		if (ret)
			return ret;
		if (!inode_owner_or_capable(inode)) {
			ret = -EACCES;
			goto out;
		}
		if (get_user(flags, (int __user *) arg)) {
			ret = -EFAULT;
			goto out;
		}
		flags = f2fs_mask_flags(inode->i_mode, flags);

		mutex_lock(&inode->i_mutex);
		oldflags = fi->i_flags;
		if ((flags ^ oldflags) & (FS_APPEND_FL | FS_IMMUTABLE_FL)) {
			if (!capable(CAP_LINUX_IMMUTABLE)) {
				mutex_unlock(&inode->i_mutex);
				ret = -EPERM;
				goto out;
			}
		}
		flags = flags & FS_FL_USER_MODIFIABLE;
		flags |= oldflags & ~FS_FL_USER_MODIFIABLE;
		fi->i_flags = flags;
		mutex_unlock(&inode->i_mutex);

		f2fs_set_inode_flags(inode);
		inode->i_ctime = CURRENT_TIME;
		mark_inode_dirty(inode);
out:
		mnt_drop_write(filp->f_path.mnt);
		return ret;
	}
	default:
		return -ENOTTY;
	}
}

#ifdef CONFIG_COMPAT
long f2fs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	switch (cmd) {
	case F2FS_IOC32_GETFLAGS:
		cmd = F2FS_IOC_GETFLAGS;
		break;
	case F2FS_IOC32_SETFLAGS:
		cmd = F2FS_IOC_SETFLAGS;
		break;
	default:
		return -ENOIOCTLCMD;
	}
	return f2fs_ioctl(file, cmd, (unsigned long) compat_ptr(arg));
}
#endif

const struct file_operations f2fs_file_operations = {
	.llseek		= generic_file_llseek,
	.read		= do_sync_read,
	.write		= do_sync_write,
	.aio_read	= generic_file_aio_read,
	.aio_write	= generic_file_aio_write,
	.open		= generic_file_open,
	.mmap		= f2fs_file_mmap,
	.fsync		= f2fs_sync_file,
	.fallocate	= f2fs_fallocate,
	.unlocked_ioctl	= f2fs_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= f2fs_compat_ioctl,
#endif
	.splice_read	= generic_file_splice_read,
	.splice_write	= generic_file_splice_write,
};