/*
 * inode.c - NILFS inode operations.
 *
 * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 *
 * Written by Ryusuke Konishi <ryusuke@osrg.net>
 */
#include <linux/buffer_head.h>
#include <linux/gfp.h>
#include <linux/mpage.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/uio.h>
#include "nilfs.h"
#include "btnode.h"
#include "segment.h"
#include "page.h"
#include "mdt.h"
#include "cpfile.h"
#include "ifile.h"
/**
 * struct nilfs_iget_args - arguments used during comparison between inodes
 * @ino: inode number
 * @cno: checkpoint number
 * @root: pointer on NILFS root object (mounted checkpoint)
 * @for_gc: inode for GC flag
 */
struct nilfs_iget_args {
	u64 ino;
	__u64 cno;
	struct nilfs_root *root;
	int for_gc;
};

static int nilfs_iget_test(struct inode *inode, void *opaque);
void nilfs_inode_add_blocks(struct inode *inode, int n)
{
	struct nilfs_root *root = NILFS_I(inode)->i_root;

	inode_add_bytes(inode, (1 << inode->i_blkbits) * n);
	if (root)
		atomic64_add(n, &root->blocks_count);
}
void nilfs_inode_sub_blocks(struct inode *inode, int n)
{
	struct nilfs_root *root = NILFS_I(inode)->i_root;

	inode_sub_bytes(inode, (1 << inode->i_blkbits) * n);
	if (root)
		atomic64_sub(n, &root->blocks_count);
}
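
/*
 * Note on the two helpers above: they keep two counters in sync, the
 * per-inode byte count updated by inode_add_bytes()/inode_sub_bytes()
 * and the per-checkpoint block count in nilfs_root.  As a worked
 * example, appending one block to a file on a 4KB-block filesystem
 * adds 1 << 12 = 4096 bytes to the inode and increments
 * root->blocks_count by one.
 */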
/**
 * nilfs_get_block() - get a file block on the filesystem (callback function)
 * @inode - inode struct of the target file
 * @blkoff - file block number
 * @bh_result - buffer head to be mapped on
 * @create - indicate whether allocating the block or not when it has not
 *	been allocated yet.
 *
 * This function does not issue the actual read request for the specified
 * data block; that is done by the VFS.
 */
int nilfs_get_block(struct inode *inode, sector_t blkoff,
		    struct buffer_head *bh_result, int create)
{
	struct nilfs_inode_info *ii = NILFS_I(inode);
	struct the_nilfs *nilfs = inode->i_sb->s_fs_info;
	__u64 blknum = 0;
	int err = 0, ret;
	unsigned maxblocks = bh_result->b_size >> inode->i_blkbits;

	down_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
	ret = nilfs_bmap_lookup_contig(ii->i_bmap, blkoff, &blknum, maxblocks);
	up_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
	if (ret >= 0) {	/* found */
		map_bh(bh_result, inode->i_sb, blknum);
		if (ret > 0)
			bh_result->b_size = (ret << inode->i_blkbits);
		goto out;
	}
	/* data block was not found */
	if (ret == -ENOENT && create) {
		struct nilfs_transaction_info ti;

		bh_result->b_blocknr = 0;
		err = nilfs_transaction_begin(inode->i_sb, &ti, 1);
		if (unlikely(err))
			goto out;
		err = nilfs_bmap_insert(ii->i_bmap, (unsigned long)blkoff,
					(unsigned long)bh_result);
		if (unlikely(err != 0)) {
			if (err == -EEXIST) {
				/*
				 * The get_block() function could be called
				 * from multiple callers for an inode.
				 * However, the page having this block must
				 * be locked in this case.
				 */
				printk(KERN_WARNING
				       "nilfs_get_block: a race condition "
				       "while inserting a data block. "
				       "(inode number=%lu, file block "
				       "offset=%llu)\n",
				       inode->i_ino,
				       (unsigned long long)blkoff);
				err = 0;
			}
			nilfs_transaction_abort(inode->i_sb);
			goto out;
		}
		nilfs_mark_inode_dirty_sync(inode);
		nilfs_transaction_commit(inode->i_sb); /* never fails */
		/* Error handling should be detailed */
		set_buffer_new(bh_result);
		set_buffer_delay(bh_result);
		map_bh(bh_result, inode->i_sb, 0);
		/* disk block number must be changed to proper value */
	} else if (ret == -ENOENT) {
		/*
		 * not found is not error (e.g. hole); must return without
		 * the mapped state flag.
		 */
		;
	} else {
		err = ret;
	}

 out:
	return err;
}
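
/*
 * A note on the create path above: nilfs_bmap_insert() only registers
 * the new block in the bmap, so the buffer is mapped to a dummy block
 * number (0) and flagged "new" and "delay".  The real disk block
 * number is assigned later, when the segment constructor writes the
 * log out.
 */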
/**
 * nilfs_readpage() - implement readpage() method of nilfs_aops {}
 * address_space_operations.
 * @file - file struct of the file to be read
 * @page - the page to be read
 */
static int nilfs_readpage(struct file *file, struct page *page)
{
	return mpage_readpage(page, nilfs_get_block);
}
/**
 * nilfs_readpages() - implement readpages() method of nilfs_aops {}
 * address_space_operations.
 * @file - file struct of the file to be read
 * @mapping - address_space struct used for reading multiple pages
 * @pages - the pages to be read
 * @nr_pages - number of pages to be read
 */
static int nilfs_readpages(struct file *file, struct address_space *mapping,
			   struct list_head *pages, unsigned nr_pages)
{
	return mpage_readpages(mapping, pages, nr_pages, nilfs_get_block);
}
static int nilfs_writepages(struct address_space *mapping,
			    struct writeback_control *wbc)
{
	struct inode *inode = mapping->host;
	int err = 0;

	if (inode->i_sb->s_flags & MS_RDONLY) {
		nilfs_clear_dirty_pages(mapping, false);
		return -EROFS;
	}

	if (wbc->sync_mode == WB_SYNC_ALL)
		err = nilfs_construct_dsync_segment(inode->i_sb, inode,
						    wbc->range_start,
						    wbc->range_end);
	return err;
}
static int nilfs_writepage(struct page *page, struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	int err;

	if (inode->i_sb->s_flags & MS_RDONLY) {
		/*
		 * It means that filesystem was remounted in read-only
		 * mode because of error or metadata corruption. But we
		 * have dirty pages that try to be flushed in background.
		 * So, here we simply discard this dirty page.
		 */
		nilfs_clear_dirty_page(page, false);
		unlock_page(page);
		return -EROFS;
	}

	redirty_page_for_writepage(wbc, page);
	unlock_page(page);

	if (wbc->sync_mode == WB_SYNC_ALL) {
		err = nilfs_construct_segment(inode->i_sb);
		if (unlikely(err))
			return err;
	} else if (wbc->for_reclaim)
		nilfs_flush_segment(inode->i_sb, inode->i_ino);

	return 0;
}
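
/*
 * Design note: neither nilfs_writepage() nor nilfs_writepages() writes
 * pages directly.  Dirty pages are left to the segment constructor,
 * because a log-structured filesystem can only assign disk block
 * addresses at log-write time; writepage merely redirties the page
 * and, when synchronous writeback is requested, kicks segment
 * construction.
 */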
static int nilfs_set_page_dirty(struct page *page)
{
	struct inode *inode = page->mapping->host;
	int ret = __set_page_dirty_nobuffers(page);

	if (page_has_buffers(page)) {
		unsigned nr_dirty = 0;
		struct buffer_head *bh, *head;

		/*
		 * This page is locked by callers, and no other thread
		 * concurrently marks its buffers dirty since they are
		 * only dirtied through routines in fs/buffer.c in
		 * which call sites of mark_buffer_dirty are protected
		 * by page lock.
		 */
		bh = head = page_buffers(page);
		do {
			/* Do not mark hole blocks dirty */
			if (buffer_dirty(bh) || !buffer_mapped(bh))
				continue;

			set_buffer_dirty(bh);
			nr_dirty++;
		} while (bh = bh->b_this_page, bh != head);

		if (nr_dirty)
			nilfs_set_file_dirty(inode, nr_dirty);
	} else if (ret) {
		unsigned nr_dirty = 1 << (PAGE_CACHE_SHIFT - inode->i_blkbits);

		nilfs_set_file_dirty(inode, nr_dirty);
	}
	return ret;
}
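
/*
 * Worked example for the bufferless case above: with 4KB pages
 * (PAGE_CACHE_SHIFT == 12) and 1KB blocks (inode->i_blkbits == 10),
 * dirtying a whole page accounts for 1 << (12 - 10) = 4 dirty blocks.
 */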
void nilfs_write_failed(struct address_space *mapping, loff_t to)
{
	struct inode *inode = mapping->host;

	if (to > inode->i_size) {
		truncate_pagecache(inode, inode->i_size);
		nilfs_truncate(inode);
	}
}
static int nilfs_write_begin(struct file *file, struct address_space *mapping,
			     loff_t pos, unsigned len, unsigned flags,
			     struct page **pagep, void **fsdata)
{
	struct inode *inode = mapping->host;
	int err = nilfs_transaction_begin(inode->i_sb, NULL, 1);

	if (unlikely(err))
		return err;

	err = block_write_begin(mapping, pos, len, flags, pagep,
				nilfs_get_block);
	if (unlikely(err)) {
		nilfs_write_failed(mapping, pos + len);
		nilfs_transaction_abort(inode->i_sb);
	}
	return err;
}
static int nilfs_write_end(struct file *file, struct address_space *mapping,
			   loff_t pos, unsigned len, unsigned copied,
			   struct page *page, void *fsdata)
{
	struct inode *inode = mapping->host;
	unsigned start = pos & (PAGE_CACHE_SIZE - 1);
	unsigned nr_dirty;
	int err;

	nr_dirty = nilfs_page_count_clean_buffers(page, start,
						  start + copied);
	copied = generic_write_end(file, mapping, pos, len, copied, page,
				   fsdata);
	nilfs_set_file_dirty(inode, nr_dirty);
	err = nilfs_transaction_commit(inode->i_sb);
	return err ? : copied;
}
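
/*
 * Note on the buffered write path: nilfs_write_begin() opens a
 * transaction that nilfs_write_end() commits, so every write is
 * bracketed by nilfs_transaction_begin()/nilfs_transaction_commit().
 * Clean buffers are counted before generic_write_end() so that the
 * dirty-block accounting covers exactly the buffers this write dirtied.
 */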
static ssize_t
nilfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter, loff_t offset)
{
	struct file *file = iocb->ki_filp;
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = file->f_mapping->host;
	size_t count = iov_iter_count(iter);
	ssize_t size;

	if (iov_iter_rw(iter) == WRITE)
		return 0;

	/* Needs synchronization with the cleaner */
	size = blockdev_direct_IO(iocb, inode, iter, offset, nilfs_get_block);

	/*
	 * In case of error extending write may have instantiated a few
	 * blocks outside i_size. Trim these off again.
	 */
	if (unlikely(iov_iter_rw(iter) == WRITE && size < 0)) {
		loff_t isize = i_size_read(inode);
		loff_t end = offset + count;

		if (end > isize)
			nilfs_write_failed(mapping, end);
	}

	return size;
}
const struct address_space_operations nilfs_aops = {
	.writepage		= nilfs_writepage,
	.readpage		= nilfs_readpage,
	.writepages		= nilfs_writepages,
	.set_page_dirty		= nilfs_set_page_dirty,
	.readpages		= nilfs_readpages,
	.write_begin		= nilfs_write_begin,
	.write_end		= nilfs_write_end,
	/* .releasepage		= nilfs_releasepage, */
	.invalidatepage		= block_invalidatepage,
	.direct_IO		= nilfs_direct_IO,
	.is_partially_uptodate	= block_is_partially_uptodate,
};
static int nilfs_insert_inode_locked(struct inode *inode,
				     struct nilfs_root *root,
				     unsigned long ino)
{
	struct nilfs_iget_args args = {
		.ino = ino, .root = root, .cno = 0, .for_gc = 0
	};

	return insert_inode_locked4(inode, ino, nilfs_iget_test, &args);
}
struct inode *nilfs_new_inode(struct inode *dir, umode_t mode)
{
	struct super_block *sb = dir->i_sb;
	struct the_nilfs *nilfs = sb->s_fs_info;
	struct inode *inode;
	struct nilfs_inode_info *ii;
	struct nilfs_root *root;
	int err = -ENOMEM;
	ino_t ino;

	inode = new_inode(sb);
	if (unlikely(!inode))
		goto failed;

	mapping_set_gfp_mask(inode->i_mapping,
			     mapping_gfp_mask(inode->i_mapping) & ~__GFP_FS);

	root = NILFS_I(dir)->i_root;
	ii = NILFS_I(inode);
	ii->i_state = 1 << NILFS_I_NEW;
	ii->i_root = root;

	err = nilfs_ifile_create_inode(root->ifile, &ino, &ii->i_bh);
	if (unlikely(err))
		goto failed_ifile_create_inode;
	/* reference count of i_bh inherits from nilfs_mdt_read_block() */

	atomic64_inc(&root->inodes_count);
	inode_init_owner(inode, dir, mode);
	inode->i_ino = ino;
	inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;

	if (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode)) {
		err = nilfs_bmap_read(ii->i_bmap, NULL);
		if (err < 0)
			goto failed_after_creation;

		set_bit(NILFS_I_BMAP, &ii->i_state);
		/* No lock is needed; iget() ensures it. */
	}

	ii->i_flags = nilfs_mask_flags(
		mode, NILFS_I(dir)->i_flags & NILFS_FL_INHERITED);

	/* ii->i_file_acl = 0; */
	/* ii->i_dir_acl = 0; */
	ii->i_dir_start_lookup = 0;
	nilfs_set_inode_flags(inode);
	spin_lock(&nilfs->ns_next_gen_lock);
	inode->i_generation = nilfs->ns_next_generation++;
	spin_unlock(&nilfs->ns_next_gen_lock);
	if (nilfs_insert_inode_locked(inode, root, ino) < 0) {
		err = -EIO;
		goto failed_after_creation;
	}

	err = nilfs_init_acl(inode, dir);
	if (unlikely(err))
		/*
		 * Never occurs.  When supporting nilfs_init_acl(),
		 * proper cancellation of above jobs should be considered.
		 */
		goto failed_after_creation;

	return inode;

 failed_after_creation:
	clear_nlink(inode);
	unlock_new_inode(inode);
	iput(inode);  /*
		       * raw_inode will be deleted through
		       * nilfs_evict_inode().
		       */
	goto failed;

 failed_ifile_create_inode:
	make_bad_inode(inode);
	iput(inode);  /*
		       * if i_nlink == 1, generic_forget_inode() will be
		       * called.
		       */
 failed:
	return ERR_PTR(err);
}
void nilfs_set_inode_flags(struct inode *inode)
{
	unsigned int flags = NILFS_I(inode)->i_flags;

	inode->i_flags &= ~(S_SYNC | S_APPEND | S_IMMUTABLE | S_NOATIME |
			    S_DIRSYNC);
	if (flags & FS_SYNC_FL)
		inode->i_flags |= S_SYNC;
	if (flags & FS_APPEND_FL)
		inode->i_flags |= S_APPEND;
	if (flags & FS_IMMUTABLE_FL)
		inode->i_flags |= S_IMMUTABLE;
	if (flags & FS_NOATIME_FL)
		inode->i_flags |= S_NOATIME;
	if (flags & FS_DIRSYNC_FL)
		inode->i_flags |= S_DIRSYNC;
	mapping_set_gfp_mask(inode->i_mapping,
			     mapping_gfp_mask(inode->i_mapping) & ~__GFP_FS);
}
int nilfs_read_inode_common(struct inode *inode,
			    struct nilfs_inode *raw_inode)
{
	struct nilfs_inode_info *ii = NILFS_I(inode);
	int err;

	inode->i_mode = le16_to_cpu(raw_inode->i_mode);
	i_uid_write(inode, le32_to_cpu(raw_inode->i_uid));
	i_gid_write(inode, le32_to_cpu(raw_inode->i_gid));
	set_nlink(inode, le16_to_cpu(raw_inode->i_links_count));
	inode->i_size = le64_to_cpu(raw_inode->i_size);
	/* The on-disk inode has no atime field; mtime is used instead. */
	inode->i_atime.tv_sec = le64_to_cpu(raw_inode->i_mtime);
	inode->i_ctime.tv_sec = le64_to_cpu(raw_inode->i_ctime);
	inode->i_mtime.tv_sec = le64_to_cpu(raw_inode->i_mtime);
	inode->i_atime.tv_nsec = le32_to_cpu(raw_inode->i_mtime_nsec);
	inode->i_ctime.tv_nsec = le32_to_cpu(raw_inode->i_ctime_nsec);
	inode->i_mtime.tv_nsec = le32_to_cpu(raw_inode->i_mtime_nsec);
	if (inode->i_nlink == 0)
		return -ESTALE; /* this inode is deleted */

	inode->i_blocks = le64_to_cpu(raw_inode->i_blocks);
	ii->i_flags = le32_to_cpu(raw_inode->i_flags);
#if 0
	ii->i_file_acl = le32_to_cpu(raw_inode->i_file_acl);
	ii->i_dir_acl = S_ISREG(inode->i_mode) ?
		0 : le32_to_cpu(raw_inode->i_dir_acl);
#endif
	ii->i_dir_start_lookup = 0;
	inode->i_generation = le32_to_cpu(raw_inode->i_generation);

	if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
	    S_ISLNK(inode->i_mode)) {
		err = nilfs_bmap_read(ii->i_bmap, raw_inode);
		if (err < 0)
			return err;
		set_bit(NILFS_I_BMAP, &ii->i_state);
		/* No lock is needed; iget() ensures it. */
	}
	return 0;
}
static int __nilfs_read_inode(struct super_block *sb,
			      struct nilfs_root *root, unsigned long ino,
			      struct inode *inode)
{
	struct the_nilfs *nilfs = sb->s_fs_info;
	struct buffer_head *bh;
	struct nilfs_inode *raw_inode;
	int err;

	down_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
	err = nilfs_ifile_get_inode_block(root->ifile, ino, &bh);
	if (unlikely(err))
		goto bad_inode;

	raw_inode = nilfs_ifile_map_inode(root->ifile, ino, bh);

	err = nilfs_read_inode_common(inode, raw_inode);
	if (err)
		goto failed_unmap;

	if (S_ISREG(inode->i_mode)) {
		inode->i_op = &nilfs_file_inode_operations;
		inode->i_fop = &nilfs_file_operations;
		inode->i_mapping->a_ops = &nilfs_aops;
	} else if (S_ISDIR(inode->i_mode)) {
		inode->i_op = &nilfs_dir_inode_operations;
		inode->i_fop = &nilfs_dir_operations;
		inode->i_mapping->a_ops = &nilfs_aops;
	} else if (S_ISLNK(inode->i_mode)) {
		inode->i_op = &nilfs_symlink_inode_operations;
		inode->i_mapping->a_ops = &nilfs_aops;
	} else {
		inode->i_op = &nilfs_special_inode_operations;
		init_special_inode(
			inode, inode->i_mode,
			huge_decode_dev(le64_to_cpu(raw_inode->i_device_code)));
	}
	nilfs_ifile_unmap_inode(root->ifile, ino, bh);
	brelse(bh);
	up_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
	nilfs_set_inode_flags(inode);
	return 0;

 failed_unmap:
	nilfs_ifile_unmap_inode(root->ifile, ino, bh);
	brelse(bh);

 bad_inode:
	up_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
	return err;
}
static int nilfs_iget_test(struct inode *inode, void *opaque)
{
	struct nilfs_iget_args *args = opaque;
	struct nilfs_inode_info *ii;

	if (args->ino != inode->i_ino || args->root != NILFS_I(inode)->i_root)
		return 0;

	ii = NILFS_I(inode);
	if (!test_bit(NILFS_I_GCINODE, &ii->i_state))
		return !args->for_gc;

	return args->for_gc && args->cno == ii->i_cno;
}
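
/*
 * Matching rule implemented above: a normal inode only matches a
 * non-GC lookup, while a GC inode additionally has to match on the
 * checkpoint number, since the garbage collector may instantiate the
 * same ino from different checkpoints.
 */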
static int nilfs_iget_set(struct inode *inode, void *opaque)
{
	struct nilfs_iget_args *args = opaque;

	inode->i_ino = args->ino;
	if (args->for_gc) {
		NILFS_I(inode)->i_state = 1 << NILFS_I_GCINODE;
		NILFS_I(inode)->i_cno = args->cno;
		NILFS_I(inode)->i_root = NULL;
	} else {
		if (args->root && args->ino == NILFS_ROOT_INO)
			nilfs_get_root(args->root);
		NILFS_I(inode)->i_root = args->root;
	}
	return 0;
}
struct inode *nilfs_ilookup(struct super_block *sb, struct nilfs_root *root,
			    unsigned long ino)
{
	struct nilfs_iget_args args = {
		.ino = ino, .root = root, .cno = 0, .for_gc = 0
	};

	return ilookup5(sb, ino, nilfs_iget_test, &args);
}
struct inode *nilfs_iget_locked(struct super_block *sb, struct nilfs_root *root,
				unsigned long ino)
{
	struct nilfs_iget_args args = {
		.ino = ino, .root = root, .cno = 0, .for_gc = 0
	};

	return iget5_locked(sb, ino, nilfs_iget_test, nilfs_iget_set, &args);
}
struct inode *nilfs_iget(struct super_block *sb, struct nilfs_root *root,
			 unsigned long ino)
{
	struct inode *inode;
	int err;

	inode = nilfs_iget_locked(sb, root, ino);
	if (unlikely(!inode))
		return ERR_PTR(-ENOMEM);
	if (!(inode->i_state & I_NEW))
		return inode;

	err = __nilfs_read_inode(sb, root, ino, inode);
	if (unlikely(err)) {
		iget_failed(inode);
		return ERR_PTR(err);
	}
	unlock_new_inode(inode);
	return inode;
}
struct inode *nilfs_iget_for_gc(struct super_block *sb, unsigned long ino,
				__u64 cno)
{
	struct nilfs_iget_args args = {
		.ino = ino, .root = NULL, .cno = cno, .for_gc = 1
	};
	struct inode *inode;
	int err;

	inode = iget5_locked(sb, ino, nilfs_iget_test, nilfs_iget_set, &args);
	if (unlikely(!inode))
		return ERR_PTR(-ENOMEM);
	if (!(inode->i_state & I_NEW))
		return inode;

	err = nilfs_init_gcinode(inode);
	if (unlikely(err)) {
		iget_failed(inode);
		return ERR_PTR(err);
	}
	unlock_new_inode(inode);
	return inode;
}
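
/*
 * Usage sketch (hypothetical caller): a directory lookup resolves a
 * name to an ino and calls nilfs_iget(sb, root, ino), keyed by
 * (ino, root) so the same ino can be live in several mounted
 * checkpoints at once; nilfs_iget_for_gc() is instead keyed by
 * (ino, cno) and is intended for the garbage collector's ioctl path.
 */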
void nilfs_write_inode_common(struct inode *inode,
			      struct nilfs_inode *raw_inode, int has_bmap)
{
	struct nilfs_inode_info *ii = NILFS_I(inode);

	raw_inode->i_mode = cpu_to_le16(inode->i_mode);
	raw_inode->i_uid = cpu_to_le32(i_uid_read(inode));
	raw_inode->i_gid = cpu_to_le32(i_gid_read(inode));
	raw_inode->i_links_count = cpu_to_le16(inode->i_nlink);
	raw_inode->i_size = cpu_to_le64(inode->i_size);
	raw_inode->i_ctime = cpu_to_le64(inode->i_ctime.tv_sec);
	raw_inode->i_mtime = cpu_to_le64(inode->i_mtime.tv_sec);
	raw_inode->i_ctime_nsec = cpu_to_le32(inode->i_ctime.tv_nsec);
	raw_inode->i_mtime_nsec = cpu_to_le32(inode->i_mtime.tv_nsec);
	raw_inode->i_blocks = cpu_to_le64(inode->i_blocks);

	raw_inode->i_flags = cpu_to_le32(ii->i_flags);
	raw_inode->i_generation = cpu_to_le32(inode->i_generation);

	if (NILFS_ROOT_METADATA_FILE(inode->i_ino)) {
		struct the_nilfs *nilfs = inode->i_sb->s_fs_info;

		/* zero-fill unused portion in the case of super root block */
		raw_inode->i_xattr = 0;
		raw_inode->i_pad = 0;
		memset((void *)raw_inode + sizeof(*raw_inode), 0,
		       nilfs->ns_inode_size - sizeof(*raw_inode));
	}

	if (has_bmap)
		nilfs_bmap_write(ii->i_bmap, raw_inode);
	else if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode))
		raw_inode->i_device_code =
			cpu_to_le64(huge_encode_dev(inode->i_rdev));
	/*
	 * When extending inode, nilfs->ns_inode_size should be checked
	 * for substitutions of appended fields.
	 */
}
void nilfs_update_inode(struct inode *inode, struct buffer_head *ibh, int flags)
{
	ino_t ino = inode->i_ino;
	struct nilfs_inode_info *ii = NILFS_I(inode);
	struct inode *ifile = ii->i_root->ifile;
	struct nilfs_inode *raw_inode;

	raw_inode = nilfs_ifile_map_inode(ifile, ino, ibh);

	if (test_and_clear_bit(NILFS_I_NEW, &ii->i_state))
		memset(raw_inode, 0, NILFS_MDT(ifile)->mi_entry_size);
	if (flags & I_DIRTY_DATASYNC)
		set_bit(NILFS_I_INODE_SYNC, &ii->i_state);

	nilfs_write_inode_common(inode, raw_inode, 0);
	/*
	 * XXX: call with has_bmap = 0 is a workaround to avoid
	 * deadlock of bmap.  This delays update of i_bmap to just
	 * before writing.
	 */
	nilfs_ifile_unmap_inode(ifile, ino, ibh);
}

#define NILFS_MAX_TRUNCATE_BLOCKS	16384  /* 64MB for 4KB block */
static void nilfs_truncate_bmap(struct nilfs_inode_info *ii,
				unsigned long from)
{
	unsigned long b;
	int ret;

	if (!test_bit(NILFS_I_BMAP, &ii->i_state))
		return;
repeat:
	ret = nilfs_bmap_last_key(ii->i_bmap, &b);
	if (ret == -ENOENT)
		return;
	else if (ret < 0)
		goto failed;

	if (b < from)
		return;

	b -= min_t(unsigned long, NILFS_MAX_TRUNCATE_BLOCKS, b - from);
	ret = nilfs_bmap_truncate(ii->i_bmap, b);
	nilfs_relax_pressure_in_lock(ii->vfs_inode.i_sb);
	if (!ret || (ret == -ENOMEM &&
		     nilfs_bmap_truncate(ii->i_bmap, b) == 0))
		goto repeat;

failed:
	nilfs_warning(ii->vfs_inode.i_sb, __func__,
		      "failed to truncate bmap (ino=%lu, err=%d)",
		      ii->vfs_inode.i_ino, ret);
}
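
/*
 * Worked example for the loop above: with 4KB blocks,
 * NILFS_MAX_TRUNCATE_BLOCKS (16384) caps each nilfs_bmap_truncate()
 * pass at 16384 * 4096 bytes = 64MB, and nilfs_relax_pressure_in_lock()
 * between passes keeps one huge truncate from monopolizing the
 * segment-construction lock.
 */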
void nilfs_truncate(struct inode *inode)
{
	unsigned long blkoff;
	unsigned int blocksize;
	struct nilfs_transaction_info ti;
	struct super_block *sb = inode->i_sb;
	struct nilfs_inode_info *ii = NILFS_I(inode);

	if (!test_bit(NILFS_I_BMAP, &ii->i_state))
		return;
	if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
		return;

	blocksize = sb->s_blocksize;
	blkoff = (inode->i_size + blocksize - 1) >> sb->s_blocksize_bits;
	nilfs_transaction_begin(sb, &ti, 0); /* never fails */

	block_truncate_page(inode->i_mapping, inode->i_size, nilfs_get_block);

	nilfs_truncate_bmap(ii, blkoff);

	inode->i_mtime = inode->i_ctime = CURRENT_TIME;
	if (IS_SYNC(inode))
		nilfs_set_transaction_flag(NILFS_TI_SYNC);

	nilfs_mark_inode_dirty(inode);
	nilfs_set_file_dirty(inode, 0);
	nilfs_transaction_commit(sb);
	/*
	 * May construct a logical segment and may fail in sync mode.
	 * But truncate has no return value.
	 */
}
static void nilfs_clear_inode(struct inode *inode)
{
	struct nilfs_inode_info *ii = NILFS_I(inode);
	struct nilfs_mdt_info *mdi = NILFS_MDT(inode);

	/*
	 * Free resources allocated in nilfs_read_inode(), here.
	 */
	BUG_ON(!list_empty(&ii->i_dirty));
	brelse(ii->i_bh);
	ii->i_bh = NULL;

	if (mdi && mdi->mi_palloc_cache)
		nilfs_palloc_destroy_cache(inode);

	if (test_bit(NILFS_I_BMAP, &ii->i_state))
		nilfs_bmap_clear(ii->i_bmap);

	nilfs_btnode_cache_clear(&ii->i_btnode_cache);

	if (ii->i_root && inode->i_ino == NILFS_ROOT_INO)
		nilfs_put_root(ii->i_root);
}
void nilfs_evict_inode(struct inode *inode)
{
	struct nilfs_transaction_info ti;
	struct super_block *sb = inode->i_sb;
	struct nilfs_inode_info *ii = NILFS_I(inode);
	int ret;

	if (inode->i_nlink || !ii->i_root || unlikely(is_bad_inode(inode))) {
		truncate_inode_pages_final(&inode->i_data);
		clear_inode(inode);
		nilfs_clear_inode(inode);
		return;
	}
	nilfs_transaction_begin(sb, &ti, 0); /* never fails */

	truncate_inode_pages_final(&inode->i_data);

	/* TODO: some of the following operations may fail. */
	nilfs_truncate_bmap(ii, 0);
	nilfs_mark_inode_dirty(inode);
	clear_inode(inode);

	ret = nilfs_ifile_delete_inode(ii->i_root->ifile, inode->i_ino);
	if (!ret)
		atomic64_dec(&ii->i_root->inodes_count);

	nilfs_clear_inode(inode);

	if (IS_SYNC(inode))
		nilfs_set_transaction_flag(NILFS_TI_SYNC);
	nilfs_transaction_commit(sb);
	/*
	 * May construct a logical segment and may fail in sync mode.
	 * But delete_inode has no return value.
	 */
}
int nilfs_setattr(struct dentry *dentry, struct iattr *iattr)
{
	struct nilfs_transaction_info ti;
	struct inode *inode = d_inode(dentry);
	struct super_block *sb = inode->i_sb;
	int err;

	err = inode_change_ok(inode, iattr);
	if (err)
		return err;

	err = nilfs_transaction_begin(sb, &ti, 0);
	if (unlikely(err))
		return err;

	if ((iattr->ia_valid & ATTR_SIZE) &&
	    iattr->ia_size != i_size_read(inode)) {
		inode_dio_wait(inode);
		truncate_setsize(inode, iattr->ia_size);
		nilfs_truncate(inode);
	}

	setattr_copy(inode, iattr);
	mark_inode_dirty(inode);

	if (iattr->ia_valid & ATTR_MODE) {
		err = nilfs_acl_chmod(inode);
		if (unlikely(err))
			goto out_err;
	}

	return nilfs_transaction_commit(sb);

out_err:
	nilfs_transaction_abort(sb);
	return err;
}
int nilfs_permission(struct inode *inode, int mask)
{
	struct nilfs_root *root = NILFS_I(inode)->i_root;

	if ((mask & MAY_WRITE) && root &&
	    root->cno != NILFS_CPTREE_CURRENT_CNO)
		return -EROFS; /* snapshot is not writable */

	return generic_permission(inode, mask);
}
int nilfs_load_inode_block(struct inode *inode, struct buffer_head **pbh)
{
	struct the_nilfs *nilfs = inode->i_sb->s_fs_info;
	struct nilfs_inode_info *ii = NILFS_I(inode);
	int err;

	spin_lock(&nilfs->ns_inode_lock);
	if (ii->i_bh == NULL) {
		spin_unlock(&nilfs->ns_inode_lock);
		err = nilfs_ifile_get_inode_block(ii->i_root->ifile,
						  inode->i_ino, pbh);
		if (unlikely(err))
			return err;
		spin_lock(&nilfs->ns_inode_lock);
		if (ii->i_bh == NULL)
			ii->i_bh = *pbh;
		else {
			brelse(*pbh);
			*pbh = ii->i_bh;
		}
	} else
		*pbh = ii->i_bh;

	get_bh(*pbh);
	spin_unlock(&nilfs->ns_inode_lock);
	return 0;
}
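
/*
 * The drop-lock/recheck sequence above is a double-checked cache fill:
 * the ifile block is read without ns_inode_lock held, and the recheck
 * decides whether this thread or a racing one installs its buffer_head
 * into ii->i_bh; the loser releases its extra reference with brelse().
 */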
int nilfs_inode_dirty(struct inode *inode)
{
	struct nilfs_inode_info *ii = NILFS_I(inode);
	struct the_nilfs *nilfs = inode->i_sb->s_fs_info;
	int ret = 0;

	if (!list_empty(&ii->i_dirty)) {
		spin_lock(&nilfs->ns_inode_lock);
		ret = test_bit(NILFS_I_DIRTY, &ii->i_state) ||
			test_bit(NILFS_I_BUSY, &ii->i_state);
		spin_unlock(&nilfs->ns_inode_lock);
	}
	return ret;
}
int nilfs_set_file_dirty(struct inode *inode, unsigned nr_dirty)
{
	struct nilfs_inode_info *ii = NILFS_I(inode);
	struct the_nilfs *nilfs = inode->i_sb->s_fs_info;

	atomic_add(nr_dirty, &nilfs->ns_ndirtyblks);

	if (test_and_set_bit(NILFS_I_DIRTY, &ii->i_state))
		return 0;

	spin_lock(&nilfs->ns_inode_lock);
	if (!test_bit(NILFS_I_QUEUED, &ii->i_state) &&
	    !test_bit(NILFS_I_BUSY, &ii->i_state)) {
		/*
		 * Because this routine may race with nilfs_dispose_list(),
		 * we have to check NILFS_I_QUEUED here, too.
		 */
		if (list_empty(&ii->i_dirty) && igrab(inode) == NULL) {
			/*
			 * This will happen when somebody is freeing
			 * this inode.
			 */
			nilfs_warning(inode->i_sb, __func__,
				      "cannot get inode (ino=%lu)\n",
				      inode->i_ino);
			spin_unlock(&nilfs->ns_inode_lock);
			return -EINVAL; /*
					 * NILFS_I_DIRTY may remain for
					 * freeing inode.
					 */
		}
		list_move_tail(&ii->i_dirty, &nilfs->ns_dirty_files);
		set_bit(NILFS_I_QUEUED, &ii->i_state);
	}
	spin_unlock(&nilfs->ns_inode_lock);
	return 0;
}
int __nilfs_mark_inode_dirty(struct inode *inode, int flags)
{
	struct buffer_head *ibh;
	int err;

	err = nilfs_load_inode_block(inode, &ibh);
	if (unlikely(err)) {
		nilfs_warning(inode->i_sb, __func__,
			      "failed to reget inode block.\n");
		return err;
	}
	nilfs_update_inode(inode, ibh, flags);
	mark_buffer_dirty(ibh);
	nilfs_mdt_mark_dirty(NILFS_I(inode)->i_root->ifile);
	brelse(ibh);
	return 0;
}
/**
 * nilfs_dirty_inode - reflect changes on given inode to an inode block.
 * @inode: inode of the file to be registered.
 *
 * nilfs_dirty_inode() loads an inode block containing the specified
 * @inode and copies data from a nilfs_inode to a corresponding inode
 * entry in the inode block. This operation is excluded from the segment
 * construction. This function can be called both as a single operation
 * and as a part of indivisible file operations.
 */
void nilfs_dirty_inode(struct inode *inode, int flags)
{
	struct nilfs_transaction_info ti;
	struct nilfs_mdt_info *mdi = NILFS_MDT(inode);

	if (is_bad_inode(inode)) {
		nilfs_warning(inode->i_sb, __func__,
			      "tried to mark bad_inode dirty. ignored.\n");
		dump_stack();
		return;
	}
	if (mdi) {
		nilfs_mdt_mark_dirty(inode);
		return;
	}
	nilfs_transaction_begin(inode->i_sb, &ti, 0);
	__nilfs_mark_inode_dirty(inode, flags);
	nilfs_transaction_commit(inode->i_sb); /* never fails */
}
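
/*
 * nilfs_fiemap() below walks the requested range in two kinds of steps:
 * committed runs found by nilfs_bmap_lookup_contig() and uncommitted
 * (delayed-allocation) runs found by nilfs_find_uncommitted_extent().
 * Physically contiguous runs are merged, and each completed extent is
 * reported through fiemap_fill_next_extent().
 */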
int nilfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
		 __u64 start, __u64 len)
{
	struct the_nilfs *nilfs = inode->i_sb->s_fs_info;
	__u64 logical = 0, phys = 0, size = 0;
	__u32 flags = 0;
	loff_t isize;
	sector_t blkoff, end_blkoff;
	sector_t delalloc_blkoff;
	unsigned long delalloc_blklen;
	unsigned int blkbits = inode->i_blkbits;
	int ret, n;

	ret = fiemap_check_flags(fieinfo, FIEMAP_FLAG_SYNC);
	if (ret)
		return ret;

	mutex_lock(&inode->i_mutex);

	isize = i_size_read(inode);

	blkoff = start >> blkbits;
	end_blkoff = (start + len - 1) >> blkbits;

	delalloc_blklen = nilfs_find_uncommitted_extent(inode, blkoff,
							&delalloc_blkoff);

	do {
		__u64 blkphy;
		unsigned int maxblocks;

		if (delalloc_blklen && blkoff == delalloc_blkoff) {
			if (size) {
				/* End of the current extent */
				ret = fiemap_fill_next_extent(
					fieinfo, logical, phys, size, flags);
				if (ret)
					break;
			}
			if (blkoff > end_blkoff)
				break;

			flags = FIEMAP_EXTENT_MERGED | FIEMAP_EXTENT_DELALLOC;
			logical = blkoff << blkbits;
			phys = 0;
			size = delalloc_blklen << blkbits;

			blkoff = delalloc_blkoff + delalloc_blklen;
			delalloc_blklen = nilfs_find_uncommitted_extent(
				inode, blkoff, &delalloc_blkoff);
			continue;
		}

		/*
		 * Limit the number of blocks that we look up so as
		 * not to get into the next delayed allocation extent.
		 */
		maxblocks = INT_MAX;
		if (delalloc_blklen)
			maxblocks = min_t(sector_t, delalloc_blkoff - blkoff,
					  maxblocks);
		blkphy = 0;

		down_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
		n = nilfs_bmap_lookup_contig(
			NILFS_I(inode)->i_bmap, blkoff, &blkphy, maxblocks);
		up_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);

		if (n < 0) {
			int past_eof;

			if (unlikely(n != -ENOENT))
				break; /* error */

			/* HOLE */
			blkoff++;
			past_eof = ((blkoff << blkbits) >= isize);

			if (size) {
				/* End of the current extent */
				if (past_eof)
					flags |= FIEMAP_EXTENT_LAST;

				ret = fiemap_fill_next_extent(
					fieinfo, logical, phys, size, flags);
				if (ret)
					break;
				size = 0;
			}
			if (blkoff > end_blkoff || past_eof)
				break;
		} else {
			if (size) {
				if (phys && blkphy << blkbits == phys + size) {
					/* The current extent goes on */
					size += n << blkbits;
				} else {
					/* Terminate the current extent */
					ret = fiemap_fill_next_extent(
						fieinfo, logical, phys, size,
						flags);
					if (ret || blkoff > end_blkoff)
						break;

					/* Start another extent */
					flags = FIEMAP_EXTENT_MERGED;
					logical = blkoff << blkbits;
					phys = blkphy << blkbits;
					size = n << blkbits;
				}
			} else {
				/* Start a new extent */
				flags = FIEMAP_EXTENT_MERGED;
				logical = blkoff << blkbits;
				phys = blkphy << blkbits;
				size = n << blkbits;
			}
			blkoff += n;
		}
	} while (true);

	/* If ret is 1 then we just hit the end of the extent array */
	if (ret == 1)
		ret = 0;

	mutex_unlock(&inode->i_mutex);
	return ret;
}