/*
 * fs/f2fs/inode.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/buffer_head.h>
#include <linux/writeback.h>

#include "f2fs.h"
#include "node.h"

#include <trace/events/f2fs.h>

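/* propagate the on-disk FS_*_FL bits into the generic VFS inode flags */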
void f2fs_set_inode_flags(struct inode *inode)
{
        unsigned int flags = F2FS_I(inode)->i_flags;
        unsigned int new_fl = 0;

        if (flags & FS_SYNC_FL)
                new_fl |= S_SYNC;
        if (flags & FS_APPEND_FL)
                new_fl |= S_APPEND;
        if (flags & FS_IMMUTABLE_FL)
                new_fl |= S_IMMUTABLE;
        if (flags & FS_NOATIME_FL)
                new_fl |= S_NOATIME;
        if (flags & FS_DIRSYNC_FL)
                new_fl |= S_DIRSYNC;
        inode_set_flags(inode, new_fl,
                        S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC);
}

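/*
 * Special inodes keep their device number in the first address slots:
 * i_addr[0] carries an old-style dev_t, i_addr[1] a new-style one.
 */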
static void __get_inode_rdev(struct inode *inode, struct f2fs_inode *ri)
{
        if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode) ||
                        S_ISFIFO(inode->i_mode) || S_ISSOCK(inode->i_mode)) {
                if (ri->i_addr[0])
                        inode->i_rdev = old_decode_dev(le32_to_cpu(ri->i_addr[0]));
                else
                        inode->i_rdev = new_decode_dev(le32_to_cpu(ri->i_addr[1]));
        }
}

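/* return true if the first data block (i_addr[0]) has already been written */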
static bool __written_first_block(struct f2fs_inode *ri)
{
        block_t addr = le32_to_cpu(ri->i_addr[0]);

        if (addr != NEW_ADDR && addr != NULL_ADDR)
                return true;
        return false;
}

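/* encode the device number back into the raw inode's address slots */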
static void __set_inode_rdev(struct inode *inode, struct f2fs_inode *ri)
{
        if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
                if (old_valid_dev(inode->i_rdev)) {
                        ri->i_addr[0] = cpu_to_le32(old_encode_dev(inode->i_rdev));
                        ri->i_addr[1] = 0;
                } else {
                        ri->i_addr[0] = 0;
                        ri->i_addr[1] = cpu_to_le32(new_encode_dev(inode->i_rdev));
                        ri->i_addr[2] = 0;
                }
        }
}

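/*
 * The inode is flagged as having no inline data, but the inline area may
 * still hold data; if any non-zero word is found, restore FI_DATA_EXIST.
 */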
static void __recover_inline_status(struct inode *inode, struct page *ipage)
{
        void *inline_data = inline_data_addr(ipage);
        __le32 *start = inline_data;
        __le32 *end = start + MAX_INLINE_DATA / sizeof(__le32);

        while (start < end) {
                if (*start++) {
                        f2fs_wait_on_page_writeback(ipage, NODE);

                        set_inode_flag(F2FS_I(inode), FI_DATA_EXIST);
                        set_raw_inline(F2FS_I(inode), F2FS_INODE(ipage));
                        set_page_dirty(ipage);
                        return;
                }
        }
}

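/* read the on-disk inode block and fill in the in-memory inode fields */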
static int do_read_inode(struct inode *inode)
{
        struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
        struct f2fs_inode_info *fi = F2FS_I(inode);
        struct page *node_page;
        struct f2fs_inode *ri;

        /* check that ino is within the valid nid range */
        if (check_nid_range(sbi, inode->i_ino)) {
                f2fs_msg(inode->i_sb, KERN_ERR, "bad inode number: %lu",
                                (unsigned long) inode->i_ino);
                return -EINVAL;
        }

        node_page = get_node_page(sbi, inode->i_ino);
        if (IS_ERR(node_page))
                return PTR_ERR(node_page);

        ri = F2FS_INODE(node_page);

        inode->i_mode = le16_to_cpu(ri->i_mode);
        i_uid_write(inode, le32_to_cpu(ri->i_uid));
        i_gid_write(inode, le32_to_cpu(ri->i_gid));
        set_nlink(inode, le32_to_cpu(ri->i_links));
        inode->i_size = le64_to_cpu(ri->i_size);
        inode->i_blocks = le64_to_cpu(ri->i_blocks);

        inode->i_atime.tv_sec = le64_to_cpu(ri->i_atime);
        inode->i_ctime.tv_sec = le64_to_cpu(ri->i_ctime);
        inode->i_mtime.tv_sec = le64_to_cpu(ri->i_mtime);
        inode->i_atime.tv_nsec = le32_to_cpu(ri->i_atime_nsec);
        inode->i_ctime.tv_nsec = le32_to_cpu(ri->i_ctime_nsec);
        inode->i_mtime.tv_nsec = le32_to_cpu(ri->i_mtime_nsec);
        inode->i_generation = le32_to_cpu(ri->i_generation);

        fi->i_current_depth = le32_to_cpu(ri->i_current_depth);
        fi->i_xattr_nid = le32_to_cpu(ri->i_xattr_nid);
        fi->i_flags = le32_to_cpu(ri->i_flags);
        fi->flags = 0;
        fi->i_advise = ri->i_advise;
        fi->i_pino = le32_to_cpu(ri->i_pino);
        fi->i_dir_level = ri->i_dir_level;

        f2fs_init_extent_tree(inode, &ri->i_ext);

        get_inline_info(fi, ri);

        /* check whether inline data exists */
        if (f2fs_has_inline_data(inode) && !f2fs_exist_data(inode))
                __recover_inline_status(inode, node_page);

        /* get rdev by using inline_info */
        __get_inode_rdev(inode, ri);

        if (__written_first_block(ri))
                set_inode_flag(F2FS_I(inode), FI_FIRST_BLOCK_WRITTEN);

        f2fs_put_page(node_page, 1);

        stat_inc_inline_xattr(inode);
        stat_inc_inline_inode(inode);
        stat_inc_inline_dir(inode);

        return 0;
}

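/*
 * Look up (or instantiate) the in-core inode for @ino and hook up the
 * inode, file and address_space operations that match its type.
 */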
struct inode *f2fs_iget(struct super_block *sb, unsigned long ino)
{
        struct f2fs_sb_info *sbi = F2FS_SB(sb);
        struct inode *inode;
        int ret = 0;

        inode = iget_locked(sb, ino);
        if (!inode)
                return ERR_PTR(-ENOMEM);

        if (!(inode->i_state & I_NEW)) {
                trace_f2fs_iget(inode);
                return inode;
        }
        if (ino == F2FS_NODE_INO(sbi) || ino == F2FS_META_INO(sbi))
                goto make_now;

        ret = do_read_inode(inode);
        if (ret)
                goto bad_inode;
make_now:
        if (ino == F2FS_NODE_INO(sbi)) {
                inode->i_mapping->a_ops = &f2fs_node_aops;
                mapping_set_gfp_mask(inode->i_mapping, GFP_F2FS_ZERO);
        } else if (ino == F2FS_META_INO(sbi)) {
                inode->i_mapping->a_ops = &f2fs_meta_aops;
                mapping_set_gfp_mask(inode->i_mapping, GFP_F2FS_ZERO);
        } else if (S_ISREG(inode->i_mode)) {
                inode->i_op = &f2fs_file_inode_operations;
                inode->i_fop = &f2fs_file_operations;
                inode->i_mapping->a_ops = &f2fs_dblock_aops;
        } else if (S_ISDIR(inode->i_mode)) {
                inode->i_op = &f2fs_dir_inode_operations;
                inode->i_fop = &f2fs_dir_operations;
                inode->i_mapping->a_ops = &f2fs_dblock_aops;
                mapping_set_gfp_mask(inode->i_mapping, GFP_F2FS_HIGH_ZERO);
        } else if (S_ISLNK(inode->i_mode)) {
                if (f2fs_encrypted_inode(inode))
                        inode->i_op = &f2fs_encrypted_symlink_inode_operations;
                else
                        inode->i_op = &f2fs_symlink_inode_operations;
                inode_nohighmem(inode);
                inode->i_mapping->a_ops = &f2fs_dblock_aops;
        } else if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode) ||
                        S_ISFIFO(inode->i_mode) || S_ISSOCK(inode->i_mode)) {
                inode->i_op = &f2fs_special_inode_operations;
                init_special_inode(inode, inode->i_mode, inode->i_rdev);
        } else {
                ret = -EIO;
                goto bad_inode;
        }
        unlock_new_inode(inode);
        trace_f2fs_iget(inode);
        return inode;

bad_inode:
        iget_failed(inode);
        trace_f2fs_iget_exit(inode, ret);
        return ERR_PTR(ret);
}

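/* copy the in-memory inode fields back into the raw inode on the node page */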
void update_inode(struct inode *inode, struct page *node_page)
{
        struct f2fs_inode *ri;

        f2fs_wait_on_page_writeback(node_page, NODE);

        ri = F2FS_INODE(node_page);

        ri->i_mode = cpu_to_le16(inode->i_mode);
        ri->i_advise = F2FS_I(inode)->i_advise;
        ri->i_uid = cpu_to_le32(i_uid_read(inode));
        ri->i_gid = cpu_to_le32(i_gid_read(inode));
        ri->i_links = cpu_to_le32(inode->i_nlink);
        ri->i_size = cpu_to_le64(i_size_read(inode));
        ri->i_blocks = cpu_to_le64(inode->i_blocks);

        if (F2FS_I(inode)->extent_tree)
                set_raw_extent(&F2FS_I(inode)->extent_tree->largest,
                                                        &ri->i_ext);
        else
                memset(&ri->i_ext, 0, sizeof(ri->i_ext));
        set_raw_inline(F2FS_I(inode), ri);

        ri->i_atime = cpu_to_le64(inode->i_atime.tv_sec);
        ri->i_ctime = cpu_to_le64(inode->i_ctime.tv_sec);
        ri->i_mtime = cpu_to_le64(inode->i_mtime.tv_sec);
        ri->i_atime_nsec = cpu_to_le32(inode->i_atime.tv_nsec);
        ri->i_ctime_nsec = cpu_to_le32(inode->i_ctime.tv_nsec);
        ri->i_mtime_nsec = cpu_to_le32(inode->i_mtime.tv_nsec);
        ri->i_current_depth = cpu_to_le32(F2FS_I(inode)->i_current_depth);
        ri->i_xattr_nid = cpu_to_le32(F2FS_I(inode)->i_xattr_nid);
        ri->i_flags = cpu_to_le32(F2FS_I(inode)->i_flags);
        ri->i_pino = cpu_to_le32(F2FS_I(inode)->i_pino);
        ri->i_generation = cpu_to_le32(inode->i_generation);
        ri->i_dir_level = F2FS_I(inode)->i_dir_level;

        __set_inode_rdev(inode, ri);
        set_cold_node(inode, node_page);
        set_page_dirty(node_page);

        clear_inode_flag(F2FS_I(inode), FI_DIRTY_INODE);
}

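/* grab the inode's node page and write the in-memory inode into it */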
void update_inode_page(struct inode *inode)
{
        struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
        struct page *node_page;
retry:
        node_page = get_node_page(sbi, inode->i_ino);
        if (IS_ERR(node_page)) {
                int err = PTR_ERR(node_page);
                if (err == -ENOMEM) {
                        cond_resched();
                        goto retry;
                } else if (err != -ENOENT) {
                        f2fs_stop_checkpoint(sbi);
                }
                return;
        }
        update_inode(inode, node_page);
        f2fs_put_page(node_page, 1);
}

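/* ->write_inode hook: flush a dirty inode to its node page during writeback */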
int f2fs_write_inode(struct inode *inode, struct writeback_control *wbc)
{
        struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

        if (inode->i_ino == F2FS_NODE_INO(sbi) ||
                        inode->i_ino == F2FS_META_INO(sbi))
                return 0;

        if (!is_inode_flag_set(F2FS_I(inode), FI_DIRTY_INODE))
                return 0;

        /*
         * We need to balance fs here to prevent producing dirty node pages
         * during the urgent cleaning time when we are running out of free
         * sections.
         */
        update_inode_page(inode);

        f2fs_balance_fs(sbi);
        return 0;
}

/*
 * Called at the last iput() if i_nlink is zero
 */
void f2fs_evict_inode(struct inode *inode)
{
        struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
        struct f2fs_inode_info *fi = F2FS_I(inode);
        nid_t xnid = fi->i_xattr_nid;
        int err = 0;

        /* any remaining atomic pages should be discarded */
        if (f2fs_is_atomic_file(inode))
                commit_inmem_pages(inode, true);

        trace_f2fs_evict_inode(inode);
        truncate_inode_pages_final(&inode->i_data);

        if (inode->i_ino == F2FS_NODE_INO(sbi) ||
                        inode->i_ino == F2FS_META_INO(sbi))
                goto out_clear;

        f2fs_bug_on(sbi, get_dirty_pages(inode));
        remove_dirty_dir_inode(inode);

        f2fs_destroy_extent_tree(inode);

        if (inode->i_nlink || is_bad_inode(inode))
                goto no_delete;

        sb_start_intwrite(inode->i_sb);
        set_inode_flag(fi, FI_NO_ALLOC);
        i_size_write(inode, 0);

        if (F2FS_HAS_BLOCKS(inode))
                err = f2fs_truncate(inode, true);

        if (!err) {
                f2fs_lock_op(sbi);
                err = remove_inode_page(inode);
                f2fs_unlock_op(sbi);
        }

        sb_end_intwrite(inode->i_sb);
no_delete:
        stat_dec_inline_xattr(inode);
        stat_dec_inline_dir(inode);
        stat_dec_inline_inode(inode);

        invalidate_mapping_pages(NODE_MAPPING(sbi), inode->i_ino, inode->i_ino);
        if (xnid)
                invalidate_mapping_pages(NODE_MAPPING(sbi), xnid, xnid);
        if (is_inode_flag_set(fi, FI_APPEND_WRITE))
                add_dirty_inode(sbi, inode->i_ino, APPEND_INO);
        if (is_inode_flag_set(fi, FI_UPDATE_WRITE))
                add_dirty_inode(sbi, inode->i_ino, UPDATE_INO);
        if (is_inode_flag_set(fi, FI_FREE_NID)) {
                if (err && err != -ENOENT)
                        alloc_nid_done(sbi, inode->i_ino);
                else
                        alloc_nid_failed(sbi, inode->i_ino);
                clear_inode_flag(fi, FI_FREE_NID);
        }

        if (err && err != -ENOENT) {
                if (!exist_written_data(sbi, inode->i_ino, ORPHAN_INO)) {
                        /*
                         * We get here because we failed to release the
                         * resources of this inode previously; remind the
                         * user to run fsck to fix it.
                         */
                        set_sbi_flag(sbi, SBI_NEED_FSCK);
                        f2fs_msg(sbi->sb, KERN_WARNING,
                                "inode (ino:%lu) resource leak, run fsck "
                                "to fix this issue!", inode->i_ino);
                }
        }
out_clear:
#ifdef CONFIG_F2FS_FS_ENCRYPTION
        if (fi->i_crypt_info)
                f2fs_free_encryption_info(inode, fi->i_crypt_info);
#endif
        clear_inode(inode);
}

/* caller should call f2fs_lock_op() */
void handle_failed_inode(struct inode *inode)
{
        struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
        int err = 0;

        clear_nlink(inode);
        make_bad_inode(inode);
        unlock_new_inode(inode);

        i_size_write(inode, 0);
        if (F2FS_HAS_BLOCKS(inode))
                err = f2fs_truncate(inode, false);

        if (!err)
                err = remove_inode_page(inode);

        /*
         * If we skipped truncate_node in remove_inode_page because we failed
         * earlier, it's better to find another way to release the resources
         * of this inode (e.g. valid block count, node block or nid). Here we
         * choose to add this inode to the orphan list, so that we can call
         * iput on it in the orphan recovery flow.
         *
         * Note: we should add the inode to the orphan list before
         * f2fs_unlock_op(), so that we do not lose this orphan if a
         * checkpoint is followed by a sudden power-off.
         */
        if (err && err != -ENOENT) {
                err = acquire_orphan_inode(sbi);
                if (!err)
                        add_orphan_inode(sbi, inode->i_ino);
        }

        set_inode_flag(F2FS_I(inode), FI_FREE_NID);
        f2fs_unlock_op(sbi);

        /* iput will drop the inode object */
        iput(inode);
}