/*
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/statfs.h>
#include <linux/buffer_head.h>
#include <linux/backing-dev.h>
#include <linux/kthread.h>
#include <linux/parser.h>
#include <linux/mount.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <linux/random.h>
#include <linux/exportfs.h>
#include <linux/blkdev.h>
#include <linux/quotaops.h>
#include <linux/f2fs_fs.h>
#include <linux/sysfs.h>
#include <linux/quota.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include "xattr.h"
#include "gc.h"
#include "trace.h"

#define CREATE_TRACE_POINTS
#include <trace/events/f2fs.h>

static struct kmem_cache *f2fs_inode_cachep;

#ifdef CONFIG_F2FS_FAULT_INJECTION

char *fault_name[FAULT_MAX] = {
	[FAULT_KMALLOC]		= "kmalloc",
	[FAULT_PAGE_ALLOC]	= "page alloc",
	[FAULT_ALLOC_NID]	= "alloc nid",
	[FAULT_ORPHAN]		= "orphan",
	[FAULT_BLOCK]		= "no more block",
	[FAULT_DIR_DEPTH]	= "too big dir depth",
	[FAULT_EVICT_INODE]	= "evict_inode fail",
	[FAULT_TRUNCATE]	= "truncate fail",
	[FAULT_IO]		= "IO error",
	[FAULT_CHECKPOINT]	= "checkpoint error",
};

static void f2fs_build_fault_attr(struct f2fs_sb_info *sbi,
						unsigned int rate)
{
	struct f2fs_fault_info *ffi = &sbi->fault_info;

	if (rate) {
		atomic_set(&ffi->inject_ops, 0);
		ffi->inject_rate = rate;
		ffi->inject_type = (1 << FAULT_MAX) - 1;
	} else {
		memset(ffi, 0, sizeof(struct f2fs_fault_info));
	}
}
#endif
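
/*
 * When fault_injection=<rate> is set, instrumented allocation and I/O
 * paths consult this table (see time_to_inject() in f2fs.h): roughly
 * one call in ffi->inject_rate is forced to fail, so that the error
 * handling of each FAULT_* site gets exercised.
 */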

/* f2fs-wide shrinker description */
static struct shrinker f2fs_shrinker_info = {
	.scan_objects = f2fs_shrink_scan,
	.count_objects = f2fs_shrink_count,
	.seeks = DEFAULT_SEEKS,
};

enum {
	Opt_gc_background,
	Opt_disable_roll_forward,
	Opt_norecovery,
	Opt_discard,
	Opt_nodiscard,
	Opt_noheap,
	Opt_heap,
	Opt_user_xattr,
	Opt_nouser_xattr,
	Opt_acl,
	Opt_noacl,
	Opt_active_logs,
	Opt_disable_ext_identify,
	Opt_inline_xattr,
	Opt_noinline_xattr,
	Opt_inline_data,
	Opt_inline_dentry,
	Opt_noinline_dentry,
	Opt_flush_merge,
	Opt_noflush_merge,
	Opt_nobarrier,
	Opt_fastboot,
	Opt_extent_cache,
	Opt_noextent_cache,
	Opt_noinline_data,
	Opt_data_flush,
	Opt_mode,
	Opt_io_size_bits,
	Opt_fault_injection,
	Opt_lazytime,
	Opt_nolazytime,
	Opt_quota,
	Opt_noquota,
	Opt_usrquota,
	Opt_grpquota,
	Opt_prjquota,
	Opt_usrjquota,
	Opt_grpjquota,
	Opt_prjjquota,
	Opt_offusrjquota,
	Opt_offgrpjquota,
	Opt_offprjjquota,
	Opt_jqfmt_vfsold,
	Opt_jqfmt_vfsv0,
	Opt_jqfmt_vfsv1,
	Opt_err,
};

static match_table_t f2fs_tokens = {
	{Opt_gc_background, "background_gc=%s"},
	{Opt_disable_roll_forward, "disable_roll_forward"},
	{Opt_norecovery, "norecovery"},
	{Opt_discard, "discard"},
	{Opt_nodiscard, "nodiscard"},
	{Opt_noheap, "no_heap"},
	{Opt_heap, "heap"},
	{Opt_user_xattr, "user_xattr"},
	{Opt_nouser_xattr, "nouser_xattr"},
	{Opt_acl, "acl"},
	{Opt_noacl, "noacl"},
	{Opt_active_logs, "active_logs=%u"},
	{Opt_disable_ext_identify, "disable_ext_identify"},
	{Opt_inline_xattr, "inline_xattr"},
	{Opt_noinline_xattr, "noinline_xattr"},
	{Opt_inline_data, "inline_data"},
	{Opt_inline_dentry, "inline_dentry"},
	{Opt_noinline_dentry, "noinline_dentry"},
	{Opt_flush_merge, "flush_merge"},
	{Opt_noflush_merge, "noflush_merge"},
	{Opt_nobarrier, "nobarrier"},
	{Opt_fastboot, "fastboot"},
	{Opt_extent_cache, "extent_cache"},
	{Opt_noextent_cache, "noextent_cache"},
	{Opt_noinline_data, "noinline_data"},
	{Opt_data_flush, "data_flush"},
	{Opt_mode, "mode=%s"},
	{Opt_io_size_bits, "io_bits=%u"},
	{Opt_fault_injection, "fault_injection=%u"},
	{Opt_lazytime, "lazytime"},
	{Opt_nolazytime, "nolazytime"},
	{Opt_quota, "quota"},
	{Opt_noquota, "noquota"},
	{Opt_usrquota, "usrquota"},
	{Opt_grpquota, "grpquota"},
	{Opt_prjquota, "prjquota"},
	{Opt_usrjquota, "usrjquota=%s"},
	{Opt_grpjquota, "grpjquota=%s"},
	{Opt_prjjquota, "prjjquota=%s"},
	{Opt_offusrjquota, "usrjquota="},
	{Opt_offgrpjquota, "grpjquota="},
	{Opt_offprjjquota, "prjjquota="},
	{Opt_jqfmt_vfsold, "jqfmt=vfsold"},
	{Opt_jqfmt_vfsv0, "jqfmt=vfsv0"},
	{Opt_jqfmt_vfsv1, "jqfmt=vfsv1"},
	{Opt_err, NULL},
};

void f2fs_msg(struct super_block *sb, const char *level, const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, fmt);
	vaf.fmt = fmt;
	vaf.va = &args;
	printk_ratelimited("%sF2FS-fs (%s): %pV\n", level, sb->s_id, &vaf);
	va_end(args);
}

static void init_once(void *foo)
{
	struct f2fs_inode_info *fi = (struct f2fs_inode_info *) foo;

	inode_init_once(&fi->vfs_inode);
}

#ifdef CONFIG_QUOTA
static const char * const quotatypes[] = INITQFNAMES;
#define QTYPE2NAME(t) (quotatypes[t])
static int f2fs_set_qf_name(struct super_block *sb, int qtype,
							substring_t *args)
{
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	char *qname;
	int ret = -EINVAL;

	if (sb_any_quota_loaded(sb) && !sbi->s_qf_names[qtype]) {
		f2fs_msg(sb, KERN_ERR,
			"Cannot change journaled "
			"quota options when quota turned on");
		return -EINVAL;
	}
	qname = match_strdup(args);
	if (!qname) {
		f2fs_msg(sb, KERN_ERR,
			"Not enough memory for storing quotafile name");
		return -EINVAL;
	}
	if (sbi->s_qf_names[qtype]) {
		if (strcmp(sbi->s_qf_names[qtype], qname) == 0)
			ret = 0;
		else
			f2fs_msg(sb, KERN_ERR,
				"%s quota file already specified",
				QTYPE2NAME(qtype));
		goto errout;
	}
	if (strchr(qname, '/')) {
		f2fs_msg(sb, KERN_ERR,
			"quotafile must be on filesystem root");
		goto errout;
	}
	sbi->s_qf_names[qtype] = qname;
	set_opt(sbi, QUOTA);
	return 0;
errout:
	kfree(qname);
	return ret;
}

static int f2fs_clear_qf_name(struct super_block *sb, int qtype)
{
	struct f2fs_sb_info *sbi = F2FS_SB(sb);

	if (sb_any_quota_loaded(sb) && sbi->s_qf_names[qtype]) {
		f2fs_msg(sb, KERN_ERR, "Cannot change journaled quota options"
			" when quota turned on");
		return -EINVAL;
	}
	kfree(sbi->s_qf_names[qtype]);
	sbi->s_qf_names[qtype] = NULL;
	return 0;
}

static int f2fs_check_quota_options(struct f2fs_sb_info *sbi)
{
	/*
	 * We do the test below only for project quotas. 'usrquota' and
	 * 'grpquota' mount options are allowed even without quota feature
	 * to support legacy quotas in quota files.
	 */
	if (test_opt(sbi, PRJQUOTA) && !f2fs_sb_has_project_quota(sbi->sb)) {
		f2fs_msg(sbi->sb, KERN_ERR, "Project quota feature not enabled. "
			 "Cannot enable project quota enforcement.");
		return -1;
	}
	if (sbi->s_qf_names[USRQUOTA] || sbi->s_qf_names[GRPQUOTA] ||
			sbi->s_qf_names[PRJQUOTA]) {
		if (test_opt(sbi, USRQUOTA) && sbi->s_qf_names[USRQUOTA])
			clear_opt(sbi, USRQUOTA);

		if (test_opt(sbi, GRPQUOTA) && sbi->s_qf_names[GRPQUOTA])
			clear_opt(sbi, GRPQUOTA);

		if (test_opt(sbi, PRJQUOTA) && sbi->s_qf_names[PRJQUOTA])
			clear_opt(sbi, PRJQUOTA);

		if (test_opt(sbi, GRPQUOTA) || test_opt(sbi, USRQUOTA) ||
				test_opt(sbi, PRJQUOTA)) {
			f2fs_msg(sbi->sb, KERN_ERR, "old and new quota "
					"format mixing");
			return -1;
		}

		if (!sbi->s_jquota_fmt) {
			f2fs_msg(sbi->sb, KERN_ERR, "journaled quota format "
					"not specified");
			return -1;
		}
	}
	return 0;
}
#endif
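
/*
 * Summary of the rules enforced above: old-style quota enforcement
 * (usrquota/grpquota/prjquota) and journaled quota files (usrjquota=,
 * grpjquota=, prjjquota=) are mutually exclusive for the same quota
 * type, and journaled quotas additionally require an explicit jqfmt=
 * format. Project quota also requires the on-disk feature flag.
 */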

static int parse_options(struct super_block *sb, char *options)
{
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	struct request_queue *q;
	substring_t args[MAX_OPT_ARGS];
	char *p, *name;
	int arg = 0;
#ifdef CONFIG_QUOTA
	int ret;
#endif

	if (!options)
		return 0;

	while ((p = strsep(&options, ",")) != NULL) {
		int token;

		if (!*p)
			continue;
		/*
		 * Initialize args struct so we know whether arg was
		 * found; some options take optional arguments.
		 */
		args[0].to = args[0].from = NULL;
		token = match_token(p, f2fs_tokens, args);

		switch (token) {
		case Opt_gc_background:
			name = match_strdup(&args[0]);

			if (!name)
				return -ENOMEM;
			if (strlen(name) == 2 && !strncmp(name, "on", 2)) {
				set_opt(sbi, BG_GC);
				clear_opt(sbi, FORCE_FG_GC);
			} else if (strlen(name) == 3 && !strncmp(name, "off", 3)) {
				clear_opt(sbi, BG_GC);
				clear_opt(sbi, FORCE_FG_GC);
			} else if (strlen(name) == 4 && !strncmp(name, "sync", 4)) {
				set_opt(sbi, BG_GC);
				set_opt(sbi, FORCE_FG_GC);
			} else {
				kfree(name);
				return -EINVAL;
			}
			kfree(name);
			break;
		case Opt_disable_roll_forward:
			set_opt(sbi, DISABLE_ROLL_FORWARD);
			break;
		case Opt_norecovery:
			/* this option mounts f2fs with ro */
			set_opt(sbi, DISABLE_ROLL_FORWARD);
			if (!f2fs_readonly(sb))
				return -EINVAL;
			break;
		case Opt_discard:
			q = bdev_get_queue(sb->s_bdev);
			if (blk_queue_discard(q)) {
				set_opt(sbi, DISCARD);
			} else if (!f2fs_sb_mounted_blkzoned(sb)) {
				f2fs_msg(sb, KERN_WARNING,
					"mounting with \"discard\" option, but "
					"the device does not support discard");
			}
			break;
		case Opt_nodiscard:
			if (f2fs_sb_mounted_blkzoned(sb)) {
				f2fs_msg(sb, KERN_WARNING,
					"discard is required for zoned block devices");
				return -EINVAL;
			}
			clear_opt(sbi, DISCARD);
			break;
		case Opt_noheap:
			set_opt(sbi, NOHEAP);
			break;
		case Opt_heap:
			clear_opt(sbi, NOHEAP);
			break;
#ifdef CONFIG_F2FS_FS_XATTR
		case Opt_user_xattr:
			set_opt(sbi, XATTR_USER);
			break;
		case Opt_nouser_xattr:
			clear_opt(sbi, XATTR_USER);
			break;
		case Opt_inline_xattr:
			set_opt(sbi, INLINE_XATTR);
			break;
		case Opt_noinline_xattr:
			clear_opt(sbi, INLINE_XATTR);
			break;
#else
		case Opt_user_xattr:
			f2fs_msg(sb, KERN_INFO,
				"user_xattr options not supported");
			break;
		case Opt_nouser_xattr:
			f2fs_msg(sb, KERN_INFO,
				"nouser_xattr options not supported");
			break;
		case Opt_inline_xattr:
			f2fs_msg(sb, KERN_INFO,
				"inline_xattr options not supported");
			break;
		case Opt_noinline_xattr:
			f2fs_msg(sb, KERN_INFO,
				"noinline_xattr options not supported");
			break;
#endif
#ifdef CONFIG_F2FS_FS_POSIX_ACL
		case Opt_acl:
			set_opt(sbi, POSIX_ACL);
			break;
		case Opt_noacl:
			clear_opt(sbi, POSIX_ACL);
			break;
#else
		case Opt_acl:
			f2fs_msg(sb, KERN_INFO, "acl options not supported");
			break;
		case Opt_noacl:
			f2fs_msg(sb, KERN_INFO, "noacl options not supported");
			break;
#endif
		case Opt_active_logs:
			if (args->from && match_int(args, &arg))
				return -EINVAL;
			if (arg != 2 && arg != 4 && arg != NR_CURSEG_TYPE)
				return -EINVAL;
			sbi->active_logs = arg;
			break;
		case Opt_disable_ext_identify:
			set_opt(sbi, DISABLE_EXT_IDENTIFY);
			break;
		case Opt_inline_data:
			set_opt(sbi, INLINE_DATA);
			break;
		case Opt_inline_dentry:
			set_opt(sbi, INLINE_DENTRY);
			break;
		case Opt_noinline_dentry:
			clear_opt(sbi, INLINE_DENTRY);
			break;
		case Opt_flush_merge:
			set_opt(sbi, FLUSH_MERGE);
			break;
		case Opt_noflush_merge:
			clear_opt(sbi, FLUSH_MERGE);
			break;
		case Opt_nobarrier:
			set_opt(sbi, NOBARRIER);
			break;
		case Opt_fastboot:
			set_opt(sbi, FASTBOOT);
			break;
		case Opt_extent_cache:
			set_opt(sbi, EXTENT_CACHE);
			break;
		case Opt_noextent_cache:
			clear_opt(sbi, EXTENT_CACHE);
			break;
		case Opt_noinline_data:
			clear_opt(sbi, INLINE_DATA);
			break;
		case Opt_data_flush:
			set_opt(sbi, DATA_FLUSH);
			break;
		case Opt_mode:
			name = match_strdup(&args[0]);

			if (!name)
				return -ENOMEM;
			if (strlen(name) == 8 &&
					!strncmp(name, "adaptive", 8)) {
				if (f2fs_sb_mounted_blkzoned(sb)) {
					f2fs_msg(sb, KERN_WARNING,
						 "adaptive mode is not allowed with "
						 "zoned block device feature");
					kfree(name);
					return -EINVAL;
				}
				set_opt_mode(sbi, F2FS_MOUNT_ADAPTIVE);
			} else if (strlen(name) == 3 &&
					!strncmp(name, "lfs", 3)) {
				set_opt_mode(sbi, F2FS_MOUNT_LFS);
			} else {
				kfree(name);
				return -EINVAL;
			}
			kfree(name);
			break;
		case Opt_io_size_bits:
			if (args->from && match_int(args, &arg))
				return -EINVAL;
			if (arg > __ilog2_u32(BIO_MAX_PAGES)) {
				f2fs_msg(sb, KERN_WARNING,
					"Not support %d, larger than %d",
					1 << arg, BIO_MAX_PAGES);
				return -EINVAL;
			}
			sbi->write_io_size_bits = arg;
			break;
		case Opt_fault_injection:
			if (args->from && match_int(args, &arg))
				return -EINVAL;
#ifdef CONFIG_F2FS_FAULT_INJECTION
			f2fs_build_fault_attr(sbi, arg);
			set_opt(sbi, FAULT_INJECTION);
#else
			f2fs_msg(sb, KERN_INFO,
				"FAULT_INJECTION was not selected");
#endif
			break;
		case Opt_lazytime:
			sb->s_flags |= MS_LAZYTIME;
			break;
		case Opt_nolazytime:
			sb->s_flags &= ~MS_LAZYTIME;
			break;
#ifdef CONFIG_QUOTA
		case Opt_quota:
		case Opt_usrquota:
			set_opt(sbi, USRQUOTA);
			break;
		case Opt_grpquota:
			set_opt(sbi, GRPQUOTA);
			break;
		case Opt_prjquota:
			set_opt(sbi, PRJQUOTA);
			break;
		case Opt_usrjquota:
			ret = f2fs_set_qf_name(sb, USRQUOTA, &args[0]);
			if (ret)
				return ret;
			break;
		case Opt_grpjquota:
			ret = f2fs_set_qf_name(sb, GRPQUOTA, &args[0]);
			if (ret)
				return ret;
			break;
		case Opt_prjjquota:
			ret = f2fs_set_qf_name(sb, PRJQUOTA, &args[0]);
			if (ret)
				return ret;
			break;
		case Opt_offusrjquota:
			ret = f2fs_clear_qf_name(sb, USRQUOTA);
			if (ret)
				return ret;
			break;
		case Opt_offgrpjquota:
			ret = f2fs_clear_qf_name(sb, GRPQUOTA);
			if (ret)
				return ret;
			break;
		case Opt_offprjjquota:
			ret = f2fs_clear_qf_name(sb, PRJQUOTA);
			if (ret)
				return ret;
			break;
		case Opt_jqfmt_vfsold:
			sbi->s_jquota_fmt = QFMT_VFS_OLD;
			break;
		case Opt_jqfmt_vfsv0:
			sbi->s_jquota_fmt = QFMT_VFS_V0;
			break;
		case Opt_jqfmt_vfsv1:
			sbi->s_jquota_fmt = QFMT_VFS_V1;
			break;
		case Opt_noquota:
			clear_opt(sbi, QUOTA);
			clear_opt(sbi, USRQUOTA);
			clear_opt(sbi, GRPQUOTA);
			clear_opt(sbi, PRJQUOTA);
			break;
#else
		case Opt_quota:
		case Opt_usrquota:
		case Opt_grpquota:
		case Opt_prjquota:
		case Opt_usrjquota:
		case Opt_grpjquota:
		case Opt_prjjquota:
		case Opt_offusrjquota:
		case Opt_offgrpjquota:
		case Opt_offprjjquota:
		case Opt_jqfmt_vfsold:
		case Opt_jqfmt_vfsv0:
		case Opt_jqfmt_vfsv1:
		case Opt_noquota:
			f2fs_msg(sb, KERN_INFO,
					"quota operations not supported");
			break;
#endif
		default:
			f2fs_msg(sb, KERN_ERR,
				"Unrecognized mount option \"%s\" or missing value",
				p);
			return -EINVAL;
		}
	}
#ifdef CONFIG_QUOTA
	if (f2fs_check_quota_options(sbi))
		return -EINVAL;
#endif

	if (F2FS_IO_SIZE_BITS(sbi) && !test_opt(sbi, LFS)) {
		f2fs_msg(sb, KERN_ERR,
				"Should set mode=lfs with %uKB-sized IO",
				F2FS_IO_SIZE_KB(sbi));
		return -EINVAL;
	}
	return 0;
}
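
/*
 * Illustrative example: a mount line such as
 *	mount -t f2fs -o background_gc=sync,discard,active_logs=6 /dev/sdb1 /mnt
 * walks through the parser above one comma-separated token at a time;
 * any unrecognized option or malformed value fails the mount with
 * -EINVAL.
 */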

static struct inode *f2fs_alloc_inode(struct super_block *sb)
{
	struct f2fs_inode_info *fi;

	fi = kmem_cache_alloc(f2fs_inode_cachep, GFP_F2FS_ZERO);
	if (!fi)
		return NULL;

	init_once((void *) fi);

	/* Initialize f2fs-specific inode info */
	fi->vfs_inode.i_version = 1;
	atomic_set(&fi->dirty_pages, 0);
	fi->i_current_depth = 1;
	init_rwsem(&fi->i_sem);
	INIT_LIST_HEAD(&fi->dirty_list);
	INIT_LIST_HEAD(&fi->gdirty_list);
	INIT_LIST_HEAD(&fi->inmem_pages);
	mutex_init(&fi->inmem_lock);
	init_rwsem(&fi->dio_rwsem[READ]);
	init_rwsem(&fi->dio_rwsem[WRITE]);
	init_rwsem(&fi->i_mmap_sem);
	init_rwsem(&fi->i_xattr_sem);
#ifdef CONFIG_QUOTA
	memset(&fi->i_dquot, 0, sizeof(fi->i_dquot));
	fi->i_reserved_quota = 0;
#endif
	/* Will be used by directory only */
	fi->i_dir_level = F2FS_SB(sb)->dir_level;

	return &fi->vfs_inode;
}

static int f2fs_drop_inode(struct inode *inode)
{
	int ret;

	/*
	 * This is to avoid a deadlock condition like below.
	 * writeback_single_inode(inode)
	 *  - f2fs_write_data_page
	 *    - f2fs_gc -> iput -> evict
	 *       - inode_wait_for_writeback(inode)
	 */
	if ((!inode_unhashed(inode) && inode->i_state & I_SYNC)) {
		if (!inode->i_nlink && !is_bad_inode(inode)) {
			/* to avoid evict_inode call simultaneously */
			atomic_inc(&inode->i_count);
			spin_unlock(&inode->i_lock);

			/* any remaining atomic pages should be discarded */
			if (f2fs_is_atomic_file(inode))
				drop_inmem_pages(inode);

			/* fi->extent_tree should remain for writepage */
			f2fs_destroy_extent_node(inode);

			sb_start_intwrite(inode->i_sb);
			f2fs_i_size_write(inode, 0);

			if (F2FS_HAS_BLOCKS(inode))
				f2fs_truncate(inode);

			sb_end_intwrite(inode->i_sb);

			fscrypt_put_encryption_info(inode, NULL);
			spin_lock(&inode->i_lock);
			atomic_dec(&inode->i_count);
		}
		trace_f2fs_drop_inode(inode, 0);
		return 0;
	}
	ret = generic_drop_inode(inode);
	trace_f2fs_drop_inode(inode, ret);
	return ret;
}
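
/*
 * Note: returning 0 from ->drop_inode tells iput_final() to keep the
 * inode cached, while returning generic_drop_inode()'s result lets the
 * VFS decide normally. The early-return path above therefore keeps an
 * inode that is still under writeback (I_SYNC) alive instead of
 * recursing into eviction while the flusher holds it.
 */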

int f2fs_inode_dirtied(struct inode *inode, bool sync)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	int ret = 0;

	spin_lock(&sbi->inode_lock[DIRTY_META]);
	if (is_inode_flag_set(inode, FI_DIRTY_INODE)) {
		ret = 1;
	} else {
		set_inode_flag(inode, FI_DIRTY_INODE);
		stat_inc_dirty_inode(sbi, DIRTY_META);
	}
	if (sync && list_empty(&F2FS_I(inode)->gdirty_list)) {
		list_add_tail(&F2FS_I(inode)->gdirty_list,
				&sbi->inode_list[DIRTY_META]);
		inc_page_count(sbi, F2FS_DIRTY_IMETA);
	}
	spin_unlock(&sbi->inode_lock[DIRTY_META]);
	return ret;
}

void f2fs_inode_synced(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

	spin_lock(&sbi->inode_lock[DIRTY_META]);
	if (!is_inode_flag_set(inode, FI_DIRTY_INODE)) {
		spin_unlock(&sbi->inode_lock[DIRTY_META]);
		return;
	}
	if (!list_empty(&F2FS_I(inode)->gdirty_list)) {
		list_del_init(&F2FS_I(inode)->gdirty_list);
		dec_page_count(sbi, F2FS_DIRTY_IMETA);
	}
	clear_inode_flag(inode, FI_DIRTY_INODE);
	clear_inode_flag(inode, FI_AUTO_RECOVER);
	stat_dec_dirty_inode(F2FS_I_SB(inode), DIRTY_META);
	spin_unlock(&sbi->inode_lock[DIRTY_META]);
}

/*
 * f2fs_dirty_inode() is called from __mark_inode_dirty().
 *
 * We should mark the inode dirty here so that it gets written out
 * later through write_inode.
 */
static void f2fs_dirty_inode(struct inode *inode, int flags)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

	if (inode->i_ino == F2FS_NODE_INO(sbi) ||
			inode->i_ino == F2FS_META_INO(sbi))
		return;

	if (flags == I_DIRTY_TIME)
		return;

	if (is_inode_flag_set(inode, FI_AUTO_RECOVER))
		clear_inode_flag(inode, FI_AUTO_RECOVER);

	f2fs_inode_dirtied(inode, false);
}

static void f2fs_i_callback(struct rcu_head *head)
{
	struct inode *inode = container_of(head, struct inode, i_rcu);

	kmem_cache_free(f2fs_inode_cachep, F2FS_I(inode));
}

static void f2fs_destroy_inode(struct inode *inode)
{
	call_rcu(&inode->i_rcu, f2fs_i_callback);
}

static void destroy_percpu_info(struct f2fs_sb_info *sbi)
{
	percpu_counter_destroy(&sbi->alloc_valid_block_count);
	percpu_counter_destroy(&sbi->total_valid_inode_count);
}

static void destroy_device_list(struct f2fs_sb_info *sbi)
{
	int i;

	for (i = 0; i < sbi->s_ndevs; i++) {
		blkdev_put(FDEV(i).bdev, FMODE_EXCL);
#ifdef CONFIG_BLK_DEV_ZONED
		kfree(FDEV(i).blkz_type);
#endif
	}
	kfree(sbi->devs);
}

static void f2fs_put_super(struct super_block *sb)
{
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	int i;

	f2fs_quota_off_umount(sb);

	/* prevent remaining shrinker jobs */
	mutex_lock(&sbi->umount_mutex);

	/*
	 * We don't need to do a checkpoint when the superblock is clean.
	 * But if the previous checkpoint was not done by a clean umount,
	 * we need to do a clean checkpoint again.
	 */
	if (is_sbi_flag_set(sbi, SBI_IS_DIRTY) ||
			!is_set_ckpt_flags(sbi, CP_UMOUNT_FLAG)) {
		struct cp_control cpc = {
			.reason = CP_UMOUNT,
		};
		write_checkpoint(sbi, &cpc);
	}

	/* be sure to wait for any on-going discard commands */
	f2fs_wait_discard_bios(sbi);

	if (f2fs_discard_en(sbi) && !sbi->discard_blks) {
		struct cp_control cpc = {
			.reason = CP_UMOUNT | CP_TRIMMED,
		};
		write_checkpoint(sbi, &cpc);
	}

	/* write_checkpoint can update stat information */
	f2fs_destroy_stats(sbi);

	/*
	 * Normally the superblock is clean, so we need to release the ino
	 * entries here. In addition, the EIO path skips the checkpoint, so
	 * we need this as well.
	 */
	release_ino_entry(sbi, true);

	f2fs_leave_shrinker(sbi);
	mutex_unlock(&sbi->umount_mutex);

	/* in our cp_error case, we can wait for any writeback pages */
	f2fs_flush_merged_writes(sbi);

	iput(sbi->node_inode);
	iput(sbi->meta_inode);

	/* destroy f2fs internal modules */
	destroy_node_manager(sbi);
	destroy_segment_manager(sbi);

	kfree(sbi->ckpt);

	f2fs_unregister_sysfs(sbi);

	sb->s_fs_info = NULL;
	if (sbi->s_chksum_driver)
		crypto_free_shash(sbi->s_chksum_driver);
	kfree(sbi->raw_super);

	destroy_device_list(sbi);
	mempool_destroy(sbi->write_io_dummy);
#ifdef CONFIG_QUOTA
	for (i = 0; i < MAXQUOTAS; i++)
		kfree(sbi->s_qf_names[i]);
#endif
	destroy_percpu_info(sbi);
	for (i = 0; i < NR_PAGE_TYPE; i++)
		kfree(sbi->write_io[i]);
	kfree(sbi);
}

int f2fs_sync_fs(struct super_block *sb, int sync)
{
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	int err = 0;

	trace_f2fs_sync_fs(sb, sync);

	if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
		return -EAGAIN;

	if (sync) {
		struct cp_control cpc;

		cpc.reason = __get_cp_reason(sbi);

		mutex_lock(&sbi->gc_mutex);
		err = write_checkpoint(sbi, &cpc);
		mutex_unlock(&sbi->gc_mutex);
	}
	f2fs_trace_ios(NULL, 1);

	return err;
}

static int f2fs_freeze(struct super_block *sb)
{
	if (f2fs_readonly(sb))
		return 0;

	/* IO error happened before */
	if (unlikely(f2fs_cp_error(F2FS_SB(sb))))
		return -EIO;

	/* must be clean, since sync_filesystem() was already called */
	if (is_sbi_flag_set(F2FS_SB(sb), SBI_IS_DIRTY))
		return -EINVAL;
	return 0;
}

static int f2fs_unfreeze(struct super_block *sb)
{
	return 0;
}

#ifdef CONFIG_QUOTA
static int f2fs_statfs_project(struct super_block *sb,
				kprojid_t projid, struct kstatfs *buf)
{
	struct kqid qid;
	struct dquot *dquot;
	u64 limit;
	u64 curblock;

	qid = make_kqid_projid(projid);
	dquot = dqget(sb, qid);
	if (IS_ERR(dquot))
		return PTR_ERR(dquot);
	spin_lock(&dq_data_lock);

	limit = (dquot->dq_dqb.dqb_bsoftlimit ?
		 dquot->dq_dqb.dqb_bsoftlimit :
		 dquot->dq_dqb.dqb_bhardlimit) >> sb->s_blocksize_bits;
	if (limit && buf->f_blocks > limit) {
		curblock = dquot->dq_dqb.dqb_curspace >> sb->s_blocksize_bits;
		buf->f_blocks = limit;
		buf->f_bfree = buf->f_bavail =
			(buf->f_blocks > curblock) ?
			 (buf->f_blocks - curblock) : 0;
	}

	limit = dquot->dq_dqb.dqb_isoftlimit ?
		dquot->dq_dqb.dqb_isoftlimit :
		dquot->dq_dqb.dqb_ihardlimit;
	if (limit && buf->f_files > limit) {
		buf->f_files = limit;
		buf->f_ffree =
			(buf->f_files > dquot->dq_dqb.dqb_curinodes) ?
			 (buf->f_files - dquot->dq_dqb.dqb_curinodes) : 0;
	}

	spin_unlock(&dq_data_lock);
	dqput(dquot);
	return 0;
}
#endif
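
/*
 * Note: when both limits are set, the clamping above deliberately
 * prefers the soft limit over the hard limit, mirroring what other
 * filesystems (e.g. ext4) report through statfs for project quotas.
 */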

static int f2fs_statfs(struct dentry *dentry, struct kstatfs *buf)
{
	struct super_block *sb = dentry->d_sb;
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	u64 id = huge_encode_dev(sb->s_bdev->bd_dev);
	block_t total_count, user_block_count, start_count, ovp_count;
	u64 avail_node_count;

	total_count = le64_to_cpu(sbi->raw_super->block_count);
	user_block_count = sbi->user_block_count;
	start_count = le32_to_cpu(sbi->raw_super->segment0_blkaddr);
	ovp_count = SM_I(sbi)->ovp_segments << sbi->log_blocks_per_seg;
	buf->f_type = F2FS_SUPER_MAGIC;
	buf->f_bsize = sbi->blocksize;

	buf->f_blocks = total_count - start_count;
	buf->f_bfree = user_block_count - valid_user_blocks(sbi) + ovp_count;
	buf->f_bavail = user_block_count - valid_user_blocks(sbi) -
						sbi->reserved_blocks;

	avail_node_count = sbi->total_node_count - F2FS_RESERVED_NODE_NUM;

	if (avail_node_count > user_block_count) {
		buf->f_files = user_block_count;
		buf->f_ffree = buf->f_bavail;
	} else {
		buf->f_files = avail_node_count;
		buf->f_ffree = min(avail_node_count - valid_node_count(sbi),
					buf->f_bavail);
	}

	buf->f_namelen = F2FS_NAME_LEN;
	buf->f_fsid.val[0] = (u32)id;
	buf->f_fsid.val[1] = (u32)(id >> 32);

#ifdef CONFIG_QUOTA
	if (is_inode_flag_set(dentry->d_inode, FI_PROJ_INHERIT) &&
			sb_has_quota_limits_enabled(sb, PRJQUOTA)) {
		f2fs_statfs_project(sb, F2FS_I(dentry->d_inode)->i_projid, buf);
	}
#endif
	return 0;
}

static inline void f2fs_show_quota_options(struct seq_file *seq,
					   struct super_block *sb)
{
#ifdef CONFIG_QUOTA
	struct f2fs_sb_info *sbi = F2FS_SB(sb);

	if (sbi->s_jquota_fmt) {
		char *fmtname = "";

		switch (sbi->s_jquota_fmt) {
		case QFMT_VFS_OLD:
			fmtname = "vfsold";
			break;
		case QFMT_VFS_V0:
			fmtname = "vfsv0";
			break;
		case QFMT_VFS_V1:
			fmtname = "vfsv1";
			break;
		}
		seq_printf(seq, ",jqfmt=%s", fmtname);
	}

	if (sbi->s_qf_names[USRQUOTA])
		seq_show_option(seq, "usrjquota", sbi->s_qf_names[USRQUOTA]);

	if (sbi->s_qf_names[GRPQUOTA])
		seq_show_option(seq, "grpjquota", sbi->s_qf_names[GRPQUOTA]);

	if (sbi->s_qf_names[PRJQUOTA])
		seq_show_option(seq, "prjjquota", sbi->s_qf_names[PRJQUOTA]);
#endif
}

static int f2fs_show_options(struct seq_file *seq, struct dentry *root)
{
	struct f2fs_sb_info *sbi = F2FS_SB(root->d_sb);

	if (!f2fs_readonly(sbi->sb) && test_opt(sbi, BG_GC)) {
		if (test_opt(sbi, FORCE_FG_GC))
			seq_printf(seq, ",background_gc=%s", "sync");
		else
			seq_printf(seq, ",background_gc=%s", "on");
	} else {
		seq_printf(seq, ",background_gc=%s", "off");
	}
	if (test_opt(sbi, DISABLE_ROLL_FORWARD))
		seq_puts(seq, ",disable_roll_forward");
	if (test_opt(sbi, DISCARD))
		seq_puts(seq, ",discard");
	if (test_opt(sbi, NOHEAP))
		seq_puts(seq, ",no_heap");
	else
		seq_puts(seq, ",heap");
#ifdef CONFIG_F2FS_FS_XATTR
	if (test_opt(sbi, XATTR_USER))
		seq_puts(seq, ",user_xattr");
	else
		seq_puts(seq, ",nouser_xattr");
	if (test_opt(sbi, INLINE_XATTR))
		seq_puts(seq, ",inline_xattr");
	else
		seq_puts(seq, ",noinline_xattr");
#endif
#ifdef CONFIG_F2FS_FS_POSIX_ACL
	if (test_opt(sbi, POSIX_ACL))
		seq_puts(seq, ",acl");
	else
		seq_puts(seq, ",noacl");
#endif
	if (test_opt(sbi, DISABLE_EXT_IDENTIFY))
		seq_puts(seq, ",disable_ext_identify");
	if (test_opt(sbi, INLINE_DATA))
		seq_puts(seq, ",inline_data");
	else
		seq_puts(seq, ",noinline_data");
	if (test_opt(sbi, INLINE_DENTRY))
		seq_puts(seq, ",inline_dentry");
	else
		seq_puts(seq, ",noinline_dentry");
	if (!f2fs_readonly(sbi->sb) && test_opt(sbi, FLUSH_MERGE))
		seq_puts(seq, ",flush_merge");
	if (test_opt(sbi, NOBARRIER))
		seq_puts(seq, ",nobarrier");
	if (test_opt(sbi, FASTBOOT))
		seq_puts(seq, ",fastboot");
	if (test_opt(sbi, EXTENT_CACHE))
		seq_puts(seq, ",extent_cache");
	else
		seq_puts(seq, ",noextent_cache");
	if (test_opt(sbi, DATA_FLUSH))
		seq_puts(seq, ",data_flush");

	seq_puts(seq, ",mode=");
	if (test_opt(sbi, ADAPTIVE))
		seq_puts(seq, "adaptive");
	else if (test_opt(sbi, LFS))
		seq_puts(seq, "lfs");
	seq_printf(seq, ",active_logs=%u", sbi->active_logs);
	if (F2FS_IO_SIZE_BITS(sbi))
		seq_printf(seq, ",io_size=%uKB", F2FS_IO_SIZE_KB(sbi));
#ifdef CONFIG_F2FS_FAULT_INJECTION
	if (test_opt(sbi, FAULT_INJECTION))
		seq_printf(seq, ",fault_injection=%u",
				sbi->fault_info.inject_rate);
#endif
#ifdef CONFIG_QUOTA
	if (test_opt(sbi, QUOTA))
		seq_puts(seq, ",quota");
	if (test_opt(sbi, USRQUOTA))
		seq_puts(seq, ",usrquota");
	if (test_opt(sbi, GRPQUOTA))
		seq_puts(seq, ",grpquota");
	if (test_opt(sbi, PRJQUOTA))
		seq_puts(seq, ",prjquota");
#endif
	f2fs_show_quota_options(seq, sbi->sb);

	return 0;
}

static void default_options(struct f2fs_sb_info *sbi)
{
	/* init some FS parameters */
	sbi->active_logs = NR_CURSEG_TYPE;

	set_opt(sbi, BG_GC);
	set_opt(sbi, INLINE_XATTR);
	set_opt(sbi, INLINE_DATA);
	set_opt(sbi, INLINE_DENTRY);
	set_opt(sbi, EXTENT_CACHE);
	set_opt(sbi, NOHEAP);
	sbi->sb->s_flags |= MS_LAZYTIME;
	set_opt(sbi, FLUSH_MERGE);
	if (f2fs_sb_mounted_blkzoned(sbi->sb)) {
		set_opt_mode(sbi, F2FS_MOUNT_LFS);
		set_opt(sbi, DISCARD);
	} else {
		set_opt_mode(sbi, F2FS_MOUNT_ADAPTIVE);
	}

#ifdef CONFIG_F2FS_FS_XATTR
	set_opt(sbi, XATTR_USER);
#endif
#ifdef CONFIG_F2FS_FS_POSIX_ACL
	set_opt(sbi, POSIX_ACL);
#endif

#ifdef CONFIG_F2FS_FAULT_INJECTION
	f2fs_build_fault_attr(sbi, 0);
#endif
}

static int f2fs_remount(struct super_block *sb, int *flags, char *data)
{
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	struct f2fs_mount_info org_mount_opt;
	unsigned long old_sb_flags;
	int err, active_logs;
	bool need_restart_gc = false;
	bool need_stop_gc = false;
	bool no_extent_cache = !test_opt(sbi, EXTENT_CACHE);
#ifdef CONFIG_F2FS_FAULT_INJECTION
	struct f2fs_fault_info ffi = sbi->fault_info;
#endif
#ifdef CONFIG_QUOTA
	int s_jquota_fmt;
	char *s_qf_names[MAXQUOTAS];
	int i, j;
#endif

	/*
	 * Save the old mount options in case we
	 * need to restore them.
	 */
	org_mount_opt = sbi->mount_opt;
	old_sb_flags = sb->s_flags;
	active_logs = sbi->active_logs;

#ifdef CONFIG_QUOTA
	s_jquota_fmt = sbi->s_jquota_fmt;
	for (i = 0; i < MAXQUOTAS; i++) {
		if (sbi->s_qf_names[i]) {
			s_qf_names[i] = kstrdup(sbi->s_qf_names[i],
							 GFP_KERNEL);
			if (!s_qf_names[i]) {
				for (j = 0; j < i; j++)
					kfree(s_qf_names[j]);
				return -ENOMEM;
			}
		} else {
			s_qf_names[i] = NULL;
		}
	}
#endif

	/* recover superblocks we couldn't write due to previous RO mount */
	if (!(*flags & MS_RDONLY) && is_sbi_flag_set(sbi, SBI_NEED_SB_WRITE)) {
		err = f2fs_commit_super(sbi, false);
		f2fs_msg(sb, KERN_INFO,
			"Try to recover all the superblocks, ret: %d", err);
		if (!err)
			clear_sbi_flag(sbi, SBI_NEED_SB_WRITE);
	}

	default_options(sbi);

	/* parse mount options */
	err = parse_options(sb, data);
	if (err)
		goto restore_opts;

	/*
	 * Previous and new state of the filesystem is RO,
	 * so skip checking GC and FLUSH_MERGE conditions.
	 */
	if (f2fs_readonly(sb) && (*flags & MS_RDONLY))
		goto skip;

	if (!f2fs_readonly(sb) && (*flags & MS_RDONLY)) {
		err = dquot_suspend(sb, -1);
		if (err < 0)
			goto restore_opts;
	} else {
		/* dquot_resume needs RW */
		sb->s_flags &= ~MS_RDONLY;
		dquot_resume(sb, -1);
	}

	/* disallow enable/disable extent_cache dynamically */
	if (no_extent_cache == !!test_opt(sbi, EXTENT_CACHE)) {
		err = -EINVAL;
		f2fs_msg(sbi->sb, KERN_WARNING,
				"switching the extent_cache option is not allowed");
		goto restore_opts;
	}

	/*
	 * We stop the GC thread if FS is mounted as RO
	 * or if background_gc=off is passed in the mount
	 * option. Also sync the filesystem.
	 */
	if ((*flags & MS_RDONLY) || !test_opt(sbi, BG_GC)) {
		if (sbi->gc_thread) {
			stop_gc_thread(sbi);
			need_restart_gc = true;
		}
	} else if (!sbi->gc_thread) {
		err = start_gc_thread(sbi);
		if (err)
			goto restore_opts;
		need_stop_gc = true;
	}

	if (*flags & MS_RDONLY) {
		writeback_inodes_sb(sb, WB_REASON_SYNC);
		sync_inodes_sb(sb);

		set_sbi_flag(sbi, SBI_IS_DIRTY);
		set_sbi_flag(sbi, SBI_IS_CLOSE);
		f2fs_sync_fs(sb, 1);
		clear_sbi_flag(sbi, SBI_IS_CLOSE);
	}

	/*
	 * We stop the issue_flush thread if FS is mounted as RO
	 * or if flush_merge is not passed in the mount option.
	 */
	if ((*flags & MS_RDONLY) || !test_opt(sbi, FLUSH_MERGE)) {
		clear_opt(sbi, FLUSH_MERGE);
		destroy_flush_cmd_control(sbi, false);
	} else {
		err = create_flush_cmd_control(sbi);
		if (err)
			goto restore_gc;
	}
skip:
#ifdef CONFIG_QUOTA
	/* Release old quota file names */
	for (i = 0; i < MAXQUOTAS; i++)
		kfree(s_qf_names[i]);
#endif
	/* Update the POSIXACL Flag */
	sb->s_flags = (sb->s_flags & ~MS_POSIXACL) |
		(test_opt(sbi, POSIX_ACL) ? MS_POSIXACL : 0);

	return 0;
restore_gc:
	if (need_restart_gc) {
		if (start_gc_thread(sbi))
			f2fs_msg(sbi->sb, KERN_WARNING,
				"background gc thread has stopped");
	} else if (need_stop_gc) {
		stop_gc_thread(sbi);
	}
restore_opts:
#ifdef CONFIG_QUOTA
	sbi->s_jquota_fmt = s_jquota_fmt;
	for (i = 0; i < MAXQUOTAS; i++) {
		kfree(sbi->s_qf_names[i]);
		sbi->s_qf_names[i] = s_qf_names[i];
	}
#endif
	sbi->mount_opt = org_mount_opt;
	sbi->active_logs = active_logs;
	sb->s_flags = old_sb_flags;
#ifdef CONFIG_F2FS_FAULT_INJECTION
	sbi->fault_info = ffi;
#endif
	return err;
}
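
/*
 * Note: the restore_gc/restore_opts paths above make remount
 * all-or-nothing: on any failure the saved mount options, superblock
 * flags, active_logs, journaled quota file names and (if enabled)
 * fault injection state are all rolled back to their pre-remount
 * values.
 */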

#ifdef CONFIG_QUOTA
/* Read data from quotafile */
static ssize_t f2fs_quota_read(struct super_block *sb, int type, char *data,
			       size_t len, loff_t off)
{
	struct inode *inode = sb_dqopt(sb)->files[type];
	struct address_space *mapping = inode->i_mapping;
	block_t blkidx = F2FS_BYTES_TO_BLK(off);
	int offset = off & (sb->s_blocksize - 1);
	int tocopy;
	size_t toread;
	loff_t i_size = i_size_read(inode);
	struct page *page;
	char *kaddr;

	if (off > i_size)
		return 0;

	if (off + len > i_size)
		len = i_size - off;
	toread = len;
	while (toread > 0) {
		tocopy = min_t(unsigned long, sb->s_blocksize - offset, toread);
repeat:
		page = read_mapping_page(mapping, blkidx, NULL);
		if (IS_ERR(page))
			return PTR_ERR(page);

		lock_page(page);

		if (unlikely(page->mapping != mapping)) {
			f2fs_put_page(page, 1);
			goto repeat;
		}
		if (unlikely(!PageUptodate(page))) {
			f2fs_put_page(page, 1);
			return -EIO;
		}

		kaddr = kmap_atomic(page);
		memcpy(data, kaddr + offset, tocopy);
		kunmap_atomic(kaddr);
		f2fs_put_page(page, 1);

		offset = 0;
		toread -= tocopy;
		data += tocopy;
		blkidx++;
	}
	return len;
}

/* Write to quotafile */
static ssize_t f2fs_quota_write(struct super_block *sb, int type,
				const char *data, size_t len, loff_t off)
{
	struct inode *inode = sb_dqopt(sb)->files[type];
	struct address_space *mapping = inode->i_mapping;
	const struct address_space_operations *a_ops = mapping->a_ops;
	int offset = off & (sb->s_blocksize - 1);
	size_t towrite = len;
	struct page *page;
	char *kaddr;
	int err = 0;
	int tocopy;

	while (towrite > 0) {
		tocopy = min_t(unsigned long, sb->s_blocksize - offset,
								towrite);

		err = a_ops->write_begin(NULL, mapping, off, tocopy, 0,
							&page, NULL);
		if (unlikely(err))
			break;

		kaddr = kmap_atomic(page);
		memcpy(kaddr + offset, data, tocopy);
		kunmap_atomic(kaddr);
		flush_dcache_page(page);

		a_ops->write_end(NULL, mapping, off, tocopy, tocopy,
						page, NULL);
		offset = 0;
		towrite -= tocopy;
		off += tocopy;
		data += tocopy;
		cond_resched();
	}

	if (len == towrite)
		return err;
	inode->i_mtime = inode->i_ctime = current_time(inode);
	f2fs_mark_inode_dirty_sync(inode, false);
	return len - towrite;
}
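
/*
 * Note: quota file writes go through the regular ->write_begin/
 * ->write_end address-space path above, so they are checkpointed like
 * any other data page; a short write returns the number of bytes
 * actually copied (len - towrite).
 */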

static struct dquot **f2fs_get_dquots(struct inode *inode)
{
	return F2FS_I(inode)->i_dquot;
}

static qsize_t *f2fs_get_reserved_space(struct inode *inode)
{
	return &F2FS_I(inode)->i_reserved_quota;
}

static int f2fs_quota_on_mount(struct f2fs_sb_info *sbi, int type)
{
	return dquot_quota_on_mount(sbi->sb, sbi->s_qf_names[type],
					sbi->s_jquota_fmt, type);
}

void f2fs_enable_quota_files(struct f2fs_sb_info *sbi)
{
	int i, ret;

	for (i = 0; i < MAXQUOTAS; i++) {
		if (sbi->s_qf_names[i]) {
			ret = f2fs_quota_on_mount(sbi, i);
			if (ret < 0)
				f2fs_msg(sbi->sb, KERN_ERR,
					"Cannot turn on journaled "
					"quota: error %d", ret);
		}
	}
}

static int f2fs_quota_sync(struct super_block *sb, int type)
{
	struct quota_info *dqopt = sb_dqopt(sb);
	int cnt;
	int ret;

	ret = dquot_writeback_dquots(sb, type);
	if (ret)
		return ret;

	/*
	 * Now when everything is written we can discard the pagecache so
	 * that userspace sees the changes.
	 */
	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		if (type != -1 && cnt != type)
			continue;
		if (!sb_has_quota_active(sb, cnt))
			continue;

		ret = filemap_write_and_wait(dqopt->files[cnt]->i_mapping);
		if (ret)
			return ret;

		inode_lock(dqopt->files[cnt]);
		truncate_inode_pages(&dqopt->files[cnt]->i_data, 0);
		inode_unlock(dqopt->files[cnt]);
	}
	return 0;
}

static int f2fs_quota_on(struct super_block *sb, int type, int format_id,
							const struct path *path)
{
	struct inode *inode;
	int err;

	err = f2fs_quota_sync(sb, type);
	if (err)
		return err;

	err = dquot_quota_on(sb, type, format_id, path);
	if (err)
		return err;

	inode = d_inode(path->dentry);

	inode_lock(inode);
	F2FS_I(inode)->i_flags |= FS_NOATIME_FL | FS_IMMUTABLE_FL;
	inode_set_flags(inode, S_NOATIME | S_IMMUTABLE,
					S_NOATIME | S_IMMUTABLE);
	inode_unlock(inode);
	f2fs_mark_inode_dirty_sync(inode, false);

	return 0;
}

static int f2fs_quota_off(struct super_block *sb, int type)
{
	struct inode *inode = sb_dqopt(sb)->files[type];
	int err;

	if (!inode || !igrab(inode))
		return dquot_quota_off(sb, type);

	f2fs_quota_sync(sb, type);

	err = dquot_quota_off(sb, type);
	if (err)
		goto out_put;

	inode_lock(inode);
	F2FS_I(inode)->i_flags &= ~(FS_NOATIME_FL | FS_IMMUTABLE_FL);
	inode_set_flags(inode, 0, S_NOATIME | S_IMMUTABLE);
	inode_unlock(inode);
	f2fs_mark_inode_dirty_sync(inode, false);
out_put:
	iput(inode);
	return err;
}

void f2fs_quota_off_umount(struct super_block *sb)
{
	int type;

	for (type = 0; type < MAXQUOTAS; type++)
		f2fs_quota_off(sb, type);
}

int f2fs_get_projid(struct inode *inode, kprojid_t *projid)
{
	*projid = F2FS_I(inode)->i_projid;
	return 0;
}

static const struct dquot_operations f2fs_quota_operations = {
	.get_reserved_space = f2fs_get_reserved_space,
	.write_dquot	= dquot_commit,
	.acquire_dquot	= dquot_acquire,
	.release_dquot	= dquot_release,
	.mark_dirty	= dquot_mark_dquot_dirty,
	.write_info	= dquot_commit_info,
	.alloc_dquot	= dquot_alloc,
	.destroy_dquot	= dquot_destroy,
	.get_projid	= f2fs_get_projid,
	.get_next_id	= dquot_get_next_id,
};

static const struct quotactl_ops f2fs_quotactl_ops = {
	.quota_on	= f2fs_quota_on,
	.quota_off	= f2fs_quota_off,
	.quota_sync	= f2fs_quota_sync,
	.get_state	= dquot_get_state,
	.set_info	= dquot_set_dqinfo,
	.get_dqblk	= dquot_get_dqblk,
	.set_dqblk	= dquot_set_dqblk,
	.get_nextdqblk	= dquot_get_next_dqblk,
};
#else
void f2fs_quota_off_umount(struct super_block *sb)
{
}
#endif

static const struct super_operations f2fs_sops = {
	.alloc_inode	= f2fs_alloc_inode,
	.drop_inode	= f2fs_drop_inode,
	.destroy_inode	= f2fs_destroy_inode,
	.write_inode	= f2fs_write_inode,
	.dirty_inode	= f2fs_dirty_inode,
	.show_options	= f2fs_show_options,
#ifdef CONFIG_QUOTA
	.quota_read	= f2fs_quota_read,
	.quota_write	= f2fs_quota_write,
	.get_dquots	= f2fs_get_dquots,
#endif
	.evict_inode	= f2fs_evict_inode,
	.put_super	= f2fs_put_super,
	.sync_fs	= f2fs_sync_fs,
	.freeze_fs	= f2fs_freeze,
	.unfreeze_fs	= f2fs_unfreeze,
	.statfs		= f2fs_statfs,
	.remount_fs	= f2fs_remount,
};

#ifdef CONFIG_F2FS_FS_ENCRYPTION
static int f2fs_get_context(struct inode *inode, void *ctx, size_t len)
{
	return f2fs_getxattr(inode, F2FS_XATTR_INDEX_ENCRYPTION,
				F2FS_XATTR_NAME_ENCRYPTION_CONTEXT,
				ctx, len, NULL);
}

static int f2fs_set_context(struct inode *inode, const void *ctx, size_t len,
							void *fs_data)
{
	return f2fs_setxattr(inode, F2FS_XATTR_INDEX_ENCRYPTION,
				F2FS_XATTR_NAME_ENCRYPTION_CONTEXT,
				ctx, len, fs_data, XATTR_CREATE);
}

static unsigned f2fs_max_namelen(struct inode *inode)
{
	return S_ISLNK(inode->i_mode) ?
			inode->i_sb->s_blocksize : F2FS_NAME_LEN;
}

static const struct fscrypt_operations f2fs_cryptops = {
	.key_prefix	= "f2fs:",
	.get_context	= f2fs_get_context,
	.set_context	= f2fs_set_context,
	.empty_dir	= f2fs_empty_dir,
	.max_namelen	= f2fs_max_namelen,
};
#else
static const struct fscrypt_operations f2fs_cryptops = {
	.key_prefix	= "f2fs:",
};
#endif

static struct inode *f2fs_nfs_get_inode(struct super_block *sb,
		u64 ino, u32 generation)
{
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	struct inode *inode;

	if (check_nid_range(sbi, ino))
		return ERR_PTR(-ESTALE);

	/*
	 * f2fs_iget isn't quite right if the inode is currently unallocated!
	 * However f2fs_iget currently does appropriate checks to handle stale
	 * inodes so everything is OK.
	 */
	inode = f2fs_iget(sb, ino);
	if (IS_ERR(inode))
		return ERR_CAST(inode);
	if (unlikely(generation && inode->i_generation != generation)) {
		/* we didn't find the right inode.. */
		iput(inode);
		return ERR_PTR(-ESTALE);
	}
	return inode;
}

static struct dentry *f2fs_fh_to_dentry(struct super_block *sb, struct fid *fid,
		int fh_len, int fh_type)
{
	return generic_fh_to_dentry(sb, fid, fh_len, fh_type,
				    f2fs_nfs_get_inode);
}

static struct dentry *f2fs_fh_to_parent(struct super_block *sb, struct fid *fid,
		int fh_len, int fh_type)
{
	return generic_fh_to_parent(sb, fid, fh_len, fh_type,
				    f2fs_nfs_get_inode);
}

static const struct export_operations f2fs_export_ops = {
	.fh_to_dentry = f2fs_fh_to_dentry,
	.fh_to_parent = f2fs_fh_to_parent,
	.get_parent = f2fs_get_parent,
};

static loff_t max_file_blocks(void)
{
	loff_t result = 0;
	loff_t leaf_count = ADDRS_PER_BLOCK;

	/*
	 * note: previously, result was equal to (DEF_ADDRS_PER_INODE -
	 * F2FS_INLINE_XATTR_ADDRS), but now f2fs tries to reserve more
	 * space in inode.i_addr, so it is safer to leave result at zero
	 * and count only the blocks mapped through node blocks.
	 */

	/* two direct node blocks */
	result += (leaf_count * 2);

	/* two indirect node blocks */
	leaf_count *= NIDS_PER_BLOCK;
	result += (leaf_count * 2);

	/* one double indirect node block */
	leaf_count *= NIDS_PER_BLOCK;
	result += leaf_count;

	return result;
}
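
/*
 * Worked example, assuming the usual 4KB block geometry where
 * ADDRS_PER_BLOCK == NIDS_PER_BLOCK == 1018:
 *
 *	2 * 1018 + 2 * 1018^2 + 1018^3 ~= 1.057e9 blocks
 *
 * i.e. roughly 3.9TiB of file data addressable through node blocks.
 */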

static int __f2fs_commit_super(struct buffer_head *bh,
			struct f2fs_super_block *super)
{
	lock_buffer(bh);
	if (super)
		memcpy(bh->b_data + F2FS_SUPER_OFFSET, super, sizeof(*super));
	set_buffer_uptodate(bh);
	set_buffer_dirty(bh);
	unlock_buffer(bh);

	/* it's a rare case, so we can do FUA all the time */
	return __sync_dirty_buffer(bh, REQ_SYNC | REQ_PREFLUSH | REQ_FUA);
}

static inline bool sanity_check_area_boundary(struct f2fs_sb_info *sbi,
					struct buffer_head *bh)
{
	struct f2fs_super_block *raw_super = (struct f2fs_super_block *)
					(bh->b_data + F2FS_SUPER_OFFSET);
	struct super_block *sb = sbi->sb;
	u32 segment0_blkaddr = le32_to_cpu(raw_super->segment0_blkaddr);
	u32 cp_blkaddr = le32_to_cpu(raw_super->cp_blkaddr);
	u32 sit_blkaddr = le32_to_cpu(raw_super->sit_blkaddr);
	u32 nat_blkaddr = le32_to_cpu(raw_super->nat_blkaddr);
	u32 ssa_blkaddr = le32_to_cpu(raw_super->ssa_blkaddr);
	u32 main_blkaddr = le32_to_cpu(raw_super->main_blkaddr);
	u32 segment_count_ckpt = le32_to_cpu(raw_super->segment_count_ckpt);
	u32 segment_count_sit = le32_to_cpu(raw_super->segment_count_sit);
	u32 segment_count_nat = le32_to_cpu(raw_super->segment_count_nat);
	u32 segment_count_ssa = le32_to_cpu(raw_super->segment_count_ssa);
	u32 segment_count_main = le32_to_cpu(raw_super->segment_count_main);
	u32 segment_count = le32_to_cpu(raw_super->segment_count);
	u32 log_blocks_per_seg = le32_to_cpu(raw_super->log_blocks_per_seg);
	u64 main_end_blkaddr = main_blkaddr +
				(segment_count_main << log_blocks_per_seg);
	u64 seg_end_blkaddr = segment0_blkaddr +
				(segment_count << log_blocks_per_seg);

	if (segment0_blkaddr != cp_blkaddr) {
		f2fs_msg(sb, KERN_INFO,
			"Mismatch start address, segment0(%u) cp_blkaddr(%u)",
			segment0_blkaddr, cp_blkaddr);
		return true;
	}

	if (cp_blkaddr + (segment_count_ckpt << log_blocks_per_seg) !=
							sit_blkaddr) {
		f2fs_msg(sb, KERN_INFO,
			"Wrong CP boundary, start(%u) end(%u) blocks(%u)",
			cp_blkaddr, sit_blkaddr,
			segment_count_ckpt << log_blocks_per_seg);
		return true;
	}

	if (sit_blkaddr + (segment_count_sit << log_blocks_per_seg) !=
							nat_blkaddr) {
		f2fs_msg(sb, KERN_INFO,
			"Wrong SIT boundary, start(%u) end(%u) blocks(%u)",
			sit_blkaddr, nat_blkaddr,
			segment_count_sit << log_blocks_per_seg);
		return true;
	}

	if (nat_blkaddr + (segment_count_nat << log_blocks_per_seg) !=
							ssa_blkaddr) {
		f2fs_msg(sb, KERN_INFO,
			"Wrong NAT boundary, start(%u) end(%u) blocks(%u)",
			nat_blkaddr, ssa_blkaddr,
			segment_count_nat << log_blocks_per_seg);
		return true;
	}

	if (ssa_blkaddr + (segment_count_ssa << log_blocks_per_seg) !=
							main_blkaddr) {
		f2fs_msg(sb, KERN_INFO,
			"Wrong SSA boundary, start(%u) end(%u) blocks(%u)",
			ssa_blkaddr, main_blkaddr,
			segment_count_ssa << log_blocks_per_seg);
		return true;
	}

	if (main_end_blkaddr > seg_end_blkaddr) {
		f2fs_msg(sb, KERN_INFO,
			"Wrong MAIN_AREA boundary, start(%u) end(%u) block(%u)",
			main_blkaddr,
			segment0_blkaddr +
				(segment_count << log_blocks_per_seg),
			segment_count_main << log_blocks_per_seg);
		return true;
	} else if (main_end_blkaddr < seg_end_blkaddr) {
		int err = 0;
		char *res;

		/* fix in-memory information all the time */
		raw_super->segment_count = cpu_to_le32((main_end_blkaddr -
				segment0_blkaddr) >> log_blocks_per_seg);

		if (f2fs_readonly(sb) || bdev_read_only(sb->s_bdev)) {
			set_sbi_flag(sbi, SBI_NEED_SB_WRITE);
			res = "internally";
		} else {
			err = __f2fs_commit_super(bh, NULL);
			res = err ? "failed" : "done";
		}
		f2fs_msg(sb, KERN_INFO,
			"Fix alignment : %s, start(%u) end(%u) block(%u)",
			res, main_blkaddr,
			segment0_blkaddr +
				(segment_count << log_blocks_per_seg),
			segment_count_main << log_blocks_per_seg);
		if (err)
			return true;
	}
	return false;
}
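
/*
 * The checks above encode the fixed on-disk layout
 *
 *	SB | CP | SIT | NAT | SSA | MAIN
 *
 * Each boundary test verifies that one region ends exactly where the
 * next begins, and the final pair of tests catches a MAIN area that
 * overruns (fatal) or underruns (fixable) the total segment count.
 */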

static int sanity_check_raw_super(struct f2fs_sb_info *sbi,
				struct buffer_head *bh)
{
	struct f2fs_super_block *raw_super = (struct f2fs_super_block *)
					(bh->b_data + F2FS_SUPER_OFFSET);
	struct super_block *sb = sbi->sb;
	unsigned int blocksize;

	if (F2FS_SUPER_MAGIC != le32_to_cpu(raw_super->magic)) {
		f2fs_msg(sb, KERN_INFO,
			"Magic Mismatch, valid(0x%x) - read(0x%x)",
			F2FS_SUPER_MAGIC, le32_to_cpu(raw_super->magic));
		return 1;
	}

	/* Currently, support only 4KB page cache size */
	if (F2FS_BLKSIZE != PAGE_SIZE) {
		f2fs_msg(sb, KERN_INFO,
			"Invalid page_cache_size (%lu), supports only 4KB",
			PAGE_SIZE);
		return 1;
	}

	/* Currently, support only 4KB block size */
	blocksize = 1 << le32_to_cpu(raw_super->log_blocksize);
	if (blocksize != F2FS_BLKSIZE) {
		f2fs_msg(sb, KERN_INFO,
			"Invalid blocksize (%u), supports only 4KB",
			blocksize);
		return 1;
	}

	/* check log blocks per segment */
	if (le32_to_cpu(raw_super->log_blocks_per_seg) != 9) {
		f2fs_msg(sb, KERN_INFO,
			"Invalid log blocks per segment (%u)",
			le32_to_cpu(raw_super->log_blocks_per_seg));
		return 1;
	}

	/* Currently, support 512/1024/2048/4096 bytes sector size */
	if (le32_to_cpu(raw_super->log_sectorsize) >
				F2FS_MAX_LOG_SECTOR_SIZE ||
		le32_to_cpu(raw_super->log_sectorsize) <
				F2FS_MIN_LOG_SECTOR_SIZE) {
		f2fs_msg(sb, KERN_INFO, "Invalid log sectorsize (%u)",
			le32_to_cpu(raw_super->log_sectorsize));
		return 1;
	}
	if (le32_to_cpu(raw_super->log_sectors_per_block) +
		le32_to_cpu(raw_super->log_sectorsize) !=
			F2FS_MAX_LOG_SECTOR_SIZE) {
		f2fs_msg(sb, KERN_INFO,
			"Invalid log sectors per block(%u) log sectorsize(%u)",
			le32_to_cpu(raw_super->log_sectors_per_block),
			le32_to_cpu(raw_super->log_sectorsize));
		return 1;
	}

	/* check reserved ino info */
	if (le32_to_cpu(raw_super->node_ino) != 1 ||
		le32_to_cpu(raw_super->meta_ino) != 2 ||
		le32_to_cpu(raw_super->root_ino) != 3) {
		f2fs_msg(sb, KERN_INFO,
			"Invalid Fs Meta Ino: node(%u) meta(%u) root(%u)",
			le32_to_cpu(raw_super->node_ino),
			le32_to_cpu(raw_super->meta_ino),
			le32_to_cpu(raw_super->root_ino));
		return 1;
	}

	if (le32_to_cpu(raw_super->segment_count) > F2FS_MAX_SEGMENT) {
		f2fs_msg(sb, KERN_INFO,
			"Invalid segment count (%u)",
			le32_to_cpu(raw_super->segment_count));
		return 1;
	}

	/* check CP/SIT/NAT/SSA/MAIN_AREA area boundary */
	if (sanity_check_area_boundary(sbi, bh))
		return 1;

	return 0;
}

int sanity_check_ckpt(struct f2fs_sb_info *sbi)
{
	unsigned int total, fsmeta;
	struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi);
	struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
	unsigned int ovp_segments, reserved_segments;
	unsigned int main_segs, blocks_per_seg;
	int i;

	total = le32_to_cpu(raw_super->segment_count);
	fsmeta = le32_to_cpu(raw_super->segment_count_ckpt);
	fsmeta += le32_to_cpu(raw_super->segment_count_sit);
	fsmeta += le32_to_cpu(raw_super->segment_count_nat);
	fsmeta += le32_to_cpu(ckpt->rsvd_segment_count);
	fsmeta += le32_to_cpu(raw_super->segment_count_ssa);

	if (unlikely(fsmeta >= total))
		return 1;

	ovp_segments = le32_to_cpu(ckpt->overprov_segment_count);
	reserved_segments = le32_to_cpu(ckpt->rsvd_segment_count);

	if (unlikely(fsmeta < F2FS_MIN_SEGMENTS ||
			ovp_segments == 0 || reserved_segments == 0)) {
		f2fs_msg(sbi->sb, KERN_ERR,
			"Wrong layout: check mkfs.f2fs version");
		return 1;
	}

	main_segs = le32_to_cpu(raw_super->segment_count_main);
	blocks_per_seg = sbi->blocks_per_seg;

	for (i = 0; i < NR_CURSEG_NODE_TYPE; i++) {
		if (le32_to_cpu(ckpt->cur_node_segno[i]) >= main_segs ||
			le16_to_cpu(ckpt->cur_node_blkoff[i]) >= blocks_per_seg)
			return 1;
	}
	for (i = 0; i < NR_CURSEG_DATA_TYPE; i++) {
		if (le32_to_cpu(ckpt->cur_data_segno[i]) >= main_segs ||
			le16_to_cpu(ckpt->cur_data_blkoff[i]) >= blocks_per_seg)
			return 1;
	}

	if (unlikely(f2fs_cp_error(sbi))) {
		f2fs_msg(sbi->sb, KERN_ERR, "A bug case: need to run fsck");
		return 1;
	}
	return 0;
}

static void init_sb_info(struct f2fs_sb_info *sbi)
{
	struct f2fs_super_block *raw_super = sbi->raw_super;
	int i, j;

	sbi->log_sectors_per_block =
		le32_to_cpu(raw_super->log_sectors_per_block);
	sbi->log_blocksize = le32_to_cpu(raw_super->log_blocksize);
	sbi->blocksize = 1 << sbi->log_blocksize;
	sbi->log_blocks_per_seg = le32_to_cpu(raw_super->log_blocks_per_seg);
	sbi->blocks_per_seg = 1 << sbi->log_blocks_per_seg;
	sbi->segs_per_sec = le32_to_cpu(raw_super->segs_per_sec);
	sbi->secs_per_zone = le32_to_cpu(raw_super->secs_per_zone);
	sbi->total_sections = le32_to_cpu(raw_super->section_count);
	sbi->total_node_count =
		(le32_to_cpu(raw_super->segment_count_nat) / 2)
			* sbi->blocks_per_seg * NAT_ENTRY_PER_BLOCK;
	sbi->root_ino_num = le32_to_cpu(raw_super->root_ino);
	sbi->node_ino_num = le32_to_cpu(raw_super->node_ino);
	sbi->meta_ino_num = le32_to_cpu(raw_super->meta_ino);
	sbi->cur_victim_sec = NULL_SECNO;
	sbi->max_victim_search = DEF_MAX_VICTIM_SEARCH;

	sbi->dir_level = DEF_DIR_LEVEL;
	sbi->interval_time[CP_TIME] = DEF_CP_INTERVAL;
	sbi->interval_time[REQ_TIME] = DEF_IDLE_INTERVAL;
	clear_sbi_flag(sbi, SBI_NEED_FSCK);

	for (i = 0; i < NR_COUNT_TYPE; i++)
		atomic_set(&sbi->nr_pages[i], 0);

	atomic_set(&sbi->wb_sync_req, 0);

	INIT_LIST_HEAD(&sbi->s_list);
	mutex_init(&sbi->umount_mutex);
	for (i = 0; i < NR_PAGE_TYPE - 1; i++)
		for (j = HOT; j < NR_TEMP_TYPE; j++)
			mutex_init(&sbi->wio_mutex[i][j]);
	spin_lock_init(&sbi->cp_lock);
}

static int init_percpu_info(struct f2fs_sb_info *sbi)
{
	int err;

	err = percpu_counter_init(&sbi->alloc_valid_block_count, 0, GFP_KERNEL);
	if (err)
		return err;

	return percpu_counter_init(&sbi->total_valid_inode_count, 0,
								GFP_KERNEL);
}

#ifdef CONFIG_BLK_DEV_ZONED
static int init_blkz_info(struct f2fs_sb_info *sbi, int devi)
{
	struct block_device *bdev = FDEV(devi).bdev;
	sector_t nr_sectors = bdev->bd_part->nr_sects;
	sector_t sector = 0;
	struct blk_zone *zones;
	unsigned int i, nr_zones;
	unsigned int n = 0;
	int err = -EIO;

	if (!f2fs_sb_mounted_blkzoned(sbi->sb))
		return 0;

	if (sbi->blocks_per_blkz && sbi->blocks_per_blkz !=
				SECTOR_TO_BLOCK(bdev_zone_sectors(bdev)))
		return -EINVAL;
	sbi->blocks_per_blkz = SECTOR_TO_BLOCK(bdev_zone_sectors(bdev));
	if (sbi->log_blocks_per_blkz && sbi->log_blocks_per_blkz !=
				__ilog2_u32(sbi->blocks_per_blkz))
		return -EINVAL;
	sbi->log_blocks_per_blkz = __ilog2_u32(sbi->blocks_per_blkz);
	FDEV(devi).nr_blkz = SECTOR_TO_BLOCK(nr_sectors) >>
					sbi->log_blocks_per_blkz;
	if (nr_sectors & (bdev_zone_sectors(bdev) - 1))
		FDEV(devi).nr_blkz++;

	FDEV(devi).blkz_type = kmalloc(FDEV(devi).nr_blkz, GFP_KERNEL);
	if (!FDEV(devi).blkz_type)
		return -ENOMEM;

#define F2FS_REPORT_NR_ZONES	4096

	zones = kcalloc(F2FS_REPORT_NR_ZONES, sizeof(struct blk_zone),
			GFP_KERNEL);
	if (!zones)
		return -ENOMEM;

	/* Get block zones type */
	while (zones && sector < nr_sectors) {

		nr_zones = F2FS_REPORT_NR_ZONES;
		err = blkdev_report_zones(bdev, sector,
					  zones, &nr_zones,
					  GFP_KERNEL);
		if (err)
			break;
		if (!nr_zones) {
			err = -EIO;
			break;
		}

		for (i = 0; i < nr_zones; i++) {
			FDEV(devi).blkz_type[n] = zones[i].type;
			sector += zones[i].len;
			n++;
		}
	}

	kfree(zones);

	return err;
}
#endif

/*
 * Read the f2fs raw super block.
 * Because we have two copies of the super block, read both of them
 * to get the first valid one. If either of them is broken, we pass
 * the recovery flag back to the caller.
 */
static int read_raw_super_block(struct f2fs_sb_info *sbi,
			struct f2fs_super_block **raw_super,
			int *valid_super_block, int *recovery)
{
	struct super_block *sb = sbi->sb;
	int block;
	struct buffer_head *bh;
	struct f2fs_super_block *super;
	int err = 0;

	super = kzalloc(sizeof(struct f2fs_super_block), GFP_KERNEL);
	if (!super)
		return -ENOMEM;

	for (block = 0; block < 2; block++) {
		bh = sb_bread(sb, block);
		if (!bh) {
			f2fs_msg(sb, KERN_ERR, "Unable to read %dth superblock",
				block + 1);
			err = -EIO;
			continue;
		}

		/* sanity checking of raw super */
		if (sanity_check_raw_super(sbi, bh)) {
			f2fs_msg(sb, KERN_ERR,
				"Can't find valid F2FS filesystem in %dth superblock",
				block + 1);
			err = -EINVAL;
			brelse(bh);
			continue;
		}

		if (!*raw_super) {
			memcpy(super, bh->b_data + F2FS_SUPER_OFFSET,
							sizeof(*super));
			*valid_super_block = block;
			*raw_super = super;
		}
		brelse(bh);
	}

	/* failed to read either of the superblocks */
	if (err < 0)
		*recovery = 1;

	/* no valid superblock */
	if (!*raw_super)
		kfree(super);
	else
		err = 0;

	return err;
}
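
/*
 * Note: f2fs keeps two superblock copies in the first two blocks of
 * the volume. The first copy that passes sanity_check_raw_super() is
 * used; if either copy was unreadable or invalid, *recovery is set so
 * that fill_super can rewrite the broken copy once the mount succeeds.
 */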

int f2fs_commit_super(struct f2fs_sb_info *sbi, bool recover)
{
	struct buffer_head *bh;
	int err;

	if ((recover && f2fs_readonly(sbi->sb)) ||
				bdev_read_only(sbi->sb->s_bdev)) {
		set_sbi_flag(sbi, SBI_NEED_SB_WRITE);
		return -EROFS;
	}

	/* write back-up superblock first */
	bh = sb_getblk(sbi->sb, sbi->valid_super_block ? 0 : 1);
	if (!bh)
		return -EIO;
	err = __f2fs_commit_super(bh, F2FS_RAW_SUPER(sbi));
	brelse(bh);

	/* if we are in recovery path, skip writing valid superblock */
	if (recover || err)
		return err;

	/* write current valid superblock */
	bh = sb_getblk(sbi->sb, sbi->valid_super_block);
	if (!bh)
		return -EIO;
	err = __f2fs_commit_super(bh, F2FS_RAW_SUPER(sbi));
	brelse(bh);
	return err;
}

static int f2fs_scan_devices(struct f2fs_sb_info *sbi)
{
	struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi);
	unsigned int max_devices = MAX_DEVICES;
	int i;

	/* Initialize single device information */
	if (!RDEV(0).path[0]) {
		if (!bdev_is_zoned(sbi->sb->s_bdev))
			return 0;
		max_devices = 1;
	}

	/*
	 * Initialize multiple devices information, or single
	 * zoned block device information.
	 */
	sbi->devs = kcalloc(max_devices, sizeof(struct f2fs_dev_info),
				GFP_KERNEL);
	if (!sbi->devs)
		return -ENOMEM;

	for (i = 0; i < max_devices; i++) {

		if (i > 0 && !RDEV(i).path[0])
			break;

		if (max_devices == 1) {
			/* Single zoned block device mount */
			FDEV(0).bdev =
				blkdev_get_by_dev(sbi->sb->s_bdev->bd_dev,
					sbi->sb->s_mode, sbi->sb->s_type);
		} else {
			/* Multi-device mount */
			memcpy(FDEV(i).path, RDEV(i).path, MAX_PATH_LEN);
			FDEV(i).total_segments =
				le32_to_cpu(RDEV(i).total_segments);
			if (i == 0) {
				FDEV(i).start_blk = 0;
				FDEV(i).end_blk = FDEV(i).start_blk +
				    (FDEV(i).total_segments <<
				    sbi->log_blocks_per_seg) - 1 +
				    le32_to_cpu(raw_super->segment0_blkaddr);
			} else {
				FDEV(i).start_blk = FDEV(i - 1).end_blk + 1;
				FDEV(i).end_blk = FDEV(i).start_blk +
					(FDEV(i).total_segments <<
					sbi->log_blocks_per_seg) - 1;
			}
			FDEV(i).bdev = blkdev_get_by_path(FDEV(i).path,
					sbi->sb->s_mode, sbi->sb->s_type);
		}
		if (IS_ERR(FDEV(i).bdev))
			return PTR_ERR(FDEV(i).bdev);

		/* to release errored devices */
		sbi->s_ndevs = i + 1;

#ifdef CONFIG_BLK_DEV_ZONED
		if (bdev_zoned_model(FDEV(i).bdev) == BLK_ZONED_HM &&
				!f2fs_sb_mounted_blkzoned(sbi->sb)) {
			f2fs_msg(sbi->sb, KERN_ERR,
				"Zoned block device feature not enabled");
			return -EINVAL;
		}
		if (bdev_zoned_model(FDEV(i).bdev) != BLK_ZONED_NONE) {
			if (init_blkz_info(sbi, i)) {
				f2fs_msg(sbi->sb, KERN_ERR,
					"Failed to initialize F2FS blkzone information");
				return -EINVAL;
			}
			if (max_devices == 1)
				break;
			f2fs_msg(sbi->sb, KERN_INFO,
				"Mount Device [%2d]: %20s, %8u, %8x - %8x (zone: %s)",
				i, FDEV(i).path,
				FDEV(i).total_segments,
				FDEV(i).start_blk, FDEV(i).end_blk,
				bdev_zoned_model(FDEV(i).bdev) == BLK_ZONED_HA ?
				"Host-aware" : "Host-managed");
			continue;
		}
#endif
		f2fs_msg(sbi->sb, KERN_INFO,
			"Mount Device [%2d]: %20s, %8u, %8x - %8x",
				i, FDEV(i).path,
				FDEV(i).total_segments,
				FDEV(i).start_blk, FDEV(i).end_blk);
	}
	f2fs_msg(sbi->sb, KERN_INFO,
			"IO Block Size: %8d KB", F2FS_IO_SIZE_KB(sbi));
	return 0;
}
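
/*
 * Note: for multi-device volumes the member devices are concatenated
 * into one logical block address space; start_blk/end_blk above are
 * accumulated across devices, with segment0_blkaddr counted only once
 * on the first device.
 */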

static int f2fs_fill_super(struct super_block *sb, void *data, int silent)
{
	struct f2fs_sb_info *sbi;
	struct f2fs_super_block *raw_super;
	struct inode *root;
	int err;
	bool retry = true, need_fsck = false;
	char *options = NULL;
	int recovery, i, valid_super_block;
	struct curseg_info *seg_i;

try_onemore:
	err = -EINVAL;
	raw_super = NULL;
	valid_super_block = -1;
	recovery = 0;

	/* allocate memory for f2fs-specific super block info */
	sbi = kzalloc(sizeof(struct f2fs_sb_info), GFP_KERNEL);
	if (!sbi)
		return -ENOMEM;

	sbi->sb = sb;

	/* Load the checksum driver */
	sbi->s_chksum_driver = crypto_alloc_shash("crc32", 0, 0);
	if (IS_ERR(sbi->s_chksum_driver)) {
		f2fs_msg(sb, KERN_ERR, "Cannot load crc32 driver.");
		err = PTR_ERR(sbi->s_chksum_driver);
		sbi->s_chksum_driver = NULL;
		goto free_sbi;
	}

	/* set a block size */
	if (unlikely(!sb_set_blocksize(sb, F2FS_BLKSIZE))) {
		f2fs_msg(sb, KERN_ERR, "unable to set blocksize");
		goto free_sbi;
	}

	err = read_raw_super_block(sbi, &raw_super, &valid_super_block,
								&recovery);
	if (err)
		goto free_sbi;

	sb->s_fs_info = sbi;
	sbi->raw_super = raw_super;

	/* precompute checksum seed for metadata */
	if (f2fs_sb_has_inode_chksum(sb))
		sbi->s_chksum_seed = f2fs_chksum(sbi, ~0, raw_super->uuid,
						sizeof(raw_super->uuid));

	/*
	 * The BLKZONED feature indicates that the drive was formatted with
	 * zone alignment optimization. This is optional for host-aware
	 * devices, but mandatory for host-managed zoned block devices.
	 */
#ifndef CONFIG_BLK_DEV_ZONED
	if (f2fs_sb_mounted_blkzoned(sb)) {
		f2fs_msg(sb, KERN_ERR,
			 "Zoned block device support is not enabled");
		err = -EOPNOTSUPP;
		goto free_sb_buf;
	}
#endif
	default_options(sbi);
	/* parse mount options */
	options = kstrdup((const char *)data, GFP_KERNEL);
	if (data && !options) {
		err = -ENOMEM;
		goto free_sb_buf;
	}

	err = parse_options(sb, options);
	if (err)
		goto free_options;

	sbi->max_file_blocks = max_file_blocks();
	sb->s_maxbytes = sbi->max_file_blocks <<
				le32_to_cpu(raw_super->log_blocksize);
	sb->s_max_links = F2FS_LINK_MAX;
	get_random_bytes(&sbi->s_next_generation, sizeof(u32));

#ifdef CONFIG_QUOTA
	sb->dq_op = &f2fs_quota_operations;
	sb->s_qcop = &f2fs_quotactl_ops;
	sb->s_quota_types = QTYPE_MASK_USR | QTYPE_MASK_GRP | QTYPE_MASK_PRJ;
#endif

	sb->s_op = &f2fs_sops;
	sb->s_cop = &f2fs_cryptops;
	sb->s_xattr = f2fs_xattr_handlers;
	sb->s_export_op = &f2fs_export_ops;
	sb->s_magic = F2FS_SUPER_MAGIC;
	sb->s_time_gran = 1;
	sb->s_flags = (sb->s_flags & ~MS_POSIXACL) |
		(test_opt(sbi, POSIX_ACL) ? MS_POSIXACL : 0);
	memcpy(&sb->s_uuid, raw_super->uuid, sizeof(raw_super->uuid));

	/* init f2fs-specific super block info */
	sbi->valid_super_block = valid_super_block;
	mutex_init(&sbi->gc_mutex);
	mutex_init(&sbi->cp_mutex);
	init_rwsem(&sbi->node_write);
	init_rwsem(&sbi->node_change);

	/* disallow all the data/node/meta page writes */
	set_sbi_flag(sbi, SBI_POR_DOING);
	spin_lock_init(&sbi->stat_lock);

	/* init iostat info */
	spin_lock_init(&sbi->iostat_lock);
	sbi->iostat_enable = false;

	for (i = 0; i < NR_PAGE_TYPE; i++) {
		int n = (i == META) ? 1 : NR_TEMP_TYPE;
		int j;

		sbi->write_io[i] = kmalloc(n * sizeof(struct f2fs_bio_info),
								GFP_KERNEL);
		if (!sbi->write_io[i]) {
			err = -ENOMEM;
			goto free_options;
		}

		for (j = HOT; j < n; j++) {
			init_rwsem(&sbi->write_io[i][j].io_rwsem);
			sbi->write_io[i][j].sbi = sbi;
			sbi->write_io[i][j].bio = NULL;
			spin_lock_init(&sbi->write_io[i][j].io_lock);
			INIT_LIST_HEAD(&sbi->write_io[i][j].io_list);
		}
	}

	init_rwsem(&sbi->cp_rwsem);
	init_waitqueue_head(&sbi->cp_wait);
	init_sb_info(sbi);

	err = init_percpu_info(sbi);
	if (err)
		goto free_options;

	if (F2FS_IO_SIZE(sbi) > 1) {
		sbi->write_io_dummy =
			mempool_create_page_pool(2 * (F2FS_IO_SIZE(sbi) - 1), 0);
		if (!sbi->write_io_dummy) {
			err = -ENOMEM;
			goto free_options;
		}
	}

	/* get an inode for meta space */
	sbi->meta_inode = f2fs_iget(sb, F2FS_META_INO(sbi));
	if (IS_ERR(sbi->meta_inode)) {
		f2fs_msg(sb, KERN_ERR, "Failed to read F2FS meta data inode");
		err = PTR_ERR(sbi->meta_inode);
		goto free_io_dummy;
	}

	err = get_valid_checkpoint(sbi);
	if (err) {
		f2fs_msg(sb, KERN_ERR, "Failed to get valid F2FS checkpoint");
		goto free_meta_inode;
	}

	/* Initialize device list */
	err = f2fs_scan_devices(sbi);
	if (err) {
		f2fs_msg(sb, KERN_ERR, "Failed to find devices");
		goto free_devices;
	}

	sbi->total_valid_node_count =
				le32_to_cpu(sbi->ckpt->valid_node_count);
	percpu_counter_set(&sbi->total_valid_inode_count,
				le32_to_cpu(sbi->ckpt->valid_inode_count));
	sbi->user_block_count = le64_to_cpu(sbi->ckpt->user_block_count);
	sbi->total_valid_block_count =
				le64_to_cpu(sbi->ckpt->valid_block_count);
	sbi->last_valid_block_count = sbi->total_valid_block_count;
	sbi->reserved_blocks = 0;

	for (i = 0; i < NR_INODE_TYPE; i++) {
		INIT_LIST_HEAD(&sbi->inode_list[i]);
		spin_lock_init(&sbi->inode_lock[i]);
	}

	init_extent_cache_info(sbi);

	init_ino_entry_info(sbi);

	/* setup f2fs internal modules */
	err = build_segment_manager(sbi);
	if (err) {
		f2fs_msg(sb, KERN_ERR,
			"Failed to initialize F2FS segment manager");
		goto free_sm;
	}
	err = build_node_manager(sbi);
	if (err) {
		f2fs_msg(sb, KERN_ERR,
			"Failed to initialize F2FS node manager");
		goto free_nm;
	}

	/* For write statistics */
	if (sb->s_bdev->bd_part)
		sbi->sectors_written_start =
			(u64)part_stat_read(sb->s_bdev->bd_part, sectors[1]);

	/* Read accumulated write IO statistics if exists */
	seg_i = CURSEG_I(sbi, CURSEG_HOT_NODE);
	if (__exist_node_summaries(sbi))
		sbi->kbytes_written =
			le64_to_cpu(seg_i->journal->info.kbytes_written);

	build_gc_manager(sbi);

	/* get an inode for node space */
	sbi->node_inode = f2fs_iget(sb, F2FS_NODE_INO(sbi));
	if (IS_ERR(sbi->node_inode)) {
		f2fs_msg(sb, KERN_ERR, "Failed to read node inode");
		err = PTR_ERR(sbi->node_inode);
		goto free_nm;
	}

	f2fs_join_shrinker(sbi);

	err = f2fs_build_stats(sbi);
	if (err)
		goto free_nm;

	/* read root inode and dentry */
	root = f2fs_iget(sb, F2FS_ROOT_INO(sbi));
	if (IS_ERR(root)) {
		f2fs_msg(sb, KERN_ERR, "Failed to read root inode");
		err = PTR_ERR(root);
		goto free_node_inode;
	}
	if (!S_ISDIR(root->i_mode) || !root->i_blocks || !root->i_size) {
		iput(root);
		err = -EINVAL;
		goto free_node_inode;
	}

	sb->s_root = d_make_root(root); /* allocate root dentry */
	if (!sb->s_root) {
		err = -ENOMEM;
		goto free_root_inode;
	}

	err = f2fs_register_sysfs(sbi);
	if (err)
		goto free_root_inode;

	/* if there are any orphan inodes, free them */
	err = recover_orphan_inodes(sbi);
	if (err)
		goto free_sysfs;

	/* recover fsynced data */
	if (!test_opt(sbi, DISABLE_ROLL_FORWARD)) {
		/*
		 * the mount should fail when the device is read-only and the
		 * previous checkpoint was not done by a clean system shutdown.
		 */
		if (bdev_read_only(sb->s_bdev) &&
				!is_set_ckpt_flags(sbi, CP_UMOUNT_FLAG)) {
			err = -EROFS;
			goto free_meta;
		}

		if (need_fsck)
			set_sbi_flag(sbi, SBI_NEED_FSCK);

		if (!retry)
			goto skip_recovery;

		err = recover_fsync_data(sbi, false);
		if (err < 0) {
			need_fsck = true;
			f2fs_msg(sb, KERN_ERR,
				"Cannot recover all fsync data errno=%d", err);
			goto free_meta;
		}
	} else {
		err = recover_fsync_data(sbi, true);

		if (!f2fs_readonly(sb) && err > 0) {
			err = -EINVAL;
			f2fs_msg(sb, KERN_ERR,
				"Need to recover fsync data");
			goto free_sysfs;
		}
	}
skip_recovery:
	/* recover_fsync_data() cleared this already */
	clear_sbi_flag(sbi, SBI_POR_DOING);

	/*
	 * If the filesystem is not mounted as read-only then
	 * start the gc_thread.
	 */
	if (test_opt(sbi, BG_GC) && !f2fs_readonly(sb)) {
		/* After POR, we can run background GC thread.*/
		err = start_gc_thread(sbi);
		if (err)
			goto free_meta;
	}
	kfree(options);

	/* recover broken superblock */
	if (recovery) {
		err = f2fs_commit_super(sbi, true);
		f2fs_msg(sb, KERN_INFO,
			"Try to recover %dth superblock, ret: %d",
			sbi->valid_super_block ? 1 : 2, err);
	}

	f2fs_msg(sbi->sb, KERN_NOTICE, "Mounted with checkpoint version = %llx",
				cur_cp_version(F2FS_CKPT(sbi)));
	f2fs_update_time(sbi, CP_TIME);
	f2fs_update_time(sbi, REQ_TIME);
	return 0;

free_meta:
	f2fs_sync_inode_meta(sbi);
	/*
	 * Some dirty meta pages can be produced by recover_orphan_inodes()
	 * failed by EIO. Then, iput(node_inode) can trigger balance_fs_bg()
	 * followed by write_checkpoint() through f2fs_write_node_pages(), which
	 * falls into an infinite loop in sync_meta_pages().
	 */
	truncate_inode_pages_final(META_MAPPING(sbi));
free_sysfs:
	f2fs_unregister_sysfs(sbi);
free_root_inode:
	dput(sb->s_root);
	sb->s_root = NULL;
free_node_inode:
	truncate_inode_pages_final(NODE_MAPPING(sbi));
	mutex_lock(&sbi->umount_mutex);
	release_ino_entry(sbi, true);
	f2fs_leave_shrinker(sbi);
	iput(sbi->node_inode);
	mutex_unlock(&sbi->umount_mutex);
	f2fs_destroy_stats(sbi);
free_nm:
	destroy_node_manager(sbi);
free_sm:
	destroy_segment_manager(sbi);
free_devices:
	destroy_device_list(sbi);
	kfree(sbi->ckpt);
free_meta_inode:
	make_bad_inode(sbi->meta_inode);
	iput(sbi->meta_inode);
free_io_dummy:
	mempool_destroy(sbi->write_io_dummy);
free_options:
	for (i = 0; i < NR_PAGE_TYPE; i++)
		kfree(sbi->write_io[i]);
	destroy_percpu_info(sbi);
#ifdef CONFIG_QUOTA
	for (i = 0; i < MAXQUOTAS; i++)
		kfree(sbi->s_qf_names[i]);
#endif
	kfree(options);
free_sb_buf:
	kfree(raw_super);
free_sbi:
	if (sbi->s_chksum_driver)
		crypto_free_shash(sbi->s_chksum_driver);
	kfree(sbi);

	/* give only one more chance */
	if (retry) {
		retry = false;
		shrink_dcache_sb(sb);
		goto try_onemore;
	}
	return err;
}
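
/*
 * Note: on the first failure fill_super retries exactly once (see the
 * retry flag above): the dcache is shrunk and, if roll-forward
 * recovery was what failed, need_fsck makes the second attempt mount
 * with SBI_NEED_FSCK set and skip recovery via skip_recovery.
 */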

static struct dentry *f2fs_mount(struct file_system_type *fs_type, int flags,
			const char *dev_name, void *data)
{
	return mount_bdev(fs_type, flags, dev_name, data, f2fs_fill_super);
}

static void kill_f2fs_super(struct super_block *sb)
{
	if (sb->s_root) {
		set_sbi_flag(F2FS_SB(sb), SBI_IS_CLOSE);
		stop_gc_thread(F2FS_SB(sb));
		stop_discard_thread(F2FS_SB(sb));
	}
	kill_block_super(sb);
}

static struct file_system_type f2fs_fs_type = {
	.owner		= THIS_MODULE,
	.name		= "f2fs",
	.mount		= f2fs_mount,
	.kill_sb	= kill_f2fs_super,
	.fs_flags	= FS_REQUIRES_DEV,
};
MODULE_ALIAS_FS("f2fs");

static int __init init_inodecache(void)
{
	f2fs_inode_cachep = kmem_cache_create("f2fs_inode_cache",
			sizeof(struct f2fs_inode_info), 0,
			SLAB_RECLAIM_ACCOUNT|SLAB_ACCOUNT, NULL);
	if (!f2fs_inode_cachep)
		return -ENOMEM;
	return 0;
}

static void destroy_inodecache(void)
{
	/*
	 * Make sure all delayed rcu free inodes are flushed before we
	 * destroy the cache.
	 */
	rcu_barrier();
	kmem_cache_destroy(f2fs_inode_cachep);
}

static int __init init_f2fs_fs(void)
{
	int err;

	f2fs_build_trace_ios();

	err = init_inodecache();
	if (err)
		goto fail;
	err = create_node_manager_caches();
	if (err)
		goto free_inodecache;
	err = create_segment_manager_caches();
	if (err)
		goto free_node_manager_caches;
	err = create_checkpoint_caches();
	if (err)
		goto free_segment_manager_caches;
	err = create_extent_cache();
	if (err)
		goto free_checkpoint_caches;
	err = f2fs_init_sysfs();
	if (err)
		goto free_extent_cache;
	err = register_shrinker(&f2fs_shrinker_info);
	if (err)
		goto free_sysfs;
	err = register_filesystem(&f2fs_fs_type);
	if (err)
		goto free_shrinker;
	err = f2fs_create_root_stats();
	if (err)
		goto free_filesystem;
	return 0;

free_filesystem:
	unregister_filesystem(&f2fs_fs_type);
free_shrinker:
	unregister_shrinker(&f2fs_shrinker_info);
free_sysfs:
	f2fs_exit_sysfs();
free_extent_cache:
	destroy_extent_cache();
free_checkpoint_caches:
	destroy_checkpoint_caches();
free_segment_manager_caches:
	destroy_segment_manager_caches();
free_node_manager_caches:
	destroy_node_manager_caches();
free_inodecache:
	destroy_inodecache();
fail:
	return err;
}

static void __exit exit_f2fs_fs(void)
{
	f2fs_destroy_root_stats();
	unregister_filesystem(&f2fs_fs_type);
	unregister_shrinker(&f2fs_shrinker_info);
	f2fs_exit_sysfs();
	destroy_extent_cache();
	destroy_checkpoint_caches();
	destroy_segment_manager_caches();
	destroy_node_manager_caches();
	destroy_inodecache();
	f2fs_destroy_trace_ios();
}

module_init(init_f2fs_fs)
module_exit(exit_f2fs_fs)

MODULE_AUTHOR("Samsung Electronics's Praesto Team");
MODULE_DESCRIPTION("Flash Friendly File System");
MODULE_LICENSE("GPL");