/*
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/statfs.h>
#include <linux/buffer_head.h>
#include <linux/backing-dev.h>
#include <linux/kthread.h>
#include <linux/parser.h>
#include <linux/mount.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <linux/random.h>
#include <linux/exportfs.h>
#include <linux/blkdev.h>
#include <linux/quotaops.h>
#include <linux/f2fs_fs.h>
#include <linux/sysfs.h>
#include <linux/quota.h>

#define CREATE_TRACE_POINTS
#include <trace/events/f2fs.h>

static struct kmem_cache *f2fs_inode_cachep;
#ifdef CONFIG_F2FS_FAULT_INJECTION

char *fault_name[FAULT_MAX] = {
	[FAULT_KMALLOC]		= "kmalloc",
	[FAULT_KVMALLOC]	= "kvmalloc",
	[FAULT_PAGE_ALLOC]	= "page alloc",
	[FAULT_PAGE_GET]	= "page get",
	[FAULT_ALLOC_BIO]	= "alloc bio",
	[FAULT_ALLOC_NID]	= "alloc nid",
	[FAULT_ORPHAN]		= "orphan",
	[FAULT_BLOCK]		= "no more block",
	[FAULT_DIR_DEPTH]	= "too big dir depth",
	[FAULT_EVICT_INODE]	= "evict_inode fail",
	[FAULT_TRUNCATE]	= "truncate fail",
	[FAULT_IO]		= "IO error",
	[FAULT_CHECKPOINT]	= "checkpoint error",
};
static void f2fs_build_fault_attr(struct f2fs_sb_info *sbi,
						unsigned int rate)
{
	struct f2fs_fault_info *ffi = &F2FS_OPTION(sbi).fault_info;

	if (rate) {
		atomic_set(&ffi->inject_ops, 0);
		ffi->inject_rate = rate;
		ffi->inject_type = (1 << FAULT_MAX) - 1;
	} else {
		memset(ffi, 0, sizeof(struct f2fs_fault_info));
	}
}
#endif
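/*
 * Usage sketch (assuming a kernel built with CONFIG_F2FS_FAULT_INJECTION):
 * mounting with "-o fault_injection=N" ends up calling
 * f2fs_build_fault_attr(sbi, N), which arms every fault type and injects
 * roughly one failure per N qualifying operations; passing 0 clears the
 * whole fault_info and disables injection.
 */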
/* f2fs-wide shrinker description */
static struct shrinker f2fs_shrinker_info = {
	.scan_objects = f2fs_shrink_scan,
	.count_objects = f2fs_shrink_count,
	.seeks = DEFAULT_SEEKS,
};
enum {
	Opt_gc_background,
	Opt_disable_roll_forward,
	Opt_disable_ext_identify,
	Opt_inline_xattr_size,
	Opt_test_dummy_encryption,
	Opt_err,
};
static match_table_t f2fs_tokens = {
	{Opt_gc_background, "background_gc=%s"},
	{Opt_disable_roll_forward, "disable_roll_forward"},
	{Opt_norecovery, "norecovery"},
	{Opt_discard, "discard"},
	{Opt_nodiscard, "nodiscard"},
	{Opt_noheap, "no_heap"},
	{Opt_heap, "heap"},
	{Opt_user_xattr, "user_xattr"},
	{Opt_nouser_xattr, "nouser_xattr"},
	{Opt_acl, "acl"},
	{Opt_noacl, "noacl"},
	{Opt_active_logs, "active_logs=%u"},
	{Opt_disable_ext_identify, "disable_ext_identify"},
	{Opt_inline_xattr, "inline_xattr"},
	{Opt_noinline_xattr, "noinline_xattr"},
	{Opt_inline_xattr_size, "inline_xattr_size=%u"},
	{Opt_inline_data, "inline_data"},
	{Opt_inline_dentry, "inline_dentry"},
	{Opt_noinline_dentry, "noinline_dentry"},
	{Opt_flush_merge, "flush_merge"},
	{Opt_noflush_merge, "noflush_merge"},
	{Opt_nobarrier, "nobarrier"},
	{Opt_fastboot, "fastboot"},
	{Opt_extent_cache, "extent_cache"},
	{Opt_noextent_cache, "noextent_cache"},
	{Opt_noinline_data, "noinline_data"},
	{Opt_data_flush, "data_flush"},
	{Opt_reserve_root, "reserve_root=%u"},
	{Opt_resgid, "resgid=%u"},
	{Opt_resuid, "resuid=%u"},
	{Opt_mode, "mode=%s"},
	{Opt_io_size_bits, "io_bits=%u"},
	{Opt_fault_injection, "fault_injection=%u"},
	{Opt_lazytime, "lazytime"},
	{Opt_nolazytime, "nolazytime"},
	{Opt_quota, "quota"},
	{Opt_noquota, "noquota"},
	{Opt_usrquota, "usrquota"},
	{Opt_grpquota, "grpquota"},
	{Opt_prjquota, "prjquota"},
	{Opt_usrjquota, "usrjquota=%s"},
	{Opt_grpjquota, "grpjquota=%s"},
	{Opt_prjjquota, "prjjquota=%s"},
	{Opt_offusrjquota, "usrjquota="},
	{Opt_offgrpjquota, "grpjquota="},
	{Opt_offprjjquota, "prjjquota="},
	{Opt_jqfmt_vfsold, "jqfmt=vfsold"},
	{Opt_jqfmt_vfsv0, "jqfmt=vfsv0"},
	{Opt_jqfmt_vfsv1, "jqfmt=vfsv1"},
	{Opt_whint, "whint_mode=%s"},
	{Opt_alloc, "alloc_mode=%s"},
	{Opt_fsync, "fsync_mode=%s"},
	{Opt_test_dummy_encryption, "test_dummy_encryption"},
	{Opt_err, NULL},
};
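/*
 * Parsing sketch (illustrative, not from a real log): an options string
 * such as "background_gc=on,discard,active_logs=6" is split on ',' by
 * strsep() in parse_options() below, and match_token() maps each piece to
 * Opt_gc_background, Opt_discard and Opt_active_logs respectively, with
 * "%s"/"%u" arguments captured into args[] for match_strdup()/match_int().
 */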
void f2fs_msg(struct super_block *sb, const char *level, const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, fmt);
	vaf.fmt = fmt;
	vaf.va = &args;
	printk_ratelimited("%sF2FS-fs (%s): %pV\n", level, sb->s_id, &vaf);
	va_end(args);
}
static inline void limit_reserve_root(struct f2fs_sb_info *sbi)
{
	block_t limit = (sbi->user_block_count << 1) / 1000;

	/* limit is 0.2% of user blocks */
	if (test_opt(sbi, RESERVE_ROOT) &&
			F2FS_OPTION(sbi).root_reserved_blocks > limit) {
		F2FS_OPTION(sbi).root_reserved_blocks = limit;
		f2fs_msg(sbi->sb, KERN_INFO,
			"Reduce reserved blocks for root = %u",
			F2FS_OPTION(sbi).root_reserved_blocks);
	}
	if (!test_opt(sbi, RESERVE_ROOT) &&
		(!uid_eq(F2FS_OPTION(sbi).s_resuid,
				make_kuid(&init_user_ns, F2FS_DEF_RESUID)) ||
		!gid_eq(F2FS_OPTION(sbi).s_resgid,
				make_kgid(&init_user_ns, F2FS_DEF_RESGID))))
		f2fs_msg(sbi->sb, KERN_INFO,
			"Ignore s_resuid=%u, s_resgid=%u w/o reserve_root",
				from_kuid_munged(&init_user_ns,
					F2FS_OPTION(sbi).s_resuid),
				from_kgid_munged(&init_user_ns,
					F2FS_OPTION(sbi).s_resgid));
}
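/*
 * Worked example (hypothetical numbers): on a volume with
 * user_block_count = 25,600,000 4KB blocks (~100 GiB),
 * limit = (25600000 << 1) / 1000 = 51,200 blocks, i.e. root can reserve
 * at most ~200 MiB no matter what reserve_root= was passed.
 */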
static void init_once(void *foo)
{
	struct f2fs_inode_info *fi = (struct f2fs_inode_info *) foo;

	inode_init_once(&fi->vfs_inode);
}
#ifdef CONFIG_QUOTA
static const char * const quotatypes[] = INITQFNAMES;
#define QTYPE2NAME(t) (quotatypes[t])
static int f2fs_set_qf_name(struct super_block *sb, int qtype,
							substring_t *args)
{
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	char *qname;
	int ret = -EINVAL;

	if (sb_any_quota_loaded(sb) && !F2FS_OPTION(sbi).s_qf_names[qtype]) {
		f2fs_msg(sb, KERN_ERR,
			"Cannot change journaled quota options when quota turned on");
		return -EINVAL;
	}
	if (f2fs_sb_has_quota_ino(sb)) {
		f2fs_msg(sb, KERN_INFO,
			"QUOTA feature is enabled, so ignore qf_name");
		return 0;
	}

	qname = match_strdup(args);
	if (!qname) {
		f2fs_msg(sb, KERN_ERR,
			"Not enough memory for storing quotafile name");
		return -EINVAL;
	}
	if (F2FS_OPTION(sbi).s_qf_names[qtype]) {
		if (strcmp(F2FS_OPTION(sbi).s_qf_names[qtype], qname) == 0)
			ret = 0;
		else
			f2fs_msg(sb, KERN_ERR,
				"%s quota file already specified",
				QTYPE2NAME(qtype));
		goto errout;
	}
	if (strchr(qname, '/')) {
		f2fs_msg(sb, KERN_ERR,
			"quotafile must be on filesystem root");
		goto errout;
	}
	F2FS_OPTION(sbi).s_qf_names[qtype] = qname;
	set_opt(sbi, QUOTA);
	return 0;
errout:
	kfree(qname);
	return ret;
}
static int f2fs_clear_qf_name(struct super_block *sb, int qtype)
{
	struct f2fs_sb_info *sbi = F2FS_SB(sb);

	if (sb_any_quota_loaded(sb) && F2FS_OPTION(sbi).s_qf_names[qtype]) {
		f2fs_msg(sb, KERN_ERR,
			"Cannot change journaled quota options when quota turned on");
		return -EINVAL;
	}
	kfree(F2FS_OPTION(sbi).s_qf_names[qtype]);
	F2FS_OPTION(sbi).s_qf_names[qtype] = NULL;
	return 0;
}
static int f2fs_check_quota_options(struct f2fs_sb_info *sbi)
{
	/*
	 * We do the test below only for project quotas. 'usrquota' and
	 * 'grpquota' mount options are allowed even without quota feature
	 * to support legacy quotas in quota files.
	 */
	if (test_opt(sbi, PRJQUOTA) && !f2fs_sb_has_project_quota(sbi->sb)) {
		f2fs_msg(sbi->sb, KERN_ERR, "Project quota feature not enabled. "
			"Cannot enable project quota enforcement.");
		return -1;
	}
	if (F2FS_OPTION(sbi).s_qf_names[USRQUOTA] ||
			F2FS_OPTION(sbi).s_qf_names[GRPQUOTA] ||
			F2FS_OPTION(sbi).s_qf_names[PRJQUOTA]) {
		if (test_opt(sbi, USRQUOTA) &&
				F2FS_OPTION(sbi).s_qf_names[USRQUOTA])
			clear_opt(sbi, USRQUOTA);

		if (test_opt(sbi, GRPQUOTA) &&
				F2FS_OPTION(sbi).s_qf_names[GRPQUOTA])
			clear_opt(sbi, GRPQUOTA);

		if (test_opt(sbi, PRJQUOTA) &&
				F2FS_OPTION(sbi).s_qf_names[PRJQUOTA])
			clear_opt(sbi, PRJQUOTA);

		if (test_opt(sbi, GRPQUOTA) || test_opt(sbi, USRQUOTA) ||
				test_opt(sbi, PRJQUOTA)) {
			f2fs_msg(sbi->sb, KERN_ERR, "old and new quota "
					"format mixing");
			return -1;
		}

		if (!F2FS_OPTION(sbi).s_jquota_fmt) {
			f2fs_msg(sbi->sb, KERN_ERR, "journaled quota format "
					"not specified");
			return -1;
		}
	}

	if (f2fs_sb_has_quota_ino(sbi->sb) && F2FS_OPTION(sbi).s_jquota_fmt) {
		f2fs_msg(sbi->sb, KERN_INFO,
			"QUOTA feature is enabled, so ignore jquota_fmt");
		F2FS_OPTION(sbi).s_jquota_fmt = 0;
	}
	if (f2fs_sb_has_quota_ino(sbi->sb) && f2fs_readonly(sbi->sb)) {
		f2fs_msg(sbi->sb, KERN_INFO,
			"Filesystem with quota feature cannot be mounted RDWR "
			"without CONFIG_QUOTA");
		return -1;
	}
	return 0;
}
#endif
static int parse_options(struct super_block *sb, char *options)
{
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	struct request_queue *q;
	substring_t args[MAX_OPT_ARGS];
	char *p, *name;
	int arg = 0;
	kuid_t uid;
	kgid_t gid;
#ifdef CONFIG_QUOTA
	int ret;
#endif

	if (!options)
		return 0;

	while ((p = strsep(&options, ",")) != NULL) {
		int token;

		if (!*p)
			continue;
		/*
		 * Initialize args struct so we know whether arg was
		 * found; some options take optional arguments.
		 */
		args[0].to = args[0].from = NULL;
		token = match_token(p, f2fs_tokens, args);

		switch (token) {
		case Opt_gc_background:
			name = match_strdup(&args[0]);
			if (!name)
				return -ENOMEM;
			if (strlen(name) == 2 && !strncmp(name, "on", 2)) {
				set_opt(sbi, BG_GC);
				clear_opt(sbi, FORCE_FG_GC);
			} else if (strlen(name) == 3 && !strncmp(name, "off", 3)) {
				clear_opt(sbi, BG_GC);
				clear_opt(sbi, FORCE_FG_GC);
			} else if (strlen(name) == 4 && !strncmp(name, "sync", 4)) {
				set_opt(sbi, BG_GC);
				set_opt(sbi, FORCE_FG_GC);
			} else {
				kfree(name);
				return -EINVAL;
			}
			kfree(name);
			break;
		case Opt_disable_roll_forward:
			set_opt(sbi, DISABLE_ROLL_FORWARD);
			break;
		case Opt_norecovery:
			/* this option mounts f2fs with ro */
			set_opt(sbi, DISABLE_ROLL_FORWARD);
			if (!f2fs_readonly(sb))
				return -EINVAL;
			break;
		case Opt_discard:
			q = bdev_get_queue(sb->s_bdev);
			if (blk_queue_discard(q)) {
				set_opt(sbi, DISCARD);
			} else if (!f2fs_sb_has_blkzoned(sb)) {
				f2fs_msg(sb, KERN_WARNING,
					"mounting with \"discard\" option, but "
					"the device does not support discard");
			}
			break;
		case Opt_nodiscard:
			if (f2fs_sb_has_blkzoned(sb)) {
				f2fs_msg(sb, KERN_WARNING,
					"discard is required for zoned block devices");
				return -EINVAL;
			}
			clear_opt(sbi, DISCARD);
			break;
		case Opt_noheap:
			set_opt(sbi, NOHEAP);
			break;
		case Opt_heap:
			clear_opt(sbi, NOHEAP);
			break;
#ifdef CONFIG_F2FS_FS_XATTR
		case Opt_user_xattr:
			set_opt(sbi, XATTR_USER);
			break;
		case Opt_nouser_xattr:
			clear_opt(sbi, XATTR_USER);
			break;
		case Opt_inline_xattr:
			set_opt(sbi, INLINE_XATTR);
			break;
		case Opt_noinline_xattr:
			clear_opt(sbi, INLINE_XATTR);
			break;
		case Opt_inline_xattr_size:
			if (args->from && match_int(args, &arg))
				return -EINVAL;
			set_opt(sbi, INLINE_XATTR_SIZE);
			F2FS_OPTION(sbi).inline_xattr_size = arg;
			break;
#else
		case Opt_user_xattr:
			f2fs_msg(sb, KERN_INFO,
				"user_xattr options not supported");
			break;
		case Opt_nouser_xattr:
			f2fs_msg(sb, KERN_INFO,
				"nouser_xattr options not supported");
			break;
		case Opt_inline_xattr:
			f2fs_msg(sb, KERN_INFO,
				"inline_xattr options not supported");
			break;
		case Opt_noinline_xattr:
			f2fs_msg(sb, KERN_INFO,
				"noinline_xattr options not supported");
			break;
#endif
#ifdef CONFIG_F2FS_FS_POSIX_ACL
		case Opt_acl:
			set_opt(sbi, POSIX_ACL);
			break;
		case Opt_noacl:
			clear_opt(sbi, POSIX_ACL);
			break;
#else
		case Opt_acl:
			f2fs_msg(sb, KERN_INFO, "acl options not supported");
			break;
		case Opt_noacl:
			f2fs_msg(sb, KERN_INFO, "noacl options not supported");
			break;
#endif
		case Opt_active_logs:
			if (args->from && match_int(args, &arg))
				return -EINVAL;
			if (arg != 2 && arg != 4 && arg != NR_CURSEG_TYPE)
				return -EINVAL;
			F2FS_OPTION(sbi).active_logs = arg;
			break;
		case Opt_disable_ext_identify:
			set_opt(sbi, DISABLE_EXT_IDENTIFY);
			break;
		case Opt_inline_data:
			set_opt(sbi, INLINE_DATA);
			break;
		case Opt_inline_dentry:
			set_opt(sbi, INLINE_DENTRY);
			break;
		case Opt_noinline_dentry:
			clear_opt(sbi, INLINE_DENTRY);
			break;
		case Opt_flush_merge:
			set_opt(sbi, FLUSH_MERGE);
			break;
		case Opt_noflush_merge:
			clear_opt(sbi, FLUSH_MERGE);
			break;
		case Opt_nobarrier:
			set_opt(sbi, NOBARRIER);
			break;
		case Opt_fastboot:
			set_opt(sbi, FASTBOOT);
			break;
		case Opt_extent_cache:
			set_opt(sbi, EXTENT_CACHE);
			break;
		case Opt_noextent_cache:
			clear_opt(sbi, EXTENT_CACHE);
			break;
		case Opt_noinline_data:
			clear_opt(sbi, INLINE_DATA);
			break;
		case Opt_data_flush:
			set_opt(sbi, DATA_FLUSH);
			break;
		case Opt_reserve_root:
			if (args->from && match_int(args, &arg))
				return -EINVAL;
			if (test_opt(sbi, RESERVE_ROOT)) {
				f2fs_msg(sb, KERN_INFO,
					"Preserve previous reserve_root=%u",
					F2FS_OPTION(sbi).root_reserved_blocks);
			} else {
				F2FS_OPTION(sbi).root_reserved_blocks = arg;
				set_opt(sbi, RESERVE_ROOT);
			}
			break;
		case Opt_resuid:
			if (args->from && match_int(args, &arg))
				return -EINVAL;
			uid = make_kuid(current_user_ns(), arg);
			if (!uid_valid(uid)) {
				f2fs_msg(sb, KERN_ERR,
					"Invalid uid value %d", arg);
				return -EINVAL;
			}
			F2FS_OPTION(sbi).s_resuid = uid;
			break;
		case Opt_resgid:
			if (args->from && match_int(args, &arg))
				return -EINVAL;
			gid = make_kgid(current_user_ns(), arg);
			if (!gid_valid(gid)) {
				f2fs_msg(sb, KERN_ERR,
					"Invalid gid value %d", arg);
				return -EINVAL;
			}
			F2FS_OPTION(sbi).s_resgid = gid;
			break;
		case Opt_mode:
			name = match_strdup(&args[0]);
			if (!name)
				return -ENOMEM;
			if (strlen(name) == 8 &&
					!strncmp(name, "adaptive", 8)) {
				if (f2fs_sb_has_blkzoned(sb)) {
					f2fs_msg(sb, KERN_WARNING,
						"adaptive mode is not allowed with "
						"zoned block device feature");
					kfree(name);
					return -EINVAL;
				}
				set_opt_mode(sbi, F2FS_MOUNT_ADAPTIVE);
			} else if (strlen(name) == 3 &&
					!strncmp(name, "lfs", 3)) {
				set_opt_mode(sbi, F2FS_MOUNT_LFS);
			} else {
				kfree(name);
				return -EINVAL;
			}
			kfree(name);
			break;
		case Opt_io_size_bits:
			if (args->from && match_int(args, &arg))
				return -EINVAL;
			if (arg > __ilog2_u32(BIO_MAX_PAGES)) {
				f2fs_msg(sb, KERN_WARNING,
					"Not support %d, larger than %d",
					1 << arg, BIO_MAX_PAGES);
				return -EINVAL;
			}
			F2FS_OPTION(sbi).write_io_size_bits = arg;
			break;
		case Opt_fault_injection:
			if (args->from && match_int(args, &arg))
				return -EINVAL;
#ifdef CONFIG_F2FS_FAULT_INJECTION
			f2fs_build_fault_attr(sbi, arg);
			set_opt(sbi, FAULT_INJECTION);
#else
			f2fs_msg(sb, KERN_INFO,
				"FAULT_INJECTION was not selected");
#endif
			break;
		case Opt_lazytime:
			sb->s_flags |= SB_LAZYTIME;
			break;
		case Opt_nolazytime:
			sb->s_flags &= ~SB_LAZYTIME;
			break;
#ifdef CONFIG_QUOTA
		case Opt_quota:
		case Opt_usrquota:
			set_opt(sbi, USRQUOTA);
			break;
		case Opt_grpquota:
			set_opt(sbi, GRPQUOTA);
			break;
		case Opt_prjquota:
			set_opt(sbi, PRJQUOTA);
			break;
		case Opt_usrjquota:
			ret = f2fs_set_qf_name(sb, USRQUOTA, &args[0]);
			if (ret)
				return ret;
			break;
		case Opt_grpjquota:
			ret = f2fs_set_qf_name(sb, GRPQUOTA, &args[0]);
			if (ret)
				return ret;
			break;
		case Opt_prjjquota:
			ret = f2fs_set_qf_name(sb, PRJQUOTA, &args[0]);
			if (ret)
				return ret;
			break;
		case Opt_offusrjquota:
			ret = f2fs_clear_qf_name(sb, USRQUOTA);
			if (ret)
				return ret;
			break;
		case Opt_offgrpjquota:
			ret = f2fs_clear_qf_name(sb, GRPQUOTA);
			if (ret)
				return ret;
			break;
		case Opt_offprjjquota:
			ret = f2fs_clear_qf_name(sb, PRJQUOTA);
			if (ret)
				return ret;
			break;
		case Opt_jqfmt_vfsold:
			F2FS_OPTION(sbi).s_jquota_fmt = QFMT_VFS_OLD;
			break;
		case Opt_jqfmt_vfsv0:
			F2FS_OPTION(sbi).s_jquota_fmt = QFMT_VFS_V0;
			break;
		case Opt_jqfmt_vfsv1:
			F2FS_OPTION(sbi).s_jquota_fmt = QFMT_VFS_V1;
			break;
		case Opt_noquota:
			clear_opt(sbi, QUOTA);
			clear_opt(sbi, USRQUOTA);
			clear_opt(sbi, GRPQUOTA);
			clear_opt(sbi, PRJQUOTA);
			break;
#else
		case Opt_quota:
		case Opt_usrquota:
		case Opt_grpquota:
		case Opt_prjquota:
		case Opt_usrjquota:
		case Opt_grpjquota:
		case Opt_prjjquota:
		case Opt_offusrjquota:
		case Opt_offgrpjquota:
		case Opt_offprjjquota:
		case Opt_jqfmt_vfsold:
		case Opt_jqfmt_vfsv0:
		case Opt_jqfmt_vfsv1:
		case Opt_noquota:
			f2fs_msg(sb, KERN_INFO,
					"quota operations not supported");
			break;
#endif
		case Opt_whint:
			name = match_strdup(&args[0]);
			if (!name)
				return -ENOMEM;
			if (strlen(name) == 10 &&
					!strncmp(name, "user-based", 10)) {
				F2FS_OPTION(sbi).whint_mode = WHINT_MODE_USER;
			} else if (strlen(name) == 3 &&
					!strncmp(name, "off", 3)) {
				F2FS_OPTION(sbi).whint_mode = WHINT_MODE_OFF;
			} else if (strlen(name) == 8 &&
					!strncmp(name, "fs-based", 8)) {
				F2FS_OPTION(sbi).whint_mode = WHINT_MODE_FS;
			} else {
				kfree(name);
				return -EINVAL;
			}
			kfree(name);
			break;
		case Opt_alloc:
			name = match_strdup(&args[0]);
			if (!name)
				return -ENOMEM;
			if (strlen(name) == 7 &&
					!strncmp(name, "default", 7)) {
				F2FS_OPTION(sbi).alloc_mode = ALLOC_MODE_DEFAULT;
			} else if (strlen(name) == 5 &&
					!strncmp(name, "reuse", 5)) {
				F2FS_OPTION(sbi).alloc_mode = ALLOC_MODE_REUSE;
			} else {
				kfree(name);
				return -EINVAL;
			}
			kfree(name);
			break;
		case Opt_fsync:
			name = match_strdup(&args[0]);
			if (!name)
				return -ENOMEM;
			if (strlen(name) == 5 &&
					!strncmp(name, "posix", 5)) {
				F2FS_OPTION(sbi).fsync_mode = FSYNC_MODE_POSIX;
			} else if (strlen(name) == 6 &&
					!strncmp(name, "strict", 6)) {
				F2FS_OPTION(sbi).fsync_mode = FSYNC_MODE_STRICT;
			} else {
				kfree(name);
				return -EINVAL;
			}
			kfree(name);
			break;
		case Opt_test_dummy_encryption:
#ifdef CONFIG_F2FS_FS_ENCRYPTION
			if (!f2fs_sb_has_encrypt(sb)) {
				f2fs_msg(sb, KERN_ERR, "Encrypt feature is off");
				return -EINVAL;
			}

			F2FS_OPTION(sbi).test_dummy_encryption = true;
			f2fs_msg(sb, KERN_INFO,
					"Test dummy encryption mode enabled");
#else
			f2fs_msg(sb, KERN_INFO,
					"Test dummy encryption mount option ignored");
#endif
			break;
		default:
			f2fs_msg(sb, KERN_ERR,
				"Unrecognized mount option \"%s\" or missing value",
				p);
			return -EINVAL;
		}
	}
#ifdef CONFIG_QUOTA
	if (f2fs_check_quota_options(sbi))
		return -EINVAL;
#endif

	if (F2FS_IO_SIZE_BITS(sbi) && !test_opt(sbi, LFS)) {
		f2fs_msg(sb, KERN_ERR,
				"Should set mode=lfs with %uKB-sized IO",
				F2FS_IO_SIZE_KB(sbi));
		return -EINVAL;
	}

	if (test_opt(sbi, INLINE_XATTR_SIZE)) {
		if (!f2fs_sb_has_extra_attr(sb) ||
			!f2fs_sb_has_flexible_inline_xattr(sb)) {
			f2fs_msg(sb, KERN_ERR,
					"extra_attr or flexible_inline_xattr "
					"feature is off");
			return -EINVAL;
		}
		if (!test_opt(sbi, INLINE_XATTR)) {
			f2fs_msg(sb, KERN_ERR,
					"inline_xattr_size option should be "
					"set with inline_xattr option");
			return -EINVAL;
		}
		if (!F2FS_OPTION(sbi).inline_xattr_size ||
			F2FS_OPTION(sbi).inline_xattr_size >=
					DEF_ADDRS_PER_INODE -
					F2FS_TOTAL_EXTRA_ATTR_SIZE -
					DEF_INLINE_RESERVED_SIZE -
					DEF_MIN_INLINE_SIZE) {
			f2fs_msg(sb, KERN_ERR,
					"inline xattr size is out of range");
			return -EINVAL;
		}
	}

	/*
	 * Do not pass down write hints if the number of active logs is
	 * smaller than NR_CURSEG_TYPE.
	 */
	if (F2FS_OPTION(sbi).active_logs != NR_CURSEG_TYPE)
		F2FS_OPTION(sbi).whint_mode = WHINT_MODE_OFF;
	return 0;
}
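/*
 * Example (hypothetical command line):
 * "mount -t f2fs -o mode=lfs,io_bits=7 /dev/sdb /mnt" hands
 * "mode=lfs,io_bits=7" to parse_options(); io_bits=7 only survives the
 * post-parse check above because mode=lfs is also set, otherwise the
 * "Should set mode=lfs" message fires and the mount fails with -EINVAL.
 */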
static struct inode *f2fs_alloc_inode(struct super_block *sb)
{
	struct f2fs_inode_info *fi;

	fi = kmem_cache_alloc(f2fs_inode_cachep, GFP_F2FS_ZERO);
	if (!fi)
		return NULL;

	init_once((void *) fi);

	/* Initialize f2fs-specific inode info */
	atomic_set(&fi->dirty_pages, 0);
	fi->i_current_depth = 1;
	init_rwsem(&fi->i_sem);
	INIT_LIST_HEAD(&fi->dirty_list);
	INIT_LIST_HEAD(&fi->gdirty_list);
	INIT_LIST_HEAD(&fi->inmem_ilist);
	INIT_LIST_HEAD(&fi->inmem_pages);
	mutex_init(&fi->inmem_lock);
	init_rwsem(&fi->dio_rwsem[READ]);
	init_rwsem(&fi->dio_rwsem[WRITE]);
	init_rwsem(&fi->i_mmap_sem);
	init_rwsem(&fi->i_xattr_sem);

	/* Will be used by directory only */
	fi->i_dir_level = F2FS_SB(sb)->dir_level;

	return &fi->vfs_inode;
}
static int f2fs_drop_inode(struct inode *inode)
{
	int ret;

	/*
	 * This is to avoid a deadlock condition like below.
	 * writeback_single_inode(inode)
	 *  - f2fs_write_data_page
	 *    - f2fs_gc -> iput -> evict
	 *       - inode_wait_for_writeback(inode)
	 */
	if ((!inode_unhashed(inode) && inode->i_state & I_SYNC)) {
		if (!inode->i_nlink && !is_bad_inode(inode)) {
			/* to avoid calling evict_inode simultaneously */
			atomic_inc(&inode->i_count);
			spin_unlock(&inode->i_lock);

			/* any remaining atomic pages should be discarded */
			if (f2fs_is_atomic_file(inode))
				drop_inmem_pages(inode);

			/* fi->extent_tree should remain for writepage */
			f2fs_destroy_extent_node(inode);

			sb_start_intwrite(inode->i_sb);
			f2fs_i_size_write(inode, 0);

			if (F2FS_HAS_BLOCKS(inode))
				f2fs_truncate(inode);

			sb_end_intwrite(inode->i_sb);

			spin_lock(&inode->i_lock);
			atomic_dec(&inode->i_count);
		}
		trace_f2fs_drop_inode(inode, 0);
		return 0;
	}
	ret = generic_drop_inode(inode);
	trace_f2fs_drop_inode(inode, ret);
	return ret;
}
int f2fs_inode_dirtied(struct inode *inode, bool sync)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	int ret = 0;

	spin_lock(&sbi->inode_lock[DIRTY_META]);
	if (is_inode_flag_set(inode, FI_DIRTY_INODE)) {
		ret = 1;
	} else {
		set_inode_flag(inode, FI_DIRTY_INODE);
		stat_inc_dirty_inode(sbi, DIRTY_META);
	}
	if (sync && list_empty(&F2FS_I(inode)->gdirty_list)) {
		list_add_tail(&F2FS_I(inode)->gdirty_list,
				&sbi->inode_list[DIRTY_META]);
		inc_page_count(sbi, F2FS_DIRTY_IMETA);
	}
	spin_unlock(&sbi->inode_lock[DIRTY_META]);
	return ret;
}
void f2fs_inode_synced(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

	spin_lock(&sbi->inode_lock[DIRTY_META]);
	if (!is_inode_flag_set(inode, FI_DIRTY_INODE)) {
		spin_unlock(&sbi->inode_lock[DIRTY_META]);
		return;
	}
	if (!list_empty(&F2FS_I(inode)->gdirty_list)) {
		list_del_init(&F2FS_I(inode)->gdirty_list);
		dec_page_count(sbi, F2FS_DIRTY_IMETA);
	}
	clear_inode_flag(inode, FI_DIRTY_INODE);
	clear_inode_flag(inode, FI_AUTO_RECOVER);
	stat_dec_dirty_inode(F2FS_I_SB(inode), DIRTY_META);
	spin_unlock(&sbi->inode_lock[DIRTY_META]);
}
/*
 * f2fs_dirty_inode() is called from __mark_inode_dirty().
 *
 * We should call set_dirty_inode to write the dirty inode through write_inode.
 */
static void f2fs_dirty_inode(struct inode *inode, int flags)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

	if (inode->i_ino == F2FS_NODE_INO(sbi) ||
			inode->i_ino == F2FS_META_INO(sbi))
		return;

	if (flags == I_DIRTY_TIME)
		return;

	if (is_inode_flag_set(inode, FI_AUTO_RECOVER))
		clear_inode_flag(inode, FI_AUTO_RECOVER);

	f2fs_inode_dirtied(inode, false);
}
static void f2fs_i_callback(struct rcu_head *head)
{
	struct inode *inode = container_of(head, struct inode, i_rcu);

	kmem_cache_free(f2fs_inode_cachep, F2FS_I(inode));
}

static void f2fs_destroy_inode(struct inode *inode)
{
	call_rcu(&inode->i_rcu, f2fs_i_callback);
}
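/*
 * Note: the inode memory is freed via call_rcu() rather than immediately,
 * so lock-free readers still walking RCU-protected structures (such as the
 * inode hash) never dereference a freed f2fs_inode_info.
 */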
static void destroy_percpu_info(struct f2fs_sb_info *sbi)
{
	percpu_counter_destroy(&sbi->alloc_valid_block_count);
	percpu_counter_destroy(&sbi->total_valid_inode_count);
}

static void destroy_device_list(struct f2fs_sb_info *sbi)
{
	int i;

	for (i = 0; i < sbi->s_ndevs; i++) {
		blkdev_put(FDEV(i).bdev, FMODE_EXCL);
#ifdef CONFIG_BLK_DEV_ZONED
		kfree(FDEV(i).blkz_type);
#endif
	}
	kfree(sbi->devs);
}
static void f2fs_put_super(struct super_block *sb)
{
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	int i;
	bool dropped;

	f2fs_quota_off_umount(sb);

	/* prevent remaining shrinker jobs */
	mutex_lock(&sbi->umount_mutex);

	/*
	 * We don't need to do a checkpoint when the superblock is clean.
	 * But if the previous checkpoint was not done by umount, we need
	 * to do a clean checkpoint again.
	 */
	if (is_sbi_flag_set(sbi, SBI_IS_DIRTY) ||
			!is_set_ckpt_flags(sbi, CP_UMOUNT_FLAG)) {
		struct cp_control cpc = {
			.reason = CP_UMOUNT,
		};
		write_checkpoint(sbi, &cpc);
	}

	/* be sure to wait for any on-going discard commands */
	dropped = f2fs_wait_discard_bios(sbi);

	if (f2fs_discard_en(sbi) && !sbi->discard_blks && !dropped) {
		struct cp_control cpc = {
			.reason = CP_UMOUNT | CP_TRIMMED,
		};
		write_checkpoint(sbi, &cpc);
	}

	/* write_checkpoint can update stat information */
	f2fs_destroy_stats(sbi);

	/*
	 * Normally the superblock is clean, so we need to release this here.
	 * In addition, EIO will skip the checkpoint, so we need this as well.
	 */
	release_ino_entry(sbi, true);

	f2fs_leave_shrinker(sbi);
	mutex_unlock(&sbi->umount_mutex);

	/* in our cp_error case, we can wait for any writeback page */
	f2fs_flush_merged_writes(sbi);

	iput(sbi->node_inode);
	iput(sbi->meta_inode);

	/* destroy f2fs internal modules */
	destroy_node_manager(sbi);
	destroy_segment_manager(sbi);

	kfree(sbi->ckpt);

	f2fs_unregister_sysfs(sbi);

	sb->s_fs_info = NULL;
	if (sbi->s_chksum_driver)
		crypto_free_shash(sbi->s_chksum_driver);
	kfree(sbi->raw_super);

	destroy_device_list(sbi);
	mempool_destroy(sbi->write_io_dummy);
#ifdef CONFIG_QUOTA
	for (i = 0; i < MAXQUOTAS; i++)
		kfree(F2FS_OPTION(sbi).s_qf_names[i]);
#endif
	destroy_percpu_info(sbi);
	for (i = 0; i < NR_PAGE_TYPE; i++)
		kfree(sbi->write_io[i]);
	kfree(sbi);
}
int f2fs_sync_fs(struct super_block *sb, int sync)
{
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	int err = 0;

	if (unlikely(f2fs_cp_error(sbi)))
		return 0;

	trace_f2fs_sync_fs(sb, sync);

	if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
		return -EAGAIN;

	if (sync) {
		struct cp_control cpc;

		cpc.reason = __get_cp_reason(sbi);

		mutex_lock(&sbi->gc_mutex);
		err = write_checkpoint(sbi, &cpc);
		mutex_unlock(&sbi->gc_mutex);
	}
	f2fs_trace_ios(NULL, 1);

	return err;
}
static int f2fs_freeze(struct super_block *sb)
{
	if (f2fs_readonly(sb))
		return 0;

	/* IO error happened before */
	if (unlikely(f2fs_cp_error(F2FS_SB(sb))))
		return -EIO;

	/* must be clean, since sync_filesystem() was already called */
	if (is_sbi_flag_set(F2FS_SB(sb), SBI_IS_DIRTY))
		return -EINVAL;
	return 0;
}

static int f2fs_unfreeze(struct super_block *sb)
{
	return 0;
}
#ifdef CONFIG_QUOTA
static int f2fs_statfs_project(struct super_block *sb,
				kprojid_t projid, struct kstatfs *buf)
{
	struct kqid qid;
	struct dquot *dquot;
	u64 limit;
	u64 curblock;

	qid = make_kqid_projid(projid);
	dquot = dqget(sb, qid);
	if (IS_ERR(dquot))
		return PTR_ERR(dquot);
	spin_lock(&dq_data_lock);

	limit = (dquot->dq_dqb.dqb_bsoftlimit ?
		dquot->dq_dqb.dqb_bsoftlimit :
		dquot->dq_dqb.dqb_bhardlimit) >> sb->s_blocksize_bits;
	if (limit && buf->f_blocks > limit) {
		curblock = dquot->dq_dqb.dqb_curspace >> sb->s_blocksize_bits;
		buf->f_blocks = limit;
		buf->f_bfree = buf->f_bavail =
			(buf->f_blocks > curblock) ?
			(buf->f_blocks - curblock) : 0;
	}

	limit = dquot->dq_dqb.dqb_isoftlimit ?
		dquot->dq_dqb.dqb_isoftlimit :
		dquot->dq_dqb.dqb_ihardlimit;
	if (limit && buf->f_files > limit) {
		buf->f_files = limit;
		buf->f_ffree =
			(buf->f_files > dquot->dq_dqb.dqb_curinodes) ?
			(buf->f_files - dquot->dq_dqb.dqb_curinodes) : 0;
	}

	spin_unlock(&dq_data_lock);
	dqput(dquot);
	return 0;
}
#endif
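/*
 * Worked example (hypothetical numbers): with a project block soft limit
 * of 1 GiB and s_blocksize_bits = 12, limit = 1 GiB >> 12 = 262144 blocks.
 * If the project has already consumed 100,000 blocks, statfs() on a
 * directory in that project reports f_blocks = 262144 and
 * f_bfree = f_bavail = 162144, i.e. the quota caps what df shows.
 */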
static int f2fs_statfs(struct dentry *dentry, struct kstatfs *buf)
{
	struct super_block *sb = dentry->d_sb;
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	u64 id = huge_encode_dev(sb->s_bdev->bd_dev);
	block_t total_count, user_block_count, start_count;
	u64 avail_node_count;

	total_count = le64_to_cpu(sbi->raw_super->block_count);
	user_block_count = sbi->user_block_count;
	start_count = le32_to_cpu(sbi->raw_super->segment0_blkaddr);
	buf->f_type = F2FS_SUPER_MAGIC;
	buf->f_bsize = sbi->blocksize;

	buf->f_blocks = total_count - start_count;
	buf->f_bfree = user_block_count - valid_user_blocks(sbi) -
						sbi->current_reserved_blocks;
	if (buf->f_bfree > F2FS_OPTION(sbi).root_reserved_blocks)
		buf->f_bavail = buf->f_bfree -
				F2FS_OPTION(sbi).root_reserved_blocks;
	else
		buf->f_bavail = 0;

	avail_node_count = sbi->total_node_count - sbi->nquota_files -
						F2FS_RESERVED_NODE_NUM;

	if (avail_node_count > user_block_count) {
		buf->f_files = user_block_count;
		buf->f_ffree = buf->f_bavail;
	} else {
		buf->f_files = avail_node_count;
		buf->f_ffree = min(avail_node_count - valid_node_count(sbi),
					buf->f_bavail);
	}

	buf->f_namelen = F2FS_NAME_LEN;
	buf->f_fsid.val[0] = (u32)id;
	buf->f_fsid.val[1] = (u32)(id >> 32);

#ifdef CONFIG_QUOTA
	if (is_inode_flag_set(dentry->d_inode, FI_PROJ_INHERIT) &&
			sb_has_quota_limits_enabled(sb, PRJQUOTA)) {
		f2fs_statfs_project(sb, F2FS_I(dentry->d_inode)->i_projid, buf);
	}
#endif
	return 0;
}
static inline void f2fs_show_quota_options(struct seq_file *seq,
							struct super_block *sb)
{
#ifdef CONFIG_QUOTA
	struct f2fs_sb_info *sbi = F2FS_SB(sb);

	if (F2FS_OPTION(sbi).s_jquota_fmt) {
		char *fmtname = "";

		switch (F2FS_OPTION(sbi).s_jquota_fmt) {
		case QFMT_VFS_OLD:
			fmtname = "vfsold";
			break;
		case QFMT_VFS_V0:
			fmtname = "vfsv0";
			break;
		case QFMT_VFS_V1:
			fmtname = "vfsv1";
			break;
		}
		seq_printf(seq, ",jqfmt=%s", fmtname);
	}

	if (F2FS_OPTION(sbi).s_qf_names[USRQUOTA])
		seq_show_option(seq, "usrjquota",
			F2FS_OPTION(sbi).s_qf_names[USRQUOTA]);

	if (F2FS_OPTION(sbi).s_qf_names[GRPQUOTA])
		seq_show_option(seq, "grpjquota",
			F2FS_OPTION(sbi).s_qf_names[GRPQUOTA]);

	if (F2FS_OPTION(sbi).s_qf_names[PRJQUOTA])
		seq_show_option(seq, "prjjquota",
			F2FS_OPTION(sbi).s_qf_names[PRJQUOTA]);
#endif
}
static int f2fs_show_options(struct seq_file *seq, struct dentry *root)
{
	struct f2fs_sb_info *sbi = F2FS_SB(root->d_sb);

	if (!f2fs_readonly(sbi->sb) && test_opt(sbi, BG_GC)) {
		if (test_opt(sbi, FORCE_FG_GC))
			seq_printf(seq, ",background_gc=%s", "sync");
		else
			seq_printf(seq, ",background_gc=%s", "on");
	} else {
		seq_printf(seq, ",background_gc=%s", "off");
	}
	if (test_opt(sbi, DISABLE_ROLL_FORWARD))
		seq_puts(seq, ",disable_roll_forward");
	if (test_opt(sbi, DISCARD))
		seq_puts(seq, ",discard");
	if (test_opt(sbi, NOHEAP))
		seq_puts(seq, ",no_heap");
	else
		seq_puts(seq, ",heap");
#ifdef CONFIG_F2FS_FS_XATTR
	if (test_opt(sbi, XATTR_USER))
		seq_puts(seq, ",user_xattr");
	else
		seq_puts(seq, ",nouser_xattr");
	if (test_opt(sbi, INLINE_XATTR))
		seq_puts(seq, ",inline_xattr");
	else
		seq_puts(seq, ",noinline_xattr");
	if (test_opt(sbi, INLINE_XATTR_SIZE))
		seq_printf(seq, ",inline_xattr_size=%u",
					F2FS_OPTION(sbi).inline_xattr_size);
#endif
#ifdef CONFIG_F2FS_FS_POSIX_ACL
	if (test_opt(sbi, POSIX_ACL))
		seq_puts(seq, ",acl");
	else
		seq_puts(seq, ",noacl");
#endif
	if (test_opt(sbi, DISABLE_EXT_IDENTIFY))
		seq_puts(seq, ",disable_ext_identify");
	if (test_opt(sbi, INLINE_DATA))
		seq_puts(seq, ",inline_data");
	else
		seq_puts(seq, ",noinline_data");
	if (test_opt(sbi, INLINE_DENTRY))
		seq_puts(seq, ",inline_dentry");
	else
		seq_puts(seq, ",noinline_dentry");
	if (!f2fs_readonly(sbi->sb) && test_opt(sbi, FLUSH_MERGE))
		seq_puts(seq, ",flush_merge");
	if (test_opt(sbi, NOBARRIER))
		seq_puts(seq, ",nobarrier");
	if (test_opt(sbi, FASTBOOT))
		seq_puts(seq, ",fastboot");
	if (test_opt(sbi, EXTENT_CACHE))
		seq_puts(seq, ",extent_cache");
	else
		seq_puts(seq, ",noextent_cache");
	if (test_opt(sbi, DATA_FLUSH))
		seq_puts(seq, ",data_flush");

	seq_puts(seq, ",mode=");
	if (test_opt(sbi, ADAPTIVE))
		seq_puts(seq, "adaptive");
	else if (test_opt(sbi, LFS))
		seq_puts(seq, "lfs");
	seq_printf(seq, ",active_logs=%u", F2FS_OPTION(sbi).active_logs);
	if (test_opt(sbi, RESERVE_ROOT))
		seq_printf(seq, ",reserve_root=%u,resuid=%u,resgid=%u",
				F2FS_OPTION(sbi).root_reserved_blocks,
				from_kuid_munged(&init_user_ns,
					F2FS_OPTION(sbi).s_resuid),
				from_kgid_munged(&init_user_ns,
					F2FS_OPTION(sbi).s_resgid));
	if (F2FS_IO_SIZE_BITS(sbi))
		seq_printf(seq, ",io_size=%uKB", F2FS_IO_SIZE_KB(sbi));
#ifdef CONFIG_F2FS_FAULT_INJECTION
	if (test_opt(sbi, FAULT_INJECTION))
		seq_printf(seq, ",fault_injection=%u",
				F2FS_OPTION(sbi).fault_info.inject_rate);
#endif
#ifdef CONFIG_QUOTA
	if (test_opt(sbi, QUOTA))
		seq_puts(seq, ",quota");
	if (test_opt(sbi, USRQUOTA))
		seq_puts(seq, ",usrquota");
	if (test_opt(sbi, GRPQUOTA))
		seq_puts(seq, ",grpquota");
	if (test_opt(sbi, PRJQUOTA))
		seq_puts(seq, ",prjquota");
#endif
	f2fs_show_quota_options(seq, sbi->sb);
	if (F2FS_OPTION(sbi).whint_mode == WHINT_MODE_USER)
		seq_printf(seq, ",whint_mode=%s", "user-based");
	else if (F2FS_OPTION(sbi).whint_mode == WHINT_MODE_FS)
		seq_printf(seq, ",whint_mode=%s", "fs-based");
#ifdef CONFIG_F2FS_FS_ENCRYPTION
	if (F2FS_OPTION(sbi).test_dummy_encryption)
		seq_puts(seq, ",test_dummy_encryption");
#endif

	if (F2FS_OPTION(sbi).alloc_mode == ALLOC_MODE_DEFAULT)
		seq_printf(seq, ",alloc_mode=%s", "default");
	else if (F2FS_OPTION(sbi).alloc_mode == ALLOC_MODE_REUSE)
		seq_printf(seq, ",alloc_mode=%s", "reuse");

	if (F2FS_OPTION(sbi).fsync_mode == FSYNC_MODE_POSIX)
		seq_printf(seq, ",fsync_mode=%s", "posix");
	else if (F2FS_OPTION(sbi).fsync_mode == FSYNC_MODE_STRICT)
		seq_printf(seq, ",fsync_mode=%s", "strict");
	return 0;
}
static void default_options(struct f2fs_sb_info *sbi)
{
	/* init some FS parameters */
	F2FS_OPTION(sbi).active_logs = NR_CURSEG_TYPE;
	F2FS_OPTION(sbi).inline_xattr_size = DEFAULT_INLINE_XATTR_ADDRS;
	F2FS_OPTION(sbi).whint_mode = WHINT_MODE_OFF;
	F2FS_OPTION(sbi).alloc_mode = ALLOC_MODE_DEFAULT;
	F2FS_OPTION(sbi).fsync_mode = FSYNC_MODE_POSIX;
	F2FS_OPTION(sbi).test_dummy_encryption = false;
	sbi->readdir_ra = 1;

	set_opt(sbi, BG_GC);
	set_opt(sbi, INLINE_XATTR);
	set_opt(sbi, INLINE_DATA);
	set_opt(sbi, INLINE_DENTRY);
	set_opt(sbi, EXTENT_CACHE);
	set_opt(sbi, NOHEAP);
	sbi->sb->s_flags |= SB_LAZYTIME;
	set_opt(sbi, FLUSH_MERGE);
	if (f2fs_sb_has_blkzoned(sbi->sb)) {
		set_opt_mode(sbi, F2FS_MOUNT_LFS);
		set_opt(sbi, DISCARD);
	} else {
		set_opt_mode(sbi, F2FS_MOUNT_ADAPTIVE);
	}

#ifdef CONFIG_F2FS_FS_XATTR
	set_opt(sbi, XATTR_USER);
#endif
#ifdef CONFIG_F2FS_FS_POSIX_ACL
	set_opt(sbi, POSIX_ACL);
#endif

#ifdef CONFIG_F2FS_FAULT_INJECTION
	f2fs_build_fault_attr(sbi, 0);
#endif
}
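/*
 * In other words, a plain "mount -t f2fs /dev/sdX /mnt" behaves roughly
 * like mounting with background_gc=on,inline_xattr,inline_data,
 * inline_dentry,extent_cache,no_heap,lazytime,flush_merge and, on zoned
 * devices, mode=lfs,discard (mode=adaptive otherwise); fault injection
 * starts out disabled (rate 0).
 */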
#ifdef CONFIG_QUOTA
static int f2fs_enable_quotas(struct super_block *sb);
#endif

static int f2fs_remount(struct super_block *sb, int *flags, char *data)
{
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	struct f2fs_mount_info org_mount_opt;
	unsigned long old_sb_flags;
	int err;
	bool need_restart_gc = false;
	bool need_stop_gc = false;
	bool no_extent_cache = !test_opt(sbi, EXTENT_CACHE);
#ifdef CONFIG_QUOTA
	int i, j;
#endif

	/*
	 * Save the old mount options in case we
	 * need to restore them.
	 */
	org_mount_opt = sbi->mount_opt;
	old_sb_flags = sb->s_flags;

#ifdef CONFIG_QUOTA
	org_mount_opt.s_jquota_fmt = F2FS_OPTION(sbi).s_jquota_fmt;
	for (i = 0; i < MAXQUOTAS; i++) {
		if (F2FS_OPTION(sbi).s_qf_names[i]) {
			org_mount_opt.s_qf_names[i] =
				kstrdup(F2FS_OPTION(sbi).s_qf_names[i],
				GFP_KERNEL);
			if (!org_mount_opt.s_qf_names[i]) {
				for (j = 0; j < i; j++)
					kfree(org_mount_opt.s_qf_names[j]);
				return -ENOMEM;
			}
		} else {
			org_mount_opt.s_qf_names[i] = NULL;
		}
	}
#endif

	/* recover superblocks we couldn't write due to previous RO mount */
	if (!(*flags & SB_RDONLY) && is_sbi_flag_set(sbi, SBI_NEED_SB_WRITE)) {
		err = f2fs_commit_super(sbi, false);
		f2fs_msg(sb, KERN_INFO,
			"Try to recover all the superblocks, ret: %d", err);
		if (!err)
			clear_sbi_flag(sbi, SBI_NEED_SB_WRITE);
	}

	default_options(sbi);

	/* parse mount options */
	err = parse_options(sb, data);
	if (err)
		goto restore_opts;

	/*
	 * Previous and new state of filesystem is RO,
	 * so skip checking GC and FLUSH_MERGE conditions.
	 */
	if (f2fs_readonly(sb) && (*flags & SB_RDONLY))
		goto skip;

#ifdef CONFIG_QUOTA
	if (!f2fs_readonly(sb) && (*flags & SB_RDONLY)) {
		err = dquot_suspend(sb, -1);
		if (err < 0)
			goto restore_opts;
	} else if (f2fs_readonly(sb) && !(*flags & SB_RDONLY)) {
		/* dquot_resume needs RW */
		sb->s_flags &= ~SB_RDONLY;
		if (sb_any_quota_suspended(sb)) {
			dquot_resume(sb, -1);
		} else if (f2fs_sb_has_quota_ino(sb)) {
			err = f2fs_enable_quotas(sb);
			if (err)
				goto restore_opts;
		}
	}
#endif
	/* disallow enable/disable extent_cache dynamically */
	if (no_extent_cache == !!test_opt(sbi, EXTENT_CACHE)) {
		err = -EINVAL;
		f2fs_msg(sbi->sb, KERN_WARNING,
				"switch extent_cache option is not allowed");
		goto restore_opts;
	}

	/*
	 * We stop the GC thread if the FS is mounted as RO
	 * or if background_gc=off is passed as a mount
	 * option. Also sync the filesystem.
	 */
	if ((*flags & SB_RDONLY) || !test_opt(sbi, BG_GC)) {
		if (sbi->gc_thread) {
			stop_gc_thread(sbi);
			need_restart_gc = true;
		}
	} else if (!sbi->gc_thread) {
		err = start_gc_thread(sbi);
		if (err)
			goto restore_opts;
		need_stop_gc = true;
	}

	if (*flags & SB_RDONLY ||
		F2FS_OPTION(sbi).whint_mode != org_mount_opt.whint_mode) {
		writeback_inodes_sb(sb, WB_REASON_SYNC);
		sync_inodes_sb(sb);

		set_sbi_flag(sbi, SBI_IS_DIRTY);
		set_sbi_flag(sbi, SBI_IS_CLOSE);
		f2fs_sync_fs(sb, 1);
		clear_sbi_flag(sbi, SBI_IS_CLOSE);
	}

	/*
	 * We stop the issue_flush thread if the FS is mounted as RO
	 * or if flush_merge is not passed as a mount option.
	 */
	if ((*flags & SB_RDONLY) || !test_opt(sbi, FLUSH_MERGE)) {
		clear_opt(sbi, FLUSH_MERGE);
		destroy_flush_cmd_control(sbi, false);
	} else {
		err = create_flush_cmd_control(sbi);
		if (err)
			goto restore_gc;
	}
skip:
#ifdef CONFIG_QUOTA
	/* Release old quota file names */
	for (i = 0; i < MAXQUOTAS; i++)
		kfree(org_mount_opt.s_qf_names[i]);
#endif
	/* Update the POSIXACL Flag */
	sb->s_flags = (sb->s_flags & ~SB_POSIXACL) |
		(test_opt(sbi, POSIX_ACL) ? SB_POSIXACL : 0);

	limit_reserve_root(sbi);
	return 0;
restore_gc:
	if (need_restart_gc) {
		if (start_gc_thread(sbi))
			f2fs_msg(sbi->sb, KERN_WARNING,
				"background gc thread has stopped");
	} else if (need_stop_gc) {
		stop_gc_thread(sbi);
	}
restore_opts:
#ifdef CONFIG_QUOTA
	F2FS_OPTION(sbi).s_jquota_fmt = org_mount_opt.s_jquota_fmt;
	for (i = 0; i < MAXQUOTAS; i++) {
		kfree(F2FS_OPTION(sbi).s_qf_names[i]);
		F2FS_OPTION(sbi).s_qf_names[i] = org_mount_opt.s_qf_names[i];
	}
#endif
	sbi->mount_opt = org_mount_opt;
	sb->s_flags = old_sb_flags;
	return err;
}
#ifdef CONFIG_QUOTA
/* Read data from quotafile */
static ssize_t f2fs_quota_read(struct super_block *sb, int type, char *data,
			       size_t len, loff_t off)
{
	struct inode *inode = sb_dqopt(sb)->files[type];
	struct address_space *mapping = inode->i_mapping;
	block_t blkidx = F2FS_BYTES_TO_BLK(off);
	int offset = off & (sb->s_blocksize - 1);
	int tocopy;
	size_t toread;
	loff_t i_size = i_size_read(inode);
	struct page *page;
	char *kaddr;

	if (off > i_size)
		return 0;

	if (off + len > i_size)
		len = i_size - off;
	toread = len;
	while (toread > 0) {
		tocopy = min_t(unsigned long, sb->s_blocksize - offset, toread);
repeat:
		page = read_cache_page_gfp(mapping, blkidx, GFP_NOFS);
		if (IS_ERR(page)) {
			if (PTR_ERR(page) == -ENOMEM) {
				congestion_wait(BLK_RW_ASYNC, HZ/50);
				goto repeat;
			}
			return PTR_ERR(page);
		}

		lock_page(page);

		if (unlikely(page->mapping != mapping)) {
			f2fs_put_page(page, 1);
			goto repeat;
		}
		if (unlikely(!PageUptodate(page))) {
			f2fs_put_page(page, 1);
			return -EIO;
		}

		kaddr = kmap_atomic(page);
		memcpy(data, kaddr + offset, tocopy);
		kunmap_atomic(kaddr);
		f2fs_put_page(page, 1);

		offset = 0;
		toread -= tocopy;
		data += tocopy;
		blkidx++;
	}
	return len;
}
/* Write to quotafile */
static ssize_t f2fs_quota_write(struct super_block *sb, int type,
				const char *data, size_t len, loff_t off)
{
	struct inode *inode = sb_dqopt(sb)->files[type];
	struct address_space *mapping = inode->i_mapping;
	const struct address_space_operations *a_ops = mapping->a_ops;
	int offset = off & (sb->s_blocksize - 1);
	size_t towrite = len;
	struct page *page;
	char *kaddr;
	int err = 0;
	int tocopy;

	while (towrite > 0) {
		tocopy = min_t(unsigned long, sb->s_blocksize - offset,
								towrite);
retry:
		err = a_ops->write_begin(NULL, mapping, off, tocopy, 0,
							&page, NULL);
		if (unlikely(err)) {
			if (err == -ENOMEM) {
				congestion_wait(BLK_RW_ASYNC, HZ/50);
				goto retry;
			}
			break;
		}

		kaddr = kmap_atomic(page);
		memcpy(kaddr + offset, data, tocopy);
		kunmap_atomic(kaddr);
		flush_dcache_page(page);

		a_ops->write_end(NULL, mapping, off, tocopy, tocopy,
						page, NULL);
		offset = 0;
		towrite -= tocopy;
		off += tocopy;
		data += tocopy;
		cond_resched();
	}

	if (len == towrite)
		return err;
	inode->i_mtime = inode->i_ctime = current_time(inode);
	f2fs_mark_inode_dirty_sync(inode, false);
	return len - towrite;
}
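/*
 * Note the return convention above: if nothing was written
 * (len == towrite) the error code is propagated; otherwise the number of
 * bytes actually written is returned and the quota inode's timestamps are
 * updated, which is what the generic quota code expects from ->quota_write.
 */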
static struct dquot **f2fs_get_dquots(struct inode *inode)
{
	return F2FS_I(inode)->i_dquot;
}

static qsize_t *f2fs_get_reserved_space(struct inode *inode)
{
	return &F2FS_I(inode)->i_reserved_quota;
}

static int f2fs_quota_on_mount(struct f2fs_sb_info *sbi, int type)
{
	return dquot_quota_on_mount(sbi->sb, F2FS_OPTION(sbi).s_qf_names[type],
					F2FS_OPTION(sbi).s_jquota_fmt, type);
}

int f2fs_enable_quota_files(struct f2fs_sb_info *sbi, bool rdonly)
{
	int enabled = 0;
	int i, err;

	if (f2fs_sb_has_quota_ino(sbi->sb) && rdonly) {
		err = f2fs_enable_quotas(sbi->sb);
		if (err) {
			f2fs_msg(sbi->sb, KERN_ERR,
					"Cannot turn on quota_ino: %d", err);
			return 0;
		}
		return 1;
	}

	for (i = 0; i < MAXQUOTAS; i++) {
		if (F2FS_OPTION(sbi).s_qf_names[i]) {
			err = f2fs_quota_on_mount(sbi, i);
			if (!err) {
				enabled = 1;
				continue;
			}
			f2fs_msg(sbi->sb, KERN_ERR,
				"Cannot turn on quotas: %d on %d", err, i);
		}
	}
	return enabled;
}
static int f2fs_quota_enable(struct super_block *sb, int type, int format_id,
							unsigned int flags)
{
	struct inode *qf_inode;
	unsigned long qf_inum;
	int err;

	BUG_ON(!f2fs_sb_has_quota_ino(sb));

	qf_inum = f2fs_qf_ino(sb, type);
	if (!qf_inum)
		return -EPERM;

	qf_inode = f2fs_iget(sb, qf_inum);
	if (IS_ERR(qf_inode)) {
		f2fs_msg(sb, KERN_ERR,
			"Bad quota inode %u:%lu", type, qf_inum);
		return PTR_ERR(qf_inode);
	}

	/* Don't account quota for quota files to avoid recursion */
	qf_inode->i_flags |= S_NOQUOTA;
	err = dquot_enable(qf_inode, type, format_id, flags);
	iput(qf_inode);
	return err;
}
static int f2fs_enable_quotas(struct super_block *sb)
{
	int type, err = 0;
	unsigned long qf_inum;
	bool quota_mopt[MAXQUOTAS] = {
		test_opt(F2FS_SB(sb), USRQUOTA),
		test_opt(F2FS_SB(sb), GRPQUOTA),
		test_opt(F2FS_SB(sb), PRJQUOTA),
	};

	sb_dqopt(sb)->flags |= DQUOT_QUOTA_SYS_FILE | DQUOT_NOLIST_DIRTY;
	for (type = 0; type < MAXQUOTAS; type++) {
		qf_inum = f2fs_qf_ino(sb, type);
		if (qf_inum) {
			err = f2fs_quota_enable(sb, type, QFMT_VFS_V1,
				DQUOT_USAGE_ENABLED |
				(quota_mopt[type] ? DQUOT_LIMITS_ENABLED : 0));
			if (err) {
				f2fs_msg(sb, KERN_ERR,
					"Failed to enable quota tracking "
					"(type=%d, err=%d). Please run "
					"fsck to fix.", type, err);
				for (type--; type >= 0; type--)
					dquot_quota_off(sb, type);
				return err;
			}
		}
	}
	return 0;
}
static int f2fs_quota_sync(struct super_block *sb, int type)
{
	struct quota_info *dqopt = sb_dqopt(sb);
	int cnt;
	int ret;

	ret = dquot_writeback_dquots(sb, type);
	if (ret)
		return ret;

	/*
	 * Now when everything is written we can discard the pagecache so
	 * that userspace sees the changes.
	 */
	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		if (type != -1 && cnt != type)
			continue;
		if (!sb_has_quota_active(sb, cnt))
			continue;

		ret = filemap_write_and_wait(dqopt->files[cnt]->i_mapping);
		if (ret)
			return ret;

		inode_lock(dqopt->files[cnt]);
		truncate_inode_pages(&dqopt->files[cnt]->i_data, 0);
		inode_unlock(dqopt->files[cnt]);
	}
	return 0;
}
static int f2fs_quota_on(struct super_block *sb, int type, int format_id,
							const struct path *path)
{
	struct inode *inode;
	int err;

	err = f2fs_quota_sync(sb, type);
	if (err)
		return err;

	err = dquot_quota_on(sb, type, format_id, path);
	if (err)
		return err;

	inode = d_inode(path->dentry);

	inode_lock(inode);
	F2FS_I(inode)->i_flags |= FS_NOATIME_FL | FS_IMMUTABLE_FL;
	inode_set_flags(inode, S_NOATIME | S_IMMUTABLE,
					S_NOATIME | S_IMMUTABLE);
	inode_unlock(inode);
	f2fs_mark_inode_dirty_sync(inode, false);

	return 0;
}
static int f2fs_quota_off(struct super_block *sb, int type)
{
	struct inode *inode = sb_dqopt(sb)->files[type];
	int err;

	if (!inode || !igrab(inode))
		return dquot_quota_off(sb, type);

	f2fs_quota_sync(sb, type);

	err = dquot_quota_off(sb, type);
	if (err || f2fs_sb_has_quota_ino(sb))
		goto out_put;

	inode_lock(inode);
	F2FS_I(inode)->i_flags &= ~(FS_NOATIME_FL | FS_IMMUTABLE_FL);
	inode_set_flags(inode, 0, S_NOATIME | S_IMMUTABLE);
	inode_unlock(inode);
	f2fs_mark_inode_dirty_sync(inode, false);
out_put:
	iput(inode);
	return err;
}
void f2fs_quota_off_umount(struct super_block *sb)
{
	int type;

	for (type = 0; type < MAXQUOTAS; type++)
		f2fs_quota_off(sb, type);
}
static int f2fs_get_projid(struct inode *inode, kprojid_t *projid)
{
	*projid = F2FS_I(inode)->i_projid;
	return 0;
}

static const struct dquot_operations f2fs_quota_operations = {
	.get_reserved_space = f2fs_get_reserved_space,
	.write_dquot	= dquot_commit,
	.acquire_dquot	= dquot_acquire,
	.release_dquot	= dquot_release,
	.mark_dirty	= dquot_mark_dquot_dirty,
	.write_info	= dquot_commit_info,
	.alloc_dquot	= dquot_alloc,
	.destroy_dquot	= dquot_destroy,
	.get_projid	= f2fs_get_projid,
	.get_next_id	= dquot_get_next_id,
};

static const struct quotactl_ops f2fs_quotactl_ops = {
	.quota_on	= f2fs_quota_on,
	.quota_off	= f2fs_quota_off,
	.quota_sync	= f2fs_quota_sync,
	.get_state	= dquot_get_state,
	.set_info	= dquot_set_dqinfo,
	.get_dqblk	= dquot_get_dqblk,
	.set_dqblk	= dquot_set_dqblk,
	.get_nextdqblk	= dquot_get_next_dqblk,
};
#else
void f2fs_quota_off_umount(struct super_block *sb)
{
}
#endif
static const struct super_operations f2fs_sops = {
	.alloc_inode	= f2fs_alloc_inode,
	.drop_inode	= f2fs_drop_inode,
	.destroy_inode	= f2fs_destroy_inode,
	.write_inode	= f2fs_write_inode,
	.dirty_inode	= f2fs_dirty_inode,
	.show_options	= f2fs_show_options,
#ifdef CONFIG_QUOTA
	.quota_read	= f2fs_quota_read,
	.quota_write	= f2fs_quota_write,
	.get_dquots	= f2fs_get_dquots,
#endif
	.evict_inode	= f2fs_evict_inode,
	.put_super	= f2fs_put_super,
	.sync_fs	= f2fs_sync_fs,
	.freeze_fs	= f2fs_freeze,
	.unfreeze_fs	= f2fs_unfreeze,
	.statfs		= f2fs_statfs,
	.remount_fs	= f2fs_remount,
};
#ifdef CONFIG_F2FS_FS_ENCRYPTION
static int f2fs_get_context(struct inode *inode, void *ctx, size_t len)
{
	return f2fs_getxattr(inode, F2FS_XATTR_INDEX_ENCRYPTION,
				F2FS_XATTR_NAME_ENCRYPTION_CONTEXT,
				ctx, len, NULL);
}

static int f2fs_set_context(struct inode *inode, const void *ctx, size_t len,
							void *fs_data)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

	/*
	 * Encrypting the root directory is not allowed because fsck
	 * expects lost+found directory to exist and remain unencrypted
	 * if LOST_FOUND feature is enabled.
	 */
	if (f2fs_sb_has_lost_found(sbi->sb) &&
			inode->i_ino == F2FS_ROOT_INO(sbi))
		return -EPERM;

	return f2fs_setxattr(inode, F2FS_XATTR_INDEX_ENCRYPTION,
				F2FS_XATTR_NAME_ENCRYPTION_CONTEXT,
				ctx, len, fs_data, XATTR_CREATE);
}

static bool f2fs_dummy_context(struct inode *inode)
{
	return DUMMY_ENCRYPTION_ENABLED(F2FS_I_SB(inode));
}

static const struct fscrypt_operations f2fs_cryptops = {
	.key_prefix	= "f2fs:",
	.get_context	= f2fs_get_context,
	.set_context	= f2fs_set_context,
	.dummy_context	= f2fs_dummy_context,
	.empty_dir	= f2fs_empty_dir,
	.max_namelen	= F2FS_NAME_LEN,
};
#endif
static struct inode *f2fs_nfs_get_inode(struct super_block *sb,
		u64 ino, u32 generation)
{
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	struct inode *inode;

	if (check_nid_range(sbi, ino))
		return ERR_PTR(-ESTALE);

	/*
	 * f2fs_iget isn't quite right if the inode is currently unallocated!
	 * However f2fs_iget currently does appropriate checks to handle stale
	 * inodes so everything is OK.
	 */
	inode = f2fs_iget(sb, ino);
	if (IS_ERR(inode))
		return ERR_CAST(inode);
	if (unlikely(generation && inode->i_generation != generation)) {
		/* we didn't find the right inode.. */
		iput(inode);
		return ERR_PTR(-ESTALE);
	}
	return inode;
}

static struct dentry *f2fs_fh_to_dentry(struct super_block *sb, struct fid *fid,
		int fh_len, int fh_type)
{
	return generic_fh_to_dentry(sb, fid, fh_len, fh_type,
						f2fs_nfs_get_inode);
}

static struct dentry *f2fs_fh_to_parent(struct super_block *sb, struct fid *fid,
		int fh_len, int fh_type)
{
	return generic_fh_to_parent(sb, fid, fh_len, fh_type,
						f2fs_nfs_get_inode);
}

static const struct export_operations f2fs_export_ops = {
	.fh_to_dentry = f2fs_fh_to_dentry,
	.fh_to_parent = f2fs_fh_to_parent,
	.get_parent = f2fs_get_parent,
};
static loff_t max_file_blocks(void)
{
	loff_t result = 0;
	loff_t leaf_count = ADDRS_PER_BLOCK;

	/*
	 * note: previously, result was equal to (DEF_ADDRS_PER_INODE -
	 * DEFAULT_INLINE_XATTR_ADDRS), but now f2fs tries to reserve more
	 * space in inode.i_addr, so it is safer to reassign
	 * result as zero.
	 */

	/* two direct node blocks */
	result += (leaf_count * 2);

	/* two indirect node blocks */
	leaf_count *= NIDS_PER_BLOCK;
	result += (leaf_count * 2);

	/* one double indirect node block */
	leaf_count *= NIDS_PER_BLOCK;
	result += leaf_count;

	return result;
}
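/*
 * Worked example (assuming the usual 4KB-block geometry where
 * ADDRS_PER_BLOCK = 1018 and NIDS_PER_BLOCK = 1018):
 * result = 2*1018 + 2*1018^2 + 1018^3 ≈ 1.057e9 blocks, so a single file
 * can address roughly 3.94 TiB of data.
 */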
static int __f2fs_commit_super(struct buffer_head *bh,
			struct f2fs_super_block *super)
{
	lock_buffer(bh);
	if (super)
		memcpy(bh->b_data + F2FS_SUPER_OFFSET, super, sizeof(*super));
	set_buffer_dirty(bh);
	unlock_buffer(bh);

	/* this is a rare case, so we can issue FUA all the time */
	return __sync_dirty_buffer(bh, REQ_SYNC | REQ_PREFLUSH | REQ_FUA);
}
static inline bool sanity_check_area_boundary(struct f2fs_sb_info *sbi,
					struct buffer_head *bh)
{
	struct f2fs_super_block *raw_super = (struct f2fs_super_block *)
					(bh->b_data + F2FS_SUPER_OFFSET);
	struct super_block *sb = sbi->sb;
	u32 segment0_blkaddr = le32_to_cpu(raw_super->segment0_blkaddr);
	u32 cp_blkaddr = le32_to_cpu(raw_super->cp_blkaddr);
	u32 sit_blkaddr = le32_to_cpu(raw_super->sit_blkaddr);
	u32 nat_blkaddr = le32_to_cpu(raw_super->nat_blkaddr);
	u32 ssa_blkaddr = le32_to_cpu(raw_super->ssa_blkaddr);
	u32 main_blkaddr = le32_to_cpu(raw_super->main_blkaddr);
	u32 segment_count_ckpt = le32_to_cpu(raw_super->segment_count_ckpt);
	u32 segment_count_sit = le32_to_cpu(raw_super->segment_count_sit);
	u32 segment_count_nat = le32_to_cpu(raw_super->segment_count_nat);
	u32 segment_count_ssa = le32_to_cpu(raw_super->segment_count_ssa);
	u32 segment_count_main = le32_to_cpu(raw_super->segment_count_main);
	u32 segment_count = le32_to_cpu(raw_super->segment_count);
	u32 log_blocks_per_seg = le32_to_cpu(raw_super->log_blocks_per_seg);
	u64 main_end_blkaddr = main_blkaddr +
				(segment_count_main << log_blocks_per_seg);
	u64 seg_end_blkaddr = segment0_blkaddr +
				(segment_count << log_blocks_per_seg);

	if (segment0_blkaddr != cp_blkaddr) {
		f2fs_msg(sb, KERN_INFO,
			"Mismatch start address, segment0(%u) cp_blkaddr(%u)",
			segment0_blkaddr, cp_blkaddr);
		return true;
	}

	if (cp_blkaddr + (segment_count_ckpt << log_blocks_per_seg) !=
							sit_blkaddr) {
		f2fs_msg(sb, KERN_INFO,
			"Wrong CP boundary, start(%u) end(%u) blocks(%u)",
			cp_blkaddr, sit_blkaddr,
			segment_count_ckpt << log_blocks_per_seg);
		return true;
	}

	if (sit_blkaddr + (segment_count_sit << log_blocks_per_seg) !=
							nat_blkaddr) {
		f2fs_msg(sb, KERN_INFO,
			"Wrong SIT boundary, start(%u) end(%u) blocks(%u)",
			sit_blkaddr, nat_blkaddr,
			segment_count_sit << log_blocks_per_seg);
		return true;
	}

	if (nat_blkaddr + (segment_count_nat << log_blocks_per_seg) !=
							ssa_blkaddr) {
		f2fs_msg(sb, KERN_INFO,
			"Wrong NAT boundary, start(%u) end(%u) blocks(%u)",
			nat_blkaddr, ssa_blkaddr,
			segment_count_nat << log_blocks_per_seg);
		return true;
	}

	if (ssa_blkaddr + (segment_count_ssa << log_blocks_per_seg) !=
							main_blkaddr) {
		f2fs_msg(sb, KERN_INFO,
			"Wrong SSA boundary, start(%u) end(%u) blocks(%u)",
			ssa_blkaddr, main_blkaddr,
			segment_count_ssa << log_blocks_per_seg);
		return true;
	}

	if (main_end_blkaddr > seg_end_blkaddr) {
		f2fs_msg(sb, KERN_INFO,
			"Wrong MAIN_AREA boundary, start(%u) end(%u) block(%u)",
			main_blkaddr,
			segment0_blkaddr +
				(segment_count << log_blocks_per_seg),
			segment_count_main << log_blocks_per_seg);
		return true;
	} else if (main_end_blkaddr < seg_end_blkaddr) {
		int err = 0;
		char *res;

		/* fix in-memory information all the time */
		raw_super->segment_count = cpu_to_le32((main_end_blkaddr -
				segment0_blkaddr) >> log_blocks_per_seg);

		if (f2fs_readonly(sb) || bdev_read_only(sb->s_bdev)) {
			set_sbi_flag(sbi, SBI_NEED_SB_WRITE);
			res = "internally";
		} else {
			err = __f2fs_commit_super(bh, NULL);
			res = err ? "failed" : "done";
		}
		f2fs_msg(sb, KERN_INFO,
			"Fix alignment : %s, start(%u) end(%u) block(%u)",
			res, main_blkaddr,
			segment0_blkaddr +
				(segment_count << log_blocks_per_seg),
			segment_count_main << log_blocks_per_seg);
		if (err)
			return true;
	}
	return false;
}
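/*
 * The checks above simply walk the on-disk layout, verifying that each
 * metadata area starts exactly where the previous one ends:
 *
 *   segment0/CP -> SIT -> NAT -> SSA -> MAIN
 *
 * and that MAIN does not run past the last segment of the volume.
 */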
static int sanity_check_raw_super(struct f2fs_sb_info *sbi,
				struct buffer_head *bh)
{
	struct f2fs_super_block *raw_super = (struct f2fs_super_block *)
					(bh->b_data + F2FS_SUPER_OFFSET);
	struct super_block *sb = sbi->sb;
	unsigned int blocksize;

	if (F2FS_SUPER_MAGIC != le32_to_cpu(raw_super->magic)) {
		f2fs_msg(sb, KERN_INFO,
			"Magic Mismatch, valid(0x%x) - read(0x%x)",
			F2FS_SUPER_MAGIC, le32_to_cpu(raw_super->magic));
		return 1;
	}

	/* Currently, support only 4KB page cache size */
	if (F2FS_BLKSIZE != PAGE_SIZE) {
		f2fs_msg(sb, KERN_INFO,
			"Invalid page_cache_size (%lu), supports only 4KB",
			PAGE_SIZE);
		return 1;
	}

	/* Currently, support only 4KB block size */
	blocksize = 1 << le32_to_cpu(raw_super->log_blocksize);
	if (blocksize != F2FS_BLKSIZE) {
		f2fs_msg(sb, KERN_INFO,
			"Invalid blocksize (%u), supports only 4KB",
			blocksize);
		return 1;
	}

	/* check log blocks per segment */
	if (le32_to_cpu(raw_super->log_blocks_per_seg) != 9) {
		f2fs_msg(sb, KERN_INFO,
			"Invalid log blocks per segment (%u)",
			le32_to_cpu(raw_super->log_blocks_per_seg));
		return 1;
	}

	/* Currently, support 512/1024/2048/4096 bytes sector size */
	if (le32_to_cpu(raw_super->log_sectorsize) >
				F2FS_MAX_LOG_SECTOR_SIZE ||
		le32_to_cpu(raw_super->log_sectorsize) <
				F2FS_MIN_LOG_SECTOR_SIZE) {
		f2fs_msg(sb, KERN_INFO, "Invalid log sectorsize (%u)",
			le32_to_cpu(raw_super->log_sectorsize));
		return 1;
	}
	if (le32_to_cpu(raw_super->log_sectors_per_block) +
		le32_to_cpu(raw_super->log_sectorsize) !=
			F2FS_MAX_LOG_SECTOR_SIZE) {
		f2fs_msg(sb, KERN_INFO,
			"Invalid log sectors per block(%u) log sectorsize(%u)",
			le32_to_cpu(raw_super->log_sectors_per_block),
			le32_to_cpu(raw_super->log_sectorsize));
		return 1;
	}

	/* check reserved ino info */
	if (le32_to_cpu(raw_super->node_ino) != 1 ||
		le32_to_cpu(raw_super->meta_ino) != 2 ||
		le32_to_cpu(raw_super->root_ino) != 3) {
		f2fs_msg(sb, KERN_INFO,
			"Invalid Fs Meta Ino: node(%u) meta(%u) root(%u)",
			le32_to_cpu(raw_super->node_ino),
			le32_to_cpu(raw_super->meta_ino),
			le32_to_cpu(raw_super->root_ino));
		return 1;
	}

	if (le32_to_cpu(raw_super->segment_count) > F2FS_MAX_SEGMENT) {
		f2fs_msg(sb, KERN_INFO,
			"Invalid segment count (%u)",
			le32_to_cpu(raw_super->segment_count));
		return 1;
	}

	/* check CP/SIT/NAT/SSA/MAIN_AREA area boundary */
	if (sanity_check_area_boundary(sbi, bh))
		return 1;

	return 0;
}
int sanity_check_ckpt(struct f2fs_sb_info *sbi)
{
	unsigned int total, fsmeta;
	struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi);
	struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
	unsigned int ovp_segments, reserved_segments;
	unsigned int main_segs, blocks_per_seg;
	int i;

	total = le32_to_cpu(raw_super->segment_count);
	fsmeta = le32_to_cpu(raw_super->segment_count_ckpt);
	fsmeta += le32_to_cpu(raw_super->segment_count_sit);
	fsmeta += le32_to_cpu(raw_super->segment_count_nat);
	fsmeta += le32_to_cpu(ckpt->rsvd_segment_count);
	fsmeta += le32_to_cpu(raw_super->segment_count_ssa);

	if (unlikely(fsmeta >= total))
		return 1;

	ovp_segments = le32_to_cpu(ckpt->overprov_segment_count);
	reserved_segments = le32_to_cpu(ckpt->rsvd_segment_count);

	if (unlikely(fsmeta < F2FS_MIN_SEGMENTS ||
			ovp_segments == 0 || reserved_segments == 0)) {
		f2fs_msg(sbi->sb, KERN_ERR,
			"Wrong layout: check mkfs.f2fs version");
		return 1;
	}

	main_segs = le32_to_cpu(raw_super->segment_count_main);
	blocks_per_seg = sbi->blocks_per_seg;

	for (i = 0; i < NR_CURSEG_NODE_TYPE; i++) {
		if (le32_to_cpu(ckpt->cur_node_segno[i]) >= main_segs ||
			le16_to_cpu(ckpt->cur_node_blkoff[i]) >= blocks_per_seg)
			return 1;
	}
	for (i = 0; i < NR_CURSEG_DATA_TYPE; i++) {
		if (le32_to_cpu(ckpt->cur_data_segno[i]) >= main_segs ||
			le16_to_cpu(ckpt->cur_data_blkoff[i]) >= blocks_per_seg)
			return 1;
	}

	if (unlikely(f2fs_cp_error(sbi))) {
		f2fs_msg(sbi->sb, KERN_ERR, "A bug case: need to run fsck");
		return 1;
	}
	return 0;
}
static void init_sb_info(struct f2fs_sb_info *sbi)
{
	struct f2fs_super_block *raw_super = sbi->raw_super;
	int i, j;

	sbi->log_sectors_per_block =
		le32_to_cpu(raw_super->log_sectors_per_block);
	sbi->log_blocksize = le32_to_cpu(raw_super->log_blocksize);
	sbi->blocksize = 1 << sbi->log_blocksize;
	sbi->log_blocks_per_seg = le32_to_cpu(raw_super->log_blocks_per_seg);
	sbi->blocks_per_seg = 1 << sbi->log_blocks_per_seg;
	sbi->segs_per_sec = le32_to_cpu(raw_super->segs_per_sec);
	sbi->secs_per_zone = le32_to_cpu(raw_super->secs_per_zone);
	sbi->total_sections = le32_to_cpu(raw_super->section_count);
	sbi->total_node_count =
		(le32_to_cpu(raw_super->segment_count_nat) / 2)
			* sbi->blocks_per_seg * NAT_ENTRY_PER_BLOCK;
	sbi->root_ino_num = le32_to_cpu(raw_super->root_ino);
	sbi->node_ino_num = le32_to_cpu(raw_super->node_ino);
	sbi->meta_ino_num = le32_to_cpu(raw_super->meta_ino);
	sbi->cur_victim_sec = NULL_SECNO;
	sbi->max_victim_search = DEF_MAX_VICTIM_SEARCH;

	sbi->dir_level = DEF_DIR_LEVEL;
	sbi->interval_time[CP_TIME] = DEF_CP_INTERVAL;
	sbi->interval_time[REQ_TIME] = DEF_IDLE_INTERVAL;
	clear_sbi_flag(sbi, SBI_NEED_FSCK);

	for (i = 0; i < NR_COUNT_TYPE; i++)
		atomic_set(&sbi->nr_pages[i], 0);

	atomic_set(&sbi->wb_sync_req, 0);

	INIT_LIST_HEAD(&sbi->s_list);
	mutex_init(&sbi->umount_mutex);
	for (i = 0; i < NR_PAGE_TYPE - 1; i++)
		for (j = HOT; j < NR_TEMP_TYPE; j++)
			mutex_init(&sbi->wio_mutex[i][j]);
	spin_lock_init(&sbi->cp_lock);

	sbi->dirty_device = 0;
	spin_lock_init(&sbi->dev_lock);

	init_rwsem(&sbi->sb_lock);
}
static int init_percpu_info(struct f2fs_sb_info *sbi)
{
	int err;

	err = percpu_counter_init(&sbi->alloc_valid_block_count, 0, GFP_KERNEL);
	if (err)
		return err;

	return percpu_counter_init(&sbi->total_valid_inode_count, 0,
								GFP_KERNEL);
}
#ifdef CONFIG_BLK_DEV_ZONED
static int init_blkz_info(struct f2fs_sb_info *sbi, int devi)
{
	struct block_device *bdev = FDEV(devi).bdev;
	sector_t nr_sectors = bdev->bd_part->nr_sects;
	sector_t sector = 0;
	struct blk_zone *zones;
	unsigned int i, nr_zones;
	unsigned int n = 0;
	int err = -EIO;

	if (!f2fs_sb_has_blkzoned(sbi->sb))
		return 0;

	if (sbi->blocks_per_blkz && sbi->blocks_per_blkz !=
				SECTOR_TO_BLOCK(bdev_zone_sectors(bdev)))
		return -EINVAL;
	sbi->blocks_per_blkz = SECTOR_TO_BLOCK(bdev_zone_sectors(bdev));
	if (sbi->log_blocks_per_blkz && sbi->log_blocks_per_blkz !=
				__ilog2_u32(sbi->blocks_per_blkz))
		return -EINVAL;
	sbi->log_blocks_per_blkz = __ilog2_u32(sbi->blocks_per_blkz);
	FDEV(devi).nr_blkz = SECTOR_TO_BLOCK(nr_sectors) >>
					sbi->log_blocks_per_blkz;
	if (nr_sectors & (bdev_zone_sectors(bdev) - 1))
		FDEV(devi).nr_blkz++;

	FDEV(devi).blkz_type = f2fs_kmalloc(sbi, FDEV(devi).nr_blkz,
								GFP_KERNEL);
	if (!FDEV(devi).blkz_type)
		return -ENOMEM;

#define F2FS_REPORT_NR_ZONES   4096

	zones = f2fs_kzalloc(sbi, sizeof(struct blk_zone) *
				F2FS_REPORT_NR_ZONES, GFP_KERNEL);
	if (!zones)
		return -ENOMEM;

	/* Get block zones type */
	while (zones && sector < nr_sectors) {

		nr_zones = F2FS_REPORT_NR_ZONES;
		err = blkdev_report_zones(bdev, sector,
					  zones, &nr_zones,
					  GFP_KERNEL);
		if (err)
			break;
		if (!nr_zones) {
			err = -EIO;
			break;
		}

		for (i = 0; i < nr_zones; i++) {
			FDEV(devi).blkz_type[n] = zones[i].type;
			sector += zones[i].len;
			n++;
		}
	}

	kfree(zones);

	return err;
}
#endif
/*
 * Read f2fs raw super block.
 * Because we have two copies of the super block, read both of them
 * to get the first valid one. If either is broken, we pass the
 * recovery flag back to the caller.
 */
static int read_raw_super_block(struct f2fs_sb_info *sbi,
			struct f2fs_super_block **raw_super,
			int *valid_super_block, int *recovery)
{
	struct super_block *sb = sbi->sb;
	int block;
	struct buffer_head *bh;
	struct f2fs_super_block *super;
	int err = 0;

	super = kzalloc(sizeof(struct f2fs_super_block), GFP_KERNEL);
	if (!super)
		return -ENOMEM;

	for (block = 0; block < 2; block++) {
		bh = sb_bread(sb, block);
		if (!bh) {
			f2fs_msg(sb, KERN_ERR, "Unable to read %dth superblock",
				block + 1);
			err = -EIO;
			continue;
		}

		/* sanity checking of raw super */
		if (sanity_check_raw_super(sbi, bh)) {
			f2fs_msg(sb, KERN_ERR,
				"Can't find valid F2FS filesystem in %dth superblock",
				block + 1);
			err = -EINVAL;
			brelse(bh);
			continue;
		}

		if (!*raw_super) {
			memcpy(super, bh->b_data + F2FS_SUPER_OFFSET,
							sizeof(*super));
			*valid_super_block = block;
			*raw_super = super;
		}
		brelse(bh);
	}

	/* Failed to read any one of the superblocks */
	if (err < 0)
		*recovery = 1;

	/* No valid superblock */
	if (!*raw_super)
		kfree(super);
	else
		err = 0;

	return err;
}
2449 int f2fs_commit_super(struct f2fs_sb_info *sbi, bool recover)
2451 struct buffer_head *bh;
if ((recover && f2fs_readonly(sbi->sb)) ||
		bdev_read_only(sbi->sb->s_bdev)) {
	set_sbi_flag(sbi, SBI_NEED_SB_WRITE);
	return -EROFS;
}
2460 /* write back-up superblock first */
bh = sb_bread(sbi->sb, sbi->valid_super_block ? 0 : 1);
if (!bh)
	return -EIO;
err = __f2fs_commit_super(bh, F2FS_RAW_SUPER(sbi));
brelse(bh);
/* if we are in the recovery path, skip writing the valid superblock */
if (recover || err)
	return err;
2471 /* write current valid superblock */
bh = sb_bread(sbi->sb, sbi->valid_super_block);
if (!bh)
	return -EIO;
err = __f2fs_commit_super(bh, F2FS_RAW_SUPER(sbi));
brelse(bh);
return err;
}
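
/*
 * Build the device list for this mount. In a multi-device volume the
 * devices form one contiguous block address space: each device's
 * [start_blk, end_blk] range begins right where the previous device's
 * range ends.
 */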
static int f2fs_scan_devices(struct f2fs_sb_info *sbi)
{
struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi);
unsigned int max_devices = MAX_DEVICES;
int i;
2486 /* Initialize single device information */
if (!RDEV(0).path[0]) {
	if (!bdev_is_zoned(sbi->sb->s_bdev))
		return 0;
	max_devices = 1;
}
/*
 * Initialize information for a multi-device volume, or for a
 * single zoned block device.
 */
sbi->devs = f2fs_kzalloc(sbi, sizeof(struct f2fs_dev_info) *
			max_devices, GFP_KERNEL);
if (!sbi->devs)
	return -ENOMEM;
2502 for (i = 0; i < max_devices; i++) {
if (i > 0 && !RDEV(i).path[0])
	break;
if (max_devices == 1) {
	/* Single zoned block device mount */
	FDEV(0).bdev =
		blkdev_get_by_dev(sbi->sb->s_bdev->bd_dev,
			sbi->sb->s_mode, sbi->sb->s_type);
} else {
	/* Multi-device mount */
	memcpy(FDEV(i).path, RDEV(i).path, MAX_PATH_LEN);
	FDEV(i).total_segments =
		le32_to_cpu(RDEV(i).total_segments);
	if (i == 0) {
		FDEV(i).start_blk = 0;
		FDEV(i).end_blk = FDEV(i).start_blk +
			(FDEV(i).total_segments <<
			sbi->log_blocks_per_seg) - 1 +
			le32_to_cpu(raw_super->segment0_blkaddr);
	} else {
		FDEV(i).start_blk = FDEV(i - 1).end_blk + 1;
		FDEV(i).end_blk = FDEV(i).start_blk +
			(FDEV(i).total_segments <<
			sbi->log_blocks_per_seg) - 1;
	}
	FDEV(i).bdev = blkdev_get_by_path(FDEV(i).path,
			sbi->sb->s_mode, sbi->sb->s_type);
}
if (IS_ERR(FDEV(i).bdev))
	return PTR_ERR(FDEV(i).bdev);
2535 /* to release errored devices */
2536 sbi->s_ndevs = i + 1;
2538 #ifdef CONFIG_BLK_DEV_ZONED
if (bdev_zoned_model(FDEV(i).bdev) == BLK_ZONED_HM &&
		!f2fs_sb_has_blkzoned(sbi->sb)) {
	f2fs_msg(sbi->sb, KERN_ERR,
		"Zoned block device feature not enabled");
	return -EINVAL;
}
2545 if (bdev_zoned_model(FDEV(i).bdev) != BLK_ZONED_NONE) {
if (init_blkz_info(sbi, i)) {
	f2fs_msg(sbi->sb, KERN_ERR,
		"Failed to initialize F2FS blkzone information");
	return -EINVAL;
}
if (max_devices == 1)
	break;
f2fs_msg(sbi->sb, KERN_INFO,
	"Mount Device [%2d]: %20s, %8u, %8x - %8x (zone: %s)",
	i, FDEV(i).path,
	FDEV(i).total_segments,
	FDEV(i).start_blk, FDEV(i).end_blk,
	bdev_zoned_model(FDEV(i).bdev) == BLK_ZONED_HA ?
	"Host-aware" : "Host-managed");
continue;
}
#endif
f2fs_msg(sbi->sb, KERN_INFO,
	"Mount Device [%2d]: %20s, %8u, %8x - %8x",
	i, FDEV(i).path,
	FDEV(i).total_segments,
	FDEV(i).start_blk, FDEV(i).end_blk);
}
f2fs_msg(sbi->sb, KERN_INFO,
	"IO Block Size: %8d KB", F2FS_IO_SIZE_KB(sbi));
return 0;
}
static void f2fs_tuning_parameters(struct f2fs_sb_info *sbi)
{
struct f2fs_sm_info *sm_i = SM_I(sbi);
2578 /* adjust parameters according to the volume size */
2579 if (sm_i->main_segments <= SMALL_VOLUME_SEGMENTS) {
2580 F2FS_OPTION(sbi).alloc_mode = ALLOC_MODE_REUSE;
2581 sm_i->dcc_info->discard_granularity = 1;
sm_i->ipu_policy = 1 << F2FS_IPU_FORCE;
}
}
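
/*
 * Mount-time entry point. The overall order is: read and sanity-check
 * the raw superblock, parse options, set up the VFS operations, load
 * the checkpoint, bring up the segment/node managers, read the root
 * inode, and finally run roll-forward recovery if it is needed.
 */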
static int f2fs_fill_super(struct super_block *sb, void *data, int silent)
{
struct f2fs_sb_info *sbi;
struct f2fs_super_block *raw_super;
struct inode *root;
int err;
bool retry = true, need_fsck = false;
2593 char *options = NULL;
2594 int recovery, i, valid_super_block;
struct curseg_info *seg_i;

try_onemore:
err = -ENOMEM;
raw_super = NULL;
valid_super_block = -1;
recovery = 0;
2603 /* allocate memory for f2fs-specific super block info */
2604 sbi = kzalloc(sizeof(struct f2fs_sb_info), GFP_KERNEL);
2610 /* Load the checksum driver */
2611 sbi->s_chksum_driver = crypto_alloc_shash("crc32", 0, 0);
2612 if (IS_ERR(sbi->s_chksum_driver)) {
2613 f2fs_msg(sb, KERN_ERR, "Cannot load crc32 driver.");
2614 err = PTR_ERR(sbi->s_chksum_driver);
sbi->s_chksum_driver = NULL;
goto free_sbi;
}
2619 /* set a block size */
if (unlikely(!sb_set_blocksize(sb, F2FS_BLKSIZE))) {
	f2fs_msg(sb, KERN_ERR, "unable to set blocksize");
	err = -EINVAL;
	goto free_sbi;
}
err = read_raw_super_block(sbi, &raw_super, &valid_super_block,
				&recovery);
if (err)
	goto free_sbi;
2630 sb->s_fs_info = sbi;
2631 sbi->raw_super = raw_super;
2633 F2FS_OPTION(sbi).s_resuid = make_kuid(&init_user_ns, F2FS_DEF_RESUID);
2634 F2FS_OPTION(sbi).s_resgid = make_kgid(&init_user_ns, F2FS_DEF_RESGID);
2636 /* precompute checksum seed for metadata */
2637 if (f2fs_sb_has_inode_chksum(sb))
2638 sbi->s_chksum_seed = f2fs_chksum(sbi, ~0, raw_super->uuid,
2639 sizeof(raw_super->uuid));
/*
 * The BLKZONED feature indicates that the drive was formatted with
 * zone alignment optimization. This is optional for host-aware
 * devices, but mandatory for host-managed zoned block devices.
 */
#ifndef CONFIG_BLK_DEV_ZONED
if (f2fs_sb_has_blkzoned(sb)) {
	f2fs_msg(sb, KERN_ERR,
		"Zoned block device support is not enabled");
	err = -EOPNOTSUPP;
	goto free_sb_buf;
}
#endif
2654 default_options(sbi);
2655 /* parse mount options */
2656 options = kstrdup((const char *)data, GFP_KERNEL);
if (data && !options) {
	err = -ENOMEM;
	goto free_sb_buf;
}
err = parse_options(sb, options);
if (err)
	goto free_options;
2666 sbi->max_file_blocks = max_file_blocks();
2667 sb->s_maxbytes = sbi->max_file_blocks <<
2668 le32_to_cpu(raw_super->log_blocksize);
2669 sb->s_max_links = F2FS_LINK_MAX;
2670 get_random_bytes(&sbi->s_next_generation, sizeof(u32));
#ifdef CONFIG_QUOTA
sb->dq_op = &f2fs_quota_operations;
2674 if (f2fs_sb_has_quota_ino(sb))
2675 sb->s_qcop = &dquot_quotactl_sysfile_ops;
else
	sb->s_qcop = &f2fs_quotactl_ops;
2678 sb->s_quota_types = QTYPE_MASK_USR | QTYPE_MASK_GRP | QTYPE_MASK_PRJ;
2680 if (f2fs_sb_has_quota_ino(sbi->sb)) {
2681 for (i = 0; i < MAXQUOTAS; i++) {
2682 if (f2fs_qf_ino(sbi->sb, i))
sbi->nquota_files++;
}
}
#endif
2688 sb->s_op = &f2fs_sops;
2689 #ifdef CONFIG_F2FS_FS_ENCRYPTION
sb->s_cop = &f2fs_cryptops;
#endif
2692 sb->s_xattr = f2fs_xattr_handlers;
2693 sb->s_export_op = &f2fs_export_ops;
2694 sb->s_magic = F2FS_SUPER_MAGIC;
2695 sb->s_time_gran = 1;
2696 sb->s_flags = (sb->s_flags & ~SB_POSIXACL) |
2697 (test_opt(sbi, POSIX_ACL) ? SB_POSIXACL : 0);
2698 memcpy(&sb->s_uuid, raw_super->uuid, sizeof(raw_super->uuid));
2699 sb->s_iflags |= SB_I_CGROUPWB;
2701 /* init f2fs-specific super block info */
2702 sbi->valid_super_block = valid_super_block;
2703 mutex_init(&sbi->gc_mutex);
2704 mutex_init(&sbi->cp_mutex);
2705 init_rwsem(&sbi->node_write);
2706 init_rwsem(&sbi->node_change);
2708 /* disallow all the data/node/meta page writes */
2709 set_sbi_flag(sbi, SBI_POR_DOING);
2710 spin_lock_init(&sbi->stat_lock);
2712 /* init iostat info */
2713 spin_lock_init(&sbi->iostat_lock);
2714 sbi->iostat_enable = false;
2716 for (i = 0; i < NR_PAGE_TYPE; i++) {
int n = (i == META) ? 1 : NR_TEMP_TYPE;
int j;
2720 sbi->write_io[i] = f2fs_kmalloc(sbi,
	n * sizeof(struct f2fs_bio_info),
	GFP_KERNEL);
if (!sbi->write_io[i]) {
	err = -ENOMEM;
	goto free_options;
}
2728 for (j = HOT; j < n; j++) {
2729 init_rwsem(&sbi->write_io[i][j].io_rwsem);
2730 sbi->write_io[i][j].sbi = sbi;
2731 sbi->write_io[i][j].bio = NULL;
2732 spin_lock_init(&sbi->write_io[i][j].io_lock);
INIT_LIST_HEAD(&sbi->write_io[i][j].io_list);
}
}
2737 init_rwsem(&sbi->cp_rwsem);
2738 init_waitqueue_head(&sbi->cp_wait);
err = init_percpu_info(sbi);
if (err)
	goto free_bio_info;
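
/*
 * When the configured I/O size spans multiple blocks (the io_bits
 * option), bios may need padding with dummy pages; a mempool keeps
 * such padding allocations from failing under memory pressure.
 */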
2745 if (F2FS_IO_SIZE(sbi) > 1) {
2746 sbi->write_io_dummy =
2747 mempool_create_page_pool(2 * (F2FS_IO_SIZE(sbi) - 1), 0);
if (!sbi->write_io_dummy) {
	err = -ENOMEM;
	goto free_percpu;
}
}
2754 /* get an inode for meta space */
2755 sbi->meta_inode = f2fs_iget(sb, F2FS_META_INO(sbi));
2756 if (IS_ERR(sbi->meta_inode)) {
2757 f2fs_msg(sb, KERN_ERR, "Failed to read F2FS meta data inode");
err = PTR_ERR(sbi->meta_inode);
goto free_io_dummy;
}
err = get_valid_checkpoint(sbi);
if (err) {
	f2fs_msg(sb, KERN_ERR, "Failed to get valid F2FS checkpoint");
	goto free_meta_inode;
}
2768 /* Initialize device list */
err = f2fs_scan_devices(sbi);
if (err) {
	f2fs_msg(sb, KERN_ERR, "Failed to find devices");
	goto free_devices;
}
2775 sbi->total_valid_node_count =
2776 le32_to_cpu(sbi->ckpt->valid_node_count);
2777 percpu_counter_set(&sbi->total_valid_inode_count,
2778 le32_to_cpu(sbi->ckpt->valid_inode_count));
2779 sbi->user_block_count = le64_to_cpu(sbi->ckpt->user_block_count);
2780 sbi->total_valid_block_count =
2781 le64_to_cpu(sbi->ckpt->valid_block_count);
2782 sbi->last_valid_block_count = sbi->total_valid_block_count;
2783 sbi->reserved_blocks = 0;
2784 sbi->current_reserved_blocks = 0;
2785 limit_reserve_root(sbi);
2787 for (i = 0; i < NR_INODE_TYPE; i++) {
2788 INIT_LIST_HEAD(&sbi->inode_list[i]);
spin_lock_init(&sbi->inode_lock[i]);
}
2792 init_extent_cache_info(sbi);
2794 init_ino_entry_info(sbi);
2796 /* setup f2fs internal modules */
err = build_segment_manager(sbi);
if (err) {
	f2fs_msg(sb, KERN_ERR,
		"Failed to initialize F2FS segment manager");
	goto free_sm;
}
err = build_node_manager(sbi);
if (err) {
	f2fs_msg(sb, KERN_ERR,
		"Failed to initialize F2FS node manager");
	goto free_nm;
}
2810 /* For write statistics */
2811 if (sb->s_bdev->bd_part)
2812 sbi->sectors_written_start =
2813 (u64)part_stat_read(sb->s_bdev->bd_part, sectors[1]);
/* Read the accumulated write IO statistics if they exist */
2816 seg_i = CURSEG_I(sbi, CURSEG_HOT_NODE);
2817 if (__exist_node_summaries(sbi))
2818 sbi->kbytes_written =
2819 le64_to_cpu(seg_i->journal->info.kbytes_written);
2821 build_gc_manager(sbi);
2823 /* get an inode for node space */
2824 sbi->node_inode = f2fs_iget(sb, F2FS_NODE_INO(sbi));
2825 if (IS_ERR(sbi->node_inode)) {
2826 f2fs_msg(sb, KERN_ERR, "Failed to read node inode");
err = PTR_ERR(sbi->node_inode);
goto free_nm;
}
err = f2fs_build_stats(sbi);
if (err)
	goto free_node_inode;
2835 /* read root inode and dentry */
root = f2fs_iget(sb, F2FS_ROOT_INO(sbi));
if (IS_ERR(root)) {
	f2fs_msg(sb, KERN_ERR, "Failed to read root inode");
	err = PTR_ERR(root);
	goto free_stats;
}
if (!S_ISDIR(root->i_mode) || !root->i_blocks || !root->i_size) {
	iput(root);
	err = -EINVAL;
	goto free_node_inode;
}
sb->s_root = d_make_root(root); /* allocate root dentry */
if (!sb->s_root) {
	err = -ENOMEM;
	goto free_root_inode;
}
err = f2fs_register_sysfs(sbi);
if (err)
	goto free_root_inode;
#ifdef CONFIG_QUOTA
/*
 * Turn on quotas which were not enabled for read-only mounts if
 * the filesystem has the quota feature, so that they are updated
 * correctly.
 */
if (f2fs_sb_has_quota_ino(sb) && !f2fs_readonly(sb)) {
	err = f2fs_enable_quotas(sb);
	if (err) {
		f2fs_msg(sb, KERN_ERR,
			"Cannot turn on quotas: error %d", err);
		goto free_sysfs;
	}
}
#endif
/* if there are any orphan inodes, free them */
err = recover_orphan_inodes(sbi);
if (err)
	goto free_meta;
2877 /* recover fsynced data */
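/*
 * Roll-forward recovery replays data that was fsynced after the last
 * checkpoint. When roll-forward is disabled, recover_fsync_data() is
 * still run in check-only mode (second argument true) so that a dirty
 * log can be detected and a read-write mount refused.
 */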
2878 if (!test_opt(sbi, DISABLE_ROLL_FORWARD)) {
/*
 * The mount should fail when the device is read-only and the
 * previous checkpoint was not done by a clean system shutdown.
 */
if (bdev_read_only(sb->s_bdev) &&
		!is_set_ckpt_flags(sbi, CP_UMOUNT_FLAG)) {
	err = -EROFS;
	goto free_meta;
}

if (need_fsck)
	set_sbi_flag(sbi, SBI_NEED_FSCK);
err = recover_fsync_data(sbi, false);
if (err < 0) {
	need_fsck = true;
	f2fs_msg(sb, KERN_ERR,
		"Cannot recover all fsync data errno=%d", err);
	goto free_meta;
}
} else {
err = recover_fsync_data(sbi, true);

if (!f2fs_readonly(sb) && err > 0) {
	err = -EINVAL;
	f2fs_msg(sb, KERN_ERR,
		"Need to recover fsync data");
	goto free_meta;
}
}
2913 /* recover_fsync_data() cleared this already */
2914 clear_sbi_flag(sbi, SBI_POR_DOING);
/*
 * If the filesystem is not mounted read-only, then
 * start the gc_thread.
 */
2920 if (test_opt(sbi, BG_GC) && !f2fs_readonly(sb)) {
/* After POR, we can run the background GC thread. */
err = start_gc_thread(sbi);
if (err)
	goto free_meta;
}
kfree(options);
/* recover a broken superblock */
if (recovery) {
	err = f2fs_commit_super(sbi, true);
	f2fs_msg(sb, KERN_INFO,
		"Try to recover %dth superblock, ret: %d",
		sbi->valid_super_block ? 1 : 2, err);
}
2936 f2fs_join_shrinker(sbi);
2938 f2fs_tuning_parameters(sbi);
2940 f2fs_msg(sbi->sb, KERN_NOTICE, "Mounted with checkpoint version = %llx",
2941 cur_cp_version(F2FS_CKPT(sbi)));
2942 f2fs_update_time(sbi, CP_TIME);
f2fs_update_time(sbi, REQ_TIME);
return 0;

free_meta:
#ifdef CONFIG_QUOTA
if (f2fs_sb_has_quota_ino(sb) && !f2fs_readonly(sb))
	f2fs_quota_off_umount(sbi->sb);
#endif
f2fs_sync_inode_meta(sbi);
/*
 * Some dirty meta pages can be produced when recover_orphan_inodes()
 * fails with EIO. Then, iput(node_inode) can trigger balance_fs_bg()
 * followed by write_checkpoint() through f2fs_write_node_pages(), which
 * falls into an infinite loop in sync_meta_pages().
 */
2958 truncate_inode_pages_final(META_MAPPING(sbi));
#ifdef CONFIG_QUOTA
free_sysfs:
#endif
f2fs_unregister_sysfs(sbi);
free_root_inode:
dput(sb->s_root);
sb->s_root = NULL;
free_stats:
f2fs_destroy_stats(sbi);
free_node_inode:
release_ino_entry(sbi, true);
truncate_inode_pages_final(NODE_MAPPING(sbi));
iput(sbi->node_inode);
free_nm:
destroy_node_manager(sbi);
free_sm:
destroy_segment_manager(sbi);
free_devices:
destroy_device_list(sbi);
kfree(sbi->ckpt);
free_meta_inode:
make_bad_inode(sbi->meta_inode);
iput(sbi->meta_inode);
free_io_dummy:
mempool_destroy(sbi->write_io_dummy);
free_percpu:
destroy_percpu_info(sbi);
free_bio_info:
for (i = 0; i < NR_PAGE_TYPE; i++)
	kfree(sbi->write_io[i]);
free_options:
#ifdef CONFIG_QUOTA
for (i = 0; i < MAXQUOTAS; i++)
	kfree(F2FS_OPTION(sbi).s_qf_names[i]);
#endif
kfree(options);
free_sb_buf:
kfree(raw_super);
free_sbi:
if (sbi->s_chksum_driver)
	crypto_free_shash(sbi->s_chksum_driver);
kfree(sbi);
/* give only one more chance */
if (retry) {
	retry = false;
	shrink_dcache_sb(sb);
	goto try_onemore;
}
return err;
}
static struct dentry *f2fs_mount(struct file_system_type *fs_type, int flags,
			const char *dev_name, void *data)
{
return mount_bdev(fs_type, flags, dev_name, data, f2fs_fill_super);
}
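
/*
 * The GC and discard threads are stopped before kill_block_super():
 * both can issue I/O against the filesystem, so they must not be
 * running while the superblock is being torn down.
 */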
static void kill_f2fs_super(struct super_block *sb)
{
if (sb->s_root) {
	set_sbi_flag(F2FS_SB(sb), SBI_IS_CLOSE);
	stop_gc_thread(F2FS_SB(sb));
	stop_discard_thread(F2FS_SB(sb));
}
kill_block_super(sb);
}
3027 static struct file_system_type f2fs_fs_type = {
.owner = THIS_MODULE,
.name = "f2fs",
.mount = f2fs_mount,
3031 .kill_sb = kill_f2fs_super,
3032 .fs_flags = FS_REQUIRES_DEV,
3034 MODULE_ALIAS_FS("f2fs");
static int __init init_inodecache(void)
{
f2fs_inode_cachep = kmem_cache_create("f2fs_inode_cache",
		sizeof(struct f2fs_inode_info), 0,
		SLAB_RECLAIM_ACCOUNT|SLAB_ACCOUNT, NULL);
if (!f2fs_inode_cachep)
	return -ENOMEM;
return 0;
}
static void destroy_inodecache(void)
{
/*
 * Make sure all delayed rcu free inodes are flushed before we
 * destroy the cache.
 */
rcu_barrier();
kmem_cache_destroy(f2fs_inode_cachep);
}
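
/*
 * Module init: the slab caches are created before the shrinker,
 * filesystem type, and stats are registered, so a mount can never
 * observe a missing cache. The error labels unwind in exactly the
 * reverse order of the registrations above them.
 */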
static int __init init_f2fs_fs(void)
{
int err;

f2fs_build_trace_ios();

err = init_inodecache();
if (err)
	goto fail;
err = create_node_manager_caches();
if (err)
	goto free_inodecache;
err = create_segment_manager_caches();
if (err)
	goto free_node_manager_caches;
err = create_checkpoint_caches();
if (err)
	goto free_segment_manager_caches;
err = create_extent_cache();
if (err)
	goto free_checkpoint_caches;
err = f2fs_init_sysfs();
if (err)
	goto free_extent_cache;
err = register_shrinker(&f2fs_shrinker_info);
if (err)
	goto free_sysfs;
err = register_filesystem(&f2fs_fs_type);
if (err)
	goto free_shrinker;
err = f2fs_create_root_stats();
if (err)
	goto free_filesystem;
return 0;

free_filesystem:
unregister_filesystem(&f2fs_fs_type);
free_shrinker:
unregister_shrinker(&f2fs_shrinker_info);
free_sysfs:
f2fs_exit_sysfs();
free_extent_cache:
destroy_extent_cache();
free_checkpoint_caches:
destroy_checkpoint_caches();
free_segment_manager_caches:
destroy_segment_manager_caches();
free_node_manager_caches:
destroy_node_manager_caches();
free_inodecache:
destroy_inodecache();
fail:
return err;
}
static void __exit exit_f2fs_fs(void)
{
f2fs_destroy_root_stats();
unregister_filesystem(&f2fs_fs_type);
unregister_shrinker(&f2fs_shrinker_info);
f2fs_exit_sysfs();
destroy_extent_cache();
destroy_checkpoint_caches();
destroy_segment_manager_caches();
destroy_node_manager_caches();
destroy_inodecache();
f2fs_destroy_trace_ios();
}
3125 module_init(init_f2fs_fs)
3126 module_exit(exit_f2fs_fs)
3128 MODULE_AUTHOR("Samsung Electronics's Praesto Team");
3129 MODULE_DESCRIPTION("Flash Friendly File System");
3130 MODULE_LICENSE("GPL");