// SPDX-License-Identifier: GPL-2.0
/*
 * fs/f2fs/super.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/statfs.h>
#include <linux/buffer_head.h>
#include <linux/backing-dev.h>
#include <linux/kthread.h>
#include <linux/parser.h>
#include <linux/mount.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <linux/random.h>
#include <linux/exportfs.h>
#include <linux/blkdev.h>
#include <linux/quotaops.h>
#include <linux/f2fs_fs.h>
#include <linux/sysfs.h>
#include <linux/quota.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include "xattr.h"
#include "gc.h"
#include "trace.h"

#define CREATE_TRACE_POINTS
#include <trace/events/f2fs.h>
static struct kmem_cache *f2fs_inode_cachep;

#ifdef CONFIG_F2FS_FAULT_INJECTION

const char *f2fs_fault_name[FAULT_MAX] = {
	[FAULT_KMALLOC]		= "kmalloc",
	[FAULT_KVMALLOC]	= "kvmalloc",
	[FAULT_PAGE_ALLOC]	= "page alloc",
	[FAULT_PAGE_GET]	= "page get",
	[FAULT_ALLOC_BIO]	= "alloc bio",
	[FAULT_ALLOC_NID]	= "alloc nid",
	[FAULT_ORPHAN]		= "orphan",
	[FAULT_BLOCK]		= "no more block",
	[FAULT_DIR_DEPTH]	= "too big dir depth",
	[FAULT_EVICT_INODE]	= "evict_inode fail",
	[FAULT_TRUNCATE]	= "truncate fail",
	[FAULT_READ_IO]		= "read IO error",
	[FAULT_CHECKPOINT]	= "checkpoint error",
	[FAULT_DISCARD]		= "discard error",
	[FAULT_WRITE_IO]	= "write IO error",
};
void f2fs_build_fault_attr(struct f2fs_sb_info *sbi, unsigned int rate,
							unsigned int type)
{
	struct f2fs_fault_info *ffi = &F2FS_OPTION(sbi).fault_info;

	if (rate) {
		atomic_set(&ffi->inject_ops, 0);
		ffi->inject_rate = rate;
	}
	if (type)
		ffi->inject_type = type;
	if (!rate && !type)
		memset(ffi, 0, sizeof(struct f2fs_fault_info));
}
#endif
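
/*
 * Illustrative sketch (assumption, not part of this file): instrumented
 * call sites elsewhere in f2fs gate their error paths on a helper that
 * counts calls and fires once inject_ops reaches inject_rate for an
 * enabled fault type. With a mount such as:
 *
 *	mount -t f2fs -o fault_injection=1000,fault_type=1 /dev/sdb1 /mnt
 *
 * roughly one in 1000 instrumented calls would fail with FAULT_KMALLOC
 * (fault_type is a bitmask over the FAULT_* indexes named above; bit 0
 * corresponds to "kmalloc" in this table).
 */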
/* f2fs-wide shrinker description */
static struct shrinker f2fs_shrinker_info = {
	.scan_objects = f2fs_shrink_scan,
	.count_objects = f2fs_shrink_count,
	.seeks = DEFAULT_SEEKS,
};
enum {
	Opt_gc_background,
	Opt_disable_roll_forward,
	Opt_norecovery,
	Opt_discard,
	Opt_nodiscard,
	Opt_noheap,
	Opt_heap,
	Opt_user_xattr,
	Opt_nouser_xattr,
	Opt_acl,
	Opt_noacl,
	Opt_active_logs,
	Opt_disable_ext_identify,
	Opt_inline_xattr,
	Opt_noinline_xattr,
	Opt_inline_xattr_size,
	Opt_inline_data,
	Opt_inline_dentry,
	Opt_noinline_dentry,
	Opt_flush_merge,
	Opt_noflush_merge,
	Opt_nobarrier,
	Opt_fastboot,
	Opt_extent_cache,
	Opt_noextent_cache,
	Opt_noinline_data,
	Opt_data_flush,
	Opt_reserve_root,
	Opt_resgid,
	Opt_resuid,
	Opt_mode,
	Opt_io_size_bits,
	Opt_fault_injection,
	Opt_fault_type,
	Opt_lazytime,
	Opt_nolazytime,
	Opt_quota,
	Opt_noquota,
	Opt_usrquota,
	Opt_grpquota,
	Opt_prjquota,
	Opt_usrjquota,
	Opt_grpjquota,
	Opt_prjjquota,
	Opt_offusrjquota,
	Opt_offgrpjquota,
	Opt_offprjjquota,
	Opt_jqfmt_vfsold,
	Opt_jqfmt_vfsv0,
	Opt_jqfmt_vfsv1,
	Opt_whint,
	Opt_alloc,
	Opt_fsync,
	Opt_test_dummy_encryption,
	Opt_checkpoint,
	Opt_err,
};
static match_table_t f2fs_tokens = {
	{Opt_gc_background, "background_gc=%s"},
	{Opt_disable_roll_forward, "disable_roll_forward"},
	{Opt_norecovery, "norecovery"},
	{Opt_discard, "discard"},
	{Opt_nodiscard, "nodiscard"},
	{Opt_noheap, "no_heap"},
	{Opt_heap, "heap"},
	{Opt_user_xattr, "user_xattr"},
	{Opt_nouser_xattr, "nouser_xattr"},
	{Opt_acl, "acl"},
	{Opt_noacl, "noacl"},
	{Opt_active_logs, "active_logs=%u"},
	{Opt_disable_ext_identify, "disable_ext_identify"},
	{Opt_inline_xattr, "inline_xattr"},
	{Opt_noinline_xattr, "noinline_xattr"},
	{Opt_inline_xattr_size, "inline_xattr_size=%u"},
	{Opt_inline_data, "inline_data"},
	{Opt_inline_dentry, "inline_dentry"},
	{Opt_noinline_dentry, "noinline_dentry"},
	{Opt_flush_merge, "flush_merge"},
	{Opt_noflush_merge, "noflush_merge"},
	{Opt_nobarrier, "nobarrier"},
	{Opt_fastboot, "fastboot"},
	{Opt_extent_cache, "extent_cache"},
	{Opt_noextent_cache, "noextent_cache"},
	{Opt_noinline_data, "noinline_data"},
	{Opt_data_flush, "data_flush"},
	{Opt_reserve_root, "reserve_root=%u"},
	{Opt_resgid, "resgid=%u"},
	{Opt_resuid, "resuid=%u"},
	{Opt_mode, "mode=%s"},
	{Opt_io_size_bits, "io_bits=%u"},
	{Opt_fault_injection, "fault_injection=%u"},
	{Opt_fault_type, "fault_type=%u"},
	{Opt_lazytime, "lazytime"},
	{Opt_nolazytime, "nolazytime"},
	{Opt_quota, "quota"},
	{Opt_noquota, "noquota"},
	{Opt_usrquota, "usrquota"},
	{Opt_grpquota, "grpquota"},
	{Opt_prjquota, "prjquota"},
	{Opt_usrjquota, "usrjquota=%s"},
	{Opt_grpjquota, "grpjquota=%s"},
	{Opt_prjjquota, "prjjquota=%s"},
	{Opt_offusrjquota, "usrjquota="},
	{Opt_offgrpjquota, "grpjquota="},
	{Opt_offprjjquota, "prjjquota="},
	{Opt_jqfmt_vfsold, "jqfmt=vfsold"},
	{Opt_jqfmt_vfsv0, "jqfmt=vfsv0"},
	{Opt_jqfmt_vfsv1, "jqfmt=vfsv1"},
	{Opt_whint, "whint_mode=%s"},
	{Opt_alloc, "alloc_mode=%s"},
	{Opt_fsync, "fsync_mode=%s"},
	{Opt_test_dummy_encryption, "test_dummy_encryption"},
	{Opt_checkpoint, "checkpoint=%s"},
	{Opt_err, NULL},
};
void f2fs_msg(struct super_block *sb, const char *level, const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, fmt);
	vaf.fmt = fmt;
	vaf.va = &args;
	printk("%sF2FS-fs (%s): %pV\n", level, sb->s_id, &vaf);
	va_end(args);
}
static inline void limit_reserve_root(struct f2fs_sb_info *sbi)
{
	block_t limit = (sbi->user_block_count << 1) / 1000;

	/* limit is 0.2% */
	if (test_opt(sbi, RESERVE_ROOT) &&
			F2FS_OPTION(sbi).root_reserved_blocks > limit) {
		F2FS_OPTION(sbi).root_reserved_blocks = limit;
		f2fs_msg(sbi->sb, KERN_INFO,
			"Reduce reserved blocks for root = %u",
			F2FS_OPTION(sbi).root_reserved_blocks);
	}
	if (!test_opt(sbi, RESERVE_ROOT) &&
		(!uid_eq(F2FS_OPTION(sbi).s_resuid,
				make_kuid(&init_user_ns, F2FS_DEF_RESUID)) ||
		!gid_eq(F2FS_OPTION(sbi).s_resgid,
				make_kgid(&init_user_ns, F2FS_DEF_RESGID))))
		f2fs_msg(sbi->sb, KERN_INFO,
			"Ignore s_resuid=%u, s_resgid=%u w/o reserve_root",
				from_kuid_munged(&init_user_ns,
					F2FS_OPTION(sbi).s_resuid),
				from_kgid_munged(&init_user_ns,
					F2FS_OPTION(sbi).s_resgid));
}
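
/*
 * Worked example for the cap above: the limit is
 * (user_block_count * 2) / 1000, i.e. 0.2% of the user-visible blocks.
 * On a volume with 25,600,000 4KB user blocks (~100 GiB), reserve_root
 * is clamped to 51,200 blocks (~200 MiB); a larger mount-time value is
 * reduced and logged as shown.
 */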
static void init_once(void *foo)
{
	struct f2fs_inode_info *fi = (struct f2fs_inode_info *) foo;

	inode_init_once(&fi->vfs_inode);
}
#ifdef CONFIG_QUOTA
static const char * const quotatypes[] = INITQFNAMES;
#define QTYPE2NAME(t) (quotatypes[t])
static int f2fs_set_qf_name(struct super_block *sb, int qtype,
							substring_t *args)
{
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	char *qname;
	int ret = -EINVAL;

	if (sb_any_quota_loaded(sb) && !F2FS_OPTION(sbi).s_qf_names[qtype]) {
		f2fs_msg(sb, KERN_ERR,
			"Cannot change journaled "
			"quota options when quota turned on");
		return -EINVAL;
	}
	if (f2fs_sb_has_quota_ino(sbi)) {
		f2fs_msg(sb, KERN_INFO,
			"QUOTA feature is enabled, so ignore qf_name");
		return 0;
	}

	qname = match_strdup(args);
	if (!qname) {
		f2fs_msg(sb, KERN_ERR,
			"Not enough memory for storing quotafile name");
		return -ENOMEM;
	}
	if (F2FS_OPTION(sbi).s_qf_names[qtype]) {
		if (strcmp(F2FS_OPTION(sbi).s_qf_names[qtype], qname) == 0)
			ret = 0;
		else
			f2fs_msg(sb, KERN_ERR,
				"%s quota file already specified",
				QTYPE2NAME(qtype));
		goto errout;
	}
	if (strchr(qname, '/')) {
		f2fs_msg(sb, KERN_ERR,
			"quotafile must be on filesystem root");
		goto errout;
	}
	F2FS_OPTION(sbi).s_qf_names[qtype] = qname;
	set_opt(sbi, QUOTA);
	return 0;
errout:
	kvfree(qname);
	return ret;
}
static int f2fs_clear_qf_name(struct super_block *sb, int qtype)
{
	struct f2fs_sb_info *sbi = F2FS_SB(sb);

	if (sb_any_quota_loaded(sb) && F2FS_OPTION(sbi).s_qf_names[qtype]) {
		f2fs_msg(sb, KERN_ERR, "Cannot change journaled quota options"
			" when quota turned on");
		return -EINVAL;
	}
	kvfree(F2FS_OPTION(sbi).s_qf_names[qtype]);
	F2FS_OPTION(sbi).s_qf_names[qtype] = NULL;
	return 0;
}
static int f2fs_check_quota_options(struct f2fs_sb_info *sbi)
{
	/*
	 * We do the test below only for project quotas. 'usrquota' and
	 * 'grpquota' mount options are allowed even without quota feature
	 * to support legacy quotas in quota files.
	 */
	if (test_opt(sbi, PRJQUOTA) && !f2fs_sb_has_project_quota(sbi)) {
		f2fs_msg(sbi->sb, KERN_ERR, "Project quota feature not enabled. "
			 "Cannot enable project quota enforcement.");
		return -1;
	}
	if (F2FS_OPTION(sbi).s_qf_names[USRQUOTA] ||
			F2FS_OPTION(sbi).s_qf_names[GRPQUOTA] ||
			F2FS_OPTION(sbi).s_qf_names[PRJQUOTA]) {
		if (test_opt(sbi, USRQUOTA) &&
				F2FS_OPTION(sbi).s_qf_names[USRQUOTA])
			clear_opt(sbi, USRQUOTA);

		if (test_opt(sbi, GRPQUOTA) &&
				F2FS_OPTION(sbi).s_qf_names[GRPQUOTA])
			clear_opt(sbi, GRPQUOTA);

		if (test_opt(sbi, PRJQUOTA) &&
				F2FS_OPTION(sbi).s_qf_names[PRJQUOTA])
			clear_opt(sbi, PRJQUOTA);

		if (test_opt(sbi, GRPQUOTA) || test_opt(sbi, USRQUOTA) ||
				test_opt(sbi, PRJQUOTA)) {
			f2fs_msg(sbi->sb, KERN_ERR, "old and new quota "
					"format mixing");
			return -1;
		}

		if (!F2FS_OPTION(sbi).s_jquota_fmt) {
			f2fs_msg(sbi->sb, KERN_ERR, "journaled quota format "
					"not specified");
			return -1;
		}
	}

	if (f2fs_sb_has_quota_ino(sbi) && F2FS_OPTION(sbi).s_jquota_fmt) {
		f2fs_msg(sbi->sb, KERN_INFO,
			"QUOTA feature is enabled, so ignore jquota_fmt");
		F2FS_OPTION(sbi).s_jquota_fmt = 0;
	}
	return 0;
}
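
/*
 * Example of the precedence enforced above (illustrative): mounting with
 * "usrjquota=aquota.user,jqfmt=vfsv0,usrquota" drops the redundant
 * "usrquota" flag and keeps the journaled name, while on an image
 * formatted with the quota_ino feature both the journaled name and
 * jqfmt are ignored entirely, since quota files live in hidden inodes.
 */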
static int parse_options(struct super_block *sb, char *options)
{
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	substring_t args[MAX_OPT_ARGS];
	char *p, *name;
	int arg = 0;
	kuid_t uid;
	kgid_t gid;
#ifdef CONFIG_QUOTA
	int ret;
#endif

	if (!options)
		return 0;

	while ((p = strsep(&options, ",")) != NULL) {
		int token;

		if (!*p)
			continue;
		/*
		 * Initialize args struct so we know whether arg was
		 * found; some options take optional arguments.
		 */
		args[0].to = args[0].from = NULL;
		token = match_token(p, f2fs_tokens, args);

		switch (token) {
		case Opt_gc_background:
			name = match_strdup(&args[0]);

			if (!name)
				return -ENOMEM;
			if (strlen(name) == 2 && !strncmp(name, "on", 2)) {
				set_opt(sbi, BG_GC);
				clear_opt(sbi, FORCE_FG_GC);
			} else if (strlen(name) == 3 && !strncmp(name, "off", 3)) {
				clear_opt(sbi, BG_GC);
				clear_opt(sbi, FORCE_FG_GC);
			} else if (strlen(name) == 4 && !strncmp(name, "sync", 4)) {
				set_opt(sbi, BG_GC);
				set_opt(sbi, FORCE_FG_GC);
			} else {
				kvfree(name);
				return -EINVAL;
			}
			kvfree(name);
			break;
		case Opt_disable_roll_forward:
			set_opt(sbi, DISABLE_ROLL_FORWARD);
			break;
		case Opt_norecovery:
			/* this option mounts f2fs with ro */
			set_opt(sbi, DISABLE_ROLL_FORWARD);
			if (!f2fs_readonly(sb))
				return -EINVAL;
			break;
		case Opt_discard:
			set_opt(sbi, DISCARD);
			break;
		case Opt_nodiscard:
			if (f2fs_sb_has_blkzoned(sbi)) {
				f2fs_msg(sb, KERN_WARNING,
					"discard is required for zoned block devices");
				return -EINVAL;
			}
			clear_opt(sbi, DISCARD);
			break;
		case Opt_noheap:
			set_opt(sbi, NOHEAP);
			break;
		case Opt_heap:
			clear_opt(sbi, NOHEAP);
			break;
#ifdef CONFIG_F2FS_FS_XATTR
		case Opt_user_xattr:
			set_opt(sbi, XATTR_USER);
			break;
		case Opt_nouser_xattr:
			clear_opt(sbi, XATTR_USER);
			break;
		case Opt_inline_xattr:
			set_opt(sbi, INLINE_XATTR);
			break;
		case Opt_noinline_xattr:
			clear_opt(sbi, INLINE_XATTR);
			break;
		case Opt_inline_xattr_size:
			if (args->from && match_int(args, &arg))
				return -EINVAL;
			set_opt(sbi, INLINE_XATTR_SIZE);
			F2FS_OPTION(sbi).inline_xattr_size = arg;
			break;
#else
		case Opt_user_xattr:
			f2fs_msg(sb, KERN_INFO,
				"user_xattr options not supported");
			break;
		case Opt_nouser_xattr:
			f2fs_msg(sb, KERN_INFO,
				"nouser_xattr options not supported");
			break;
		case Opt_inline_xattr:
			f2fs_msg(sb, KERN_INFO,
				"inline_xattr options not supported");
			break;
		case Opt_noinline_xattr:
			f2fs_msg(sb, KERN_INFO,
				"noinline_xattr options not supported");
			break;
#endif
#ifdef CONFIG_F2FS_FS_POSIX_ACL
		case Opt_acl:
			set_opt(sbi, POSIX_ACL);
			break;
		case Opt_noacl:
			clear_opt(sbi, POSIX_ACL);
			break;
#else
		case Opt_acl:
			f2fs_msg(sb, KERN_INFO, "acl options not supported");
			break;
		case Opt_noacl:
			f2fs_msg(sb, KERN_INFO, "noacl options not supported");
			break;
#endif
		case Opt_active_logs:
			if (args->from && match_int(args, &arg))
				return -EINVAL;
			if (arg != 2 && arg != 4 && arg != NR_CURSEG_TYPE)
				return -EINVAL;
			F2FS_OPTION(sbi).active_logs = arg;
			break;
		case Opt_disable_ext_identify:
			set_opt(sbi, DISABLE_EXT_IDENTIFY);
			break;
		case Opt_inline_data:
			set_opt(sbi, INLINE_DATA);
			break;
		case Opt_inline_dentry:
			set_opt(sbi, INLINE_DENTRY);
			break;
		case Opt_noinline_dentry:
			clear_opt(sbi, INLINE_DENTRY);
			break;
		case Opt_flush_merge:
			set_opt(sbi, FLUSH_MERGE);
			break;
		case Opt_noflush_merge:
			clear_opt(sbi, FLUSH_MERGE);
			break;
		case Opt_nobarrier:
			set_opt(sbi, NOBARRIER);
			break;
		case Opt_fastboot:
			set_opt(sbi, FASTBOOT);
			break;
		case Opt_extent_cache:
			set_opt(sbi, EXTENT_CACHE);
			break;
		case Opt_noextent_cache:
			clear_opt(sbi, EXTENT_CACHE);
			break;
		case Opt_noinline_data:
			clear_opt(sbi, INLINE_DATA);
			break;
		case Opt_data_flush:
			set_opt(sbi, DATA_FLUSH);
			break;
		case Opt_reserve_root:
			if (args->from && match_int(args, &arg))
				return -EINVAL;
			if (test_opt(sbi, RESERVE_ROOT)) {
				f2fs_msg(sb, KERN_INFO,
					"Preserve previous reserve_root=%u",
					F2FS_OPTION(sbi).root_reserved_blocks);
			} else {
				F2FS_OPTION(sbi).root_reserved_blocks = arg;
				set_opt(sbi, RESERVE_ROOT);
			}
			break;
		case Opt_resuid:
			if (args->from && match_int(args, &arg))
				return -EINVAL;
			uid = make_kuid(current_user_ns(), arg);
			if (!uid_valid(uid)) {
				f2fs_msg(sb, KERN_ERR,
					"Invalid uid value %d", arg);
				return -EINVAL;
			}
			F2FS_OPTION(sbi).s_resuid = uid;
			break;
		case Opt_resgid:
			if (args->from && match_int(args, &arg))
				return -EINVAL;
			gid = make_kgid(current_user_ns(), arg);
			if (!gid_valid(gid)) {
				f2fs_msg(sb, KERN_ERR,
					"Invalid gid value %d", arg);
				return -EINVAL;
			}
			F2FS_OPTION(sbi).s_resgid = gid;
			break;
		case Opt_mode:
			name = match_strdup(&args[0]);

			if (!name)
				return -ENOMEM;
			if (strlen(name) == 8 &&
					!strncmp(name, "adaptive", 8)) {
				if (f2fs_sb_has_blkzoned(sbi)) {
					f2fs_msg(sb, KERN_WARNING,
						 "adaptive mode is not allowed with "
						 "zoned block device feature");
					kvfree(name);
					return -EINVAL;
				}
				set_opt_mode(sbi, F2FS_MOUNT_ADAPTIVE);
			} else if (strlen(name) == 3 &&
					!strncmp(name, "lfs", 3)) {
				set_opt_mode(sbi, F2FS_MOUNT_LFS);
			} else {
				kvfree(name);
				return -EINVAL;
			}
			kvfree(name);
			break;
		case Opt_io_size_bits:
			if (args->from && match_int(args, &arg))
				return -EINVAL;
			if (arg <= 0 || arg > __ilog2_u32(BIO_MAX_PAGES)) {
				f2fs_msg(sb, KERN_WARNING,
					"Not support %d, larger than %d",
					1 << arg, BIO_MAX_PAGES);
				return -EINVAL;
			}
			F2FS_OPTION(sbi).write_io_size_bits = arg;
			break;
#ifdef CONFIG_F2FS_FAULT_INJECTION
		case Opt_fault_injection:
			if (args->from && match_int(args, &arg))
				return -EINVAL;
			f2fs_build_fault_attr(sbi, arg, F2FS_ALL_FAULT_TYPE);
			set_opt(sbi, FAULT_INJECTION);
			break;
		case Opt_fault_type:
			if (args->from && match_int(args, &arg))
				return -EINVAL;
			f2fs_build_fault_attr(sbi, 0, arg);
			set_opt(sbi, FAULT_INJECTION);
			break;
#else
		case Opt_fault_injection:
			f2fs_msg(sb, KERN_INFO,
				"fault_injection options not supported");
			break;
		case Opt_fault_type:
			f2fs_msg(sb, KERN_INFO,
				"fault_type options not supported");
			break;
#endif
		case Opt_lazytime:
			sb->s_flags |= SB_LAZYTIME;
			break;
		case Opt_nolazytime:
			sb->s_flags &= ~SB_LAZYTIME;
			break;
#ifdef CONFIG_QUOTA
		case Opt_quota:
		case Opt_usrquota:
			set_opt(sbi, USRQUOTA);
			break;
		case Opt_grpquota:
			set_opt(sbi, GRPQUOTA);
			break;
		case Opt_prjquota:
			set_opt(sbi, PRJQUOTA);
			break;
		case Opt_usrjquota:
			ret = f2fs_set_qf_name(sb, USRQUOTA, &args[0]);
			if (ret)
				return ret;
			break;
		case Opt_grpjquota:
			ret = f2fs_set_qf_name(sb, GRPQUOTA, &args[0]);
			if (ret)
				return ret;
			break;
		case Opt_prjjquota:
			ret = f2fs_set_qf_name(sb, PRJQUOTA, &args[0]);
			if (ret)
				return ret;
			break;
		case Opt_offusrjquota:
			ret = f2fs_clear_qf_name(sb, USRQUOTA);
			if (ret)
				return ret;
			break;
		case Opt_offgrpjquota:
			ret = f2fs_clear_qf_name(sb, GRPQUOTA);
			if (ret)
				return ret;
			break;
		case Opt_offprjjquota:
			ret = f2fs_clear_qf_name(sb, PRJQUOTA);
			if (ret)
				return ret;
			break;
		case Opt_jqfmt_vfsold:
			F2FS_OPTION(sbi).s_jquota_fmt = QFMT_VFS_OLD;
			break;
		case Opt_jqfmt_vfsv0:
			F2FS_OPTION(sbi).s_jquota_fmt = QFMT_VFS_V0;
			break;
		case Opt_jqfmt_vfsv1:
			F2FS_OPTION(sbi).s_jquota_fmt = QFMT_VFS_V1;
			break;
		case Opt_noquota:
			clear_opt(sbi, QUOTA);
			clear_opt(sbi, USRQUOTA);
			clear_opt(sbi, GRPQUOTA);
			clear_opt(sbi, PRJQUOTA);
			break;
#else
		case Opt_quota:
		case Opt_usrquota:
		case Opt_grpquota:
		case Opt_prjquota:
		case Opt_usrjquota:
		case Opt_grpjquota:
		case Opt_prjjquota:
		case Opt_offusrjquota:
		case Opt_offgrpjquota:
		case Opt_offprjjquota:
		case Opt_jqfmt_vfsold:
		case Opt_jqfmt_vfsv0:
		case Opt_jqfmt_vfsv1:
		case Opt_noquota:
			f2fs_msg(sb, KERN_INFO,
					"quota operations not supported");
			break;
#endif
		case Opt_whint:
			name = match_strdup(&args[0]);
			if (!name)
				return -ENOMEM;
			if (strlen(name) == 10 &&
					!strncmp(name, "user-based", 10)) {
				F2FS_OPTION(sbi).whint_mode = WHINT_MODE_USER;
			} else if (strlen(name) == 3 &&
					!strncmp(name, "off", 3)) {
				F2FS_OPTION(sbi).whint_mode = WHINT_MODE_OFF;
			} else if (strlen(name) == 8 &&
					!strncmp(name, "fs-based", 8)) {
				F2FS_OPTION(sbi).whint_mode = WHINT_MODE_FS;
			} else {
				kvfree(name);
				return -EINVAL;
			}
			kvfree(name);
			break;
		case Opt_alloc:
			name = match_strdup(&args[0]);
			if (!name)
				return -ENOMEM;

			if (strlen(name) == 7 &&
					!strncmp(name, "default", 7)) {
				F2FS_OPTION(sbi).alloc_mode = ALLOC_MODE_DEFAULT;
			} else if (strlen(name) == 5 &&
					!strncmp(name, "reuse", 5)) {
				F2FS_OPTION(sbi).alloc_mode = ALLOC_MODE_REUSE;
			} else {
				kvfree(name);
				return -EINVAL;
			}
			kvfree(name);
			break;
		case Opt_fsync:
			name = match_strdup(&args[0]);
			if (!name)
				return -ENOMEM;
			if (strlen(name) == 5 &&
					!strncmp(name, "posix", 5)) {
				F2FS_OPTION(sbi).fsync_mode = FSYNC_MODE_POSIX;
			} else if (strlen(name) == 6 &&
					!strncmp(name, "strict", 6)) {
				F2FS_OPTION(sbi).fsync_mode = FSYNC_MODE_STRICT;
			} else if (strlen(name) == 9 &&
					!strncmp(name, "nobarrier", 9)) {
				F2FS_OPTION(sbi).fsync_mode =
							FSYNC_MODE_NOBARRIER;
			} else {
				kvfree(name);
				return -EINVAL;
			}
			kvfree(name);
			break;
		case Opt_test_dummy_encryption:
#ifdef CONFIG_FS_ENCRYPTION
			if (!f2fs_sb_has_encrypt(sbi)) {
				f2fs_msg(sb, KERN_ERR, "Encrypt feature is off");
				return -EINVAL;
			}

			F2FS_OPTION(sbi).test_dummy_encryption = true;
			f2fs_msg(sb, KERN_INFO,
					"Test dummy encryption mode enabled");
#else
			f2fs_msg(sb, KERN_INFO,
					"Test dummy encryption mount option ignored");
#endif
			break;
		case Opt_checkpoint:
			name = match_strdup(&args[0]);
			if (!name)
				return -ENOMEM;

			if (strlen(name) == 6 &&
					!strncmp(name, "enable", 6)) {
				clear_opt(sbi, DISABLE_CHECKPOINT);
			} else if (strlen(name) == 7 &&
					!strncmp(name, "disable", 7)) {
				set_opt(sbi, DISABLE_CHECKPOINT);
			} else {
				kvfree(name);
				return -EINVAL;
			}
			kvfree(name);
			break;
		default:
			f2fs_msg(sb, KERN_ERR,
				"Unrecognized mount option \"%s\" or missing value",
				p);
			return -EINVAL;
		}
	}
#ifdef CONFIG_QUOTA
	if (f2fs_check_quota_options(sbi))
		return -EINVAL;
#else
	if (f2fs_sb_has_quota_ino(sbi) && !f2fs_readonly(sbi->sb)) {
		f2fs_msg(sbi->sb, KERN_INFO,
			 "Filesystem with quota feature cannot be mounted RDWR "
			 "without CONFIG_QUOTA");
		return -EINVAL;
	}
	if (f2fs_sb_has_project_quota(sbi) && !f2fs_readonly(sbi->sb)) {
		f2fs_msg(sb, KERN_ERR,
			"Filesystem with project quota feature cannot be "
			"mounted RDWR without CONFIG_QUOTA");
		return -EINVAL;
	}
#endif

	if (F2FS_IO_SIZE_BITS(sbi) && !test_opt(sbi, LFS)) {
		f2fs_msg(sb, KERN_ERR,
				"Should set mode=lfs with %uKB-sized IO",
				F2FS_IO_SIZE_KB(sbi));
		return -EINVAL;
	}

	if (test_opt(sbi, INLINE_XATTR_SIZE)) {
		int min_size, max_size;

		if (!f2fs_sb_has_extra_attr(sbi) ||
			!f2fs_sb_has_flexible_inline_xattr(sbi)) {
			f2fs_msg(sb, KERN_ERR,
					"extra_attr or flexible_inline_xattr "
					"feature is off");
			return -EINVAL;
		}
		if (!test_opt(sbi, INLINE_XATTR)) {
			f2fs_msg(sb, KERN_ERR,
					"inline_xattr_size option should be "
					"set with inline_xattr option");
			return -EINVAL;
		}

		min_size = sizeof(struct f2fs_xattr_header) / sizeof(__le32);
		max_size = MAX_INLINE_XATTR_SIZE;

		if (F2FS_OPTION(sbi).inline_xattr_size < min_size ||
				F2FS_OPTION(sbi).inline_xattr_size > max_size) {
			f2fs_msg(sb, KERN_ERR,
				"inline xattr size is out of range: %d ~ %d",
				min_size, max_size);
			return -EINVAL;
		}
	}

	if (test_opt(sbi, DISABLE_CHECKPOINT) && test_opt(sbi, LFS)) {
		f2fs_msg(sb, KERN_ERR,
				"LFS not compatible with checkpoint=disable");
		return -EINVAL;
	}
	/*
	 * Do not pass down write hints if the number of active logs is less
	 * than NR_CURSEG_TYPE.
	 */
	if (F2FS_OPTION(sbi).active_logs != NR_CURSEG_TYPE)
		F2FS_OPTION(sbi).whint_mode = WHINT_MODE_OFF;
	return 0;
}
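
/*
 * A typical invocation exercising this parser might look like
 * (illustrative, not a prescribed configuration):
 *
 *	mount -t f2fs -o background_gc=sync,discard,noinline_dentry,\
 *	reserve_root=32768,resuid=0,resgid=0,mode=lfs,fsync_mode=strict \
 *	/dev/sdb1 /mnt
 *
 * Each comma-separated token is matched against f2fs_tokens above; an
 * unknown token or a missing value fails the whole mount with -EINVAL.
 */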
static struct inode *f2fs_alloc_inode(struct super_block *sb)
{
	struct f2fs_inode_info *fi;

	fi = kmem_cache_alloc(f2fs_inode_cachep, GFP_F2FS_ZERO);
	if (!fi)
		return NULL;

	init_once((void *) fi);

	/* Initialize f2fs-specific inode info */
	atomic_set(&fi->dirty_pages, 0);
	init_rwsem(&fi->i_sem);
	INIT_LIST_HEAD(&fi->dirty_list);
	INIT_LIST_HEAD(&fi->gdirty_list);
	INIT_LIST_HEAD(&fi->inmem_ilist);
	INIT_LIST_HEAD(&fi->inmem_pages);
	mutex_init(&fi->inmem_lock);
	init_rwsem(&fi->i_gc_rwsem[READ]);
	init_rwsem(&fi->i_gc_rwsem[WRITE]);
	init_rwsem(&fi->i_mmap_sem);
	init_rwsem(&fi->i_xattr_sem);

	/* Will be used by directory only */
	fi->i_dir_level = F2FS_SB(sb)->dir_level;

	return &fi->vfs_inode;
}
static int f2fs_drop_inode(struct inode *inode)
{
	int ret;

	/*
	 * This is to avoid a deadlock condition like below.
	 * writeback_single_inode(inode)
	 *  - f2fs_write_data_page
	 *    - f2fs_gc -> iput -> evict
	 *       - inode_wait_for_writeback(inode)
	 */
	if ((!inode_unhashed(inode) && inode->i_state & I_SYNC)) {
		if (!inode->i_nlink && !is_bad_inode(inode)) {
			/* to avoid evict_inode call simultaneously */
			atomic_inc(&inode->i_count);
			spin_unlock(&inode->i_lock);

			/* any remaining atomic pages should be discarded */
			if (f2fs_is_atomic_file(inode))
				f2fs_drop_inmem_pages(inode);

			/* fi->extent_tree must be kept for the writepage path */
			f2fs_destroy_extent_node(inode);

			sb_start_intwrite(inode->i_sb);
			f2fs_i_size_write(inode, 0);

			f2fs_submit_merged_write_cond(F2FS_I_SB(inode),
					inode, NULL, 0, DATA);
			truncate_inode_pages_final(inode->i_mapping);

			if (F2FS_HAS_BLOCKS(inode))
				f2fs_truncate(inode);

			sb_end_intwrite(inode->i_sb);

			spin_lock(&inode->i_lock);
			atomic_dec(&inode->i_count);
		}
		trace_f2fs_drop_inode(inode, 0);
		return 0;
	}
	ret = generic_drop_inode(inode);
	trace_f2fs_drop_inode(inode, ret);
	return ret;
}
int f2fs_inode_dirtied(struct inode *inode, bool sync)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	int ret = 0;

	spin_lock(&sbi->inode_lock[DIRTY_META]);
	if (is_inode_flag_set(inode, FI_DIRTY_INODE)) {
		ret = 1;
	} else {
		set_inode_flag(inode, FI_DIRTY_INODE);
		stat_inc_dirty_inode(sbi, DIRTY_META);
	}
	if (sync && list_empty(&F2FS_I(inode)->gdirty_list)) {
		list_add_tail(&F2FS_I(inode)->gdirty_list,
				&sbi->inode_list[DIRTY_META]);
		inc_page_count(sbi, F2FS_DIRTY_IMETA);
	}
	spin_unlock(&sbi->inode_lock[DIRTY_META]);
	return ret;
}
void f2fs_inode_synced(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

	spin_lock(&sbi->inode_lock[DIRTY_META]);
	if (!is_inode_flag_set(inode, FI_DIRTY_INODE)) {
		spin_unlock(&sbi->inode_lock[DIRTY_META]);
		return;
	}
	if (!list_empty(&F2FS_I(inode)->gdirty_list)) {
		list_del_init(&F2FS_I(inode)->gdirty_list);
		dec_page_count(sbi, F2FS_DIRTY_IMETA);
	}
	clear_inode_flag(inode, FI_DIRTY_INODE);
	clear_inode_flag(inode, FI_AUTO_RECOVER);
	stat_dec_dirty_inode(F2FS_I_SB(inode), DIRTY_META);
	spin_unlock(&sbi->inode_lock[DIRTY_META]);
}
/*
 * f2fs_dirty_inode() is called from __mark_inode_dirty().
 *
 * We call f2fs_inode_dirtied() here so the dirty inode is later written
 * back through write_inode.
 */
static void f2fs_dirty_inode(struct inode *inode, int flags)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

	if (inode->i_ino == F2FS_NODE_INO(sbi) ||
			inode->i_ino == F2FS_META_INO(sbi))
		return;

	if (flags == I_DIRTY_TIME)
		return;

	if (is_inode_flag_set(inode, FI_AUTO_RECOVER))
		clear_inode_flag(inode, FI_AUTO_RECOVER);

	f2fs_inode_dirtied(inode, false);
}
static void f2fs_i_callback(struct rcu_head *head)
{
	struct inode *inode = container_of(head, struct inode, i_rcu);

	kmem_cache_free(f2fs_inode_cachep, F2FS_I(inode));
}

static void f2fs_destroy_inode(struct inode *inode)
{
	call_rcu(&inode->i_rcu, f2fs_i_callback);
}

static void destroy_percpu_info(struct f2fs_sb_info *sbi)
{
	percpu_counter_destroy(&sbi->alloc_valid_block_count);
	percpu_counter_destroy(&sbi->total_valid_inode_count);
}
static void destroy_device_list(struct f2fs_sb_info *sbi)
{
	int i;

	for (i = 0; i < sbi->s_ndevs; i++) {
		blkdev_put(FDEV(i).bdev, FMODE_EXCL);
#ifdef CONFIG_BLK_DEV_ZONED
		kvfree(FDEV(i).blkz_type);
#endif
	}
	kvfree(sbi->devs);
}
static void f2fs_put_super(struct super_block *sb)
{
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	int i;
	bool dropped;

	f2fs_quota_off_umount(sb);

	/* prevent remaining shrinker jobs */
	mutex_lock(&sbi->umount_mutex);

	/*
	 * We don't need to do checkpoint when superblock is clean.
	 * But, if the previous checkpoint was not done by a clean umount,
	 * we need to do a clean checkpoint again.
	 */
	if ((is_sbi_flag_set(sbi, SBI_IS_DIRTY) ||
			!is_set_ckpt_flags(sbi, CP_UMOUNT_FLAG))) {
		struct cp_control cpc = {
			.reason = CP_UMOUNT,
		};
		f2fs_write_checkpoint(sbi, &cpc);
	}

	/* be sure to wait for any on-going discard commands */
	dropped = f2fs_issue_discard_timeout(sbi);

	if ((f2fs_hw_support_discard(sbi) || f2fs_hw_should_discard(sbi)) &&
					!sbi->discard_blks && !dropped) {
		struct cp_control cpc = {
			.reason = CP_UMOUNT | CP_TRIMMED,
		};
		f2fs_write_checkpoint(sbi, &cpc);
	}

	/*
	 * Normally the superblock is clean, so we need to release this.
	 * In addition, EIO will skip the checkpoint, so we need this as well.
	 */
	f2fs_release_ino_entry(sbi, true);

	f2fs_leave_shrinker(sbi);
	mutex_unlock(&sbi->umount_mutex);

	/* in our cp_error case, we can wait for any writeback page */
	f2fs_flush_merged_writes(sbi);

	f2fs_wait_on_all_pages_writeback(sbi);

	f2fs_bug_on(sbi, sbi->fsync_node_num);

	iput(sbi->node_inode);
	sbi->node_inode = NULL;

	iput(sbi->meta_inode);
	sbi->meta_inode = NULL;

	/*
	 * iput() can update stat information, if f2fs_write_checkpoint()
	 * above failed with error.
	 */
	f2fs_destroy_stats(sbi);

	/* destroy f2fs internal modules */
	f2fs_destroy_node_manager(sbi);
	f2fs_destroy_segment_manager(sbi);

	kvfree(sbi->ckpt);

	f2fs_unregister_sysfs(sbi);

	sb->s_fs_info = NULL;
	if (sbi->s_chksum_driver)
		crypto_free_shash(sbi->s_chksum_driver);
	kvfree(sbi->raw_super);

	destroy_device_list(sbi);
	mempool_destroy(sbi->write_io_dummy);
#ifdef CONFIG_QUOTA
	for (i = 0; i < MAXQUOTAS; i++)
		kvfree(F2FS_OPTION(sbi).s_qf_names[i]);
#endif
	destroy_percpu_info(sbi);
	for (i = 0; i < NR_PAGE_TYPE; i++)
		kvfree(sbi->write_io[i]);
	kvfree(sbi);
}
int f2fs_sync_fs(struct super_block *sb, int sync)
{
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	int err = 0;

	if (unlikely(f2fs_cp_error(sbi)))
		return 0;
	if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED)))
		return 0;

	trace_f2fs_sync_fs(sb, sync);

	if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
		return -EAGAIN;

	if (sync) {
		struct cp_control cpc;

		cpc.reason = __get_cp_reason(sbi);

		mutex_lock(&sbi->gc_mutex);
		err = f2fs_write_checkpoint(sbi, &cpc);
		mutex_unlock(&sbi->gc_mutex);
	}
	f2fs_trace_ios(NULL, 1);

	return err;
}
static int f2fs_freeze(struct super_block *sb)
{
	if (f2fs_readonly(sb))
		return 0;

	/* IO error happened before */
	if (unlikely(f2fs_cp_error(F2FS_SB(sb))))
		return -EIO;

	/* must be clean, since sync_filesystem() was already called */
	if (is_sbi_flag_set(F2FS_SB(sb), SBI_IS_DIRTY))
		return -EINVAL;
	return 0;
}

static int f2fs_unfreeze(struct super_block *sb)
{
	return 0;
}
#ifdef CONFIG_QUOTA
static int f2fs_statfs_project(struct super_block *sb,
				kprojid_t projid, struct kstatfs *buf)
{
	struct kqid qid;
	struct dquot *dquot;
	u64 limit;
	u64 curblock;

	qid = make_kqid_projid(projid);
	dquot = dqget(sb, qid);
	if (IS_ERR(dquot))
		return PTR_ERR(dquot);
	spin_lock(&dquot->dq_dqb_lock);

	limit = (dquot->dq_dqb.dqb_bsoftlimit ?
		 dquot->dq_dqb.dqb_bsoftlimit :
		 dquot->dq_dqb.dqb_bhardlimit) >> sb->s_blocksize_bits;
	if (limit && buf->f_blocks > limit) {
		curblock = dquot->dq_dqb.dqb_curspace >> sb->s_blocksize_bits;
		buf->f_blocks = limit;
		buf->f_bfree = buf->f_bavail =
			(buf->f_blocks > curblock) ?
			 (buf->f_blocks - curblock) : 0;
	}

	limit = dquot->dq_dqb.dqb_isoftlimit ?
		dquot->dq_dqb.dqb_isoftlimit :
		dquot->dq_dqb.dqb_ihardlimit;
	if (limit && buf->f_files > limit) {
		buf->f_files = limit;
		buf->f_ffree =
			(buf->f_files > dquot->dq_dqb.dqb_curinodes) ?
			 (buf->f_files - dquot->dq_dqb.dqb_curinodes) : 0;
	}

	spin_unlock(&dquot->dq_dqb_lock);
	dqput(dquot);
	return 0;
}
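
/*
 * Worked example for the clamping above (illustrative numbers): with a
 * 4KB block size (s_blocksize_bits == 12), a project block soft limit of
 * 4 GiB becomes limit = 1,048,576 blocks; if dqb_curspace is 1 GiB
 * (262,144 blocks), df inside that project tree reports
 * f_blocks = 1,048,576 and f_bfree = f_bavail = 786,432 blocks.
 */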
static int f2fs_statfs(struct dentry *dentry, struct kstatfs *buf)
{
	struct super_block *sb = dentry->d_sb;
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	u64 id = huge_encode_dev(sb->s_bdev->bd_dev);
	block_t total_count, user_block_count, start_count;
	u64 avail_node_count;

	total_count = le64_to_cpu(sbi->raw_super->block_count);
	user_block_count = sbi->user_block_count;
	start_count = le32_to_cpu(sbi->raw_super->segment0_blkaddr);
	buf->f_type = F2FS_SUPER_MAGIC;
	buf->f_bsize = sbi->blocksize;

	buf->f_blocks = total_count - start_count;
	buf->f_bfree = user_block_count - valid_user_blocks(sbi) -
						sbi->current_reserved_blocks;
	if (unlikely(buf->f_bfree <= sbi->unusable_block_count))
		buf->f_bfree = 0;
	else
		buf->f_bfree -= sbi->unusable_block_count;

	if (buf->f_bfree > F2FS_OPTION(sbi).root_reserved_blocks)
		buf->f_bavail = buf->f_bfree -
				F2FS_OPTION(sbi).root_reserved_blocks;
	else
		buf->f_bavail = 0;

	avail_node_count = sbi->total_node_count - sbi->nquota_files -
						F2FS_RESERVED_NODE_NUM;

	if (avail_node_count > user_block_count) {
		buf->f_files = user_block_count;
		buf->f_ffree = buf->f_bavail;
	} else {
		buf->f_files = avail_node_count;
		buf->f_ffree = min(avail_node_count - valid_node_count(sbi),
					buf->f_bavail);
	}

	buf->f_namelen = F2FS_NAME_LEN;
	buf->f_fsid.val[0] = (u32)id;
	buf->f_fsid.val[1] = (u32)(id >> 32);

#ifdef CONFIG_QUOTA
	if (is_inode_flag_set(dentry->d_inode, FI_PROJ_INHERIT) &&
			sb_has_quota_limits_enabled(sb, PRJQUOTA)) {
		f2fs_statfs_project(sb, F2FS_I(dentry->d_inode)->i_projid, buf);
	}
#endif
	return 0;
}
static inline void f2fs_show_quota_options(struct seq_file *seq,
							struct super_block *sb)
{
#ifdef CONFIG_QUOTA
	struct f2fs_sb_info *sbi = F2FS_SB(sb);

	if (F2FS_OPTION(sbi).s_jquota_fmt) {
		char *fmtname = "";

		switch (F2FS_OPTION(sbi).s_jquota_fmt) {
		case QFMT_VFS_OLD:
			fmtname = "vfsold";
			break;
		case QFMT_VFS_V0:
			fmtname = "vfsv0";
			break;
		case QFMT_VFS_V1:
			fmtname = "vfsv1";
			break;
		}
		seq_printf(seq, ",jqfmt=%s", fmtname);
	}

	if (F2FS_OPTION(sbi).s_qf_names[USRQUOTA])
		seq_show_option(seq, "usrjquota",
			F2FS_OPTION(sbi).s_qf_names[USRQUOTA]);

	if (F2FS_OPTION(sbi).s_qf_names[GRPQUOTA])
		seq_show_option(seq, "grpjquota",
			F2FS_OPTION(sbi).s_qf_names[GRPQUOTA]);

	if (F2FS_OPTION(sbi).s_qf_names[PRJQUOTA])
		seq_show_option(seq, "prjjquota",
			F2FS_OPTION(sbi).s_qf_names[PRJQUOTA]);
#endif
}
static int f2fs_show_options(struct seq_file *seq, struct dentry *root)
{
	struct f2fs_sb_info *sbi = F2FS_SB(root->d_sb);

	if (!f2fs_readonly(sbi->sb) && test_opt(sbi, BG_GC)) {
		if (test_opt(sbi, FORCE_FG_GC))
			seq_printf(seq, ",background_gc=%s", "sync");
		else
			seq_printf(seq, ",background_gc=%s", "on");
	} else {
		seq_printf(seq, ",background_gc=%s", "off");
	}
	if (test_opt(sbi, DISABLE_ROLL_FORWARD))
		seq_puts(seq, ",disable_roll_forward");
	if (test_opt(sbi, DISCARD))
		seq_puts(seq, ",discard");
	if (test_opt(sbi, NOHEAP))
		seq_puts(seq, ",no_heap");
	else
		seq_puts(seq, ",heap");
#ifdef CONFIG_F2FS_FS_XATTR
	if (test_opt(sbi, XATTR_USER))
		seq_puts(seq, ",user_xattr");
	else
		seq_puts(seq, ",nouser_xattr");
	if (test_opt(sbi, INLINE_XATTR))
		seq_puts(seq, ",inline_xattr");
	else
		seq_puts(seq, ",noinline_xattr");
	if (test_opt(sbi, INLINE_XATTR_SIZE))
		seq_printf(seq, ",inline_xattr_size=%u",
					F2FS_OPTION(sbi).inline_xattr_size);
#endif
#ifdef CONFIG_F2FS_FS_POSIX_ACL
	if (test_opt(sbi, POSIX_ACL))
		seq_puts(seq, ",acl");
	else
		seq_puts(seq, ",noacl");
#endif
	if (test_opt(sbi, DISABLE_EXT_IDENTIFY))
		seq_puts(seq, ",disable_ext_identify");
	if (test_opt(sbi, INLINE_DATA))
		seq_puts(seq, ",inline_data");
	else
		seq_puts(seq, ",noinline_data");
	if (test_opt(sbi, INLINE_DENTRY))
		seq_puts(seq, ",inline_dentry");
	else
		seq_puts(seq, ",noinline_dentry");
	if (!f2fs_readonly(sbi->sb) && test_opt(sbi, FLUSH_MERGE))
		seq_puts(seq, ",flush_merge");
	if (test_opt(sbi, NOBARRIER))
		seq_puts(seq, ",nobarrier");
	if (test_opt(sbi, FASTBOOT))
		seq_puts(seq, ",fastboot");
	if (test_opt(sbi, EXTENT_CACHE))
		seq_puts(seq, ",extent_cache");
	else
		seq_puts(seq, ",noextent_cache");
	if (test_opt(sbi, DATA_FLUSH))
		seq_puts(seq, ",data_flush");

	seq_puts(seq, ",mode=");
	if (test_opt(sbi, ADAPTIVE))
		seq_puts(seq, "adaptive");
	else if (test_opt(sbi, LFS))
		seq_puts(seq, "lfs");
	seq_printf(seq, ",active_logs=%u", F2FS_OPTION(sbi).active_logs);
	if (test_opt(sbi, RESERVE_ROOT))
		seq_printf(seq, ",reserve_root=%u,resuid=%u,resgid=%u",
				F2FS_OPTION(sbi).root_reserved_blocks,
				from_kuid_munged(&init_user_ns,
					F2FS_OPTION(sbi).s_resuid),
				from_kgid_munged(&init_user_ns,
					F2FS_OPTION(sbi).s_resgid));
	if (F2FS_IO_SIZE_BITS(sbi))
		seq_printf(seq, ",io_bits=%u",
				F2FS_OPTION(sbi).write_io_size_bits);
#ifdef CONFIG_F2FS_FAULT_INJECTION
	if (test_opt(sbi, FAULT_INJECTION)) {
		seq_printf(seq, ",fault_injection=%u",
				F2FS_OPTION(sbi).fault_info.inject_rate);
		seq_printf(seq, ",fault_type=%u",
				F2FS_OPTION(sbi).fault_info.inject_type);
	}
#endif
#ifdef CONFIG_QUOTA
	if (test_opt(sbi, QUOTA))
		seq_puts(seq, ",quota");
	if (test_opt(sbi, USRQUOTA))
		seq_puts(seq, ",usrquota");
	if (test_opt(sbi, GRPQUOTA))
		seq_puts(seq, ",grpquota");
	if (test_opt(sbi, PRJQUOTA))
		seq_puts(seq, ",prjquota");
#endif
	f2fs_show_quota_options(seq, sbi->sb);
	if (F2FS_OPTION(sbi).whint_mode == WHINT_MODE_USER)
		seq_printf(seq, ",whint_mode=%s", "user-based");
	else if (F2FS_OPTION(sbi).whint_mode == WHINT_MODE_FS)
		seq_printf(seq, ",whint_mode=%s", "fs-based");
#ifdef CONFIG_FS_ENCRYPTION
	if (F2FS_OPTION(sbi).test_dummy_encryption)
		seq_puts(seq, ",test_dummy_encryption");
#endif

	if (F2FS_OPTION(sbi).alloc_mode == ALLOC_MODE_DEFAULT)
		seq_printf(seq, ",alloc_mode=%s", "default");
	else if (F2FS_OPTION(sbi).alloc_mode == ALLOC_MODE_REUSE)
		seq_printf(seq, ",alloc_mode=%s", "reuse");

	if (test_opt(sbi, DISABLE_CHECKPOINT))
		seq_puts(seq, ",checkpoint=disable");

	if (F2FS_OPTION(sbi).fsync_mode == FSYNC_MODE_POSIX)
		seq_printf(seq, ",fsync_mode=%s", "posix");
	else if (F2FS_OPTION(sbi).fsync_mode == FSYNC_MODE_STRICT)
		seq_printf(seq, ",fsync_mode=%s", "strict");
	else if (F2FS_OPTION(sbi).fsync_mode == FSYNC_MODE_NOBARRIER)
		seq_printf(seq, ",fsync_mode=%s", "nobarrier");
	return 0;
}
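
/*
 * With the defaults set in default_options() below, the line emitted
 * into /proc/mounts looks roughly like (illustrative):
 *
 *	/dev/sdb1 /mnt f2fs rw,lazytime,background_gc=on,discard,no_heap,
 *	user_xattr,inline_xattr,acl,inline_data,inline_dentry,flush_merge,
 *	extent_cache,mode=adaptive,active_logs=6,alloc_mode=default,
 *	fsync_mode=posix 0 0
 */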
static void default_options(struct f2fs_sb_info *sbi)
{
	/* init some FS parameters */
	F2FS_OPTION(sbi).active_logs = NR_CURSEG_TYPE;
	F2FS_OPTION(sbi).inline_xattr_size = DEFAULT_INLINE_XATTR_ADDRS;
	F2FS_OPTION(sbi).whint_mode = WHINT_MODE_OFF;
	F2FS_OPTION(sbi).alloc_mode = ALLOC_MODE_DEFAULT;
	F2FS_OPTION(sbi).fsync_mode = FSYNC_MODE_POSIX;
	F2FS_OPTION(sbi).test_dummy_encryption = false;
	F2FS_OPTION(sbi).s_resuid = make_kuid(&init_user_ns, F2FS_DEF_RESUID);
	F2FS_OPTION(sbi).s_resgid = make_kgid(&init_user_ns, F2FS_DEF_RESGID);

	set_opt(sbi, BG_GC);
	set_opt(sbi, INLINE_XATTR);
	set_opt(sbi, INLINE_DATA);
	set_opt(sbi, INLINE_DENTRY);
	set_opt(sbi, EXTENT_CACHE);
	set_opt(sbi, NOHEAP);
	clear_opt(sbi, DISABLE_CHECKPOINT);
	sbi->sb->s_flags |= SB_LAZYTIME;
	set_opt(sbi, FLUSH_MERGE);
	set_opt(sbi, DISCARD);
	if (f2fs_sb_has_blkzoned(sbi))
		set_opt_mode(sbi, F2FS_MOUNT_LFS);
	else
		set_opt_mode(sbi, F2FS_MOUNT_ADAPTIVE);

#ifdef CONFIG_F2FS_FS_XATTR
	set_opt(sbi, XATTR_USER);
#endif
#ifdef CONFIG_F2FS_FS_POSIX_ACL
	set_opt(sbi, POSIX_ACL);
#endif

	f2fs_build_fault_attr(sbi, 0, 0);
}
#ifdef CONFIG_QUOTA
static int f2fs_enable_quotas(struct super_block *sb);
#endif
static int f2fs_disable_checkpoint(struct f2fs_sb_info *sbi)
{
	unsigned int s_flags = sbi->sb->s_flags;
	struct cp_control cpc;
	int err = 0;
	int ret;

	if (s_flags & SB_RDONLY) {
		f2fs_msg(sbi->sb, KERN_ERR,
				"checkpoint=disable on readonly fs");
		return -EINVAL;
	}
	sbi->sb->s_flags |= SB_ACTIVE;

	f2fs_update_time(sbi, DISABLE_TIME);

	while (!f2fs_time_over(sbi, DISABLE_TIME)) {
		mutex_lock(&sbi->gc_mutex);
		err = f2fs_gc(sbi, true, false, NULL_SEGNO);
		if (err == -ENODATA) {
			err = 0;
			break;
		}
		if (err && err != -EAGAIN)
			break;
	}

	ret = sync_filesystem(sbi->sb);
	if (ret || err) {
		err = ret ? ret : err;
		goto restore_flag;
	}

	if (f2fs_disable_cp_again(sbi)) {
		err = -EAGAIN;
		goto restore_flag;
	}

	mutex_lock(&sbi->gc_mutex);
	cpc.reason = CP_PAUSE;
	set_sbi_flag(sbi, SBI_CP_DISABLED);
	f2fs_write_checkpoint(sbi, &cpc);

	sbi->unusable_block_count = 0;
	mutex_unlock(&sbi->gc_mutex);
restore_flag:
	sbi->sb->s_flags = s_flags;	/* Restore SB_RDONLY status */
	return err;
}
static void f2fs_enable_checkpoint(struct f2fs_sb_info *sbi)
{
	mutex_lock(&sbi->gc_mutex);
	f2fs_dirty_to_prefree(sbi);

	clear_sbi_flag(sbi, SBI_CP_DISABLED);
	set_sbi_flag(sbi, SBI_IS_DIRTY);
	mutex_unlock(&sbi->gc_mutex);

	f2fs_sync_fs(sbi->sb, 1);
}
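
/*
 * The disable/enable pair above is driven from f2fs_remount(); a typical
 * round-trip from userspace looks like (illustrative):
 *
 *	mount -o remount,checkpoint=disable /mnt   # GC, then CP_PAUSE checkpoint
 *	... perform bulk update ...
 *	mount -o remount,checkpoint=enable /mnt    # prefree + f2fs_sync_fs(sb, 1)
 *
 * While disabled, freed blocks are only tracked in unusable_block_count
 * and are not reclaimable until the re-enable path writes a real
 * checkpoint.
 */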
static int f2fs_remount(struct super_block *sb, int *flags, char *data)
{
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	struct f2fs_mount_info org_mount_opt;
	unsigned long old_sb_flags;
	int err;
	bool need_restart_gc = false;
	bool need_stop_gc = false;
	bool no_extent_cache = !test_opt(sbi, EXTENT_CACHE);
	bool disable_checkpoint = test_opt(sbi, DISABLE_CHECKPOINT);
	bool checkpoint_changed;
#ifdef CONFIG_QUOTA
	int i, j;
#endif

	/*
	 * Save the old mount options in case we
	 * need to restore them.
	 */
	org_mount_opt = sbi->mount_opt;
	old_sb_flags = sb->s_flags;

#ifdef CONFIG_QUOTA
	org_mount_opt.s_jquota_fmt = F2FS_OPTION(sbi).s_jquota_fmt;
	for (i = 0; i < MAXQUOTAS; i++) {
		if (F2FS_OPTION(sbi).s_qf_names[i]) {
			org_mount_opt.s_qf_names[i] =
				kstrdup(F2FS_OPTION(sbi).s_qf_names[i],
				GFP_KERNEL);
			if (!org_mount_opt.s_qf_names[i]) {
				for (j = 0; j < i; j++)
					kvfree(org_mount_opt.s_qf_names[j]);
				return -ENOMEM;
			}
		} else {
			org_mount_opt.s_qf_names[i] = NULL;
		}
	}
#endif

	/* recover superblocks we couldn't write due to previous RO mount */
	if (!(*flags & SB_RDONLY) && is_sbi_flag_set(sbi, SBI_NEED_SB_WRITE)) {
		err = f2fs_commit_super(sbi, false);
		f2fs_msg(sb, KERN_INFO,
			"Try to recover all the superblocks, ret: %d", err);
		if (!err)
			clear_sbi_flag(sbi, SBI_NEED_SB_WRITE);
	}

	default_options(sbi);

	/* parse mount options */
	err = parse_options(sb, data);
	if (err)
		goto restore_opts;
	checkpoint_changed =
			disable_checkpoint != test_opt(sbi, DISABLE_CHECKPOINT);

	/*
	 * Previous and new state of filesystem is RO,
	 * so skip checking GC and FLUSH_MERGE conditions.
	 */
	if (f2fs_readonly(sb) && (*flags & SB_RDONLY))
		goto skip;

#ifdef CONFIG_QUOTA
	if (!f2fs_readonly(sb) && (*flags & SB_RDONLY)) {
		err = dquot_suspend(sb, -1);
		if (err < 0)
			goto restore_opts;
	} else if (f2fs_readonly(sb) && !(*flags & SB_RDONLY)) {
		/* dquot_resume needs RW */
		sb->s_flags &= ~SB_RDONLY;
		if (sb_any_quota_suspended(sb)) {
			dquot_resume(sb, -1);
		} else if (f2fs_sb_has_quota_ino(sbi)) {
			err = f2fs_enable_quotas(sb);
			if (err)
				goto restore_opts;
		}
	}
#endif
	/* disallow enable/disable extent_cache dynamically */
	if (no_extent_cache == !!test_opt(sbi, EXTENT_CACHE)) {
		err = -EINVAL;
		f2fs_msg(sbi->sb, KERN_WARNING,
				"switch extent_cache option is not allowed");
		goto restore_opts;
	}

	if ((*flags & SB_RDONLY) && test_opt(sbi, DISABLE_CHECKPOINT)) {
		err = -EINVAL;
		f2fs_msg(sbi->sb, KERN_WARNING,
			"disabling checkpoint not compatible with read-only");
		goto restore_opts;
	}

	/*
	 * We stop the GC thread if FS is mounted as RO
	 * or if background_gc = off is passed in mount
	 * option. Also sync the filesystem.
	 */
	if ((*flags & SB_RDONLY) || !test_opt(sbi, BG_GC)) {
		if (sbi->gc_thread) {
			f2fs_stop_gc_thread(sbi);
			need_restart_gc = true;
		}
	} else if (!sbi->gc_thread) {
		err = f2fs_start_gc_thread(sbi);
		if (err)
			goto restore_opts;
		need_stop_gc = true;
	}

	if (*flags & SB_RDONLY ||
		F2FS_OPTION(sbi).whint_mode != org_mount_opt.whint_mode) {
		writeback_inodes_sb(sb, WB_REASON_SYNC);

		set_sbi_flag(sbi, SBI_IS_DIRTY);
		set_sbi_flag(sbi, SBI_IS_CLOSE);
		f2fs_sync_fs(sb, 1);
		clear_sbi_flag(sbi, SBI_IS_CLOSE);
	}

	if (checkpoint_changed) {
		if (test_opt(sbi, DISABLE_CHECKPOINT)) {
			err = f2fs_disable_checkpoint(sbi);
			if (err)
				goto restore_gc;
		} else {
			f2fs_enable_checkpoint(sbi);
		}
	}

	/*
	 * We stop the issue_flush thread if FS is mounted as RO
	 * or if flush_merge is not passed in the mount options.
	 */
	if ((*flags & SB_RDONLY) || !test_opt(sbi, FLUSH_MERGE)) {
		clear_opt(sbi, FLUSH_MERGE);
		f2fs_destroy_flush_cmd_control(sbi, false);
	} else {
		err = f2fs_create_flush_cmd_control(sbi);
		if (err)
			goto restore_gc;
	}
skip:
#ifdef CONFIG_QUOTA
	/* Release old quota file names */
	for (i = 0; i < MAXQUOTAS; i++)
		kvfree(org_mount_opt.s_qf_names[i]);
#endif
	/* Update the POSIXACL Flag */
	sb->s_flags = (sb->s_flags & ~SB_POSIXACL) |
		(test_opt(sbi, POSIX_ACL) ? SB_POSIXACL : 0);

	limit_reserve_root(sbi);
	*flags = (*flags & ~SB_LAZYTIME) | (sb->s_flags & SB_LAZYTIME);
	return 0;
restore_gc:
	if (need_restart_gc) {
		if (f2fs_start_gc_thread(sbi))
			f2fs_msg(sbi->sb, KERN_WARNING,
				"background gc thread has stopped");
	} else if (need_stop_gc) {
		f2fs_stop_gc_thread(sbi);
	}
restore_opts:
#ifdef CONFIG_QUOTA
	F2FS_OPTION(sbi).s_jquota_fmt = org_mount_opt.s_jquota_fmt;
	for (i = 0; i < MAXQUOTAS; i++) {
		kvfree(F2FS_OPTION(sbi).s_qf_names[i]);
		F2FS_OPTION(sbi).s_qf_names[i] = org_mount_opt.s_qf_names[i];
	}
#endif
	sbi->mount_opt = org_mount_opt;
	sb->s_flags = old_sb_flags;
	return err;
}
#ifdef CONFIG_QUOTA
/* Read data from quotafile */
static ssize_t f2fs_quota_read(struct super_block *sb, int type, char *data,
			       size_t len, loff_t off)
{
	struct inode *inode = sb_dqopt(sb)->files[type];
	struct address_space *mapping = inode->i_mapping;
	block_t blkidx = F2FS_BYTES_TO_BLK(off);
	int offset = off & (sb->s_blocksize - 1);
	int tocopy;
	size_t toread;
	loff_t i_size = i_size_read(inode);
	struct page *page;
	char *kaddr;

	if (off > i_size)
		return 0;

	if (off + len > i_size)
		len = i_size - off;
	toread = len;
	while (toread > 0) {
		tocopy = min_t(unsigned long, sb->s_blocksize - offset, toread);
repeat:
		page = read_cache_page_gfp(mapping, blkidx, GFP_NOFS);
		if (IS_ERR(page)) {
			if (PTR_ERR(page) == -ENOMEM) {
				congestion_wait(BLK_RW_ASYNC, HZ/50);
				goto repeat;
			}
			set_sbi_flag(F2FS_SB(sb), SBI_QUOTA_NEED_REPAIR);
			return PTR_ERR(page);
		}

		lock_page(page);

		if (unlikely(page->mapping != mapping)) {
			f2fs_put_page(page, 1);
			goto repeat;
		}
		if (unlikely(!PageUptodate(page))) {
			f2fs_put_page(page, 1);
			set_sbi_flag(F2FS_SB(sb), SBI_QUOTA_NEED_REPAIR);
			return -EIO;
		}

		kaddr = kmap_atomic(page);
		memcpy(data, kaddr + offset, tocopy);
		kunmap_atomic(kaddr);
		f2fs_put_page(page, 1);

		offset = 0;
		toread -= tocopy;
		data += tocopy;
		blkidx++;
	}
	return len;
}
/* Write to quotafile */
static ssize_t f2fs_quota_write(struct super_block *sb, int type,
				const char *data, size_t len, loff_t off)
{
	struct inode *inode = sb_dqopt(sb)->files[type];
	struct address_space *mapping = inode->i_mapping;
	const struct address_space_operations *a_ops = mapping->a_ops;
	int offset = off & (sb->s_blocksize - 1);
	size_t towrite = len;
	struct page *page;
	char *kaddr;
	int err = 0;
	int tocopy;

	while (towrite > 0) {
		tocopy = min_t(unsigned long, sb->s_blocksize - offset,
								towrite);
retry:
		err = a_ops->write_begin(NULL, mapping, off, tocopy, 0,
							&page, NULL);
		if (unlikely(err)) {
			if (err == -ENOMEM) {
				congestion_wait(BLK_RW_ASYNC, HZ/50);
				goto retry;
			}
			set_sbi_flag(F2FS_SB(sb), SBI_QUOTA_NEED_REPAIR);
			break;
		}

		kaddr = kmap_atomic(page);
		memcpy(kaddr + offset, data, tocopy);
		kunmap_atomic(kaddr);
		flush_dcache_page(page);

		a_ops->write_end(NULL, mapping, off, tocopy, tocopy,
						page, NULL);
		offset = 0;
		towrite -= tocopy;
		off += tocopy;
		data += tocopy;
		cond_resched();
	}

	if (len == towrite)
		return err;
	inode->i_mtime = inode->i_ctime = current_time(inode);
	f2fs_mark_inode_dirty_sync(inode, false);
	return len - towrite;
}
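
/*
 * Both helpers above implement the ->quota_read/->quota_write contract
 * through the page cache. A partial success returns the byte count
 * actually copied: e.g. if write_begin() fails after the first of three
 * blocks, f2fs_quota_write() returns one block's worth of bytes
 * (len - towrite) and flags SBI_QUOTA_NEED_REPAIR rather than returning
 * the error outright.
 */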
static struct dquot **f2fs_get_dquots(struct inode *inode)
{
	return F2FS_I(inode)->i_dquot;
}

static qsize_t *f2fs_get_reserved_space(struct inode *inode)
{
	return &F2FS_I(inode)->i_reserved_quota;
}

static int f2fs_quota_on_mount(struct f2fs_sb_info *sbi, int type)
{
	if (is_set_ckpt_flags(sbi, CP_QUOTA_NEED_FSCK_FLAG)) {
		f2fs_msg(sbi->sb, KERN_ERR,
			"quota sysfile may be corrupted, skip loading it");
		return 0;
	}

	return dquot_quota_on_mount(sbi->sb, F2FS_OPTION(sbi).s_qf_names[type],
					F2FS_OPTION(sbi).s_jquota_fmt, type);
}
int f2fs_enable_quota_files(struct f2fs_sb_info *sbi, bool rdonly)
{
	int enabled = 0;
	int i, err;

	if (f2fs_sb_has_quota_ino(sbi) && rdonly) {
		err = f2fs_enable_quotas(sbi->sb);
		if (err) {
			f2fs_msg(sbi->sb, KERN_ERR,
					"Cannot turn on quota_ino: %d", err);
			return 0;
		}
		return 1;
	}

	for (i = 0; i < MAXQUOTAS; i++) {
		if (F2FS_OPTION(sbi).s_qf_names[i]) {
			err = f2fs_quota_on_mount(sbi, i);
			if (!err) {
				enabled = 1;
				continue;
			}
			f2fs_msg(sbi->sb, KERN_ERR,
				"Cannot turn on quotas: %d on %d", err, i);
		}
	}
	return enabled;
}
static int f2fs_quota_enable(struct super_block *sb, int type, int format_id,
							unsigned int flags)
{
	struct inode *qf_inode;
	unsigned long qf_inum;
	int err;

	BUG_ON(!f2fs_sb_has_quota_ino(F2FS_SB(sb)));

	qf_inum = f2fs_qf_ino(sb, type);
	if (!qf_inum)
		return -EPERM;

	qf_inode = f2fs_iget(sb, qf_inum);
	if (IS_ERR(qf_inode)) {
		f2fs_msg(sb, KERN_ERR,
			"Bad quota inode %u:%lu", type, qf_inum);
		return PTR_ERR(qf_inode);
	}

	/* Don't account quota for quota files to avoid recursion */
	qf_inode->i_flags |= S_NOQUOTA;
	err = dquot_enable(qf_inode, type, format_id, flags);
	iput(qf_inode);
	return err;
}
static int f2fs_enable_quotas(struct super_block *sb)
{
	int type, err = 0;
	unsigned long qf_inum;
	bool quota_mopt[MAXQUOTAS] = {
		test_opt(F2FS_SB(sb), USRQUOTA),
		test_opt(F2FS_SB(sb), GRPQUOTA),
		test_opt(F2FS_SB(sb), PRJQUOTA),
	};

	if (is_set_ckpt_flags(F2FS_SB(sb), CP_QUOTA_NEED_FSCK_FLAG)) {
		f2fs_msg(sb, KERN_ERR,
			"quota file may be corrupted, skip loading it");
		return 0;
	}

	sb_dqopt(sb)->flags |= DQUOT_QUOTA_SYS_FILE;

	for (type = 0; type < MAXQUOTAS; type++) {
		qf_inum = f2fs_qf_ino(sb, type);
		if (qf_inum) {
			err = f2fs_quota_enable(sb, type, QFMT_VFS_V1,
				DQUOT_USAGE_ENABLED |
				(quota_mopt[type] ? DQUOT_LIMITS_ENABLED : 0));
			if (err) {
				f2fs_msg(sb, KERN_ERR,
					"Failed to enable quota tracking "
					"(type=%d, err=%d). Please run "
					"fsck to fix.", type, err);
				for (type--; type >= 0; type--)
					dquot_quota_off(sb, type);
				set_sbi_flag(F2FS_SB(sb),
						SBI_QUOTA_NEED_REPAIR);
				return err;
			}
		}
	}
	return 0;
}
int f2fs_quota_sync(struct super_block *sb, int type)
{
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	struct quota_info *dqopt = sb_dqopt(sb);
	int cnt;
	int ret;

	ret = dquot_writeback_dquots(sb, type);
	if (ret)
		goto out;

	/*
	 * Now when everything is written we can discard the pagecache so
	 * that userspace sees the changes.
	 */
	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		struct address_space *mapping;

		if (type != -1 && cnt != type)
			continue;
		if (!sb_has_quota_active(sb, cnt))
			continue;

		mapping = dqopt->files[cnt]->i_mapping;

		ret = filemap_fdatawrite(mapping);
		if (ret)
			goto out;

		/* if we are using journalled quota */
		if (is_journalled_quota(sbi))
			continue;

		ret = filemap_fdatawait(mapping);
		if (ret)
			set_sbi_flag(F2FS_SB(sb), SBI_QUOTA_NEED_REPAIR);

		inode_lock(dqopt->files[cnt]);
		truncate_inode_pages(&dqopt->files[cnt]->i_data, 0);
		inode_unlock(dqopt->files[cnt]);
	}
out:
	if (ret)
		set_sbi_flag(F2FS_SB(sb), SBI_QUOTA_NEED_REPAIR);
	return ret;
}
static int f2fs_quota_on(struct super_block *sb, int type, int format_id,
							const struct path *path)
{
	struct inode *inode;
	int err;

	err = f2fs_quota_sync(sb, type);
	if (err)
		return err;

	err = dquot_quota_on(sb, type, format_id, path);
	if (err)
		return err;

	inode = d_inode(path->dentry);

	inode_lock(inode);
	F2FS_I(inode)->i_flags |= F2FS_NOATIME_FL | F2FS_IMMUTABLE_FL;
	f2fs_set_inode_flags(inode);
	inode_unlock(inode);
	f2fs_mark_inode_dirty_sync(inode, false);

	return 0;
}
static int f2fs_quota_off(struct super_block *sb, int type)
{
	struct inode *inode = sb_dqopt(sb)->files[type];
	int err;

	if (!inode || !igrab(inode))
		return dquot_quota_off(sb, type);

	err = f2fs_quota_sync(sb, type);
	if (err)
		goto out_put;

	err = dquot_quota_off(sb, type);
	if (err || f2fs_sb_has_quota_ino(F2FS_SB(sb)))
		goto out_put;

	inode_lock(inode);
	F2FS_I(inode)->i_flags &= ~(F2FS_NOATIME_FL | F2FS_IMMUTABLE_FL);
	f2fs_set_inode_flags(inode);
	inode_unlock(inode);
	f2fs_mark_inode_dirty_sync(inode, false);
out_put:
	iput(inode);
	return err;
}
void f2fs_quota_off_umount(struct super_block *sb)
{
	int type;
	int err;

	for (type = 0; type < MAXQUOTAS; type++) {
		err = f2fs_quota_off(sb, type);
		if (err) {
			int ret = dquot_quota_off(sb, type);

			f2fs_msg(sb, KERN_ERR,
				"Fail to turn off disk quota "
				"(type: %d, err: %d, ret:%d), Please "
				"run fsck to fix it.", type, err, ret);
			set_sbi_flag(F2FS_SB(sb), SBI_QUOTA_NEED_REPAIR);
		}
	}
	/*
	 * In case of checkpoint=disable, we must flush the quota blocks.
	 * Otherwise end_io could dereference a NULL node_inode, since
	 * put_super has already dropped it.
	 */
	sync_filesystem(sb);
}
static void f2fs_truncate_quota_inode_pages(struct super_block *sb)
{
	struct quota_info *dqopt = sb_dqopt(sb);
	int type;

	for (type = 0; type < MAXQUOTAS; type++) {
		if (!dqopt->files[type])
			continue;
		f2fs_inode_synced(dqopt->files[type]);
	}
}
static int f2fs_dquot_commit(struct dquot *dquot)
{
	int ret;

	ret = dquot_commit(dquot);
	if (ret < 0)
		set_sbi_flag(F2FS_SB(dquot->dq_sb), SBI_QUOTA_NEED_REPAIR);
	return ret;
}

static int f2fs_dquot_acquire(struct dquot *dquot)
{
	int ret;

	ret = dquot_acquire(dquot);
	if (ret < 0)
		set_sbi_flag(F2FS_SB(dquot->dq_sb), SBI_QUOTA_NEED_REPAIR);
	return ret;
}

static int f2fs_dquot_release(struct dquot *dquot)
{
	int ret;

	ret = dquot_release(dquot);
	if (ret < 0)
		set_sbi_flag(F2FS_SB(dquot->dq_sb), SBI_QUOTA_NEED_REPAIR);
	return ret;
}

static int f2fs_dquot_mark_dquot_dirty(struct dquot *dquot)
{
	struct super_block *sb = dquot->dq_sb;
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	int ret;

	ret = dquot_mark_dquot_dirty(dquot);

	/* if we are using journalled quota */
	if (is_journalled_quota(sbi))
		set_sbi_flag(sbi, SBI_QUOTA_NEED_FLUSH);

	return ret;
}

static int f2fs_dquot_commit_info(struct super_block *sb, int type)
{
	int ret;

	ret = dquot_commit_info(sb, type);
	if (ret < 0)
		set_sbi_flag(F2FS_SB(sb), SBI_QUOTA_NEED_REPAIR);
	return ret;
}
static int f2fs_get_projid(struct inode *inode, kprojid_t *projid)
{
	*projid = F2FS_I(inode)->i_projid;
	return 0;
}

static const struct dquot_operations f2fs_quota_operations = {
	.get_reserved_space = f2fs_get_reserved_space,
	.write_dquot	= f2fs_dquot_commit,
	.acquire_dquot	= f2fs_dquot_acquire,
	.release_dquot	= f2fs_dquot_release,
	.mark_dirty	= f2fs_dquot_mark_dquot_dirty,
	.write_info	= f2fs_dquot_commit_info,
	.alloc_dquot	= dquot_alloc,
	.destroy_dquot	= dquot_destroy,
	.get_projid	= f2fs_get_projid,
	.get_next_id	= dquot_get_next_id,
};

static const struct quotactl_ops f2fs_quotactl_ops = {
	.quota_on	= f2fs_quota_on,
	.quota_off	= f2fs_quota_off,
	.quota_sync	= f2fs_quota_sync,
	.get_state	= dquot_get_state,
	.set_info	= dquot_set_dqinfo,
	.get_dqblk	= dquot_get_dqblk,
	.set_dqblk	= dquot_set_dqblk,
	.get_nextdqblk	= dquot_get_next_dqblk,
};
#else
int f2fs_quota_sync(struct super_block *sb, int type)
{
	return 0;
}

void f2fs_quota_off_umount(struct super_block *sb)
{
}
#endif
static const struct super_operations f2fs_sops = {
	.alloc_inode	= f2fs_alloc_inode,
	.drop_inode	= f2fs_drop_inode,
	.destroy_inode	= f2fs_destroy_inode,
	.write_inode	= f2fs_write_inode,
	.dirty_inode	= f2fs_dirty_inode,
	.show_options	= f2fs_show_options,
#ifdef CONFIG_QUOTA
	.quota_read	= f2fs_quota_read,
	.quota_write	= f2fs_quota_write,
	.get_dquots	= f2fs_get_dquots,
#endif
	.evict_inode	= f2fs_evict_inode,
	.put_super	= f2fs_put_super,
	.sync_fs	= f2fs_sync_fs,
	.freeze_fs	= f2fs_freeze,
	.unfreeze_fs	= f2fs_unfreeze,
	.statfs		= f2fs_statfs,
	.remount_fs	= f2fs_remount,
};
#ifdef CONFIG_FS_ENCRYPTION
static int f2fs_get_context(struct inode *inode, void *ctx, size_t len)
{
	return f2fs_getxattr(inode, F2FS_XATTR_INDEX_ENCRYPTION,
				F2FS_XATTR_NAME_ENCRYPTION_CONTEXT,
				ctx, len, NULL);
}

static int f2fs_set_context(struct inode *inode, const void *ctx, size_t len,
							void *fs_data)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

	/*
	 * Encrypting the root directory is not allowed because fsck
	 * expects lost+found directory to exist and remain unencrypted
	 * if LOST_FOUND feature is enabled.
	 */
	if (f2fs_sb_has_lost_found(sbi) &&
			inode->i_ino == F2FS_ROOT_INO(sbi))
		return -EPERM;

	return f2fs_setxattr(inode, F2FS_XATTR_INDEX_ENCRYPTION,
				F2FS_XATTR_NAME_ENCRYPTION_CONTEXT,
				ctx, len, fs_data, XATTR_CREATE);
}

static bool f2fs_dummy_context(struct inode *inode)
{
	return DUMMY_ENCRYPTION_ENABLED(F2FS_I_SB(inode));
}

static const struct fscrypt_operations f2fs_cryptops = {
	.key_prefix	= "f2fs:",
	.get_context	= f2fs_get_context,
	.set_context	= f2fs_set_context,
	.dummy_context	= f2fs_dummy_context,
	.empty_dir	= f2fs_empty_dir,
	.max_namelen	= F2FS_NAME_LEN,
};
#endif
static struct inode *f2fs_nfs_get_inode(struct super_block *sb,
		u64 ino, u32 generation)
{
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	struct inode *inode;

	if (f2fs_check_nid_range(sbi, ino))
		return ERR_PTR(-ESTALE);

	/*
	 * f2fs_iget isn't quite right if the inode is currently unallocated!
	 * However f2fs_iget currently does appropriate checks to handle stale
	 * inodes so everything is OK.
	 */
	inode = f2fs_iget(sb, ino);
	if (IS_ERR(inode))
		return ERR_CAST(inode);
	if (unlikely(generation && inode->i_generation != generation)) {
		/* we didn't find the right inode.. */
		iput(inode);
		return ERR_PTR(-ESTALE);
	}
	return inode;
}

static struct dentry *f2fs_fh_to_dentry(struct super_block *sb, struct fid *fid,
		int fh_len, int fh_type)
{
	return generic_fh_to_dentry(sb, fid, fh_len, fh_type,
				    f2fs_nfs_get_inode);
}

static struct dentry *f2fs_fh_to_parent(struct super_block *sb, struct fid *fid,
		int fh_len, int fh_type)
{
	return generic_fh_to_parent(sb, fid, fh_len, fh_type,
				    f2fs_nfs_get_inode);
}

static const struct export_operations f2fs_export_ops = {
	.fh_to_dentry = f2fs_fh_to_dentry,
	.fh_to_parent = f2fs_fh_to_parent,
	.get_parent = f2fs_get_parent,
};
static loff_t max_file_blocks(void)
{
	loff_t result = 0;
	loff_t leaf_count = ADDRS_PER_BLOCK;

	/*
	 * note: previously, result was equal to (DEF_ADDRS_PER_INODE -
	 * DEFAULT_INLINE_XATTR_ADDRS), but now f2fs tries to reserve more
	 * space in inode.i_addr, so it is safer to start from zero.
	 */

	/* two direct node blocks */
	result += (leaf_count * 2);

	/* two indirect node blocks */
	leaf_count *= NIDS_PER_BLOCK;
	result += (leaf_count * 2);

	/* one double indirect node block */
	leaf_count *= NIDS_PER_BLOCK;
	result += leaf_count;

	return result;
}
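
/*
 * Worked example with 4KB blocks (assuming ADDRS_PER_BLOCK == 1018 and
 * NIDS_PER_BLOCK == 1018 for such a layout):
 *
 *	direct:          2 * 1018            =         2,036 blocks
 *	indirect:        2 * 1018 * 1018     =     2,072,648 blocks
 *	double indirect: 1018 * 1018 * 1018  = 1,054,977,832 blocks
 *
 * i.e. roughly 1.057 billion blocks, an upper bound of about 3.94 TB per
 * file before the inode's own direct pointers are even counted.
 */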
static int __f2fs_commit_super(struct buffer_head *bh,
			struct f2fs_super_block *super)
{
	lock_buffer(bh);
	if (super)
		memcpy(bh->b_data + F2FS_SUPER_OFFSET, super, sizeof(*super));
	set_buffer_dirty(bh);
	unlock_buffer(bh);

	/* it's a rare case, so we can do FUA all the time */
	return __sync_dirty_buffer(bh, REQ_SYNC | REQ_PREFLUSH | REQ_FUA);
}
static inline bool sanity_check_area_boundary(struct f2fs_sb_info *sbi,
					struct buffer_head *bh)
{
	struct f2fs_super_block *raw_super = (struct f2fs_super_block *)
					(bh->b_data + F2FS_SUPER_OFFSET);
	struct super_block *sb = sbi->sb;
	u32 segment0_blkaddr = le32_to_cpu(raw_super->segment0_blkaddr);
	u32 cp_blkaddr = le32_to_cpu(raw_super->cp_blkaddr);
	u32 sit_blkaddr = le32_to_cpu(raw_super->sit_blkaddr);
	u32 nat_blkaddr = le32_to_cpu(raw_super->nat_blkaddr);
	u32 ssa_blkaddr = le32_to_cpu(raw_super->ssa_blkaddr);
	u32 main_blkaddr = le32_to_cpu(raw_super->main_blkaddr);
	u32 segment_count_ckpt = le32_to_cpu(raw_super->segment_count_ckpt);
	u32 segment_count_sit = le32_to_cpu(raw_super->segment_count_sit);
	u32 segment_count_nat = le32_to_cpu(raw_super->segment_count_nat);
	u32 segment_count_ssa = le32_to_cpu(raw_super->segment_count_ssa);
	u32 segment_count_main = le32_to_cpu(raw_super->segment_count_main);
	u32 segment_count = le32_to_cpu(raw_super->segment_count);
	u32 log_blocks_per_seg = le32_to_cpu(raw_super->log_blocks_per_seg);
	u64 main_end_blkaddr = main_blkaddr +
				(segment_count_main << log_blocks_per_seg);
	u64 seg_end_blkaddr = segment0_blkaddr +
				(segment_count << log_blocks_per_seg);
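
	/*
	 * The checks below verify that the metadata areas are laid out
	 * back-to-back in the expected on-disk order (illustrative):
	 *
	 *	segment0/CP -> SIT -> NAT -> SSA -> MAIN
	 *
	 * i.e. each area must start exactly where the previous one ends,
	 * and MAIN must not run past the last segment of the device.
	 */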
	if (segment0_blkaddr != cp_blkaddr) {
		f2fs_msg(sb, KERN_INFO,
			"Mismatch start address, segment0(%u) cp_blkaddr(%u)",
			segment0_blkaddr, cp_blkaddr);
		return true;
	}

	if (cp_blkaddr + (segment_count_ckpt << log_blocks_per_seg) !=
							sit_blkaddr) {
		f2fs_msg(sb, KERN_INFO,
			"Wrong CP boundary, start(%u) end(%u) blocks(%u)",
			cp_blkaddr, sit_blkaddr,
			segment_count_ckpt << log_blocks_per_seg);
		return true;
	}

	if (sit_blkaddr + (segment_count_sit << log_blocks_per_seg) !=
							nat_blkaddr) {
		f2fs_msg(sb, KERN_INFO,
			"Wrong SIT boundary, start(%u) end(%u) blocks(%u)",
			sit_blkaddr, nat_blkaddr,
			segment_count_sit << log_blocks_per_seg);
		return true;
	}

	if (nat_blkaddr + (segment_count_nat << log_blocks_per_seg) !=
							ssa_blkaddr) {
		f2fs_msg(sb, KERN_INFO,
			"Wrong NAT boundary, start(%u) end(%u) blocks(%u)",
			nat_blkaddr, ssa_blkaddr,
			segment_count_nat << log_blocks_per_seg);
		return true;
	}

	if (ssa_blkaddr + (segment_count_ssa << log_blocks_per_seg) !=
							main_blkaddr) {
		f2fs_msg(sb, KERN_INFO,
			"Wrong SSA boundary, start(%u) end(%u) blocks(%u)",
			ssa_blkaddr, main_blkaddr,
			segment_count_ssa << log_blocks_per_seg);
		return true;
	}

	if (main_end_blkaddr > seg_end_blkaddr) {
		f2fs_msg(sb, KERN_INFO,
			"Wrong MAIN_AREA boundary, start(%u) end(%u) block(%u)",
			main_blkaddr,
			segment0_blkaddr +
				(segment_count << log_blocks_per_seg),
			segment_count_main << log_blocks_per_seg);
		return true;
	} else if (main_end_blkaddr < seg_end_blkaddr) {
		int err = 0;
		char *res;

		/* fix in-memory information all the time */
		raw_super->segment_count = cpu_to_le32((main_end_blkaddr -
				segment0_blkaddr) >> log_blocks_per_seg);

		if (f2fs_readonly(sb) || bdev_read_only(sb->s_bdev)) {
			set_sbi_flag(sbi, SBI_NEED_SB_WRITE);
			res = "internally";
		} else {
			err = __f2fs_commit_super(bh, NULL);
			res = err ? "failed" : "done";
		}
		f2fs_msg(sb, KERN_INFO,
			"Fix alignment : %s, start(%u) end(%u) block(%u)",
			res, main_blkaddr,
			segment0_blkaddr +
				(segment_count << log_blocks_per_seg),
			segment_count_main << log_blocks_per_seg);
		if (err)
			return true;
	}
	return false;
}
2417 static int sanity_check_raw_super(struct f2fs_sb_info *sbi,
2418 struct buffer_head *bh)
2420 block_t segment_count, segs_per_sec, secs_per_zone;
2421 block_t total_sections, blocks_per_seg;
2422 struct f2fs_super_block *raw_super = (struct f2fs_super_block *)
2423 (bh->b_data + F2FS_SUPER_OFFSET);
2424 struct super_block *sb = sbi->sb;
2425 unsigned int blocksize;
2426 size_t crc_offset = 0;
2429 /* Check checksum_offset and crc in superblock */
2430 if (__F2FS_HAS_FEATURE(raw_super, F2FS_FEATURE_SB_CHKSUM)) {
2431 crc_offset = le32_to_cpu(raw_super->checksum_offset);
2433 offsetof(struct f2fs_super_block, crc)) {
2434 f2fs_msg(sb, KERN_INFO,
2435 "Invalid SB checksum offset: %zu",
2439 crc = le32_to_cpu(raw_super->crc);
2440 if (!f2fs_crc_valid(sbi, crc, raw_super, crc_offset)) {
2441 f2fs_msg(sb, KERN_INFO,
2442 "Invalid SB checksum value: %u", crc);
2447 if (F2FS_SUPER_MAGIC != le32_to_cpu(raw_super->magic)) {
2448 f2fs_msg(sb, KERN_INFO,
2449 "Magic Mismatch, valid(0x%x) - read(0x%x)",
2450 F2FS_SUPER_MAGIC, le32_to_cpu(raw_super->magic));
2454 /* Currently, support only 4KB page cache size */
2455 if (F2FS_BLKSIZE != PAGE_SIZE) {
2456 f2fs_msg(sb, KERN_INFO,
2457 "Invalid page_cache_size (%lu), supports only 4KB\n",
2462 /* Currently, support only 4KB block size */
2463 blocksize = 1 << le32_to_cpu(raw_super->log_blocksize);
2464 if (blocksize != F2FS_BLKSIZE) {
2465 f2fs_msg(sb, KERN_INFO,
2466 "Invalid blocksize (%u), supports only 4KB\n",
2471 /* check log blocks per segment */
2472 if (le32_to_cpu(raw_super->log_blocks_per_seg) != 9) {
2473 f2fs_msg(sb, KERN_INFO,
2474 "Invalid log blocks per segment (%u)\n",
2475 le32_to_cpu(raw_super->log_blocks_per_seg));
	/* Currently, support 512/1024/2048/4096 bytes sector size */
	if (le32_to_cpu(raw_super->log_sectorsize) >
				F2FS_MAX_LOG_SECTOR_SIZE ||
		le32_to_cpu(raw_super->log_sectorsize) <
				F2FS_MIN_LOG_SECTOR_SIZE) {
		f2fs_msg(sb, KERN_INFO, "Invalid log sectorsize (%u)",
			le32_to_cpu(raw_super->log_sectorsize));
		return 1;
	}
	if (le32_to_cpu(raw_super->log_sectors_per_block) +
		le32_to_cpu(raw_super->log_sectorsize) !=
			F2FS_MAX_LOG_SECTOR_SIZE) {
		f2fs_msg(sb, KERN_INFO,
			"Invalid log sectors per block(%u) log sectorsize(%u)",
			le32_to_cpu(raw_super->log_sectors_per_block),
			le32_to_cpu(raw_super->log_sectorsize));
		return 1;
	}
	segment_count = le32_to_cpu(raw_super->segment_count);
	segs_per_sec = le32_to_cpu(raw_super->segs_per_sec);
	secs_per_zone = le32_to_cpu(raw_super->secs_per_zone);
	total_sections = le32_to_cpu(raw_super->section_count);

	/* blocks_per_seg should be 512, given the above check */
	blocks_per_seg = 1 << le32_to_cpu(raw_super->log_blocks_per_seg);
	if (segment_count > F2FS_MAX_SEGMENT ||
				segment_count < F2FS_MIN_SEGMENTS) {
		f2fs_msg(sb, KERN_INFO,
			"Invalid segment count (%u)",
			segment_count);
		return 1;
	}

	if (total_sections > segment_count ||
			total_sections < F2FS_MIN_SEGMENTS ||
			segs_per_sec > segment_count || !segs_per_sec) {
		f2fs_msg(sb, KERN_INFO,
			"Invalid segment/section count (%u, %u x %u)",
			segment_count, total_sections, segs_per_sec);
		return 1;
	}

	if ((segment_count / segs_per_sec) < total_sections) {
		f2fs_msg(sb, KERN_INFO,
			"Small segment_count (%u < %u * %u)",
			segment_count, segs_per_sec, total_sections);
		return 1;
	}

	if (segment_count > (le64_to_cpu(raw_super->block_count) >> 9)) {
		f2fs_msg(sb, KERN_INFO,
			"Wrong segment_count / block_count (%u > %llu)",
			segment_count, le64_to_cpu(raw_super->block_count));
		return 1;
	}

	if (secs_per_zone > total_sections || !secs_per_zone) {
		f2fs_msg(sb, KERN_INFO,
			"Wrong secs_per_zone / total_sections (%u, %u)",
			secs_per_zone, total_sections);
		return 1;
	}
	if (le32_to_cpu(raw_super->extension_count) > F2FS_MAX_EXTENSION ||
			raw_super->hot_ext_count > F2FS_MAX_EXTENSION ||
			(le32_to_cpu(raw_super->extension_count) +
			raw_super->hot_ext_count) > F2FS_MAX_EXTENSION) {
		f2fs_msg(sb, KERN_INFO,
			"Corrupted extension count (%u + %u > %u)",
			le32_to_cpu(raw_super->extension_count),
			raw_super->hot_ext_count,
			F2FS_MAX_EXTENSION);
		return 1;
	}

	if (le32_to_cpu(raw_super->cp_payload) >
				(blocks_per_seg - F2FS_CP_PACKS)) {
		f2fs_msg(sb, KERN_INFO,
			"Insane cp_payload (%u > %u)",
			le32_to_cpu(raw_super->cp_payload),
			blocks_per_seg - F2FS_CP_PACKS);
		return 1;
	}
	/* check reserved ino info */
	if (le32_to_cpu(raw_super->node_ino) != 1 ||
		le32_to_cpu(raw_super->meta_ino) != 2 ||
		le32_to_cpu(raw_super->root_ino) != 3) {
		f2fs_msg(sb, KERN_INFO,
			"Invalid Fs Meta Ino: node(%u) meta(%u) root(%u)",
			le32_to_cpu(raw_super->node_ino),
			le32_to_cpu(raw_super->meta_ino),
			le32_to_cpu(raw_super->root_ino));
		return 1;
	}

	/* check CP/SIT/NAT/SSA/MAIN_AREA area boundary */
	if (sanity_check_area_boundary(sbi, bh))
		return 1;

	return 0;
}
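
/*
 * Cross-check the checkpoint against the superblock: meta segment counts,
 * current segment numbers and block offsets, and SIT/NAT bitmap sizes.
 * Returns non-zero when the checkpoint looks corrupted and fsck is needed.
 */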
int f2fs_sanity_check_ckpt(struct f2fs_sb_info *sbi)
{
	unsigned int total, fsmeta;
	struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi);
	struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
	unsigned int ovp_segments, reserved_segments;
	unsigned int main_segs, blocks_per_seg;
	unsigned int sit_segs, nat_segs;
	unsigned int sit_bitmap_size, nat_bitmap_size;
	unsigned int log_blocks_per_seg;
	unsigned int segment_count_main;
	unsigned int cp_pack_start_sum, cp_payload;
	block_t user_block_count;
	int i, j;
	total = le32_to_cpu(raw_super->segment_count);
	fsmeta = le32_to_cpu(raw_super->segment_count_ckpt);
	sit_segs = le32_to_cpu(raw_super->segment_count_sit);
	fsmeta += sit_segs;
	nat_segs = le32_to_cpu(raw_super->segment_count_nat);
	fsmeta += nat_segs;
	fsmeta += le32_to_cpu(ckpt->rsvd_segment_count);
	fsmeta += le32_to_cpu(raw_super->segment_count_ssa);

	if (unlikely(fsmeta >= total))
		return 1;

	ovp_segments = le32_to_cpu(ckpt->overprov_segment_count);
	reserved_segments = le32_to_cpu(ckpt->rsvd_segment_count);

	if (unlikely(fsmeta < F2FS_MIN_SEGMENTS ||
			ovp_segments == 0 || reserved_segments == 0)) {
		f2fs_msg(sbi->sb, KERN_ERR,
			"Wrong layout: check mkfs.f2fs version");
		return 1;
	}
	user_block_count = le64_to_cpu(ckpt->user_block_count);
	segment_count_main = le32_to_cpu(raw_super->segment_count_main);
	log_blocks_per_seg = le32_to_cpu(raw_super->log_blocks_per_seg);
	if (!user_block_count || user_block_count >=
			segment_count_main << log_blocks_per_seg) {
		f2fs_msg(sbi->sb, KERN_ERR,
			"Wrong user_block_count: %u", user_block_count);
		return 1;
	}
	main_segs = le32_to_cpu(raw_super->segment_count_main);
	blocks_per_seg = sbi->blocks_per_seg;

	for (i = 0; i < NR_CURSEG_NODE_TYPE; i++) {
		if (le32_to_cpu(ckpt->cur_node_segno[i]) >= main_segs ||
			le16_to_cpu(ckpt->cur_node_blkoff[i]) >= blocks_per_seg)
			return 1;
		for (j = i + 1; j < NR_CURSEG_NODE_TYPE; j++) {
			if (le32_to_cpu(ckpt->cur_node_segno[i]) ==
				le32_to_cpu(ckpt->cur_node_segno[j])) {
				f2fs_msg(sbi->sb, KERN_ERR,
					"Node segment (%u, %u) has the same segno: %u",
					i, j,
					le32_to_cpu(ckpt->cur_node_segno[i]));
				return 1;
			}
		}
	}
	for (i = 0; i < NR_CURSEG_DATA_TYPE; i++) {
		if (le32_to_cpu(ckpt->cur_data_segno[i]) >= main_segs ||
			le16_to_cpu(ckpt->cur_data_blkoff[i]) >= blocks_per_seg)
			return 1;
		for (j = i + 1; j < NR_CURSEG_DATA_TYPE; j++) {
			if (le32_to_cpu(ckpt->cur_data_segno[i]) ==
				le32_to_cpu(ckpt->cur_data_segno[j])) {
				f2fs_msg(sbi->sb, KERN_ERR,
					"Data segment (%u, %u) has the same segno: %u",
					i, j,
					le32_to_cpu(ckpt->cur_data_segno[i]));
				return 1;
			}
		}
	}
	for (i = 0; i < NR_CURSEG_NODE_TYPE; i++) {
		for (j = i; j < NR_CURSEG_DATA_TYPE; j++) {
			if (le32_to_cpu(ckpt->cur_node_segno[i]) ==
				le32_to_cpu(ckpt->cur_data_segno[j])) {
				f2fs_msg(sbi->sb, KERN_ERR,
					"Node segment (%u) and Data segment (%u) have the same segno: %u",
					i, j,
					le32_to_cpu(ckpt->cur_node_segno[i]));
				return 1;
			}
		}
	}
	sit_bitmap_size = le32_to_cpu(ckpt->sit_ver_bitmap_bytesize);
	nat_bitmap_size = le32_to_cpu(ckpt->nat_ver_bitmap_bytesize);

	if (sit_bitmap_size != ((sit_segs / 2) << log_blocks_per_seg) / 8 ||
		nat_bitmap_size != ((nat_segs / 2) << log_blocks_per_seg) / 8) {
		f2fs_msg(sbi->sb, KERN_ERR,
			"Wrong bitmap size: sit: %u, nat: %u",
			sit_bitmap_size, nat_bitmap_size);
		return 1;
	}
	cp_pack_start_sum = __start_sum_addr(sbi);
	cp_payload = __cp_payload(sbi);
	if (cp_pack_start_sum < cp_payload + 1 ||
		cp_pack_start_sum > blocks_per_seg - 1 -
			NR_CURSEG_TYPE) {
		f2fs_msg(sbi->sb, KERN_ERR,
			"Wrong cp_pack_start_sum: %u",
			cp_pack_start_sum);
		return 1;
	}

	if (unlikely(f2fs_cp_error(sbi))) {
		f2fs_msg(sbi->sb, KERN_ERR, "A bug case: need to run fsck");
		return 1;
	}
	return 0;
}
static void init_sb_info(struct f2fs_sb_info *sbi)
{
	struct f2fs_super_block *raw_super = sbi->raw_super;
	int i;
	sbi->log_sectors_per_block =
		le32_to_cpu(raw_super->log_sectors_per_block);
	sbi->log_blocksize = le32_to_cpu(raw_super->log_blocksize);
	sbi->blocksize = 1 << sbi->log_blocksize;
	sbi->log_blocks_per_seg = le32_to_cpu(raw_super->log_blocks_per_seg);
	sbi->blocks_per_seg = 1 << sbi->log_blocks_per_seg;
	sbi->segs_per_sec = le32_to_cpu(raw_super->segs_per_sec);
	sbi->secs_per_zone = le32_to_cpu(raw_super->secs_per_zone);
	sbi->total_sections = le32_to_cpu(raw_super->section_count);
	sbi->total_node_count =
		(le32_to_cpu(raw_super->segment_count_nat) / 2)
			* sbi->blocks_per_seg * NAT_ENTRY_PER_BLOCK;
	sbi->root_ino_num = le32_to_cpu(raw_super->root_ino);
	sbi->node_ino_num = le32_to_cpu(raw_super->node_ino);
	sbi->meta_ino_num = le32_to_cpu(raw_super->meta_ino);
	sbi->cur_victim_sec = NULL_SECNO;
	sbi->next_victim_seg[BG_GC] = NULL_SEGNO;
	sbi->next_victim_seg[FG_GC] = NULL_SEGNO;
	sbi->max_victim_search = DEF_MAX_VICTIM_SEARCH;
	sbi->migration_granularity = sbi->segs_per_sec;

	sbi->dir_level = DEF_DIR_LEVEL;
	sbi->interval_time[CP_TIME] = DEF_CP_INTERVAL;
	sbi->interval_time[REQ_TIME] = DEF_IDLE_INTERVAL;
	sbi->interval_time[DISCARD_TIME] = DEF_IDLE_INTERVAL;
	sbi->interval_time[GC_TIME] = DEF_IDLE_INTERVAL;
	sbi->interval_time[DISABLE_TIME] = DEF_DISABLE_INTERVAL;
	sbi->interval_time[UMOUNT_DISCARD_TIMEOUT] =
				DEF_UMOUNT_DISCARD_TIMEOUT;
	clear_sbi_flag(sbi, SBI_NEED_FSCK);

	for (i = 0; i < NR_COUNT_TYPE; i++)
		atomic_set(&sbi->nr_pages[i], 0);

	for (i = 0; i < META; i++)
		atomic_set(&sbi->wb_sync_req[i], 0);

	INIT_LIST_HEAD(&sbi->s_list);
	mutex_init(&sbi->umount_mutex);
	init_rwsem(&sbi->io_order_lock);
	spin_lock_init(&sbi->cp_lock);

	sbi->dirty_device = 0;
	spin_lock_init(&sbi->dev_lock);

	init_rwsem(&sbi->sb_lock);
}
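
/*
 * Set up the per-CPU counters used for block allocation and valid-inode
 * accounting; if the second counter fails to initialize, the first one
 * is destroyed again.
 */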
static int init_percpu_info(struct f2fs_sb_info *sbi)
{
	int err;

	err = percpu_counter_init(&sbi->alloc_valid_block_count, 0, GFP_KERNEL);
	if (err)
		return err;

	err = percpu_counter_init(&sbi->total_valid_inode_count, 0,
								GFP_KERNEL);
	if (err)
		percpu_counter_destroy(&sbi->alloc_valid_block_count);

	return err;
}

#ifdef CONFIG_BLK_DEV_ZONED
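/*
 * Cache the zone geometry of a zoned block device and record the type
 * (conventional or sequential) of every zone via blkdev_report_zones().
 */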
static int init_blkz_info(struct f2fs_sb_info *sbi, int devi)
{
	struct block_device *bdev = FDEV(devi).bdev;
	sector_t nr_sectors = bdev->bd_part->nr_sects;
	sector_t sector = 0;
	struct blk_zone *zones;
	unsigned int i, nr_zones;
	unsigned int n = 0;
	int err = -EIO;

	if (!f2fs_sb_has_blkzoned(sbi))
		return 0;

	if (sbi->blocks_per_blkz && sbi->blocks_per_blkz !=
				SECTOR_TO_BLOCK(bdev_zone_sectors(bdev)))
		return -EINVAL;
	sbi->blocks_per_blkz = SECTOR_TO_BLOCK(bdev_zone_sectors(bdev));
	if (sbi->log_blocks_per_blkz && sbi->log_blocks_per_blkz !=
				__ilog2_u32(sbi->blocks_per_blkz))
		return -EINVAL;
	sbi->log_blocks_per_blkz = __ilog2_u32(sbi->blocks_per_blkz);
	FDEV(devi).nr_blkz = SECTOR_TO_BLOCK(nr_sectors) >>
					sbi->log_blocks_per_blkz;
	if (nr_sectors & (bdev_zone_sectors(bdev) - 1))
		FDEV(devi).nr_blkz++;

	FDEV(devi).blkz_type = f2fs_kmalloc(sbi, FDEV(devi).nr_blkz,
								GFP_KERNEL);
	if (!FDEV(devi).blkz_type)
		return -ENOMEM;
#define F2FS_REPORT_NR_ZONES	4096

	zones = f2fs_kzalloc(sbi,
			array_size(F2FS_REPORT_NR_ZONES,
				sizeof(struct blk_zone)),
			GFP_KERNEL);
	if (!zones)
		return -ENOMEM;

	/* Get block zones type */
	while (zones && sector < nr_sectors) {
		nr_zones = F2FS_REPORT_NR_ZONES;
		err = blkdev_report_zones(bdev, sector, zones, &nr_zones,
					GFP_KERNEL);
		if (err)
			break;
		if (!nr_zones) {
			err = -EIO;
			break;
		}

		for (i = 0; i < nr_zones; i++) {
			FDEV(devi).blkz_type[n] = zones[i].type;
			sector += zones[i].len;
			n++;
		}
	}

	kvfree(zones);
	return err;
}
#endif
/*
 * Read the f2fs raw superblock.
 * Because we have two copies of the superblock, read both of them
 * to get the first valid one. If either copy is broken, we pass a
 * recovery flag back to the caller.
 */
static int read_raw_super_block(struct f2fs_sb_info *sbi,
			struct f2fs_super_block **raw_super,
			int *valid_super_block, int *recovery)
{
	struct super_block *sb = sbi->sb;
	int block;
	struct buffer_head *bh;
	struct f2fs_super_block *super;
	int err = 0;

	super = kzalloc(sizeof(struct f2fs_super_block), GFP_KERNEL);
	if (!super)
		return -ENOMEM;
	for (block = 0; block < 2; block++) {
		bh = sb_bread(sb, block);
		if (!bh) {
			f2fs_msg(sb, KERN_ERR, "Unable to read %dth superblock",
				block + 1);
			err = -EIO;
			continue;
		}

		/* sanity checking of raw super */
		if (sanity_check_raw_super(sbi, bh)) {
			f2fs_msg(sb, KERN_ERR,
				"Can't find valid F2FS filesystem in %dth superblock",
				block + 1);
			err = -EINVAL;
			brelse(bh);
			continue;
		}

		if (!*raw_super) {
			memcpy(super, bh->b_data + F2FS_SUPER_OFFSET,
							sizeof(*super));
			*valid_super_block = block;
			*raw_super = super;
		}
		brelse(bh);
	}

	/* Failed to read either of the superblocks */
	if (err < 0)
		*recovery = 1;

	/* No valid superblock */
	if (!*raw_super)
		kvfree(super);
	else
		err = 0;

	return err;
}
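
/*
 * Write the in-memory superblock back to disk. The backup copy is
 * written first; in the recovery path the currently valid copy is
 * left untouched.
 */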
int f2fs_commit_super(struct f2fs_sb_info *sbi, bool recover)
{
	struct buffer_head *bh;
	__u32 crc = 0;
	int err;

	if ((recover && f2fs_readonly(sbi->sb)) ||
				bdev_read_only(sbi->sb->s_bdev)) {
		set_sbi_flag(sbi, SBI_NEED_SB_WRITE);
		return -EROFS;
	}

	/* we should update superblock crc here */
	if (!recover && f2fs_sb_has_sb_chksum(sbi)) {
		crc = f2fs_crc32(sbi, F2FS_RAW_SUPER(sbi),
				offsetof(struct f2fs_super_block, crc));
		F2FS_RAW_SUPER(sbi)->crc = cpu_to_le32(crc);
	}

	/* write back-up superblock first */
	bh = sb_bread(sbi->sb, sbi->valid_super_block ? 0 : 1);
	if (!bh)
		return -EIO;
	err = __f2fs_commit_super(bh, F2FS_RAW_SUPER(sbi));
	brelse(bh);

	/* if we are in recovery path, skip writing valid superblock */
	if (recover || err)
		return err;

	/* write current valid superblock */
	bh = sb_bread(sbi->sb, sbi->valid_super_block);
	if (!bh)
		return -EIO;
	err = __f2fs_commit_super(bh, F2FS_RAW_SUPER(sbi));
	brelse(bh);
	return err;
}
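
/*
 * Build the device list for a single- or multi-device volume and map
 * each device to its block address range within the filesystem.
 */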
static int f2fs_scan_devices(struct f2fs_sb_info *sbi)
{
	struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi);
	unsigned int max_devices = MAX_DEVICES;
	int i;

	/* Initialize single device information */
	if (!RDEV(0).path[0]) {
		if (!bdev_is_zoned(sbi->sb->s_bdev))
			return 0;
		max_devices = 1;
	}

	/*
	 * Initialize multiple devices information, or single
	 * zoned block device information.
	 */
	sbi->devs = f2fs_kzalloc(sbi,
				array_size(max_devices,
					sizeof(struct f2fs_dev_info)),
				GFP_KERNEL);
	if (!sbi->devs)
		return -ENOMEM;
	for (i = 0; i < max_devices; i++) {

		if (i > 0 && !RDEV(i).path[0])
			break;

		if (max_devices == 1) {
			/* Single zoned block device mount */
			FDEV(0).bdev =
				blkdev_get_by_dev(sbi->sb->s_bdev->bd_dev,
					sbi->sb->s_mode, sbi->sb->s_type);
		} else {
			/* Multi-device mount */
			memcpy(FDEV(i).path, RDEV(i).path, MAX_PATH_LEN);
			FDEV(i).total_segments =
				le32_to_cpu(RDEV(i).total_segments);
			if (i == 0) {
				FDEV(i).start_blk = 0;
				FDEV(i).end_blk = FDEV(i).start_blk +
					(FDEV(i).total_segments <<
					sbi->log_blocks_per_seg) - 1 +
					le32_to_cpu(raw_super->segment0_blkaddr);
			} else {
				FDEV(i).start_blk = FDEV(i - 1).end_blk + 1;
				FDEV(i).end_blk = FDEV(i).start_blk +
					(FDEV(i).total_segments <<
					sbi->log_blocks_per_seg) - 1;
			}
			FDEV(i).bdev = blkdev_get_by_path(FDEV(i).path,
					sbi->sb->s_mode, sbi->sb->s_type);
		}
		if (IS_ERR(FDEV(i).bdev))
			return PTR_ERR(FDEV(i).bdev);

		/* to release errored devices */
		sbi->s_ndevs = i + 1;
#ifdef CONFIG_BLK_DEV_ZONED
		if (bdev_zoned_model(FDEV(i).bdev) == BLK_ZONED_HM &&
				!f2fs_sb_has_blkzoned(sbi)) {
			f2fs_msg(sbi->sb, KERN_ERR,
				"Zoned block device feature not enabled");
			return -EINVAL;
		}
		if (bdev_zoned_model(FDEV(i).bdev) != BLK_ZONED_NONE) {
			if (init_blkz_info(sbi, i)) {
				f2fs_msg(sbi->sb, KERN_ERR,
					"Failed to initialize F2FS blkzone information");
				return -EINVAL;
			}
			if (max_devices == 1)
				break;
			f2fs_msg(sbi->sb, KERN_INFO,
				"Mount Device [%2d]: %20s, %8u, %8x - %8x (zone: %s)",
				i, FDEV(i).path,
				FDEV(i).total_segments,
				FDEV(i).start_blk, FDEV(i).end_blk,
				bdev_zoned_model(FDEV(i).bdev) == BLK_ZONED_HA ?
				"Host-aware" : "Host-managed");
			continue;
		}
#endif
		f2fs_msg(sbi->sb, KERN_INFO,
			"Mount Device [%2d]: %20s, %8u, %8x - %8x",
				i, FDEV(i).path,
				FDEV(i).total_segments,
				FDEV(i).start_blk, FDEV(i).end_blk);
	}
	f2fs_msg(sbi->sb, KERN_INFO,
			"IO Block Size: %8d KB", F2FS_IO_SIZE_KB(sbi));
	return 0;
}
static void f2fs_tuning_parameters(struct f2fs_sb_info *sbi)
{
	struct f2fs_sm_info *sm_i = SM_I(sbi);

	/* adjust parameters according to the volume size */
	if (sm_i->main_segments <= SMALL_VOLUME_SEGMENTS) {
		F2FS_OPTION(sbi).alloc_mode = ALLOC_MODE_REUSE;
		sm_i->dcc_info->discard_granularity = 1;
		sm_i->ipu_policy = 1 << F2FS_IPU_FORCE;
	}

	sbi->readdir_ra = 1;
}
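
/*
 * Read the superblock, set up all in-memory structures, recover fsynced
 * data if needed, and publish the root dentry. On failure everything is
 * unwound through the labels below, with one optional retry.
 */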
static int f2fs_fill_super(struct super_block *sb, void *data, int silent)
{
	struct f2fs_sb_info *sbi;
	struct f2fs_super_block *raw_super;
	struct inode *root;
	int err;
	bool skip_recovery = false, need_fsck = false;
	char *options = NULL;
	int recovery, i, valid_super_block;
	struct curseg_info *seg_i;
	int retry_cnt = 1;

try_onemore:
	err = -EINVAL;
	raw_super = NULL;
	valid_super_block = -1;
	recovery = 0;
	/* allocate memory for f2fs-specific super block info */
	sbi = kzalloc(sizeof(struct f2fs_sb_info), GFP_KERNEL);
	if (!sbi)
		return -ENOMEM;

	sbi->sb = sb;

	/* Load the checksum driver */
	sbi->s_chksum_driver = crypto_alloc_shash("crc32", 0, 0);
	if (IS_ERR(sbi->s_chksum_driver)) {
		f2fs_msg(sb, KERN_ERR, "Cannot load crc32 driver.");
		err = PTR_ERR(sbi->s_chksum_driver);
		sbi->s_chksum_driver = NULL;
		goto free_sbi;
	}
	/* set a block size */
	if (unlikely(!sb_set_blocksize(sb, F2FS_BLKSIZE))) {
		f2fs_msg(sb, KERN_ERR, "unable to set blocksize");
		goto free_sbi;
	}

	err = read_raw_super_block(sbi, &raw_super, &valid_super_block,
								&recovery);
	if (err)
		goto free_sbi;

	sb->s_fs_info = sbi;
	sbi->raw_super = raw_super;

	/* precompute checksum seed for metadata */
	if (f2fs_sb_has_inode_chksum(sbi))
		sbi->s_chksum_seed = f2fs_chksum(sbi, ~0, raw_super->uuid,
						sizeof(raw_super->uuid));
	/*
	 * The BLKZONED feature indicates that the drive was formatted with
	 * zone alignment optimization. This is optional for host-aware
	 * devices, but mandatory for host-managed zoned block devices.
	 */
#ifndef CONFIG_BLK_DEV_ZONED
	if (f2fs_sb_has_blkzoned(sbi)) {
		f2fs_msg(sb, KERN_ERR,
			"Zoned block device support is not enabled");
		err = -EOPNOTSUPP;
		goto free_sb_buf;
	}
#endif
	default_options(sbi);
	/* parse mount options */
	options = kstrdup((const char *)data, GFP_KERNEL);
	if (data && !options) {
		err = -ENOMEM;
		goto free_sb_buf;
	}

	err = parse_options(sb, options);
	if (err)
		goto free_options;

	sbi->max_file_blocks = max_file_blocks();
	sb->s_maxbytes = sbi->max_file_blocks <<
				le32_to_cpu(raw_super->log_blocksize);
	sb->s_max_links = F2FS_LINK_MAX;

#ifdef CONFIG_QUOTA
	sb->dq_op = &f2fs_quota_operations;
	if (f2fs_sb_has_quota_ino(sbi))
		sb->s_qcop = &dquot_quotactl_sysfile_ops;
	else
		sb->s_qcop = &f2fs_quotactl_ops;
	sb->s_quota_types = QTYPE_MASK_USR | QTYPE_MASK_GRP | QTYPE_MASK_PRJ;

	if (f2fs_sb_has_quota_ino(sbi)) {
		for (i = 0; i < MAXQUOTAS; i++) {
			if (f2fs_qf_ino(sbi->sb, i))
				sbi->nquota_files++;
		}
	}
#endif
	sb->s_op = &f2fs_sops;
#ifdef CONFIG_FS_ENCRYPTION
	sb->s_cop = &f2fs_cryptops;
#endif
	sb->s_xattr = f2fs_xattr_handlers;
	sb->s_export_op = &f2fs_export_ops;
	sb->s_magic = F2FS_SUPER_MAGIC;
	sb->s_time_gran = 1;
	sb->s_flags = (sb->s_flags & ~SB_POSIXACL) |
		(test_opt(sbi, POSIX_ACL) ? SB_POSIXACL : 0);
	memcpy(&sb->s_uuid, raw_super->uuid, sizeof(raw_super->uuid));
	sb->s_iflags |= SB_I_CGROUPWB;
	/* init f2fs-specific super block info */
	sbi->valid_super_block = valid_super_block;
	mutex_init(&sbi->gc_mutex);
	mutex_init(&sbi->writepages);
	mutex_init(&sbi->cp_mutex);
	init_rwsem(&sbi->node_write);
	init_rwsem(&sbi->node_change);

	/* disallow all the data/node/meta page writes */
	set_sbi_flag(sbi, SBI_POR_DOING);
	spin_lock_init(&sbi->stat_lock);

	/* init iostat info */
	spin_lock_init(&sbi->iostat_lock);
	sbi->iostat_enable = false;

	for (i = 0; i < NR_PAGE_TYPE; i++) {
		int n = (i == META) ? 1 : NR_TEMP_TYPE;
		int j;

		sbi->write_io[i] =
			f2fs_kmalloc(sbi,
				array_size(n,
					sizeof(struct f2fs_bio_info)),
				GFP_KERNEL);
		if (!sbi->write_io[i]) {
			err = -ENOMEM;
			goto free_bio_info;
		}

		for (j = HOT; j < n; j++) {
			init_rwsem(&sbi->write_io[i][j].io_rwsem);
			sbi->write_io[i][j].sbi = sbi;
			sbi->write_io[i][j].bio = NULL;
			spin_lock_init(&sbi->write_io[i][j].io_lock);
			INIT_LIST_HEAD(&sbi->write_io[i][j].io_list);
		}
	}

	init_rwsem(&sbi->cp_rwsem);
	init_waitqueue_head(&sbi->cp_wait);
	init_sb_info(sbi);

	err = init_percpu_info(sbi);
	if (err)
		goto free_bio_info;

	if (F2FS_IO_SIZE(sbi) > 1) {
		sbi->write_io_dummy =
			mempool_create_page_pool(2 * (F2FS_IO_SIZE(sbi) - 1), 0);
		if (!sbi->write_io_dummy) {
			err = -ENOMEM;
			goto free_percpu;
		}
	}
	/* get an inode for meta space */
	sbi->meta_inode = f2fs_iget(sb, F2FS_META_INO(sbi));
	if (IS_ERR(sbi->meta_inode)) {
		f2fs_msg(sb, KERN_ERR, "Failed to read F2FS meta data inode");
		err = PTR_ERR(sbi->meta_inode);
		goto free_io_dummy;
	}

	err = f2fs_get_valid_checkpoint(sbi);
	if (err) {
		f2fs_msg(sb, KERN_ERR, "Failed to get valid F2FS checkpoint");
		goto free_meta_inode;
	}

	if (__is_set_ckpt_flags(F2FS_CKPT(sbi), CP_QUOTA_NEED_FSCK_FLAG))
		set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
	if (__is_set_ckpt_flags(F2FS_CKPT(sbi), CP_DISABLED_QUICK_FLAG)) {
		set_sbi_flag(sbi, SBI_CP_DISABLED_QUICK);
		sbi->interval_time[DISABLE_TIME] = DEF_DISABLE_QUICK_INTERVAL;
	}
	/* Initialize device list */
	err = f2fs_scan_devices(sbi);
	if (err) {
		f2fs_msg(sb, KERN_ERR, "Failed to find devices");
		goto free_devices;
	}

	sbi->total_valid_node_count =
				le32_to_cpu(sbi->ckpt->valid_node_count);
	percpu_counter_set(&sbi->total_valid_inode_count,
				le32_to_cpu(sbi->ckpt->valid_inode_count));
	sbi->user_block_count = le64_to_cpu(sbi->ckpt->user_block_count);
	sbi->total_valid_block_count =
				le64_to_cpu(sbi->ckpt->valid_block_count);
	sbi->last_valid_block_count = sbi->total_valid_block_count;
	sbi->reserved_blocks = 0;
	sbi->current_reserved_blocks = 0;
	limit_reserve_root(sbi);
	for (i = 0; i < NR_INODE_TYPE; i++) {
		INIT_LIST_HEAD(&sbi->inode_list[i]);
		spin_lock_init(&sbi->inode_lock[i]);
	}

	f2fs_init_extent_cache_info(sbi);

	f2fs_init_ino_entry_info(sbi);

	f2fs_init_fsync_node_info(sbi);
	/* setup f2fs internal modules */
	err = f2fs_build_segment_manager(sbi);
	if (err) {
		f2fs_msg(sb, KERN_ERR,
			"Failed to initialize F2FS segment manager");
		goto free_sm;
	}
	err = f2fs_build_node_manager(sbi);
	if (err) {
		f2fs_msg(sb, KERN_ERR,
			"Failed to initialize F2FS node manager");
		goto free_nm;
	}

	/* For write statistics */
	if (sb->s_bdev->bd_part)
		sbi->sectors_written_start =
			(u64)part_stat_read(sb->s_bdev->bd_part,
						sectors[STAT_WRITE]);
	/* Read accumulated write IO statistics if they exist */
	seg_i = CURSEG_I(sbi, CURSEG_HOT_NODE);
	if (__exist_node_summaries(sbi))
		sbi->kbytes_written =
			le64_to_cpu(seg_i->journal->info.kbytes_written);

	f2fs_build_gc_manager(sbi);

	err = f2fs_build_stats(sbi);
	if (err)
		goto free_nm;
	/* get an inode for node space */
	sbi->node_inode = f2fs_iget(sb, F2FS_NODE_INO(sbi));
	if (IS_ERR(sbi->node_inode)) {
		f2fs_msg(sb, KERN_ERR, "Failed to read node inode");
		err = PTR_ERR(sbi->node_inode);
		goto free_stats;
	}

	/* read root inode and dentry */
	root = f2fs_iget(sb, F2FS_ROOT_INO(sbi));
	if (IS_ERR(root)) {
		f2fs_msg(sb, KERN_ERR, "Failed to read root inode");
		err = PTR_ERR(root);
		goto free_node_inode;
	}
	if (!S_ISDIR(root->i_mode) || !root->i_blocks ||
			!root->i_size || !root->i_nlink) {
		iput(root);
		err = -EINVAL;
		goto free_node_inode;
	}

	sb->s_root = d_make_root(root); /* allocate root dentry */
	if (!sb->s_root) {
		err = -ENOMEM;
		goto free_node_inode;
	}
	err = f2fs_register_sysfs(sbi);
	if (err)
		goto free_root_inode;

#ifdef CONFIG_QUOTA
	/* Enable quota usage during mount */
	if (f2fs_sb_has_quota_ino(sbi) && !f2fs_readonly(sb)) {
		err = f2fs_enable_quotas(sb);
		if (err)
			f2fs_msg(sb, KERN_ERR,
				"Cannot turn on quotas: error %d", err);
	}
#endif
	/* if there are any orphan inodes, free them */
	err = f2fs_recover_orphan_inodes(sbi);
	if (err)
		goto free_meta;

	if (unlikely(is_set_ckpt_flags(sbi, CP_DISABLED_FLAG)))
		goto reset_checkpoint;
	/* recover fsynced data */
	if (!test_opt(sbi, DISABLE_ROLL_FORWARD)) {
		/*
		 * The mount should fail when the device is read-only and
		 * the previous checkpoint was not made by a clean system
		 * shutdown.
		 */
		if (bdev_read_only(sb->s_bdev) &&
				!is_set_ckpt_flags(sbi, CP_UMOUNT_FLAG)) {
			err = -EROFS;
			goto free_meta;
		}

		if (need_fsck)
			set_sbi_flag(sbi, SBI_NEED_FSCK);

		if (skip_recovery)
			goto reset_checkpoint;

		err = f2fs_recover_fsync_data(sbi, false);
		if (err < 0) {
			if (err != -ENOMEM)
				skip_recovery = true;
			need_fsck = true;
			f2fs_msg(sb, KERN_ERR,
				"Cannot recover all fsync data errno=%d", err);
			goto free_meta;
		}
	} else {
		err = f2fs_recover_fsync_data(sbi, true);

		if (!f2fs_readonly(sb) && err > 0) {
			err = -EINVAL;
			f2fs_msg(sb, KERN_ERR,
				"Need to recover fsync data");
			goto free_meta;
		}
	}
reset_checkpoint:
	/* f2fs_recover_fsync_data() cleared this already */
	clear_sbi_flag(sbi, SBI_POR_DOING);

	if (test_opt(sbi, DISABLE_CHECKPOINT)) {
		err = f2fs_disable_checkpoint(sbi);
		if (err)
			goto sync_free_meta;
	} else if (is_set_ckpt_flags(sbi, CP_DISABLED_FLAG)) {
		f2fs_enable_checkpoint(sbi);
	}

	/*
	 * If the filesystem is not mounted read-only, then
	 * start the GC thread.
	 */
	if (test_opt(sbi, BG_GC) && !f2fs_readonly(sb)) {
		/* After POR, we can run background GC thread. */
		err = f2fs_start_gc_thread(sbi);
		if (err)
			goto sync_free_meta;
	}
	kvfree(options);
	/* recover broken superblock */
	if (recovery) {
		err = f2fs_commit_super(sbi, true);
		f2fs_msg(sb, KERN_INFO,
			"Try to recover %dth superblock, ret: %d",
			sbi->valid_super_block ? 1 : 2, err);
	}

	f2fs_join_shrinker(sbi);

	f2fs_tuning_parameters(sbi);

	f2fs_msg(sbi->sb, KERN_NOTICE, "Mounted with checkpoint version = %llx",
				cur_cp_version(F2FS_CKPT(sbi)));
	f2fs_update_time(sbi, CP_TIME);
	f2fs_update_time(sbi, REQ_TIME);
	clear_sbi_flag(sbi, SBI_CP_DISABLED_QUICK);
	return 0;

sync_free_meta:
	/* safe to flush all the data */
	sync_filesystem(sbi->sb);
	retry_cnt = 0;
free_meta:
#ifdef CONFIG_QUOTA
	f2fs_truncate_quota_inode_pages(sb);
	if (f2fs_sb_has_quota_ino(sbi) && !f2fs_readonly(sb))
		f2fs_quota_off_umount(sbi->sb);
#endif
	/*
	 * Some dirty meta pages can be left if f2fs_recover_orphan_inodes()
	 * fails with EIO. Then iput(node_inode) can trigger balance_fs_bg()
	 * followed by f2fs_write_checkpoint() through f2fs_write_node_pages(),
	 * which falls into an infinite loop in f2fs_sync_meta_pages().
	 */
	truncate_inode_pages_final(META_MAPPING(sbi));
	/* evict some inodes being cached by GC */
	evict_inodes(sb);
	f2fs_unregister_sysfs(sbi);
free_root_inode:
	dput(sb->s_root);
	sb->s_root = NULL;
free_node_inode:
	f2fs_release_ino_entry(sbi, true);
	truncate_inode_pages_final(NODE_MAPPING(sbi));
	iput(sbi->node_inode);
	sbi->node_inode = NULL;
free_stats:
	f2fs_destroy_stats(sbi);
free_nm:
	f2fs_destroy_node_manager(sbi);
free_sm:
	f2fs_destroy_segment_manager(sbi);
free_devices:
	destroy_device_list(sbi);
	kvfree(sbi->ckpt);
free_meta_inode:
	make_bad_inode(sbi->meta_inode);
	iput(sbi->meta_inode);
	sbi->meta_inode = NULL;
free_io_dummy:
	mempool_destroy(sbi->write_io_dummy);
free_percpu:
	destroy_percpu_info(sbi);
free_bio_info:
	for (i = 0; i < NR_PAGE_TYPE; i++)
		kvfree(sbi->write_io[i]);
free_options:
#ifdef CONFIG_QUOTA
	for (i = 0; i < MAXQUOTAS; i++)
		kvfree(F2FS_OPTION(sbi).s_qf_names[i]);
#endif
	kvfree(options);
free_sb_buf:
	kvfree(raw_super);
free_sbi:
	if (sbi->s_chksum_driver)
		crypto_free_shash(sbi->s_chksum_driver);
	kvfree(sbi);

	/* give only one more chance to retry */
	if (retry_cnt > 0 && skip_recovery) {
		retry_cnt--;
		shrink_dcache_sb(sb);
		goto try_onemore;
	}
	return err;
}
static struct dentry *f2fs_mount(struct file_system_type *fs_type, int flags,
			const char *dev_name, void *data)
{
	return mount_bdev(fs_type, flags, dev_name, data, f2fs_fill_super);
}
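
/*
 * Stop background threads and, if the filesystem is dirty or was not
 * cleanly unmounted, write a final CP_UMOUNT checkpoint before tearing
 * down the generic super block.
 */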
static void kill_f2fs_super(struct super_block *sb)
{
	if (sb->s_root) {
		struct f2fs_sb_info *sbi = F2FS_SB(sb);

		set_sbi_flag(sbi, SBI_IS_CLOSE);
		f2fs_stop_gc_thread(sbi);
		f2fs_stop_discard_thread(sbi);

		if (is_sbi_flag_set(sbi, SBI_IS_DIRTY) ||
				!is_set_ckpt_flags(sbi, CP_UMOUNT_FLAG)) {
			struct cp_control cpc = {
				.reason = CP_UMOUNT,
			};
			f2fs_write_checkpoint(sbi, &cpc);
		}

		if (is_sbi_flag_set(sbi, SBI_IS_RECOVERED) && f2fs_readonly(sb))
			sb->s_flags &= ~SB_RDONLY;
	}
	kill_block_super(sb);
}
static struct file_system_type f2fs_fs_type = {
	.owner		= THIS_MODULE,
	.name		= "f2fs",
	.mount		= f2fs_mount,
	.kill_sb	= kill_f2fs_super,
	.fs_flags	= FS_REQUIRES_DEV,
};
MODULE_ALIAS_FS("f2fs");
static int __init init_inodecache(void)
{
	f2fs_inode_cachep = kmem_cache_create("f2fs_inode_cache",
			sizeof(struct f2fs_inode_info), 0,
			SLAB_RECLAIM_ACCOUNT|SLAB_ACCOUNT, NULL);
	if (!f2fs_inode_cachep)
		return -ENOMEM;
	return 0;
}

static void destroy_inodecache(void)
{
	/*
	 * Make sure all delayed rcu free inodes are flushed before we
	 * destroy the cache.
	 */
	rcu_barrier();
	kmem_cache_destroy(f2fs_inode_cachep);
}
static int __init init_f2fs_fs(void)
{
	int err;

	if (PAGE_SIZE != F2FS_BLKSIZE) {
		printk(KERN_ERR "F2FS not supported on PAGE_SIZE(%lu) != %d\n",
				PAGE_SIZE, F2FS_BLKSIZE);
		return -EINVAL;
	}

	f2fs_build_trace_ios();

	err = init_inodecache();
	if (err)
		goto fail;
	err = f2fs_create_node_manager_caches();
	if (err)
		goto free_inodecache;
	err = f2fs_create_segment_manager_caches();
	if (err)
		goto free_node_manager_caches;
	err = f2fs_create_checkpoint_caches();
	if (err)
		goto free_segment_manager_caches;
	err = f2fs_create_extent_cache();
	if (err)
		goto free_checkpoint_caches;
	err = f2fs_init_sysfs();
	if (err)
		goto free_extent_cache;
	err = register_shrinker(&f2fs_shrinker_info);
	if (err)
		goto free_sysfs;
	err = register_filesystem(&f2fs_fs_type);
	if (err)
		goto free_shrinker;
	f2fs_create_root_stats();
	err = f2fs_init_post_read_processing();
	if (err)
		goto free_root_stats;
	return 0;
free_root_stats:
	f2fs_destroy_root_stats();
	unregister_filesystem(&f2fs_fs_type);
free_shrinker:
	unregister_shrinker(&f2fs_shrinker_info);
free_sysfs:
	f2fs_exit_sysfs();
free_extent_cache:
	f2fs_destroy_extent_cache();
free_checkpoint_caches:
	f2fs_destroy_checkpoint_caches();
free_segment_manager_caches:
	f2fs_destroy_segment_manager_caches();
free_node_manager_caches:
	f2fs_destroy_node_manager_caches();
fail:
	destroy_inodecache();
	return err;
}
static void __exit exit_f2fs_fs(void)
{
	f2fs_destroy_post_read_processing();
	f2fs_destroy_root_stats();
	unregister_filesystem(&f2fs_fs_type);
	unregister_shrinker(&f2fs_shrinker_info);
	f2fs_exit_sysfs();
	f2fs_destroy_extent_cache();
	f2fs_destroy_checkpoint_caches();
	f2fs_destroy_segment_manager_caches();
	f2fs_destroy_node_manager_caches();
	destroy_inodecache();
	f2fs_destroy_trace_ios();
}
module_init(init_f2fs_fs)
module_exit(exit_f2fs_fs)

MODULE_AUTHOR("Samsung Electronics's Praesto Team");
MODULE_DESCRIPTION("Flash Friendly File System");
MODULE_LICENSE("GPL");