/*
 * fs/f2fs/super.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/statfs.h>
#include <linux/buffer_head.h>
#include <linux/backing-dev.h>
#include <linux/kthread.h>
#include <linux/parser.h>
#include <linux/mount.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <linux/random.h>
#include <linux/exportfs.h>
#include <linux/blkdev.h>
#include <linux/quotaops.h>
#include <linux/f2fs_fs.h>
#include <linux/sysfs.h>
#include <linux/quota.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include "xattr.h"
#include "gc.h"
#include "trace.h"

#define CREATE_TRACE_POINTS
#include <trace/events/f2fs.h>

static struct kmem_cache *f2fs_inode_cachep;
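/*
 * Optional fault injection: with CONFIG_F2FS_FAULT_INJECTION, individual
 * f2fs failure paths can be triggered on demand via the fault_injection=%u
 * mount option (see fault_name[] below for the supported fault types).
 */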
#ifdef CONFIG_F2FS_FAULT_INJECTION

char *fault_name[FAULT_MAX] = {
	[FAULT_KMALLOC]		= "kmalloc",
	[FAULT_KVMALLOC]	= "kvmalloc",
	[FAULT_PAGE_ALLOC]	= "page alloc",
	[FAULT_PAGE_GET]	= "page get",
	[FAULT_ALLOC_BIO]	= "alloc bio",
	[FAULT_ALLOC_NID]	= "alloc nid",
	[FAULT_ORPHAN]		= "orphan",
	[FAULT_BLOCK]		= "no more block",
	[FAULT_DIR_DEPTH]	= "too big dir depth",
	[FAULT_EVICT_INODE]	= "evict_inode fail",
	[FAULT_TRUNCATE]	= "truncate fail",
	[FAULT_IO]		= "IO error",
	[FAULT_CHECKPOINT]	= "checkpoint error",
};

static void f2fs_build_fault_attr(struct f2fs_sb_info *sbi,
						unsigned int rate)
{
	struct f2fs_fault_info *ffi = &F2FS_OPTION(sbi).fault_info;

	if (rate) {
		atomic_set(&ffi->inject_ops, 0);
		ffi->inject_rate = rate;
		ffi->inject_type = (1 << FAULT_MAX) - 1;
	} else {
		memset(ffi, 0, sizeof(struct f2fs_fault_info));
	}
}
#endif
/* f2fs-wide shrinker description */
static struct shrinker f2fs_shrinker_info = {
	.scan_objects = f2fs_shrink_scan,
	.count_objects = f2fs_shrink_count,
	.seeks = DEFAULT_SEEKS,
};
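/*
 * The shrinker is registered once at module init and lets the VM reclaim
 * f2fs in-memory caches (extent nodes, NAT entries, free nids) under
 * memory pressure through f2fs_shrink_count()/f2fs_shrink_scan().
 */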
enum {
	/* ... earlier Opt_* tokens elided ... */
	Opt_disable_roll_forward,
	/* ... */
	Opt_disable_ext_identify,
	/* ... */
	Opt_inline_xattr_size,
	/* ... */
	Opt_test_dummy_encryption,
	Opt_err,
};
static match_table_t f2fs_tokens = {
	{Opt_gc_background, "background_gc=%s"},
	{Opt_disable_roll_forward, "disable_roll_forward"},
	{Opt_norecovery, "norecovery"},
	{Opt_discard, "discard"},
	{Opt_nodiscard, "nodiscard"},
	{Opt_noheap, "no_heap"},
	{Opt_heap, "heap"},
	{Opt_user_xattr, "user_xattr"},
	{Opt_nouser_xattr, "nouser_xattr"},
	{Opt_acl, "acl"},
	{Opt_noacl, "noacl"},
	{Opt_active_logs, "active_logs=%u"},
	{Opt_disable_ext_identify, "disable_ext_identify"},
	{Opt_inline_xattr, "inline_xattr"},
	{Opt_noinline_xattr, "noinline_xattr"},
	{Opt_inline_xattr_size, "inline_xattr_size=%u"},
	{Opt_inline_data, "inline_data"},
	{Opt_inline_dentry, "inline_dentry"},
	{Opt_noinline_dentry, "noinline_dentry"},
	{Opt_flush_merge, "flush_merge"},
	{Opt_noflush_merge, "noflush_merge"},
	{Opt_nobarrier, "nobarrier"},
	{Opt_fastboot, "fastboot"},
	{Opt_extent_cache, "extent_cache"},
	{Opt_noextent_cache, "noextent_cache"},
	{Opt_noinline_data, "noinline_data"},
	{Opt_data_flush, "data_flush"},
	{Opt_reserve_root, "reserve_root=%u"},
	{Opt_resgid, "resgid=%u"},
	{Opt_resuid, "resuid=%u"},
	{Opt_mode, "mode=%s"},
	{Opt_io_size_bits, "io_bits=%u"},
	{Opt_fault_injection, "fault_injection=%u"},
	{Opt_lazytime, "lazytime"},
	{Opt_nolazytime, "nolazytime"},
	{Opt_quota, "quota"},
	{Opt_noquota, "noquota"},
	{Opt_usrquota, "usrquota"},
	{Opt_grpquota, "grpquota"},
	{Opt_prjquota, "prjquota"},
	{Opt_usrjquota, "usrjquota=%s"},
	{Opt_grpjquota, "grpjquota=%s"},
	{Opt_prjjquota, "prjjquota=%s"},
	{Opt_offusrjquota, "usrjquota="},
	{Opt_offgrpjquota, "grpjquota="},
	{Opt_offprjjquota, "prjjquota="},
	{Opt_jqfmt_vfsold, "jqfmt=vfsold"},
	{Opt_jqfmt_vfsv0, "jqfmt=vfsv0"},
	{Opt_jqfmt_vfsv1, "jqfmt=vfsv1"},
	{Opt_whint, "whint_mode=%s"},
	{Opt_alloc, "alloc_mode=%s"},
	{Opt_fsync, "fsync_mode=%s"},
	{Opt_test_dummy_encryption, "test_dummy_encryption"},
	{Opt_err, NULL},
};
void f2fs_msg(struct super_block *sb, const char *level, const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, fmt);
	vaf.fmt = fmt;
	vaf.va = &args;
	printk_ratelimited("%sF2FS-fs (%s): %pV\n", level, sb->s_id, &vaf);
	va_end(args);
}
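/*
 * reserve_root is capped below at 0.2% of the user-visible block count
 * ((count << 1) / 1000), so a stray mount option cannot reserve most of
 * the volume for root.
 */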
static inline void limit_reserve_root(struct f2fs_sb_info *sbi)
{
	block_t limit = (sbi->user_block_count << 1) / 1000;

	if (test_opt(sbi, RESERVE_ROOT) &&
			F2FS_OPTION(sbi).root_reserved_blocks > limit) {
		F2FS_OPTION(sbi).root_reserved_blocks = limit;
		f2fs_msg(sbi->sb, KERN_INFO,
			"Reduce reserved blocks for root = %u",
			F2FS_OPTION(sbi).root_reserved_blocks);
	}
	if (!test_opt(sbi, RESERVE_ROOT) &&
		(!uid_eq(F2FS_OPTION(sbi).s_resuid,
				make_kuid(&init_user_ns, F2FS_DEF_RESUID)) ||
		!gid_eq(F2FS_OPTION(sbi).s_resgid,
				make_kgid(&init_user_ns, F2FS_DEF_RESGID))))
		f2fs_msg(sbi->sb, KERN_INFO,
			"Ignore s_resuid=%u, s_resgid=%u w/o reserve_root",
				from_kuid_munged(&init_user_ns,
					F2FS_OPTION(sbi).s_resuid),
				from_kgid_munged(&init_user_ns,
					F2FS_OPTION(sbi).s_resgid));
}
static void init_once(void *foo)
{
	struct f2fs_inode_info *fi = (struct f2fs_inode_info *) foo;

	inode_init_once(&fi->vfs_inode);
}
#ifdef CONFIG_QUOTA
static const char * const quotatypes[] = INITQFNAMES;
#define QTYPE2NAME(t) (quotatypes[t])
static int f2fs_set_qf_name(struct super_block *sb, int qtype,
							substring_t *args)
{
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	char *qname;
	int ret = -EINVAL;

	if (sb_any_quota_loaded(sb) && !F2FS_OPTION(sbi).s_qf_names[qtype]) {
		f2fs_msg(sb, KERN_ERR,
			"Cannot change journaled "
			"quota options when quota turned on");
		return -EINVAL;
	}
	if (f2fs_sb_has_quota_ino(sb)) {
		f2fs_msg(sb, KERN_INFO,
			"QUOTA feature is enabled, so ignore qf_name");
		return 0;
	}

	qname = match_strdup(args);
	if (!qname) {
		f2fs_msg(sb, KERN_ERR,
			"Not enough memory for storing quotafile name");
		return -EINVAL;
	}
	if (F2FS_OPTION(sbi).s_qf_names[qtype]) {
		if (strcmp(F2FS_OPTION(sbi).s_qf_names[qtype], qname) == 0)
			ret = 0;
		else
			f2fs_msg(sb, KERN_ERR,
				"%s quota file already specified",
				QTYPE2NAME(qtype));
		goto errout;
	}
	if (strchr(qname, '/')) {
		f2fs_msg(sb, KERN_ERR,
			"quotafile must be on filesystem root");
		goto errout;
	}
	F2FS_OPTION(sbi).s_qf_names[qtype] = qname;
	set_opt(sbi, QUOTA);
	return 0;
errout:
	kfree(qname);
	return ret;
}
static int f2fs_clear_qf_name(struct super_block *sb, int qtype)
{
	struct f2fs_sb_info *sbi = F2FS_SB(sb);

	if (sb_any_quota_loaded(sb) && F2FS_OPTION(sbi).s_qf_names[qtype]) {
		f2fs_msg(sb, KERN_ERR, "Cannot change journaled quota options"
			" when quota turned on");
		return -EINVAL;
	}
	kfree(F2FS_OPTION(sbi).s_qf_names[qtype]);
	F2FS_OPTION(sbi).s_qf_names[qtype] = NULL;
	return 0;
}
static int f2fs_check_quota_options(struct f2fs_sb_info *sbi)
{
	/*
	 * We do the test below only for project quotas. 'usrquota' and
	 * 'grpquota' mount options are allowed even without quota feature
	 * to support legacy quotas in quota files.
	 */
	if (test_opt(sbi, PRJQUOTA) && !f2fs_sb_has_project_quota(sbi->sb)) {
		f2fs_msg(sbi->sb, KERN_ERR, "Project quota feature not enabled. "
			 "Cannot enable project quota enforcement.");
		return -1;
	}
	if (F2FS_OPTION(sbi).s_qf_names[USRQUOTA] ||
			F2FS_OPTION(sbi).s_qf_names[GRPQUOTA] ||
			F2FS_OPTION(sbi).s_qf_names[PRJQUOTA]) {
		if (test_opt(sbi, USRQUOTA) &&
				F2FS_OPTION(sbi).s_qf_names[USRQUOTA])
			clear_opt(sbi, USRQUOTA);

		if (test_opt(sbi, GRPQUOTA) &&
				F2FS_OPTION(sbi).s_qf_names[GRPQUOTA])
			clear_opt(sbi, GRPQUOTA);

		if (test_opt(sbi, PRJQUOTA) &&
				F2FS_OPTION(sbi).s_qf_names[PRJQUOTA])
			clear_opt(sbi, PRJQUOTA);

		if (test_opt(sbi, GRPQUOTA) || test_opt(sbi, USRQUOTA) ||
				test_opt(sbi, PRJQUOTA)) {
			f2fs_msg(sbi->sb, KERN_ERR, "old and new quota "
					"format mixing");
			return -1;
		}

		if (!F2FS_OPTION(sbi).s_jquota_fmt) {
			f2fs_msg(sbi->sb, KERN_ERR, "journaled quota format "
					"not specified");
			return -1;
		}
	}

	if (f2fs_sb_has_quota_ino(sbi->sb) && F2FS_OPTION(sbi).s_jquota_fmt) {
		f2fs_msg(sbi->sb, KERN_INFO,
			"QUOTA feature is enabled, so ignore jquota_fmt");
		F2FS_OPTION(sbi).s_jquota_fmt = 0;
	}
	if (f2fs_sb_has_quota_ino(sbi->sb) && f2fs_readonly(sbi->sb)) {
		f2fs_msg(sbi->sb, KERN_INFO,
			 "Filesystem with quota feature cannot be mounted RDWR "
			 "without CONFIG_QUOTA");
		return -1;
	}
	return 0;
}
#endif
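/*
 * Parse the comma-separated mount option string against the f2fs_tokens
 * table above; cross-option constraints (quota mixing, io_bits vs. LFS
 * mode, inline_xattr_size bounds) are validated after the token loop.
 */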
static int parse_options(struct super_block *sb, char *options)
{
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	struct request_queue *q;
	substring_t args[MAX_OPT_ARGS];
	char *p, *name;
	int arg = 0, token;
	kuid_t uid;
	kgid_t gid;
#ifdef CONFIG_QUOTA
	int ret;
#endif

	if (!options)
		return 0;

	while ((p = strsep(&options, ",")) != NULL) {
		if (!*p)
			continue;
		/*
		 * Initialize args struct so we know whether arg was
		 * found; some options take optional arguments.
		 */
		args[0].to = args[0].from = NULL;
		token = match_token(p, f2fs_tokens, args);

		switch (token) {
		case Opt_gc_background:
			name = match_strdup(&args[0]);
			if (!name)
				return -ENOMEM;
			if (strlen(name) == 2 && !strncmp(name, "on", 2)) {
				set_opt(sbi, BG_GC);
				clear_opt(sbi, FORCE_FG_GC);
			} else if (strlen(name) == 3 && !strncmp(name, "off", 3)) {
				clear_opt(sbi, BG_GC);
				clear_opt(sbi, FORCE_FG_GC);
			} else if (strlen(name) == 4 && !strncmp(name, "sync", 4)) {
				set_opt(sbi, BG_GC);
				set_opt(sbi, FORCE_FG_GC);
			} else {
				kfree(name);
				return -EINVAL;
			}
			kfree(name);
			break;
		case Opt_disable_roll_forward:
			set_opt(sbi, DISABLE_ROLL_FORWARD);
			break;
		case Opt_norecovery:
			/* this option mounts f2fs with ro */
			set_opt(sbi, DISABLE_ROLL_FORWARD);
			if (!f2fs_readonly(sb))
				return -EINVAL;
			break;
		case Opt_discard:
			q = bdev_get_queue(sb->s_bdev);
			if (blk_queue_discard(q)) {
				set_opt(sbi, DISCARD);
			} else if (!f2fs_sb_has_blkzoned(sb)) {
				f2fs_msg(sb, KERN_WARNING,
					"mounting with \"discard\" option, but "
					"the device does not support discard");
			}
			break;
		case Opt_nodiscard:
			if (f2fs_sb_has_blkzoned(sb)) {
				f2fs_msg(sb, KERN_WARNING,
					"discard is required for zoned block devices");
				return -EINVAL;
			}
			clear_opt(sbi, DISCARD);
			break;
		case Opt_noheap:
			set_opt(sbi, NOHEAP);
			break;
		case Opt_heap:
			clear_opt(sbi, NOHEAP);
			break;
441 #ifdef CONFIG_F2FS_FS_XATTR
443 set_opt(sbi, XATTR_USER);
445 case Opt_nouser_xattr:
446 clear_opt(sbi, XATTR_USER);
448 case Opt_inline_xattr:
449 set_opt(sbi, INLINE_XATTR);
451 case Opt_noinline_xattr:
452 clear_opt(sbi, INLINE_XATTR);
454 case Opt_inline_xattr_size:
455 if (args->from && match_int(args, &arg))
457 set_opt(sbi, INLINE_XATTR_SIZE);
458 F2FS_OPTION(sbi).inline_xattr_size = arg;
462 f2fs_msg(sb, KERN_INFO,
463 "user_xattr options not supported");
465 case Opt_nouser_xattr:
466 f2fs_msg(sb, KERN_INFO,
467 "nouser_xattr options not supported");
469 case Opt_inline_xattr:
470 f2fs_msg(sb, KERN_INFO,
471 "inline_xattr options not supported");
473 case Opt_noinline_xattr:
474 f2fs_msg(sb, KERN_INFO,
475 "noinline_xattr options not supported");
478 #ifdef CONFIG_F2FS_FS_POSIX_ACL
480 set_opt(sbi, POSIX_ACL);
483 clear_opt(sbi, POSIX_ACL);
487 f2fs_msg(sb, KERN_INFO, "acl options not supported");
490 f2fs_msg(sb, KERN_INFO, "noacl options not supported");
493 case Opt_active_logs:
494 if (args->from && match_int(args, &arg))
496 if (arg != 2 && arg != 4 && arg != NR_CURSEG_TYPE)
498 F2FS_OPTION(sbi).active_logs = arg;
500 case Opt_disable_ext_identify:
501 set_opt(sbi, DISABLE_EXT_IDENTIFY);
503 case Opt_inline_data:
504 set_opt(sbi, INLINE_DATA);
506 case Opt_inline_dentry:
507 set_opt(sbi, INLINE_DENTRY);
509 case Opt_noinline_dentry:
510 clear_opt(sbi, INLINE_DENTRY);
512 case Opt_flush_merge:
513 set_opt(sbi, FLUSH_MERGE);
515 case Opt_noflush_merge:
516 clear_opt(sbi, FLUSH_MERGE);
519 set_opt(sbi, NOBARRIER);
522 set_opt(sbi, FASTBOOT);
524 case Opt_extent_cache:
525 set_opt(sbi, EXTENT_CACHE);
527 case Opt_noextent_cache:
528 clear_opt(sbi, EXTENT_CACHE);
530 case Opt_noinline_data:
531 clear_opt(sbi, INLINE_DATA);
534 set_opt(sbi, DATA_FLUSH);
536 case Opt_reserve_root:
537 if (args->from && match_int(args, &arg))
539 if (test_opt(sbi, RESERVE_ROOT)) {
540 f2fs_msg(sb, KERN_INFO,
541 "Preserve previous reserve_root=%u",
542 F2FS_OPTION(sbi).root_reserved_blocks);
544 F2FS_OPTION(sbi).root_reserved_blocks = arg;
545 set_opt(sbi, RESERVE_ROOT);
549 if (args->from && match_int(args, &arg))
551 uid = make_kuid(current_user_ns(), arg);
552 if (!uid_valid(uid)) {
553 f2fs_msg(sb, KERN_ERR,
554 "Invalid uid value %d", arg);
557 F2FS_OPTION(sbi).s_resuid = uid;
560 if (args->from && match_int(args, &arg))
562 gid = make_kgid(current_user_ns(), arg);
563 if (!gid_valid(gid)) {
564 f2fs_msg(sb, KERN_ERR,
565 "Invalid gid value %d", arg);
568 F2FS_OPTION(sbi).s_resgid = gid;
571 name = match_strdup(&args[0]);
575 if (strlen(name) == 8 &&
576 !strncmp(name, "adaptive", 8)) {
577 if (f2fs_sb_has_blkzoned(sb)) {
578 f2fs_msg(sb, KERN_WARNING,
579 "adaptive mode is not allowed with "
580 "zoned block device feature");
584 set_opt_mode(sbi, F2FS_MOUNT_ADAPTIVE);
585 } else if (strlen(name) == 3 &&
586 !strncmp(name, "lfs", 3)) {
587 set_opt_mode(sbi, F2FS_MOUNT_LFS);
594 case Opt_io_size_bits:
595 if (args->from && match_int(args, &arg))
597 if (arg > __ilog2_u32(BIO_MAX_PAGES)) {
598 f2fs_msg(sb, KERN_WARNING,
599 "Not support %d, larger than %d",
600 1 << arg, BIO_MAX_PAGES);
603 F2FS_OPTION(sbi).write_io_size_bits = arg;
605 case Opt_fault_injection:
606 if (args->from && match_int(args, &arg))
608 #ifdef CONFIG_F2FS_FAULT_INJECTION
609 f2fs_build_fault_attr(sbi, arg);
610 set_opt(sbi, FAULT_INJECTION);
612 f2fs_msg(sb, KERN_INFO,
613 "FAULT_INJECTION was not selected");
617 sb->s_flags |= SB_LAZYTIME;
620 sb->s_flags &= ~SB_LAZYTIME;
625 set_opt(sbi, USRQUOTA);
628 set_opt(sbi, GRPQUOTA);
631 set_opt(sbi, PRJQUOTA);
634 ret = f2fs_set_qf_name(sb, USRQUOTA, &args[0]);
639 ret = f2fs_set_qf_name(sb, GRPQUOTA, &args[0]);
644 ret = f2fs_set_qf_name(sb, PRJQUOTA, &args[0]);
648 case Opt_offusrjquota:
649 ret = f2fs_clear_qf_name(sb, USRQUOTA);
653 case Opt_offgrpjquota:
654 ret = f2fs_clear_qf_name(sb, GRPQUOTA);
658 case Opt_offprjjquota:
659 ret = f2fs_clear_qf_name(sb, PRJQUOTA);
663 case Opt_jqfmt_vfsold:
664 F2FS_OPTION(sbi).s_jquota_fmt = QFMT_VFS_OLD;
666 case Opt_jqfmt_vfsv0:
667 F2FS_OPTION(sbi).s_jquota_fmt = QFMT_VFS_V0;
669 case Opt_jqfmt_vfsv1:
670 F2FS_OPTION(sbi).s_jquota_fmt = QFMT_VFS_V1;
673 clear_opt(sbi, QUOTA);
674 clear_opt(sbi, USRQUOTA);
675 clear_opt(sbi, GRPQUOTA);
676 clear_opt(sbi, PRJQUOTA);
			break;
#else
		case Opt_offusrjquota:
		case Opt_offgrpjquota:
		case Opt_offprjjquota:
		case Opt_jqfmt_vfsold:
		case Opt_jqfmt_vfsv0:
		case Opt_jqfmt_vfsv1:
		case Opt_noquota:
			f2fs_msg(sb, KERN_INFO,
					"quota operations not supported");
			break;
#endif
698 name = match_strdup(&args[0]);
701 if (strlen(name) == 10 &&
702 !strncmp(name, "user-based", 10)) {
703 F2FS_OPTION(sbi).whint_mode = WHINT_MODE_USER;
704 } else if (strlen(name) == 3 &&
705 !strncmp(name, "off", 3)) {
706 F2FS_OPTION(sbi).whint_mode = WHINT_MODE_OFF;
707 } else if (strlen(name) == 8 &&
708 !strncmp(name, "fs-based", 8)) {
709 F2FS_OPTION(sbi).whint_mode = WHINT_MODE_FS;
717 name = match_strdup(&args[0]);
721 if (strlen(name) == 7 &&
722 !strncmp(name, "default", 7)) {
723 F2FS_OPTION(sbi).alloc_mode = ALLOC_MODE_DEFAULT;
724 } else if (strlen(name) == 5 &&
725 !strncmp(name, "reuse", 5)) {
726 F2FS_OPTION(sbi).alloc_mode = ALLOC_MODE_REUSE;
734 name = match_strdup(&args[0]);
737 if (strlen(name) == 5 &&
738 !strncmp(name, "posix", 5)) {
739 F2FS_OPTION(sbi).fsync_mode = FSYNC_MODE_POSIX;
740 } else if (strlen(name) == 6 &&
741 !strncmp(name, "strict", 6)) {
742 F2FS_OPTION(sbi).fsync_mode = FSYNC_MODE_STRICT;
743 } else if (strlen(name) == 9 &&
744 !strncmp(name, "nobarrier", 9)) {
745 F2FS_OPTION(sbi).fsync_mode =
746 FSYNC_MODE_NOBARRIER;
		case Opt_test_dummy_encryption:
#ifdef CONFIG_F2FS_FS_ENCRYPTION
			if (!f2fs_sb_has_encrypt(sb)) {
				f2fs_msg(sb, KERN_ERR, "Encrypt feature is off");
				return -EINVAL;
			}

			F2FS_OPTION(sbi).test_dummy_encryption = true;
			f2fs_msg(sb, KERN_INFO,
					"Test dummy encryption mode enabled");
#else
			f2fs_msg(sb, KERN_INFO,
					"Test dummy encryption mount option ignored");
#endif
			break;
		default:
			f2fs_msg(sb, KERN_ERR,
				"Unrecognized mount option \"%s\" or missing value",
				p);
			return -EINVAL;
		}
	}
#ifdef CONFIG_QUOTA
	if (f2fs_check_quota_options(sbi))
		return -EINVAL;
#endif
	if (F2FS_IO_SIZE_BITS(sbi) && !test_opt(sbi, LFS)) {
		f2fs_msg(sb, KERN_ERR,
				"Should set mode=lfs with %uKB-sized IO",
				F2FS_IO_SIZE_KB(sbi));
		return -EINVAL;
	}
	if (test_opt(sbi, INLINE_XATTR_SIZE)) {
		if (!f2fs_sb_has_extra_attr(sb) ||
			!f2fs_sb_has_flexible_inline_xattr(sb)) {
			f2fs_msg(sb, KERN_ERR,
					"extra_attr or flexible_inline_xattr "
					"feature is off");
			return -EINVAL;
		}
		if (!test_opt(sbi, INLINE_XATTR)) {
			f2fs_msg(sb, KERN_ERR,
					"inline_xattr_size option should be "
					"set with inline_xattr option");
			return -EINVAL;
		}
		if (!F2FS_OPTION(sbi).inline_xattr_size ||
			F2FS_OPTION(sbi).inline_xattr_size >=
					DEF_ADDRS_PER_INODE -
					F2FS_TOTAL_EXTRA_ATTR_SIZE -
					DEF_INLINE_RESERVED_SIZE -
					DEF_MIN_INLINE_SIZE) {
			f2fs_msg(sb, KERN_ERR,
					"inline xattr size is out of range");
			return -EINVAL;
		}
	}
	/*
	 * Should not pass down write hints if the number of active
	 * logs is less than NR_CURSEG_TYPE.
	 */
	if (F2FS_OPTION(sbi).active_logs != NR_CURSEG_TYPE)
		F2FS_OPTION(sbi).whint_mode = WHINT_MODE_OFF;
	return 0;
}
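/*
 * Inode cache callbacks: f2fs inodes are carved from f2fs_inode_cachep
 * and zero-filled (GFP_F2FS_ZERO) before the f2fs-specific fields are
 * initialized below.
 */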
static struct inode *f2fs_alloc_inode(struct super_block *sb)
{
	struct f2fs_inode_info *fi;

	fi = kmem_cache_alloc(f2fs_inode_cachep, GFP_F2FS_ZERO);
	if (!fi)
		return NULL;

	init_once((void *) fi);

	/* Initialize f2fs-specific inode info */
	atomic_set(&fi->dirty_pages, 0);
833 fi->i_current_depth = 1;
834 init_rwsem(&fi->i_sem);
835 INIT_LIST_HEAD(&fi->dirty_list);
836 INIT_LIST_HEAD(&fi->gdirty_list);
837 INIT_LIST_HEAD(&fi->inmem_ilist);
838 INIT_LIST_HEAD(&fi->inmem_pages);
839 mutex_init(&fi->inmem_lock);
840 init_rwsem(&fi->dio_rwsem[READ]);
841 init_rwsem(&fi->dio_rwsem[WRITE]);
842 init_rwsem(&fi->i_mmap_sem);
843 init_rwsem(&fi->i_xattr_sem);
	/* Will be used by directory only */
	fi->i_dir_level = F2FS_SB(sb)->dir_level;

	return &fi->vfs_inode;
}
static int f2fs_drop_inode(struct inode *inode)
{
	int ret;

	/*
	 * This is to avoid a deadlock condition like below.
	 * writeback_single_inode(inode)
	 *  - f2fs_write_data_page
	 *    - f2fs_gc -> iput -> evict
	 *       - inode_wait_for_writeback(inode)
	 */
	if ((!inode_unhashed(inode) && inode->i_state & I_SYNC)) {
862 if (!inode->i_nlink && !is_bad_inode(inode)) {
863 /* to avoid evict_inode call simultaneously */
864 atomic_inc(&inode->i_count);
865 spin_unlock(&inode->i_lock);
867 /* some remained atomic pages should discarded */
868 if (f2fs_is_atomic_file(inode))
869 drop_inmem_pages(inode);
871 /* should remain fi->extent_tree for writepage */
872 f2fs_destroy_extent_node(inode);
874 sb_start_intwrite(inode->i_sb);
875 f2fs_i_size_write(inode, 0);
877 if (F2FS_HAS_BLOCKS(inode))
878 f2fs_truncate(inode);
880 sb_end_intwrite(inode->i_sb);
			spin_lock(&inode->i_lock);
			atomic_dec(&inode->i_count);
		}
		trace_f2fs_drop_inode(inode, 0);
		return 0;
	}
	ret = generic_drop_inode(inode);
	trace_f2fs_drop_inode(inode, ret);
	return ret;
}
int f2fs_inode_dirtied(struct inode *inode, bool sync)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	int ret = 0;

	spin_lock(&sbi->inode_lock[DIRTY_META]);
	if (is_inode_flag_set(inode, FI_DIRTY_INODE)) {
		ret = 1;
	} else {
		set_inode_flag(inode, FI_DIRTY_INODE);
		stat_inc_dirty_inode(sbi, DIRTY_META);
	}
	if (sync && list_empty(&F2FS_I(inode)->gdirty_list)) {
		list_add_tail(&F2FS_I(inode)->gdirty_list,
				&sbi->inode_list[DIRTY_META]);
		inc_page_count(sbi, F2FS_DIRTY_IMETA);
	}
	spin_unlock(&sbi->inode_lock[DIRTY_META]);
	return ret;
}
void f2fs_inode_synced(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

	spin_lock(&sbi->inode_lock[DIRTY_META]);
	if (!is_inode_flag_set(inode, FI_DIRTY_INODE)) {
		spin_unlock(&sbi->inode_lock[DIRTY_META]);
		return;
	}
	if (!list_empty(&F2FS_I(inode)->gdirty_list)) {
		list_del_init(&F2FS_I(inode)->gdirty_list);
		dec_page_count(sbi, F2FS_DIRTY_IMETA);
	}
	clear_inode_flag(inode, FI_DIRTY_INODE);
	clear_inode_flag(inode, FI_AUTO_RECOVER);
	stat_dec_dirty_inode(F2FS_I_SB(inode), DIRTY_META);
	spin_unlock(&sbi->inode_lock[DIRTY_META]);
}
/*
 * f2fs_dirty_inode() is called from __mark_inode_dirty()
 *
 * We should call set_dirty_inode to write the dirty inode through write_inode.
 */
static void f2fs_dirty_inode(struct inode *inode, int flags)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

	if (inode->i_ino == F2FS_NODE_INO(sbi) ||
			inode->i_ino == F2FS_META_INO(sbi))
		return;

	if (flags == I_DIRTY_TIME)
		return;

	if (is_inode_flag_set(inode, FI_AUTO_RECOVER))
		clear_inode_flag(inode, FI_AUTO_RECOVER);

	f2fs_inode_dirtied(inode, false);
}
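/*
 * Inode freeing is deferred through RCU: f2fs_destroy_inode() queues
 * f2fs_i_callback(), so lock-free readers never see the slab object
 * recycled under them.
 */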
static void f2fs_i_callback(struct rcu_head *head)
{
	struct inode *inode = container_of(head, struct inode, i_rcu);

	kmem_cache_free(f2fs_inode_cachep, F2FS_I(inode));
}

static void f2fs_destroy_inode(struct inode *inode)
{
	call_rcu(&inode->i_rcu, f2fs_i_callback);
}
966 static void destroy_percpu_info(struct f2fs_sb_info *sbi)
968 percpu_counter_destroy(&sbi->alloc_valid_block_count);
969 percpu_counter_destroy(&sbi->total_valid_inode_count);
972 static void destroy_device_list(struct f2fs_sb_info *sbi)
976 for (i = 0; i < sbi->s_ndevs; i++) {
977 blkdev_put(FDEV(i).bdev, FMODE_EXCL);
978 #ifdef CONFIG_BLK_DEV_ZONED
979 kfree(FDEV(i).blkz_type);
985 static void f2fs_put_super(struct super_block *sb)
987 struct f2fs_sb_info *sbi = F2FS_SB(sb);
991 f2fs_quota_off_umount(sb);
993 /* prevent remaining shrinker jobs */
	mutex_lock(&sbi->umount_mutex);

	/*
	 * We don't need to do checkpoint when superblock is clean.
	 * But if the previous checkpoint was not done by umount, it needs to
	 * do a clean checkpoint again.
	 */
1001 if (is_sbi_flag_set(sbi, SBI_IS_DIRTY) ||
1002 !is_set_ckpt_flags(sbi, CP_UMOUNT_FLAG)) {
1003 struct cp_control cpc = {
1004 .reason = CP_UMOUNT,
1006 write_checkpoint(sbi, &cpc);
1009 /* be sure to wait for any on-going discard commands */
1010 dropped = f2fs_wait_discard_bios(sbi);
1012 if (f2fs_discard_en(sbi) && !sbi->discard_blks && !dropped) {
1013 struct cp_control cpc = {
1014 .reason = CP_UMOUNT | CP_TRIMMED,
1016 write_checkpoint(sbi, &cpc);
	/* write_checkpoint can update stat information */
1020 f2fs_destroy_stats(sbi);
	/*
	 * Normally the superblock is clean, so we need to release this.
	 * In addition, EIO will skip the checkpoint, so we need this as well.
	 */
	release_ino_entry(sbi, true);
1028 f2fs_leave_shrinker(sbi);
1029 mutex_unlock(&sbi->umount_mutex);
1031 /* our cp_error case, we can wait for any writeback page */
1032 f2fs_flush_merged_writes(sbi);
1034 iput(sbi->node_inode);
1035 iput(sbi->meta_inode);
1037 /* destroy f2fs internal modules */
1038 destroy_node_manager(sbi);
1039 destroy_segment_manager(sbi);
1043 f2fs_unregister_sysfs(sbi);
1045 sb->s_fs_info = NULL;
1046 if (sbi->s_chksum_driver)
1047 crypto_free_shash(sbi->s_chksum_driver);
1048 kfree(sbi->raw_super);
1050 destroy_device_list(sbi);
1051 mempool_destroy(sbi->write_io_dummy);
1053 for (i = 0; i < MAXQUOTAS; i++)
1054 kfree(F2FS_OPTION(sbi).s_qf_names[i]);
1056 destroy_percpu_info(sbi);
1057 for (i = 0; i < NR_PAGE_TYPE; i++)
1058 kfree(sbi->write_io[i]);
1062 int f2fs_sync_fs(struct super_block *sb, int sync)
1064 struct f2fs_sb_info *sbi = F2FS_SB(sb);
1067 if (unlikely(f2fs_cp_error(sbi)))
1070 trace_f2fs_sync_fs(sb, sync);
1072 if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
1076 struct cp_control cpc;
1078 cpc.reason = __get_cp_reason(sbi);
1080 mutex_lock(&sbi->gc_mutex);
1081 err = write_checkpoint(sbi, &cpc);
1082 mutex_unlock(&sbi->gc_mutex);
1084 f2fs_trace_ios(NULL, 1);
1089 static int f2fs_freeze(struct super_block *sb)
1091 if (f2fs_readonly(sb))
1094 /* IO error happened before */
1095 if (unlikely(f2fs_cp_error(F2FS_SB(sb))))
1098 /* must be clean, since sync_filesystem() was already called */
1099 if (is_sbi_flag_set(F2FS_SB(sb), SBI_IS_DIRTY))
1104 static int f2fs_unfreeze(struct super_block *sb)
1110 static int f2fs_statfs_project(struct super_block *sb,
1111 kprojid_t projid, struct kstatfs *buf)
1114 struct dquot *dquot;
1118 qid = make_kqid_projid(projid);
1119 dquot = dqget(sb, qid);
1121 return PTR_ERR(dquot);
1122 spin_lock(&dq_data_lock);
1124 limit = (dquot->dq_dqb.dqb_bsoftlimit ?
1125 dquot->dq_dqb.dqb_bsoftlimit :
1126 dquot->dq_dqb.dqb_bhardlimit) >> sb->s_blocksize_bits;
1127 if (limit && buf->f_blocks > limit) {
1128 curblock = dquot->dq_dqb.dqb_curspace >> sb->s_blocksize_bits;
1129 buf->f_blocks = limit;
1130 buf->f_bfree = buf->f_bavail =
1131 (buf->f_blocks > curblock) ?
1132 (buf->f_blocks - curblock) : 0;
1135 limit = dquot->dq_dqb.dqb_isoftlimit ?
1136 dquot->dq_dqb.dqb_isoftlimit :
1137 dquot->dq_dqb.dqb_ihardlimit;
1138 if (limit && buf->f_files > limit) {
1139 buf->f_files = limit;
1141 (buf->f_files > dquot->dq_dqb.dqb_curinodes) ?
1142 (buf->f_files - dquot->dq_dqb.dqb_curinodes) : 0;
1145 spin_unlock(&dq_data_lock);
1151 static int f2fs_statfs(struct dentry *dentry, struct kstatfs *buf)
1153 struct super_block *sb = dentry->d_sb;
1154 struct f2fs_sb_info *sbi = F2FS_SB(sb);
1155 u64 id = huge_encode_dev(sb->s_bdev->bd_dev);
1156 block_t total_count, user_block_count, start_count;
1157 u64 avail_node_count;
1159 total_count = le64_to_cpu(sbi->raw_super->block_count);
1160 user_block_count = sbi->user_block_count;
1161 start_count = le32_to_cpu(sbi->raw_super->segment0_blkaddr);
1162 buf->f_type = F2FS_SUPER_MAGIC;
1163 buf->f_bsize = sbi->blocksize;
1165 buf->f_blocks = total_count - start_count;
1166 buf->f_bfree = user_block_count - valid_user_blocks(sbi) -
1167 sbi->current_reserved_blocks;
1168 if (buf->f_bfree > F2FS_OPTION(sbi).root_reserved_blocks)
1169 buf->f_bavail = buf->f_bfree -
1170 F2FS_OPTION(sbi).root_reserved_blocks;
1174 avail_node_count = sbi->total_node_count - sbi->nquota_files -
1175 F2FS_RESERVED_NODE_NUM;
1177 if (avail_node_count > user_block_count) {
1178 buf->f_files = user_block_count;
1179 buf->f_ffree = buf->f_bavail;
1181 buf->f_files = avail_node_count;
1182 buf->f_ffree = min(avail_node_count - valid_node_count(sbi),
1186 buf->f_namelen = F2FS_NAME_LEN;
1187 buf->f_fsid.val[0] = (u32)id;
1188 buf->f_fsid.val[1] = (u32)(id >> 32);
1191 if (is_inode_flag_set(dentry->d_inode, FI_PROJ_INHERIT) &&
1192 sb_has_quota_limits_enabled(sb, PRJQUOTA)) {
1193 f2fs_statfs_project(sb, F2FS_I(dentry->d_inode)->i_projid, buf);
1199 static inline void f2fs_show_quota_options(struct seq_file *seq,
1200 struct super_block *sb)
1203 struct f2fs_sb_info *sbi = F2FS_SB(sb);
1205 if (F2FS_OPTION(sbi).s_jquota_fmt) {
1208 switch (F2FS_OPTION(sbi).s_jquota_fmt) {
1219 seq_printf(seq, ",jqfmt=%s", fmtname);
1222 if (F2FS_OPTION(sbi).s_qf_names[USRQUOTA])
1223 seq_show_option(seq, "usrjquota",
1224 F2FS_OPTION(sbi).s_qf_names[USRQUOTA]);
1226 if (F2FS_OPTION(sbi).s_qf_names[GRPQUOTA])
1227 seq_show_option(seq, "grpjquota",
1228 F2FS_OPTION(sbi).s_qf_names[GRPQUOTA]);
	if (F2FS_OPTION(sbi).s_qf_names[PRJQUOTA])
		seq_show_option(seq, "prjjquota",
			F2FS_OPTION(sbi).s_qf_names[PRJQUOTA]);
#endif
}
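/*
 * f2fs_show_options() must stay in sync with parse_options() above so
 * that the option string reported in /proc/mounts can be fed back to
 * mount unchanged.
 */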
1236 static int f2fs_show_options(struct seq_file *seq, struct dentry *root)
1238 struct f2fs_sb_info *sbi = F2FS_SB(root->d_sb);
1240 if (!f2fs_readonly(sbi->sb) && test_opt(sbi, BG_GC)) {
1241 if (test_opt(sbi, FORCE_FG_GC))
1242 seq_printf(seq, ",background_gc=%s", "sync");
1244 seq_printf(seq, ",background_gc=%s", "on");
1246 seq_printf(seq, ",background_gc=%s", "off");
1248 if (test_opt(sbi, DISABLE_ROLL_FORWARD))
1249 seq_puts(seq, ",disable_roll_forward");
1250 if (test_opt(sbi, DISCARD))
1251 seq_puts(seq, ",discard");
1252 if (test_opt(sbi, NOHEAP))
1253 seq_puts(seq, ",no_heap");
1255 seq_puts(seq, ",heap");
1256 #ifdef CONFIG_F2FS_FS_XATTR
1257 if (test_opt(sbi, XATTR_USER))
1258 seq_puts(seq, ",user_xattr");
1260 seq_puts(seq, ",nouser_xattr");
1261 if (test_opt(sbi, INLINE_XATTR))
1262 seq_puts(seq, ",inline_xattr");
1264 seq_puts(seq, ",noinline_xattr");
1265 if (test_opt(sbi, INLINE_XATTR_SIZE))
1266 seq_printf(seq, ",inline_xattr_size=%u",
1267 F2FS_OPTION(sbi).inline_xattr_size);
1269 #ifdef CONFIG_F2FS_FS_POSIX_ACL
1270 if (test_opt(sbi, POSIX_ACL))
1271 seq_puts(seq, ",acl");
1273 seq_puts(seq, ",noacl");
1275 if (test_opt(sbi, DISABLE_EXT_IDENTIFY))
1276 seq_puts(seq, ",disable_ext_identify");
1277 if (test_opt(sbi, INLINE_DATA))
1278 seq_puts(seq, ",inline_data");
1280 seq_puts(seq, ",noinline_data");
1281 if (test_opt(sbi, INLINE_DENTRY))
1282 seq_puts(seq, ",inline_dentry");
1284 seq_puts(seq, ",noinline_dentry");
1285 if (!f2fs_readonly(sbi->sb) && test_opt(sbi, FLUSH_MERGE))
1286 seq_puts(seq, ",flush_merge");
1287 if (test_opt(sbi, NOBARRIER))
1288 seq_puts(seq, ",nobarrier");
1289 if (test_opt(sbi, FASTBOOT))
1290 seq_puts(seq, ",fastboot");
1291 if (test_opt(sbi, EXTENT_CACHE))
1292 seq_puts(seq, ",extent_cache");
1294 seq_puts(seq, ",noextent_cache");
1295 if (test_opt(sbi, DATA_FLUSH))
1296 seq_puts(seq, ",data_flush");
1298 seq_puts(seq, ",mode=");
1299 if (test_opt(sbi, ADAPTIVE))
1300 seq_puts(seq, "adaptive");
1301 else if (test_opt(sbi, LFS))
1302 seq_puts(seq, "lfs");
1303 seq_printf(seq, ",active_logs=%u", F2FS_OPTION(sbi).active_logs);
1304 if (test_opt(sbi, RESERVE_ROOT))
1305 seq_printf(seq, ",reserve_root=%u,resuid=%u,resgid=%u",
1306 F2FS_OPTION(sbi).root_reserved_blocks,
1307 from_kuid_munged(&init_user_ns,
1308 F2FS_OPTION(sbi).s_resuid),
1309 from_kgid_munged(&init_user_ns,
1310 F2FS_OPTION(sbi).s_resgid));
1311 if (F2FS_IO_SIZE_BITS(sbi))
1312 seq_printf(seq, ",io_size=%uKB", F2FS_IO_SIZE_KB(sbi));
1313 #ifdef CONFIG_F2FS_FAULT_INJECTION
1314 if (test_opt(sbi, FAULT_INJECTION))
1315 seq_printf(seq, ",fault_injection=%u",
1316 F2FS_OPTION(sbi).fault_info.inject_rate);
1319 if (test_opt(sbi, QUOTA))
1320 seq_puts(seq, ",quota");
1321 if (test_opt(sbi, USRQUOTA))
1322 seq_puts(seq, ",usrquota");
1323 if (test_opt(sbi, GRPQUOTA))
1324 seq_puts(seq, ",grpquota");
1325 if (test_opt(sbi, PRJQUOTA))
1326 seq_puts(seq, ",prjquota");
1328 f2fs_show_quota_options(seq, sbi->sb);
1329 if (F2FS_OPTION(sbi).whint_mode == WHINT_MODE_USER)
1330 seq_printf(seq, ",whint_mode=%s", "user-based");
1331 else if (F2FS_OPTION(sbi).whint_mode == WHINT_MODE_FS)
1332 seq_printf(seq, ",whint_mode=%s", "fs-based");
1333 #ifdef CONFIG_F2FS_FS_ENCRYPTION
1334 if (F2FS_OPTION(sbi).test_dummy_encryption)
1335 seq_puts(seq, ",test_dummy_encryption");
1338 if (F2FS_OPTION(sbi).alloc_mode == ALLOC_MODE_DEFAULT)
1339 seq_printf(seq, ",alloc_mode=%s", "default");
1340 else if (F2FS_OPTION(sbi).alloc_mode == ALLOC_MODE_REUSE)
1341 seq_printf(seq, ",alloc_mode=%s", "reuse");
	if (F2FS_OPTION(sbi).fsync_mode == FSYNC_MODE_POSIX)
		seq_printf(seq, ",fsync_mode=%s", "posix");
	else if (F2FS_OPTION(sbi).fsync_mode == FSYNC_MODE_STRICT)
		seq_printf(seq, ",fsync_mode=%s", "strict");
	return 0;
}
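/*
 * default_options() resets the mount options to their defaults; it runs
 * both on the initial mount and at the start of every remount, before
 * the user-supplied option string is parsed.
 */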
1350 static void default_options(struct f2fs_sb_info *sbi)
1352 /* init some FS parameters */
1353 F2FS_OPTION(sbi).active_logs = NR_CURSEG_TYPE;
1354 F2FS_OPTION(sbi).inline_xattr_size = DEFAULT_INLINE_XATTR_ADDRS;
1355 F2FS_OPTION(sbi).whint_mode = WHINT_MODE_OFF;
1356 F2FS_OPTION(sbi).alloc_mode = ALLOC_MODE_DEFAULT;
1357 F2FS_OPTION(sbi).fsync_mode = FSYNC_MODE_POSIX;
1358 F2FS_OPTION(sbi).test_dummy_encryption = false;
1359 sbi->readdir_ra = 1;
1361 set_opt(sbi, BG_GC);
1362 set_opt(sbi, INLINE_XATTR);
1363 set_opt(sbi, INLINE_DATA);
1364 set_opt(sbi, INLINE_DENTRY);
1365 set_opt(sbi, EXTENT_CACHE);
1366 set_opt(sbi, NOHEAP);
1367 sbi->sb->s_flags |= SB_LAZYTIME;
1368 set_opt(sbi, FLUSH_MERGE);
1369 if (f2fs_sb_has_blkzoned(sbi->sb)) {
1370 set_opt_mode(sbi, F2FS_MOUNT_LFS);
1371 set_opt(sbi, DISCARD);
1373 set_opt_mode(sbi, F2FS_MOUNT_ADAPTIVE);
1376 #ifdef CONFIG_F2FS_FS_XATTR
1377 set_opt(sbi, XATTR_USER);
1379 #ifdef CONFIG_F2FS_FS_POSIX_ACL
1380 set_opt(sbi, POSIX_ACL);
#ifdef CONFIG_F2FS_FAULT_INJECTION
	f2fs_build_fault_attr(sbi, 0);
#endif
}
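/*
 * Remount path: the current options are snapshotted into org_mount_opt
 * below so they can be restored if parse_options() or the RO/RW
 * transition fails partway through.
 */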
1389 static int f2fs_enable_quotas(struct super_block *sb);
1391 static int f2fs_remount(struct super_block *sb, int *flags, char *data)
1393 struct f2fs_sb_info *sbi = F2FS_SB(sb);
1394 struct f2fs_mount_info org_mount_opt;
1395 unsigned long old_sb_flags;
1397 bool need_restart_gc = false;
1398 bool need_stop_gc = false;
1399 bool no_extent_cache = !test_opt(sbi, EXTENT_CACHE);
1405 * Save the old mount options in case we
1406 * need to restore them.
1408 org_mount_opt = sbi->mount_opt;
1409 old_sb_flags = sb->s_flags;
1412 org_mount_opt.s_jquota_fmt = F2FS_OPTION(sbi).s_jquota_fmt;
1413 for (i = 0; i < MAXQUOTAS; i++) {
1414 if (F2FS_OPTION(sbi).s_qf_names[i]) {
1415 org_mount_opt.s_qf_names[i] =
1416 kstrdup(F2FS_OPTION(sbi).s_qf_names[i],
1418 if (!org_mount_opt.s_qf_names[i]) {
1419 for (j = 0; j < i; j++)
1420 kfree(org_mount_opt.s_qf_names[j]);
1424 org_mount_opt.s_qf_names[i] = NULL;
1429 /* recover superblocks we couldn't write due to previous RO mount */
1430 if (!(*flags & SB_RDONLY) && is_sbi_flag_set(sbi, SBI_NEED_SB_WRITE)) {
1431 err = f2fs_commit_super(sbi, false);
1432 f2fs_msg(sb, KERN_INFO,
1433 "Try to recover all the superblocks, ret: %d", err);
1435 clear_sbi_flag(sbi, SBI_NEED_SB_WRITE);
1438 default_options(sbi);
1440 /* parse mount options */
1441 err = parse_options(sb, data);
	/*
	 * Previous and new state of filesystem is RO,
	 * so skip checking GC and FLUSH_MERGE conditions.
	 */
	if (f2fs_readonly(sb) && (*flags & SB_RDONLY))
		goto skip;
1453 if (!f2fs_readonly(sb) && (*flags & SB_RDONLY)) {
1454 err = dquot_suspend(sb, -1);
	} else if (f2fs_readonly(sb) && !(*flags & SB_RDONLY)) {
1458 /* dquot_resume needs RW */
1459 sb->s_flags &= ~SB_RDONLY;
1460 if (sb_any_quota_suspended(sb)) {
1461 dquot_resume(sb, -1);
1462 } else if (f2fs_sb_has_quota_ino(sb)) {
1463 err = f2fs_enable_quotas(sb);
1469 /* disallow enable/disable extent_cache dynamically */
1470 if (no_extent_cache == !!test_opt(sbi, EXTENT_CACHE)) {
1472 f2fs_msg(sbi->sb, KERN_WARNING,
1473 "switch extent_cache option is not allowed");
	/*
	 * We stop the GC thread if FS is mounted as RO
	 * or if background_gc = off is passed in mount
	 * option. Also sync the filesystem.
	 */
	if ((*flags & SB_RDONLY) || !test_opt(sbi, BG_GC)) {
1483 if (sbi->gc_thread) {
1484 stop_gc_thread(sbi);
1485 need_restart_gc = true;
1487 } else if (!sbi->gc_thread) {
1488 err = start_gc_thread(sbi);
1491 need_stop_gc = true;
1494 if (*flags & SB_RDONLY ||
1495 F2FS_OPTION(sbi).whint_mode != org_mount_opt.whint_mode) {
1496 writeback_inodes_sb(sb, WB_REASON_SYNC);
1499 set_sbi_flag(sbi, SBI_IS_DIRTY);
1500 set_sbi_flag(sbi, SBI_IS_CLOSE);
1501 f2fs_sync_fs(sb, 1);
1502 clear_sbi_flag(sbi, SBI_IS_CLOSE);
	/*
	 * We stop issuing the flush thread if FS is mounted as RO
	 * or if flush_merge is not passed in mount option.
	 */
	if ((*flags & SB_RDONLY) || !test_opt(sbi, FLUSH_MERGE)) {
1510 clear_opt(sbi, FLUSH_MERGE);
1511 destroy_flush_cmd_control(sbi, false);
1513 err = create_flush_cmd_control(sbi);
1519 /* Release old quota file names */
1520 for (i = 0; i < MAXQUOTAS; i++)
1521 kfree(org_mount_opt.s_qf_names[i]);
1523 /* Update the POSIXACL Flag */
1524 sb->s_flags = (sb->s_flags & ~SB_POSIXACL) |
1525 (test_opt(sbi, POSIX_ACL) ? SB_POSIXACL : 0);
1527 limit_reserve_root(sbi);
1530 if (need_restart_gc) {
1531 if (start_gc_thread(sbi))
1532 f2fs_msg(sbi->sb, KERN_WARNING,
1533 "background gc thread has stopped");
1534 } else if (need_stop_gc) {
1535 stop_gc_thread(sbi);
1539 F2FS_OPTION(sbi).s_jquota_fmt = org_mount_opt.s_jquota_fmt;
1540 for (i = 0; i < MAXQUOTAS; i++) {
1541 kfree(F2FS_OPTION(sbi).s_qf_names[i]);
1542 F2FS_OPTION(sbi).s_qf_names[i] = org_mount_opt.s_qf_names[i];
1545 sbi->mount_opt = org_mount_opt;
1546 sb->s_flags = old_sb_flags;
1551 /* Read data from quotafile */
1552 static ssize_t f2fs_quota_read(struct super_block *sb, int type, char *data,
1553 size_t len, loff_t off)
1555 struct inode *inode = sb_dqopt(sb)->files[type];
1556 struct address_space *mapping = inode->i_mapping;
1557 block_t blkidx = F2FS_BYTES_TO_BLK(off);
1558 int offset = off & (sb->s_blocksize - 1);
1561 loff_t i_size = i_size_read(inode);
1568 if (off + len > i_size)
1571 while (toread > 0) {
1572 tocopy = min_t(unsigned long, sb->s_blocksize - offset, toread);
1574 page = read_cache_page_gfp(mapping, blkidx, GFP_NOFS);
1576 if (PTR_ERR(page) == -ENOMEM) {
1577 congestion_wait(BLK_RW_ASYNC, HZ/50);
1580 return PTR_ERR(page);
1585 if (unlikely(page->mapping != mapping)) {
1586 f2fs_put_page(page, 1);
1589 if (unlikely(!PageUptodate(page))) {
1590 f2fs_put_page(page, 1);
1594 kaddr = kmap_atomic(page);
1595 memcpy(data, kaddr + offset, tocopy);
1596 kunmap_atomic(kaddr);
1597 f2fs_put_page(page, 1);
1607 /* Write to quotafile */
1608 static ssize_t f2fs_quota_write(struct super_block *sb, int type,
1609 const char *data, size_t len, loff_t off)
1611 struct inode *inode = sb_dqopt(sb)->files[type];
1612 struct address_space *mapping = inode->i_mapping;
1613 const struct address_space_operations *a_ops = mapping->a_ops;
1614 int offset = off & (sb->s_blocksize - 1);
1615 size_t towrite = len;
1621 while (towrite > 0) {
1622 tocopy = min_t(unsigned long, sb->s_blocksize - offset,
1625 err = a_ops->write_begin(NULL, mapping, off, tocopy, 0,
1627 if (unlikely(err)) {
1628 if (err == -ENOMEM) {
1629 congestion_wait(BLK_RW_ASYNC, HZ/50);
1635 kaddr = kmap_atomic(page);
1636 memcpy(kaddr + offset, data, tocopy);
1637 kunmap_atomic(kaddr);
1638 flush_dcache_page(page);
1640 a_ops->write_end(NULL, mapping, off, tocopy, tocopy,
1651 inode->i_mtime = inode->i_ctime = current_time(inode);
1652 f2fs_mark_inode_dirty_sync(inode, false);
1653 return len - towrite;
1656 static struct dquot **f2fs_get_dquots(struct inode *inode)
1658 return F2FS_I(inode)->i_dquot;
1661 static qsize_t *f2fs_get_reserved_space(struct inode *inode)
1663 return &F2FS_I(inode)->i_reserved_quota;
static int f2fs_quota_on_mount(struct f2fs_sb_info *sbi, int type)
{
	return dquot_quota_on_mount(sbi->sb, F2FS_OPTION(sbi).s_qf_names[type],
					F2FS_OPTION(sbi).s_jquota_fmt, type);
}
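/*
 * f2fs supports two quota flavors: legacy journaled quotas backed by
 * visible quota files (usrjquota=/grpjquota=/prjjquota=) and, when the
 * quota_ino feature is set, hidden quota inodes enabled below via
 * f2fs_enable_quotas().
 */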
1672 int f2fs_enable_quota_files(struct f2fs_sb_info *sbi, bool rdonly)
1677 if (f2fs_sb_has_quota_ino(sbi->sb) && rdonly) {
1678 err = f2fs_enable_quotas(sbi->sb);
1680 f2fs_msg(sbi->sb, KERN_ERR,
1681 "Cannot turn on quota_ino: %d", err);
1687 for (i = 0; i < MAXQUOTAS; i++) {
1688 if (F2FS_OPTION(sbi).s_qf_names[i]) {
1689 err = f2fs_quota_on_mount(sbi, i);
1694 f2fs_msg(sbi->sb, KERN_ERR,
1695 "Cannot turn on quotas: %d on %d", err, i);
1701 static int f2fs_quota_enable(struct super_block *sb, int type, int format_id,
1704 struct inode *qf_inode;
1705 unsigned long qf_inum;
1708 BUG_ON(!f2fs_sb_has_quota_ino(sb));
1710 qf_inum = f2fs_qf_ino(sb, type);
1714 qf_inode = f2fs_iget(sb, qf_inum);
1715 if (IS_ERR(qf_inode)) {
1716 f2fs_msg(sb, KERN_ERR,
1717 "Bad quota inode %u:%lu", type, qf_inum);
1718 return PTR_ERR(qf_inode);
1721 /* Don't account quota for quota files to avoid recursion */
1722 qf_inode->i_flags |= S_NOQUOTA;
1723 err = dquot_enable(qf_inode, type, format_id, flags);
1728 static int f2fs_enable_quotas(struct super_block *sb)
1731 unsigned long qf_inum;
1732 bool quota_mopt[MAXQUOTAS] = {
1733 test_opt(F2FS_SB(sb), USRQUOTA),
1734 test_opt(F2FS_SB(sb), GRPQUOTA),
1735 test_opt(F2FS_SB(sb), PRJQUOTA),
1738 sb_dqopt(sb)->flags |= DQUOT_QUOTA_SYS_FILE | DQUOT_NOLIST_DIRTY;
1739 for (type = 0; type < MAXQUOTAS; type++) {
1740 qf_inum = f2fs_qf_ino(sb, type);
1742 err = f2fs_quota_enable(sb, type, QFMT_VFS_V1,
1743 DQUOT_USAGE_ENABLED |
1744 (quota_mopt[type] ? DQUOT_LIMITS_ENABLED : 0));
1746 f2fs_msg(sb, KERN_ERR,
1747 "Failed to enable quota tracking "
1748 "(type=%d, err=%d). Please run "
1749 "fsck to fix.", type, err);
1750 for (type--; type >= 0; type--)
1751 dquot_quota_off(sb, type);
1759 static int f2fs_quota_sync(struct super_block *sb, int type)
1761 struct quota_info *dqopt = sb_dqopt(sb);
1765 ret = dquot_writeback_dquots(sb, type);
	/*
	 * Now when everything is written we can discard the pagecache so
	 * that userspace sees the changes.
	 */
1773 for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
1774 if (type != -1 && cnt != type)
1776 if (!sb_has_quota_active(sb, cnt))
1779 ret = filemap_write_and_wait(dqopt->files[cnt]->i_mapping);
1783 inode_lock(dqopt->files[cnt]);
1784 truncate_inode_pages(&dqopt->files[cnt]->i_data, 0);
1785 inode_unlock(dqopt->files[cnt]);
1790 static int f2fs_quota_on(struct super_block *sb, int type, int format_id,
1791 const struct path *path)
1793 struct inode *inode;
1796 err = f2fs_quota_sync(sb, type);
1800 err = dquot_quota_on(sb, type, format_id, path);
1804 inode = d_inode(path->dentry);
1807 F2FS_I(inode)->i_flags |= F2FS_NOATIME_FL | F2FS_IMMUTABLE_FL;
1808 inode_set_flags(inode, S_NOATIME | S_IMMUTABLE,
1809 S_NOATIME | S_IMMUTABLE);
1810 inode_unlock(inode);
1811 f2fs_mark_inode_dirty_sync(inode, false);
1816 static int f2fs_quota_off(struct super_block *sb, int type)
1818 struct inode *inode = sb_dqopt(sb)->files[type];
1821 if (!inode || !igrab(inode))
1822 return dquot_quota_off(sb, type);
1824 f2fs_quota_sync(sb, type);
1826 err = dquot_quota_off(sb, type);
1827 if (err || f2fs_sb_has_quota_ino(sb))
1831 F2FS_I(inode)->i_flags &= ~(F2FS_NOATIME_FL | F2FS_IMMUTABLE_FL);
1832 inode_set_flags(inode, 0, S_NOATIME | S_IMMUTABLE);
1833 inode_unlock(inode);
1834 f2fs_mark_inode_dirty_sync(inode, false);
1840 void f2fs_quota_off_umount(struct super_block *sb)
1844 for (type = 0; type < MAXQUOTAS; type++)
1845 f2fs_quota_off(sb, type);
1848 static int f2fs_get_projid(struct inode *inode, kprojid_t *projid)
1850 *projid = F2FS_I(inode)->i_projid;
1854 static const struct dquot_operations f2fs_quota_operations = {
1855 .get_reserved_space = f2fs_get_reserved_space,
1856 .write_dquot = dquot_commit,
1857 .acquire_dquot = dquot_acquire,
1858 .release_dquot = dquot_release,
1859 .mark_dirty = dquot_mark_dquot_dirty,
1860 .write_info = dquot_commit_info,
1861 .alloc_dquot = dquot_alloc,
1862 .destroy_dquot = dquot_destroy,
1863 .get_projid = f2fs_get_projid,
	.get_next_id	= dquot_get_next_id,
};
1867 static const struct quotactl_ops f2fs_quotactl_ops = {
1868 .quota_on = f2fs_quota_on,
1869 .quota_off = f2fs_quota_off,
1870 .quota_sync = f2fs_quota_sync,
1871 .get_state = dquot_get_state,
1872 .set_info = dquot_set_dqinfo,
1873 .get_dqblk = dquot_get_dqblk,
	.set_dqblk	= dquot_set_dqblk,
	.get_nextdqblk	= dquot_get_next_dqblk,
};
#else
void f2fs_quota_off_umount(struct super_block *sb)
{
}
#endif
1883 static const struct super_operations f2fs_sops = {
1884 .alloc_inode = f2fs_alloc_inode,
1885 .drop_inode = f2fs_drop_inode,
1886 .destroy_inode = f2fs_destroy_inode,
1887 .write_inode = f2fs_write_inode,
1888 .dirty_inode = f2fs_dirty_inode,
1889 .show_options = f2fs_show_options,
1891 .quota_read = f2fs_quota_read,
1892 .quota_write = f2fs_quota_write,
1893 .get_dquots = f2fs_get_dquots,
1895 .evict_inode = f2fs_evict_inode,
1896 .put_super = f2fs_put_super,
1897 .sync_fs = f2fs_sync_fs,
1898 .freeze_fs = f2fs_freeze,
1899 .unfreeze_fs = f2fs_unfreeze,
1900 .statfs = f2fs_statfs,
	.remount_fs	= f2fs_remount,
};
1904 #ifdef CONFIG_F2FS_FS_ENCRYPTION
1905 static int f2fs_get_context(struct inode *inode, void *ctx, size_t len)
1907 return f2fs_getxattr(inode, F2FS_XATTR_INDEX_ENCRYPTION,
1908 F2FS_XATTR_NAME_ENCRYPTION_CONTEXT,
1912 static int f2fs_set_context(struct inode *inode, const void *ctx, size_t len,
1915 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	/*
	 * Encrypting the root directory is not allowed because fsck
	 * expects lost+found directory to exist and remain unencrypted
	 * if LOST_FOUND feature is enabled.
	 */
	if (f2fs_sb_has_lost_found(sbi->sb) &&
			inode->i_ino == F2FS_ROOT_INO(sbi))
		return -EPERM;
1927 return f2fs_setxattr(inode, F2FS_XATTR_INDEX_ENCRYPTION,
1928 F2FS_XATTR_NAME_ENCRYPTION_CONTEXT,
1929 ctx, len, fs_data, XATTR_CREATE);
1932 static bool f2fs_dummy_context(struct inode *inode)
1934 return DUMMY_ENCRYPTION_ENABLED(F2FS_I_SB(inode));
1937 static unsigned f2fs_max_namelen(struct inode *inode)
1939 return S_ISLNK(inode->i_mode) ?
1940 inode->i_sb->s_blocksize : F2FS_NAME_LEN;
1943 static const struct fscrypt_operations f2fs_cryptops = {
1944 .key_prefix = "f2fs:",
1945 .get_context = f2fs_get_context,
1946 .set_context = f2fs_set_context,
1947 .dummy_context = f2fs_dummy_context,
1948 .empty_dir = f2fs_empty_dir,
	.max_namelen	= f2fs_max_namelen,
};
#endif
1953 static struct inode *f2fs_nfs_get_inode(struct super_block *sb,
1954 u64 ino, u32 generation)
1956 struct f2fs_sb_info *sbi = F2FS_SB(sb);
1957 struct inode *inode;
1959 if (check_nid_range(sbi, ino))
1960 return ERR_PTR(-ESTALE);
	/*
	 * f2fs_iget isn't quite right if the inode is currently unallocated!
	 * However f2fs_iget currently does appropriate checks to handle stale
	 * inodes so everything is OK.
	 */
1967 inode = f2fs_iget(sb, ino);
1969 return ERR_CAST(inode);
1970 if (unlikely(generation && inode->i_generation != generation)) {
1971 /* we didn't find the right inode.. */
1973 return ERR_PTR(-ESTALE);
1978 static struct dentry *f2fs_fh_to_dentry(struct super_block *sb, struct fid *fid,
1979 int fh_len, int fh_type)
1981 return generic_fh_to_dentry(sb, fid, fh_len, fh_type,
1982 f2fs_nfs_get_inode);
1985 static struct dentry *f2fs_fh_to_parent(struct super_block *sb, struct fid *fid,
1986 int fh_len, int fh_type)
1988 return generic_fh_to_parent(sb, fid, fh_len, fh_type,
1989 f2fs_nfs_get_inode);
1992 static const struct export_operations f2fs_export_ops = {
1993 .fh_to_dentry = f2fs_fh_to_dentry,
1994 .fh_to_parent = f2fs_fh_to_parent,
	.get_parent = f2fs_get_parent,
};
static loff_t max_file_blocks(void)
{
	loff_t result = 0;
	loff_t leaf_count = ADDRS_PER_BLOCK;

	/*
	 * note: previously, result was equal to (DEF_ADDRS_PER_INODE -
	 * DEFAULT_INLINE_XATTR_ADDRS), but now f2fs tries to reserve more
	 * space in inode.i_addr, so it is safer to start result at zero.
	 */

	/* two direct node blocks */
	result += (leaf_count * 2);

	/* two indirect node blocks */
	leaf_count *= NIDS_PER_BLOCK;
	result += (leaf_count * 2);

	/* one double indirect node block */
	leaf_count *= NIDS_PER_BLOCK;
	result += leaf_count;

	return result;
}
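/*
 * Rough sizing: with 4KB blocks, ADDRS_PER_BLOCK and NIDS_PER_BLOCK are
 * both 1018, so the sum above is 2*1018 + 2*1018^2 + 1018^3, i.e. about
 * 1.06 billion blocks, which caps a single file at roughly 3.9TB.
 */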
2024 static int __f2fs_commit_super(struct buffer_head *bh,
2025 struct f2fs_super_block *super)
2029 memcpy(bh->b_data + F2FS_SUPER_OFFSET, super, sizeof(*super));
2030 set_buffer_dirty(bh);
2033 /* it's rare case, we can do fua all the time */
2034 return __sync_dirty_buffer(bh, REQ_SYNC | REQ_PREFLUSH | REQ_FUA);
2037 static inline bool sanity_check_area_boundary(struct f2fs_sb_info *sbi,
2038 struct buffer_head *bh)
2040 struct f2fs_super_block *raw_super = (struct f2fs_super_block *)
2041 (bh->b_data + F2FS_SUPER_OFFSET);
2042 struct super_block *sb = sbi->sb;
2043 u32 segment0_blkaddr = le32_to_cpu(raw_super->segment0_blkaddr);
2044 u32 cp_blkaddr = le32_to_cpu(raw_super->cp_blkaddr);
2045 u32 sit_blkaddr = le32_to_cpu(raw_super->sit_blkaddr);
2046 u32 nat_blkaddr = le32_to_cpu(raw_super->nat_blkaddr);
2047 u32 ssa_blkaddr = le32_to_cpu(raw_super->ssa_blkaddr);
2048 u32 main_blkaddr = le32_to_cpu(raw_super->main_blkaddr);
2049 u32 segment_count_ckpt = le32_to_cpu(raw_super->segment_count_ckpt);
2050 u32 segment_count_sit = le32_to_cpu(raw_super->segment_count_sit);
2051 u32 segment_count_nat = le32_to_cpu(raw_super->segment_count_nat);
2052 u32 segment_count_ssa = le32_to_cpu(raw_super->segment_count_ssa);
2053 u32 segment_count_main = le32_to_cpu(raw_super->segment_count_main);
2054 u32 segment_count = le32_to_cpu(raw_super->segment_count);
2055 u32 log_blocks_per_seg = le32_to_cpu(raw_super->log_blocks_per_seg);
2056 u64 main_end_blkaddr = main_blkaddr +
2057 (segment_count_main << log_blocks_per_seg);
2058 u64 seg_end_blkaddr = segment0_blkaddr +
2059 (segment_count << log_blocks_per_seg);
2061 if (segment0_blkaddr != cp_blkaddr) {
2062 f2fs_msg(sb, KERN_INFO,
2063 "Mismatch start address, segment0(%u) cp_blkaddr(%u)",
2064 segment0_blkaddr, cp_blkaddr);
2068 if (cp_blkaddr + (segment_count_ckpt << log_blocks_per_seg) !=
2070 f2fs_msg(sb, KERN_INFO,
2071 "Wrong CP boundary, start(%u) end(%u) blocks(%u)",
2072 cp_blkaddr, sit_blkaddr,
2073 segment_count_ckpt << log_blocks_per_seg);
2077 if (sit_blkaddr + (segment_count_sit << log_blocks_per_seg) !=
2079 f2fs_msg(sb, KERN_INFO,
2080 "Wrong SIT boundary, start(%u) end(%u) blocks(%u)",
2081 sit_blkaddr, nat_blkaddr,
2082 segment_count_sit << log_blocks_per_seg);
2086 if (nat_blkaddr + (segment_count_nat << log_blocks_per_seg) !=
2088 f2fs_msg(sb, KERN_INFO,
2089 "Wrong NAT boundary, start(%u) end(%u) blocks(%u)",
2090 nat_blkaddr, ssa_blkaddr,
2091 segment_count_nat << log_blocks_per_seg);
2095 if (ssa_blkaddr + (segment_count_ssa << log_blocks_per_seg) !=
2097 f2fs_msg(sb, KERN_INFO,
2098 "Wrong SSA boundary, start(%u) end(%u) blocks(%u)",
2099 ssa_blkaddr, main_blkaddr,
2100 segment_count_ssa << log_blocks_per_seg);
2104 if (main_end_blkaddr > seg_end_blkaddr) {
2105 f2fs_msg(sb, KERN_INFO,
2106 "Wrong MAIN_AREA boundary, start(%u) end(%u) block(%u)",
2109 (segment_count << log_blocks_per_seg),
2110 segment_count_main << log_blocks_per_seg);
2112 } else if (main_end_blkaddr < seg_end_blkaddr) {
2116 /* fix in-memory information all the time */
2117 raw_super->segment_count = cpu_to_le32((main_end_blkaddr -
2118 segment0_blkaddr) >> log_blocks_per_seg);
2120 if (f2fs_readonly(sb) || bdev_read_only(sb->s_bdev)) {
2121 set_sbi_flag(sbi, SBI_NEED_SB_WRITE);
2124 err = __f2fs_commit_super(bh, NULL);
2125 res = err ? "failed" : "done";
2127 f2fs_msg(sb, KERN_INFO,
2128 "Fix alignment : %s, start(%u) end(%u) block(%u)",
2131 (segment_count << log_blocks_per_seg),
2132 segment_count_main << log_blocks_per_seg);
2139 static int sanity_check_raw_super(struct f2fs_sb_info *sbi,
2140 struct buffer_head *bh)
2142 struct f2fs_super_block *raw_super = (struct f2fs_super_block *)
2143 (bh->b_data + F2FS_SUPER_OFFSET);
2144 struct super_block *sb = sbi->sb;
2145 unsigned int blocksize;
2147 if (F2FS_SUPER_MAGIC != le32_to_cpu(raw_super->magic)) {
2148 f2fs_msg(sb, KERN_INFO,
2149 "Magic Mismatch, valid(0x%x) - read(0x%x)",
2150 F2FS_SUPER_MAGIC, le32_to_cpu(raw_super->magic));
2154 /* Currently, support only 4KB page cache size */
2155 if (F2FS_BLKSIZE != PAGE_SIZE) {
2156 f2fs_msg(sb, KERN_INFO,
2157 "Invalid page_cache_size (%lu), supports only 4KB\n",
2162 /* Currently, support only 4KB block size */
2163 blocksize = 1 << le32_to_cpu(raw_super->log_blocksize);
2164 if (blocksize != F2FS_BLKSIZE) {
2165 f2fs_msg(sb, KERN_INFO,
2166 "Invalid blocksize (%u), supports only 4KB\n",
2171 /* check log blocks per segment */
2172 if (le32_to_cpu(raw_super->log_blocks_per_seg) != 9) {
2173 f2fs_msg(sb, KERN_INFO,
2174 "Invalid log blocks per segment (%u)\n",
2175 le32_to_cpu(raw_super->log_blocks_per_seg));
2179 /* Currently, support 512/1024/2048/4096 bytes sector size */
2180 if (le32_to_cpu(raw_super->log_sectorsize) >
2181 F2FS_MAX_LOG_SECTOR_SIZE ||
2182 le32_to_cpu(raw_super->log_sectorsize) <
2183 F2FS_MIN_LOG_SECTOR_SIZE) {
2184 f2fs_msg(sb, KERN_INFO, "Invalid log sectorsize (%u)",
2185 le32_to_cpu(raw_super->log_sectorsize));
2188 if (le32_to_cpu(raw_super->log_sectors_per_block) +
2189 le32_to_cpu(raw_super->log_sectorsize) !=
2190 F2FS_MAX_LOG_SECTOR_SIZE) {
2191 f2fs_msg(sb, KERN_INFO,
2192 "Invalid log sectors per block(%u) log sectorsize(%u)",
2193 le32_to_cpu(raw_super->log_sectors_per_block),
2194 le32_to_cpu(raw_super->log_sectorsize));
2198 /* check reserved ino info */
2199 if (le32_to_cpu(raw_super->node_ino) != 1 ||
2200 le32_to_cpu(raw_super->meta_ino) != 2 ||
2201 le32_to_cpu(raw_super->root_ino) != 3) {
2202 f2fs_msg(sb, KERN_INFO,
2203 "Invalid Fs Meta Ino: node(%u) meta(%u) root(%u)",
2204 le32_to_cpu(raw_super->node_ino),
2205 le32_to_cpu(raw_super->meta_ino),
2206 le32_to_cpu(raw_super->root_ino));
2210 if (le32_to_cpu(raw_super->segment_count) > F2FS_MAX_SEGMENT) {
2211 f2fs_msg(sb, KERN_INFO,
2212 "Invalid segment count (%u)",
2213 le32_to_cpu(raw_super->segment_count));
2217 /* check CP/SIT/NAT/SSA/MAIN_AREA area boundary */
2218 if (sanity_check_area_boundary(sbi, bh))
2224 int sanity_check_ckpt(struct f2fs_sb_info *sbi)
2226 unsigned int total, fsmeta;
2227 struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi);
2228 struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
2229 unsigned int ovp_segments, reserved_segments;
2230 unsigned int main_segs, blocks_per_seg;
2233 total = le32_to_cpu(raw_super->segment_count);
2234 fsmeta = le32_to_cpu(raw_super->segment_count_ckpt);
2235 fsmeta += le32_to_cpu(raw_super->segment_count_sit);
2236 fsmeta += le32_to_cpu(raw_super->segment_count_nat);
2237 fsmeta += le32_to_cpu(ckpt->rsvd_segment_count);
2238 fsmeta += le32_to_cpu(raw_super->segment_count_ssa);
2240 if (unlikely(fsmeta >= total))
2243 ovp_segments = le32_to_cpu(ckpt->overprov_segment_count);
2244 reserved_segments = le32_to_cpu(ckpt->rsvd_segment_count);
2246 if (unlikely(fsmeta < F2FS_MIN_SEGMENTS ||
2247 ovp_segments == 0 || reserved_segments == 0)) {
2248 f2fs_msg(sbi->sb, KERN_ERR,
2249 "Wrong layout: check mkfs.f2fs version");
2253 main_segs = le32_to_cpu(raw_super->segment_count_main);
2254 blocks_per_seg = sbi->blocks_per_seg;
2256 for (i = 0; i < NR_CURSEG_NODE_TYPE; i++) {
2257 if (le32_to_cpu(ckpt->cur_node_segno[i]) >= main_segs ||
2258 le16_to_cpu(ckpt->cur_node_blkoff[i]) >= blocks_per_seg)
2261 for (i = 0; i < NR_CURSEG_DATA_TYPE; i++) {
2262 if (le32_to_cpu(ckpt->cur_data_segno[i]) >= main_segs ||
2263 le16_to_cpu(ckpt->cur_data_blkoff[i]) >= blocks_per_seg)
2267 if (unlikely(f2fs_cp_error(sbi))) {
2268 f2fs_msg(sbi->sb, KERN_ERR, "A bug case: need to run fsck");
2274 static void init_sb_info(struct f2fs_sb_info *sbi)
2276 struct f2fs_super_block *raw_super = sbi->raw_super;
2279 sbi->log_sectors_per_block =
2280 le32_to_cpu(raw_super->log_sectors_per_block);
2281 sbi->log_blocksize = le32_to_cpu(raw_super->log_blocksize);
2282 sbi->blocksize = 1 << sbi->log_blocksize;
2283 sbi->log_blocks_per_seg = le32_to_cpu(raw_super->log_blocks_per_seg);
2284 sbi->blocks_per_seg = 1 << sbi->log_blocks_per_seg;
2285 sbi->segs_per_sec = le32_to_cpu(raw_super->segs_per_sec);
2286 sbi->secs_per_zone = le32_to_cpu(raw_super->secs_per_zone);
2287 sbi->total_sections = le32_to_cpu(raw_super->section_count);
2288 sbi->total_node_count =
2289 (le32_to_cpu(raw_super->segment_count_nat) / 2)
2290 * sbi->blocks_per_seg * NAT_ENTRY_PER_BLOCK;
2291 sbi->root_ino_num = le32_to_cpu(raw_super->root_ino);
2292 sbi->node_ino_num = le32_to_cpu(raw_super->node_ino);
2293 sbi->meta_ino_num = le32_to_cpu(raw_super->meta_ino);
2294 sbi->cur_victim_sec = NULL_SECNO;
2295 sbi->max_victim_search = DEF_MAX_VICTIM_SEARCH;
2297 sbi->dir_level = DEF_DIR_LEVEL;
2298 sbi->interval_time[CP_TIME] = DEF_CP_INTERVAL;
2299 sbi->interval_time[REQ_TIME] = DEF_IDLE_INTERVAL;
2300 clear_sbi_flag(sbi, SBI_NEED_FSCK);
2302 for (i = 0; i < NR_COUNT_TYPE; i++)
2303 atomic_set(&sbi->nr_pages[i], 0);
2305 atomic_set(&sbi->wb_sync_req, 0);
2307 INIT_LIST_HEAD(&sbi->s_list);
2308 mutex_init(&sbi->umount_mutex);
2309 for (i = 0; i < NR_PAGE_TYPE - 1; i++)
2310 for (j = HOT; j < NR_TEMP_TYPE; j++)
2311 mutex_init(&sbi->wio_mutex[i][j]);
2312 spin_lock_init(&sbi->cp_lock);
2314 sbi->dirty_device = 0;
2315 spin_lock_init(&sbi->dev_lock);
2317 init_rwsem(&sbi->sb_lock);
2320 static int init_percpu_info(struct f2fs_sb_info *sbi)
2324 err = percpu_counter_init(&sbi->alloc_valid_block_count, 0, GFP_KERNEL);
2328 return percpu_counter_init(&sbi->total_valid_inode_count, 0,
2332 #ifdef CONFIG_BLK_DEV_ZONED
2333 static int init_blkz_info(struct f2fs_sb_info *sbi, int devi)
2335 struct block_device *bdev = FDEV(devi).bdev;
2336 sector_t nr_sectors = bdev->bd_part->nr_sects;
2337 sector_t sector = 0;
2338 struct blk_zone *zones;
2339 unsigned int i, nr_zones;
2343 if (!f2fs_sb_has_blkzoned(sbi->sb))
2346 if (sbi->blocks_per_blkz && sbi->blocks_per_blkz !=
2347 SECTOR_TO_BLOCK(bdev_zone_sectors(bdev)))
2349 sbi->blocks_per_blkz = SECTOR_TO_BLOCK(bdev_zone_sectors(bdev));
2350 if (sbi->log_blocks_per_blkz && sbi->log_blocks_per_blkz !=
2351 __ilog2_u32(sbi->blocks_per_blkz))
2353 sbi->log_blocks_per_blkz = __ilog2_u32(sbi->blocks_per_blkz);
2354 FDEV(devi).nr_blkz = SECTOR_TO_BLOCK(nr_sectors) >>
2355 sbi->log_blocks_per_blkz;
2356 if (nr_sectors & (bdev_zone_sectors(bdev) - 1))
2357 FDEV(devi).nr_blkz++;
2359 FDEV(devi).blkz_type = f2fs_kmalloc(sbi, FDEV(devi).nr_blkz,
2361 if (!FDEV(devi).blkz_type)
2364 #define F2FS_REPORT_NR_ZONES 4096
2366 zones = f2fs_kzalloc(sbi, sizeof(struct blk_zone) *
2367 F2FS_REPORT_NR_ZONES, GFP_KERNEL);
2371 /* Get block zones type */
2372 while (zones && sector < nr_sectors) {
2374 nr_zones = F2FS_REPORT_NR_ZONES;
2375 err = blkdev_report_zones(bdev, sector,
2385 for (i = 0; i < nr_zones; i++) {
2386 FDEV(devi).blkz_type[n] = zones[i].type;
2387 sector += zones[i].len;
/*
 * Read the f2fs raw super block.
 * Because there are two copies of the super block, read both of them
 * to get the first valid one. If either is broken, we pass a recovery
 * flag back to the caller.
 */
2404 static int read_raw_super_block(struct f2fs_sb_info *sbi,
2405 struct f2fs_super_block **raw_super,
2406 int *valid_super_block, int *recovery)
2408 struct super_block *sb = sbi->sb;
2410 struct buffer_head *bh;
2411 struct f2fs_super_block *super;
2414 super = kzalloc(sizeof(struct f2fs_super_block), GFP_KERNEL);
2418 for (block = 0; block < 2; block++) {
2419 bh = sb_bread(sb, block);
2421 f2fs_msg(sb, KERN_ERR, "Unable to read %dth superblock",
2427 /* sanity checking of raw super */
2428 if (sanity_check_raw_super(sbi, bh)) {
2429 f2fs_msg(sb, KERN_ERR,
2430 "Can't find valid F2FS filesystem in %dth superblock",
2438 memcpy(super, bh->b_data + F2FS_SUPER_OFFSET,
2440 *valid_super_block = block;
2446 /* Failed to read either of the superblocks */
2450 /* No valid superblock */
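/*
 * Minimal sketch of the selection policy above (illustrative and
 * simplified; the real loop's error handling is more involved): the first
 * copy that passes sanity_check_raw_super() wins, and *recovery is raised
 * whenever either copy is unreadable or invalid so the caller can rewrite
 * it later.
 *
 *	for (block = 0; block < 2; block++) {
 *		bh = sb_bread(sb, block);
 *		if (!bh || sanity_check_raw_super(sbi, bh)) {
 *			err = bh ? -EINVAL : -EIO;
 *			*recovery = 1;
 *			brelse(bh);	// brelse(NULL) is a no-op
 *			continue;
 *		}
 *		if (!*raw_super) {
 *			memcpy(super, bh->b_data + F2FS_SUPER_OFFSET,
 *						sizeof(*super));
 *			*valid_super_block = block;
 *			*raw_super = super;
 *		}
 *		brelse(bh);
 *	}
 */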
2459 int f2fs_commit_super(struct f2fs_sb_info *sbi, bool recover)
2461 struct buffer_head *bh;
2464 if ((recover && f2fs_readonly(sbi->sb)) ||
2465 bdev_read_only(sbi->sb->s_bdev)) {
2466 set_sbi_flag(sbi, SBI_NEED_SB_WRITE);
2470 /* write the backup superblock first */
2471 bh = sb_bread(sbi->sb, sbi->valid_super_block ? 0 : 1);
2474 err = __f2fs_commit_super(bh, F2FS_RAW_SUPER(sbi));
2477 /* if we are in recovery path, skip writing valid superblock */
2481 /* write current valid superblock */
2482 bh = sb_bread(sbi->sb, sbi->valid_super_block);
2485 err = __f2fs_commit_super(bh, F2FS_RAW_SUPER(sbi));
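/*
 * Note on ordering (rationale inferred from the code above): the backup
 * copy is rewritten before the currently valid one, so a crash between
 * the two writes always leaves at least one consistent superblock on
 * disk. A typical call from the mount path is
 * err = f2fs_commit_super(sbi, true); to repair a broken copy detected
 * by read_raw_super_block().
 */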
2490 static int f2fs_scan_devices(struct f2fs_sb_info *sbi)
2492 struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi);
2493 unsigned int max_devices = MAX_DEVICES;
2496 /* Initialize single device information */
2497 if (!RDEV(0).path[0]) {
2498 if (!bdev_is_zoned(sbi->sb->s_bdev))
2504 * Initialize information for multiple devices, or for a single
2505 * zoned block device.
2507 sbi->devs = f2fs_kzalloc(sbi, sizeof(struct f2fs_dev_info) *
2508 max_devices, GFP_KERNEL);
2512 for (i = 0; i < max_devices; i++) {
2514 if (i > 0 && !RDEV(i).path[0])
2517 if (max_devices == 1) {
2518 /* Single zoned block device mount */
2520 blkdev_get_by_dev(sbi->sb->s_bdev->bd_dev,
2521 sbi->sb->s_mode, sbi->sb->s_type);
2523 /* Multi-device mount */
2524 memcpy(FDEV(i).path, RDEV(i).path, MAX_PATH_LEN);
2525 FDEV(i).total_segments =
2526 le32_to_cpu(RDEV(i).total_segments);
2528 FDEV(i).start_blk = 0;
2529 FDEV(i).end_blk = FDEV(i).start_blk +
2530 (FDEV(i).total_segments <<
2531 sbi->log_blocks_per_seg) - 1 +
2532 le32_to_cpu(raw_super->segment0_blkaddr);
2534 FDEV(i).start_blk = FDEV(i - 1).end_blk + 1;
2535 FDEV(i).end_blk = FDEV(i).start_blk +
2536 (FDEV(i).total_segments <<
2537 sbi->log_blocks_per_seg) - 1;
2539 FDEV(i).bdev = blkdev_get_by_path(FDEV(i).path,
2540 sbi->sb->s_mode, sbi->sb->s_type);
2542 if (IS_ERR(FDEV(i).bdev))
2543 return PTR_ERR(FDEV(i).bdev);
2545 /* to release errored devices */
2546 sbi->s_ndevs = i + 1;
2548 #ifdef CONFIG_BLK_DEV_ZONED
2549 if (bdev_zoned_model(FDEV(i).bdev) == BLK_ZONED_HM &&
2550 !f2fs_sb_has_blkzoned(sbi->sb)) {
2551 f2fs_msg(sbi->sb, KERN_ERR,
2552 "Zoned block device feature not enabled\n");
2555 if (bdev_zoned_model(FDEV(i).bdev) != BLK_ZONED_NONE) {
2556 if (init_blkz_info(sbi, i)) {
2557 f2fs_msg(sbi->sb, KERN_ERR,
2558 "Failed to initialize F2FS blkzone information");
2561 if (max_devices == 1)
2563 f2fs_msg(sbi->sb, KERN_INFO,
2564 "Mount Device [%2d]: %20s, %8u, %8x - %8x (zone: %s)",
2566 FDEV(i).total_segments,
2567 FDEV(i).start_blk, FDEV(i).end_blk,
2568 bdev_zoned_model(FDEV(i).bdev) == BLK_ZONED_HA ?
2569 "Host-aware" : "Host-managed");
2573 f2fs_msg(sbi->sb, KERN_INFO,
2574 "Mount Device [%2d]: %20s, %8u, %8x - %8x",
2576 FDEV(i).total_segments,
2577 FDEV(i).start_blk, FDEV(i).end_blk);
2579 f2fs_msg(sbi->sb, KERN_INFO,
2580 "IO Block Size: %8d KB", F2FS_IO_SIZE_KB(sbi));
2584 static void f2fs_tuning_parameters(struct f2fs_sb_info *sbi)
2586 struct f2fs_sm_info *sm_i = SM_I(sbi);
2588 /* adjust parameters according to the volume size */
2589 if (sm_i->main_segments <= SMALL_VOLUME_SEGMENTS) {
2590 F2FS_OPTION(sbi).alloc_mode = ALLOC_MODE_REUSE;
2591 sm_i->dcc_info->discard_granularity = 1;
2592 sm_i->ipu_policy = 1 << F2FS_IPU_FORCE;
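/*
 * The settings above are heuristics: on volumes with at most
 * SMALL_VOLUME_SEGMENTS main segments, reuse-style allocation, a discard
 * granularity of one block, and forced in-place updates reduce GC
 * pressure and free-space fragmentation on small devices.
 */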
2596 static int f2fs_fill_super(struct super_block *sb, void *data, int silent)
2598 struct f2fs_sb_info *sbi;
2599 struct f2fs_super_block *raw_super;
2602 bool retry = true, need_fsck = false;
2603 char *options = NULL;
2604 int recovery, i, valid_super_block;
2605 struct curseg_info *seg_i;
2610 valid_super_block = -1;
2613 /* allocate memory for f2fs-specific super block info */
2614 sbi = kzalloc(sizeof(struct f2fs_sb_info), GFP_KERNEL);
2620 /* Load the checksum driver */
2621 sbi->s_chksum_driver = crypto_alloc_shash("crc32", 0, 0);
2622 if (IS_ERR(sbi->s_chksum_driver)) {
2623 f2fs_msg(sb, KERN_ERR, "Cannot load crc32 driver.");
2624 err = PTR_ERR(sbi->s_chksum_driver);
2625 sbi->s_chksum_driver = NULL;
2629 /* set a block size */
2630 if (unlikely(!sb_set_blocksize(sb, F2FS_BLKSIZE))) {
2631 f2fs_msg(sb, KERN_ERR, "unable to set blocksize");
2635 err = read_raw_super_block(sbi, &raw_super, &valid_super_block,
2640 sb->s_fs_info = sbi;
2641 sbi->raw_super = raw_super;
2643 F2FS_OPTION(sbi).s_resuid = make_kuid(&init_user_ns, F2FS_DEF_RESUID);
2644 F2FS_OPTION(sbi).s_resgid = make_kgid(&init_user_ns, F2FS_DEF_RESGID);
2646 /* precompute checksum seed for metadata */
2647 if (f2fs_sb_has_inode_chksum(sb))
2648 sbi->s_chksum_seed = f2fs_chksum(sbi, ~0, raw_super->uuid,
2649 sizeof(raw_super->uuid));
2652 * The BLKZONED feature indicates that the drive was formatted with
2653 * zone alignment optimization. This is optional for host-aware
2654 * devices, but mandatory for host-managed zoned block devices.
2656 #ifndef CONFIG_BLK_DEV_ZONED
2657 if (f2fs_sb_has_blkzoned(sb)) {
2658 f2fs_msg(sb, KERN_ERR,
2659 "Zoned block device support is not enabled\n");
2664 default_options(sbi);
2665 /* parse mount options */
2666 options = kstrdup((const char *)data, GFP_KERNEL);
2667 if (data && !options) {
2672 err = parse_options(sb, options);
2676 sbi->max_file_blocks = max_file_blocks();
2677 sb->s_maxbytes = sbi->max_file_blocks <<
2678 le32_to_cpu(raw_super->log_blocksize);
2679 sb->s_max_links = F2FS_LINK_MAX;
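/* Figures for orientation (approximate, assuming the default 4 KiB block
 * size and the usual 923 direct pointers plus 1018-entry indirect node
 * blocks): max_file_blocks() works out to roughly 1.06 billion blocks,
 * so s_maxbytes is about 3.94 TiB per file.
 */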
2680 get_random_bytes(&sbi->s_next_generation, sizeof(u32));
2683 sb->dq_op = &f2fs_quota_operations;
2684 if (f2fs_sb_has_quota_ino(sb))
2685 sb->s_qcop = &dquot_quotactl_sysfile_ops;
2687 sb->s_qcop = &f2fs_quotactl_ops;
2688 sb->s_quota_types = QTYPE_MASK_USR | QTYPE_MASK_GRP | QTYPE_MASK_PRJ;
2690 if (f2fs_sb_has_quota_ino(sbi->sb)) {
2691 for (i = 0; i < MAXQUOTAS; i++) {
2692 if (f2fs_qf_ino(sbi->sb, i))
2693 sbi->nquota_files++;
2698 sb->s_op = &f2fs_sops;
2699 #ifdef CONFIG_F2FS_FS_ENCRYPTION
2700 sb->s_cop = &f2fs_cryptops;
2702 sb->s_xattr = f2fs_xattr_handlers;
2703 sb->s_export_op = &f2fs_export_ops;
2704 sb->s_magic = F2FS_SUPER_MAGIC;
2705 sb->s_time_gran = 1;
2706 sb->s_flags = (sb->s_flags & ~SB_POSIXACL) |
2707 (test_opt(sbi, POSIX_ACL) ? SB_POSIXACL : 0);
2708 memcpy(&sb->s_uuid, raw_super->uuid, sizeof(raw_super->uuid));
2709 sb->s_iflags |= SB_I_CGROUPWB;
2711 /* init f2fs-specific super block info */
2712 sbi->valid_super_block = valid_super_block;
2713 mutex_init(&sbi->gc_mutex);
2714 mutex_init(&sbi->cp_mutex);
2715 init_rwsem(&sbi->node_write);
2716 init_rwsem(&sbi->node_change);
2718 /* disallow all the data/node/meta page writes */
2719 set_sbi_flag(sbi, SBI_POR_DOING);
2720 spin_lock_init(&sbi->stat_lock);
2722 /* init iostat info */
2723 spin_lock_init(&sbi->iostat_lock);
2724 sbi->iostat_enable = false;
2726 for (i = 0; i < NR_PAGE_TYPE; i++) {
2727 int n = (i == META) ? 1 : NR_TEMP_TYPE;
2730 sbi->write_io[i] = f2fs_kmalloc(sbi,
2731 n * sizeof(struct f2fs_bio_info),
2733 if (!sbi->write_io[i]) {
2738 for (j = HOT; j < n; j++) {
2739 init_rwsem(&sbi->write_io[i][j].io_rwsem);
2740 sbi->write_io[i][j].sbi = sbi;
2741 sbi->write_io[i][j].bio = NULL;
2742 spin_lock_init(&sbi->write_io[i][j].io_lock);
2743 INIT_LIST_HEAD(&sbi->write_io[i][j].io_list);
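/*
 * Layout of the write_io matrix built above: META keeps a single
 * f2fs_bio_info, while DATA and NODE each keep NR_TEMP_TYPE entries
 * (HOT/WARM/COLD), so bios of different temperatures can be merged
 * independently per log type.
 */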
2747 init_rwsem(&sbi->cp_rwsem);
2748 init_waitqueue_head(&sbi->cp_wait);
2751 err = init_percpu_info(sbi);
2755 if (F2FS_IO_SIZE(sbi) > 1) {
2756 sbi->write_io_dummy =
2757 mempool_create_page_pool(2 * (F2FS_IO_SIZE(sbi) - 1), 0);
2758 if (!sbi->write_io_dummy) {
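/*
 * Worked example (illustrative): mounting with io_bits=2 gives
 * F2FS_IO_SIZE(sbi) = 1 << 2 = 4 blocks, so the dummy-page mempool is
 * sized 2 * (4 - 1) = 6 pages; these pages pad partially filled bios up
 * to the aligned IO size.
 */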
2764 /* get an inode for meta space */
2765 sbi->meta_inode = f2fs_iget(sb, F2FS_META_INO(sbi));
2766 if (IS_ERR(sbi->meta_inode)) {
2767 f2fs_msg(sb, KERN_ERR, "Failed to read F2FS meta data inode");
2768 err = PTR_ERR(sbi->meta_inode);
2772 err = get_valid_checkpoint(sbi);
2774 f2fs_msg(sb, KERN_ERR, "Failed to get valid F2FS checkpoint");
2775 goto free_meta_inode;
2778 /* Initialize device list */
2779 err = f2fs_scan_devices(sbi);
2781 f2fs_msg(sb, KERN_ERR, "Failed to find devices");
2785 sbi->total_valid_node_count =
2786 le32_to_cpu(sbi->ckpt->valid_node_count);
2787 percpu_counter_set(&sbi->total_valid_inode_count,
2788 le32_to_cpu(sbi->ckpt->valid_inode_count));
2789 sbi->user_block_count = le64_to_cpu(sbi->ckpt->user_block_count);
2790 sbi->total_valid_block_count =
2791 le64_to_cpu(sbi->ckpt->valid_block_count);
2792 sbi->last_valid_block_count = sbi->total_valid_block_count;
2793 sbi->reserved_blocks = 0;
2794 sbi->current_reserved_blocks = 0;
2795 limit_reserve_root(sbi);
2797 for (i = 0; i < NR_INODE_TYPE; i++) {
2798 INIT_LIST_HEAD(&sbi->inode_list[i]);
2799 spin_lock_init(&sbi->inode_lock[i]);
2802 init_extent_cache_info(sbi);
2804 init_ino_entry_info(sbi);
2806 /* set up f2fs internal modules */
2807 err = build_segment_manager(sbi);
2809 f2fs_msg(sb, KERN_ERR,
2810 "Failed to initialize F2FS segment manager");
2813 err = build_node_manager(sbi);
2815 f2fs_msg(sb, KERN_ERR,
2816 "Failed to initialize F2FS node manager");
2820 /* For write statistics */
2821 if (sb->s_bdev->bd_part)
2822 sbi->sectors_written_start =
2823 (u64)part_stat_read(sb->s_bdev->bd_part, sectors[1]);
2825 /* Read accumulated write IO statistics if they exist */
2826 seg_i = CURSEG_I(sbi, CURSEG_HOT_NODE);
2827 if (__exist_node_summaries(sbi))
2828 sbi->kbytes_written =
2829 le64_to_cpu(seg_i->journal->info.kbytes_written);
2831 build_gc_manager(sbi);
2833 /* get an inode for node space */
2834 sbi->node_inode = f2fs_iget(sb, F2FS_NODE_INO(sbi));
2835 if (IS_ERR(sbi->node_inode)) {
2836 f2fs_msg(sb, KERN_ERR, "Failed to read node inode");
2837 err = PTR_ERR(sbi->node_inode);
2841 err = f2fs_build_stats(sbi);
2843 goto free_node_inode;
2845 /* read root inode and dentry */
2846 root = f2fs_iget(sb, F2FS_ROOT_INO(sbi));
2848 f2fs_msg(sb, KERN_ERR, "Failed to read root inode");
2849 err = PTR_ERR(root);
2852 if (!S_ISDIR(root->i_mode) || !root->i_blocks || !root->i_size) {
2855 goto free_node_inode;
2858 sb->s_root = d_make_root(root); /* allocate root dentry */
2861 goto free_root_inode;
2864 err = f2fs_register_sysfs(sbi);
2866 goto free_root_inode;
2870 * Turn on quotas which were not enabled for read-only mounts if the
2871 * filesystem has the quota feature, so that they are updated correctly.
2873 if (f2fs_sb_has_quota_ino(sb) && !f2fs_readonly(sb)) {
2874 err = f2fs_enable_quotas(sb);
2876 f2fs_msg(sb, KERN_ERR,
2877 "Cannot turn on quotas: error %d", err);
2882 /* if there are any orphan nodes, free them */
2883 err = recover_orphan_inodes(sbi);
2887 /* recover fsynced data */
2888 if (!test_opt(sbi, DISABLE_ROLL_FORWARD)) {
2890 * the mount should fail when the device is read-only and the
2891 * previous checkpoint was not completed by a clean system shutdown.
2893 if (bdev_read_only(sb->s_bdev) &&
2894 !is_set_ckpt_flags(sbi, CP_UMOUNT_FLAG)) {
2900 set_sbi_flag(sbi, SBI_NEED_FSCK);
2905 err = recover_fsync_data(sbi, false);
2908 f2fs_msg(sb, KERN_ERR,
2909 "Cannot recover all fsync data errno=%d", err);
2913 err = recover_fsync_data(sbi, true);
2915 if (!f2fs_readonly(sb) && err > 0) {
2917 f2fs_msg(sb, KERN_ERR,
2918 "Need to recover fsync data");
2923 /* recover_fsync_data() cleared this already */
2924 clear_sbi_flag(sbi, SBI_POR_DOING);
2927 * If the filesystem is not mounted read-only, then
2928 * start the gc_thread.
2930 if (test_opt(sbi, BG_GC) && !f2fs_readonly(sb)) {
2931 /* After POR, we can run the background GC thread. */
2932 err = start_gc_thread(sbi);
2938 /* recover broken superblock */
2940 err = f2fs_commit_super(sbi, true);
2941 f2fs_msg(sb, KERN_INFO,
2942 "Try to recover %dth superblock, ret: %d",
2943 sbi->valid_super_block ? 1 : 2, err);
2946 f2fs_join_shrinker(sbi);
2948 f2fs_tuning_parameters(sbi);
2950 f2fs_msg(sbi->sb, KERN_NOTICE, "Mounted with checkpoint version = %llx",
2951 cur_cp_version(F2FS_CKPT(sbi)));
2952 f2fs_update_time(sbi, CP_TIME);
2953 f2fs_update_time(sbi, REQ_TIME);
2958 if (f2fs_sb_has_quota_ino(sb) && !f2fs_readonly(sb))
2959 f2fs_quota_off_umount(sbi->sb);
2961 f2fs_sync_inode_meta(sbi);
2963 * Some dirty meta pages can be left behind when recover_orphan_inodes()
2964 * fails with EIO. Then iput(node_inode) can trigger balance_fs_bg(),
2965 * followed by write_checkpoint() through f2fs_write_node_pages(), which
2966 * falls into an infinite loop in sync_meta_pages().
2968 truncate_inode_pages_final(META_MAPPING(sbi));
2972 f2fs_unregister_sysfs(sbi);
2977 f2fs_destroy_stats(sbi);
2979 release_ino_entry(sbi, true);
2980 truncate_inode_pages_final(NODE_MAPPING(sbi));
2981 iput(sbi->node_inode);
2983 destroy_node_manager(sbi);
2985 destroy_segment_manager(sbi);
2987 destroy_device_list(sbi);
2990 make_bad_inode(sbi->meta_inode);
2991 iput(sbi->meta_inode);
2993 mempool_destroy(sbi->write_io_dummy);
2995 destroy_percpu_info(sbi);
2997 for (i = 0; i < NR_PAGE_TYPE; i++)
2998 kfree(sbi->write_io[i]);
3001 for (i = 0; i < MAXQUOTAS; i++)
3002 kfree(F2FS_OPTION(sbi).s_qf_names[i]);
3008 if (sbi->s_chksum_driver)
3009 crypto_free_shash(sbi->s_chksum_driver);
3012 /* give only one more chance */
3015 shrink_dcache_sb(sb);
3021 static struct dentry *f2fs_mount(struct file_system_type *fs_type, int flags,
3022 const char *dev_name, void *data)
3024 return mount_bdev(fs_type, flags, dev_name, data, f2fs_fill_super);
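/*
 * Userspace usage sketch (illustrative device path and mount point):
 *
 *	# mount -t f2fs -o lazytime /dev/sdb1 /mnt/f2fs
 *
 * mount_bdev() resolves /dev/sdb1, reads the superblocks through
 * f2fs_fill_super(), and attaches the resulting super block to the
 * mount point.
 */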
3027 static void kill_f2fs_super(struct super_block *sb)
3030 set_sbi_flag(F2FS_SB(sb), SBI_IS_CLOSE);
3031 stop_gc_thread(F2FS_SB(sb));
3032 stop_discard_thread(F2FS_SB(sb));
3034 kill_block_super(sb);
3037 static struct file_system_type f2fs_fs_type = {
3038 .owner = THIS_MODULE,
3040 .mount = f2fs_mount,
3041 .kill_sb = kill_f2fs_super,
3042 .fs_flags = FS_REQUIRES_DEV,
3044 MODULE_ALIAS_FS("f2fs");
3046 static int __init init_inodecache(void)
3048 f2fs_inode_cachep = kmem_cache_create("f2fs_inode_cache",
3049 sizeof(struct f2fs_inode_info), 0,
3050 SLAB_RECLAIM_ACCOUNT|SLAB_ACCOUNT, NULL);
3051 if (!f2fs_inode_cachep)
3056 static void destroy_inodecache(void)
3059 * Make sure all delayed rcu free inodes are flushed before we destroy the cache.
3063 kmem_cache_destroy(f2fs_inode_cachep);
3066 static int __init init_f2fs_fs(void)
3070 f2fs_build_trace_ios();
3072 err = init_inodecache();
3075 err = create_node_manager_caches();
3077 goto free_inodecache;
3078 err = create_segment_manager_caches();
3080 goto free_node_manager_caches;
3081 err = create_checkpoint_caches();
3083 goto free_segment_manager_caches;
3084 err = create_extent_cache();
3086 goto free_checkpoint_caches;
3087 err = f2fs_init_sysfs();
3089 goto free_extent_cache;
3090 err = register_shrinker(&f2fs_shrinker_info);
3093 err = register_filesystem(&f2fs_fs_type);
3096 err = f2fs_create_root_stats();
3098 goto free_filesystem;
3099 err = f2fs_init_post_read_processing();
3101 goto free_root_stats;
3105 f2fs_destroy_root_stats();
3107 unregister_filesystem(&f2fs_fs_type);
3109 unregister_shrinker(&f2fs_shrinker_info);
3113 destroy_extent_cache();
3114 free_checkpoint_caches:
3115 destroy_checkpoint_caches();
3116 free_segment_manager_caches:
3117 destroy_segment_manager_caches();
3118 free_node_manager_caches:
3119 destroy_node_manager_caches();
3121 destroy_inodecache();
3126 static void __exit exit_f2fs_fs(void)
3128 f2fs_destroy_post_read_processing();
3129 f2fs_destroy_root_stats();
3130 unregister_filesystem(&f2fs_fs_type);
3131 unregister_shrinker(&f2fs_shrinker_info);
3133 destroy_extent_cache();
3134 destroy_checkpoint_caches();
3135 destroy_segment_manager_caches();
3136 destroy_node_manager_caches();
3137 destroy_inodecache();
3138 f2fs_destroy_trace_ios();
3141 module_init(init_f2fs_fs)
3142 module_exit(exit_f2fs_fs)
3144 MODULE_AUTHOR("Samsung Electronics's Praesto Team");
3145 MODULE_DESCRIPTION("Flash Friendly File System");
3146 MODULE_LICENSE("GPL");