/*
 * fs/f2fs/super.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/statfs.h>
#include <linux/buffer_head.h>
#include <linux/backing-dev.h>
#include <linux/kthread.h>
#include <linux/parser.h>
#include <linux/mount.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <linux/random.h>
#include <linux/exportfs.h>
#include <linux/blkdev.h>
#include <linux/quotaops.h>
#include <linux/f2fs_fs.h>
#include <linux/sysfs.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include "xattr.h"
#include "gc.h"
#include "trace.h"

#define CREATE_TRACE_POINTS
#include <trace/events/f2fs.h>

static struct kmem_cache *f2fs_inode_cachep;

#ifdef CONFIG_F2FS_FAULT_INJECTION

char *fault_name[FAULT_MAX] = {
	[FAULT_KMALLOC]		= "kmalloc",
	[FAULT_PAGE_ALLOC]	= "page alloc",
	[FAULT_ALLOC_NID]	= "alloc nid",
	[FAULT_ORPHAN]		= "orphan",
	[FAULT_BLOCK]		= "no more block",
	[FAULT_DIR_DEPTH]	= "too big dir depth",
	[FAULT_EVICT_INODE]	= "evict_inode fail",
	[FAULT_TRUNCATE]	= "truncate fail",
	[FAULT_IO]		= "IO error",
	[FAULT_CHECKPOINT]	= "checkpoint error",
};

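/*
 * Configure fault injection for this superblock: a non-zero @rate resets
 * the injection counter and enables every fault type, while a zero @rate
 * clears fault_info and disables injection entirely.
 */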
static void f2fs_build_fault_attr(struct f2fs_sb_info *sbi,
						unsigned int rate)
{
	struct f2fs_fault_info *ffi = &sbi->fault_info;

	if (rate) {
		atomic_set(&ffi->inject_ops, 0);
		ffi->inject_rate = rate;
		ffi->inject_type = (1 << FAULT_MAX) - 1;
	} else {
		memset(ffi, 0, sizeof(struct f2fs_fault_info));
	}
}
#endif

/* f2fs-wide shrinker description */
static struct shrinker f2fs_shrinker_info = {
	.scan_objects = f2fs_shrink_scan,
	.count_objects = f2fs_shrink_count,
	.seeks = DEFAULT_SEEKS,
};

enum {
	Opt_gc_background,
	Opt_disable_roll_forward,
	Opt_norecovery,
	Opt_discard,
	Opt_nodiscard,
	Opt_noheap,
	Opt_heap,
	Opt_user_xattr,
	Opt_nouser_xattr,
	Opt_acl,
	Opt_noacl,
	Opt_active_logs,
	Opt_disable_ext_identify,
	Opt_inline_xattr,
	Opt_noinline_xattr,
	Opt_inline_data,
	Opt_inline_dentry,
	Opt_noinline_dentry,
	Opt_flush_merge,
	Opt_noflush_merge,
	Opt_nobarrier,
	Opt_fastboot,
	Opt_extent_cache,
	Opt_noextent_cache,
	Opt_noinline_data,
	Opt_data_flush,
	Opt_mode,
	Opt_io_size_bits,
	Opt_fault_injection,
	Opt_lazytime,
	Opt_nolazytime,
	Opt_usrquota,
	Opt_grpquota,
	Opt_err,
};

static match_table_t f2fs_tokens = {
	{Opt_gc_background, "background_gc=%s"},
	{Opt_disable_roll_forward, "disable_roll_forward"},
	{Opt_norecovery, "norecovery"},
	{Opt_discard, "discard"},
	{Opt_nodiscard, "nodiscard"},
	{Opt_noheap, "no_heap"},
	{Opt_heap, "heap"},
	{Opt_user_xattr, "user_xattr"},
	{Opt_nouser_xattr, "nouser_xattr"},
	{Opt_acl, "acl"},
	{Opt_noacl, "noacl"},
	{Opt_active_logs, "active_logs=%u"},
	{Opt_disable_ext_identify, "disable_ext_identify"},
	{Opt_inline_xattr, "inline_xattr"},
	{Opt_noinline_xattr, "noinline_xattr"},
	{Opt_inline_data, "inline_data"},
	{Opt_inline_dentry, "inline_dentry"},
	{Opt_noinline_dentry, "noinline_dentry"},
	{Opt_flush_merge, "flush_merge"},
	{Opt_noflush_merge, "noflush_merge"},
	{Opt_nobarrier, "nobarrier"},
	{Opt_fastboot, "fastboot"},
	{Opt_extent_cache, "extent_cache"},
	{Opt_noextent_cache, "noextent_cache"},
	{Opt_noinline_data, "noinline_data"},
	{Opt_data_flush, "data_flush"},
	{Opt_mode, "mode=%s"},
	{Opt_io_size_bits, "io_bits=%u"},
	{Opt_fault_injection, "fault_injection=%u"},
	{Opt_lazytime, "lazytime"},
	{Opt_nolazytime, "nolazytime"},
	{Opt_usrquota, "usrquota"},
	{Opt_grpquota, "grpquota"},
	{Opt_err, NULL},
};

void f2fs_msg(struct super_block *sb, const char *level, const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, fmt);
	vaf.fmt = fmt;
	vaf.va = &args;
	printk("%sF2FS-fs (%s): %pV\n", level, sb->s_id, &vaf);
	va_end(args);
}

static void init_once(void *foo)
{
	struct f2fs_inode_info *fi = (struct f2fs_inode_info *) foo;

	inode_init_once(&fi->vfs_inode);
}

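/*
 * Parse the comma-separated mount option string and apply each recognized
 * option to the in-memory mount flags of @sb; returns 0 on success,
 * -EINVAL on an unknown or malformed option and -ENOMEM on allocation
 * failure.
 */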
static int parse_options(struct super_block *sb, char *options)
{
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	struct request_queue *q;
	substring_t args[MAX_OPT_ARGS];
	char *p, *name;
	int arg = 0;

	if (!options)
		return 0;

	while ((p = strsep(&options, ",")) != NULL) {
		int token;
		if (!*p)
			continue;
		/*
		 * Initialize args struct so we know whether arg was
		 * found; some options take optional arguments.
		 */
		args[0].to = args[0].from = NULL;
		token = match_token(p, f2fs_tokens, args);

		switch (token) {
		case Opt_gc_background:
			name = match_strdup(&args[0]);

			if (!name)
				return -ENOMEM;
			if (strlen(name) == 2 && !strncmp(name, "on", 2)) {
				set_opt(sbi, BG_GC);
				clear_opt(sbi, FORCE_FG_GC);
			} else if (strlen(name) == 3 && !strncmp(name, "off", 3)) {
				clear_opt(sbi, BG_GC);
				clear_opt(sbi, FORCE_FG_GC);
			} else if (strlen(name) == 4 && !strncmp(name, "sync", 4)) {
				set_opt(sbi, BG_GC);
				set_opt(sbi, FORCE_FG_GC);
			} else {
				kfree(name);
				return -EINVAL;
			}
			kfree(name);
			break;
		case Opt_disable_roll_forward:
			set_opt(sbi, DISABLE_ROLL_FORWARD);
			break;
		case Opt_norecovery:
			/* this option mounts f2fs with ro */
			set_opt(sbi, DISABLE_ROLL_FORWARD);
			if (!f2fs_readonly(sb))
				return -EINVAL;
			break;
		case Opt_discard:
			q = bdev_get_queue(sb->s_bdev);
			if (blk_queue_discard(q)) {
				set_opt(sbi, DISCARD);
			} else if (!f2fs_sb_mounted_blkzoned(sb)) {
				f2fs_msg(sb, KERN_WARNING,
					"mounting with \"discard\" option, but "
					"the device does not support discard");
			}
			break;
		case Opt_nodiscard:
			if (f2fs_sb_mounted_blkzoned(sb)) {
				f2fs_msg(sb, KERN_WARNING,
					"discard is required for zoned block devices");
				return -EINVAL;
			}
			clear_opt(sbi, DISCARD);
			break;
		case Opt_noheap:
			set_opt(sbi, NOHEAP);
			break;
		case Opt_heap:
			clear_opt(sbi, NOHEAP);
			break;
#ifdef CONFIG_F2FS_FS_XATTR
		case Opt_user_xattr:
			set_opt(sbi, XATTR_USER);
			break;
		case Opt_nouser_xattr:
			clear_opt(sbi, XATTR_USER);
			break;
		case Opt_inline_xattr:
			set_opt(sbi, INLINE_XATTR);
			break;
		case Opt_noinline_xattr:
			clear_opt(sbi, INLINE_XATTR);
			break;
#else
		case Opt_user_xattr:
			f2fs_msg(sb, KERN_INFO,
				"user_xattr options not supported");
			break;
		case Opt_nouser_xattr:
			f2fs_msg(sb, KERN_INFO,
				"nouser_xattr options not supported");
			break;
		case Opt_inline_xattr:
			f2fs_msg(sb, KERN_INFO,
				"inline_xattr options not supported");
			break;
		case Opt_noinline_xattr:
			f2fs_msg(sb, KERN_INFO,
				"noinline_xattr options not supported");
			break;
#endif
#ifdef CONFIG_F2FS_FS_POSIX_ACL
		case Opt_acl:
			set_opt(sbi, POSIX_ACL);
			break;
		case Opt_noacl:
			clear_opt(sbi, POSIX_ACL);
			break;
#else
		case Opt_acl:
			f2fs_msg(sb, KERN_INFO, "acl options not supported");
			break;
		case Opt_noacl:
			f2fs_msg(sb, KERN_INFO, "noacl options not supported");
			break;
#endif
		case Opt_active_logs:
			if (args->from && match_int(args, &arg))
				return -EINVAL;
			if (arg != 2 && arg != 4 && arg != NR_CURSEG_TYPE)
				return -EINVAL;
			sbi->active_logs = arg;
			break;
		case Opt_disable_ext_identify:
			set_opt(sbi, DISABLE_EXT_IDENTIFY);
			break;
		case Opt_inline_data:
			set_opt(sbi, INLINE_DATA);
			break;
		case Opt_inline_dentry:
			set_opt(sbi, INLINE_DENTRY);
			break;
		case Opt_noinline_dentry:
			clear_opt(sbi, INLINE_DENTRY);
			break;
		case Opt_flush_merge:
			set_opt(sbi, FLUSH_MERGE);
			break;
		case Opt_noflush_merge:
			clear_opt(sbi, FLUSH_MERGE);
			break;
		case Opt_nobarrier:
			set_opt(sbi, NOBARRIER);
			break;
		case Opt_fastboot:
			set_opt(sbi, FASTBOOT);
			break;
		case Opt_extent_cache:
			set_opt(sbi, EXTENT_CACHE);
			break;
		case Opt_noextent_cache:
			clear_opt(sbi, EXTENT_CACHE);
			break;
		case Opt_noinline_data:
			clear_opt(sbi, INLINE_DATA);
			break;
		case Opt_data_flush:
			set_opt(sbi, DATA_FLUSH);
			break;
		case Opt_mode:
			name = match_strdup(&args[0]);

			if (!name)
				return -ENOMEM;
			if (strlen(name) == 8 &&
					!strncmp(name, "adaptive", 8)) {
				if (f2fs_sb_mounted_blkzoned(sb)) {
					f2fs_msg(sb, KERN_WARNING,
						"adaptive mode is not allowed with "
						"zoned block device feature");
					kfree(name);
					return -EINVAL;
				}
				set_opt_mode(sbi, F2FS_MOUNT_ADAPTIVE);
			} else if (strlen(name) == 3 &&
					!strncmp(name, "lfs", 3)) {
				set_opt_mode(sbi, F2FS_MOUNT_LFS);
			} else {
				kfree(name);
				return -EINVAL;
			}
			kfree(name);
			break;
		case Opt_io_size_bits:
			if (args->from && match_int(args, &arg))
				return -EINVAL;
			if (arg > __ilog2_u32(BIO_MAX_PAGES)) {
				f2fs_msg(sb, KERN_WARNING,
					"Not support %d, larger than %d",
					1 << arg, BIO_MAX_PAGES);
				return -EINVAL;
			}
			sbi->write_io_size_bits = arg;
			break;
		case Opt_fault_injection:
			if (args->from && match_int(args, &arg))
				return -EINVAL;
#ifdef CONFIG_F2FS_FAULT_INJECTION
			f2fs_build_fault_attr(sbi, arg);
			set_opt(sbi, FAULT_INJECTION);
#else
			f2fs_msg(sb, KERN_INFO,
				"FAULT_INJECTION was not selected");
#endif
			break;
		case Opt_lazytime:
			sb->s_flags |= MS_LAZYTIME;
			break;
		case Opt_nolazytime:
			sb->s_flags &= ~MS_LAZYTIME;
			break;
#ifdef CONFIG_QUOTA
		case Opt_usrquota:
			set_opt(sbi, USRQUOTA);
			break;
		case Opt_grpquota:
			set_opt(sbi, GRPQUOTA);
			break;
#else
		case Opt_usrquota:
		case Opt_grpquota:
			f2fs_msg(sb, KERN_INFO,
					"quota operations not supported");
			break;
#endif
		default:
			f2fs_msg(sb, KERN_ERR,
				"Unrecognized mount option \"%s\" or missing value",
				p);
			return -EINVAL;
		}
	}

	if (F2FS_IO_SIZE_BITS(sbi) && !test_opt(sbi, LFS)) {
		f2fs_msg(sb, KERN_ERR,
				"Should set mode=lfs with %uKB-sized IO",
				F2FS_IO_SIZE_KB(sbi));
		return -EINVAL;
	}
	return 0;
}

static struct inode *f2fs_alloc_inode(struct super_block *sb)
{
	struct f2fs_inode_info *fi;

	fi = kmem_cache_alloc(f2fs_inode_cachep, GFP_F2FS_ZERO);
	if (!fi)
		return NULL;

	init_once((void *) fi);

	/* Initialize f2fs-specific inode info */
	fi->vfs_inode.i_version = 1;
	atomic_set(&fi->dirty_pages, 0);
	fi->i_current_depth = 1;
	fi->i_advise = 0;
	init_rwsem(&fi->i_sem);
	INIT_LIST_HEAD(&fi->dirty_list);
	INIT_LIST_HEAD(&fi->gdirty_list);
	INIT_LIST_HEAD(&fi->inmem_pages);
	mutex_init(&fi->inmem_lock);
	init_rwsem(&fi->dio_rwsem[READ]);
	init_rwsem(&fi->dio_rwsem[WRITE]);
	init_rwsem(&fi->i_mmap_sem);

#ifdef CONFIG_QUOTA
	memset(&fi->i_dquot, 0, sizeof(fi->i_dquot));
	fi->i_reserved_quota = 0;
#endif
	/* Will be used by directory only */
	fi->i_dir_level = F2FS_SB(sb)->dir_level;

	return &fi->vfs_inode;
}

static int f2fs_drop_inode(struct inode *inode)
{
	int ret;
	/*
	 * This is to avoid a deadlock condition like below.
	 * writeback_single_inode(inode)
	 *  - f2fs_write_data_page
	 *    - f2fs_gc -> iput -> evict
	 *       - inode_wait_for_writeback(inode)
	 */
	if ((!inode_unhashed(inode) && inode->i_state & I_SYNC)) {
		if (!inode->i_nlink && !is_bad_inode(inode)) {
			/* to avoid evict_inode call simultaneously */
			atomic_inc(&inode->i_count);
			spin_unlock(&inode->i_lock);

			/* some remaining atomic pages should be discarded */
			if (f2fs_is_atomic_file(inode))
				drop_inmem_pages(inode);

			/* should remain fi->extent_tree for writepage */
			f2fs_destroy_extent_node(inode);

			sb_start_intwrite(inode->i_sb);
			f2fs_i_size_write(inode, 0);

			if (F2FS_HAS_BLOCKS(inode))
				f2fs_truncate(inode);

			sb_end_intwrite(inode->i_sb);

			fscrypt_put_encryption_info(inode, NULL);
			spin_lock(&inode->i_lock);
			atomic_dec(&inode->i_count);
		}
		trace_f2fs_drop_inode(inode, 0);
		return 0;
	}
	ret = generic_drop_inode(inode);
	trace_f2fs_drop_inode(inode, ret);
	return ret;
}

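/*
 * Mark @inode dirty for the next checkpoint and return 1 if it was
 * already dirty, 0 otherwise.  When @sync is true the inode is also
 * linked into the global DIRTY_META list so the checkpoint can flush it.
 */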
int f2fs_inode_dirtied(struct inode *inode, bool sync)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	int ret = 0;

	spin_lock(&sbi->inode_lock[DIRTY_META]);
	if (is_inode_flag_set(inode, FI_DIRTY_INODE)) {
		ret = 1;
	} else {
		set_inode_flag(inode, FI_DIRTY_INODE);
		stat_inc_dirty_inode(sbi, DIRTY_META);
	}
	if (sync && list_empty(&F2FS_I(inode)->gdirty_list)) {
		list_add_tail(&F2FS_I(inode)->gdirty_list,
				&sbi->inode_list[DIRTY_META]);
		inc_page_count(sbi, F2FS_DIRTY_IMETA);
	}
	spin_unlock(&sbi->inode_lock[DIRTY_META]);
	return ret;
}

void f2fs_inode_synced(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

	spin_lock(&sbi->inode_lock[DIRTY_META]);
	if (!is_inode_flag_set(inode, FI_DIRTY_INODE)) {
		spin_unlock(&sbi->inode_lock[DIRTY_META]);
		return;
	}
	if (!list_empty(&F2FS_I(inode)->gdirty_list)) {
		list_del_init(&F2FS_I(inode)->gdirty_list);
		dec_page_count(sbi, F2FS_DIRTY_IMETA);
	}
	clear_inode_flag(inode, FI_DIRTY_INODE);
	clear_inode_flag(inode, FI_AUTO_RECOVER);
	stat_dec_dirty_inode(F2FS_I_SB(inode), DIRTY_META);
	spin_unlock(&sbi->inode_lock[DIRTY_META]);
}

/*
 * f2fs_dirty_inode() is called from __mark_inode_dirty()
 *
 * We should call set_dirty_inode to write the dirty inode through write_inode.
 */
static void f2fs_dirty_inode(struct inode *inode, int flags)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

	if (inode->i_ino == F2FS_NODE_INO(sbi) ||
			inode->i_ino == F2FS_META_INO(sbi))
		return;

	if (flags == I_DIRTY_TIME)
		return;

	if (is_inode_flag_set(inode, FI_AUTO_RECOVER))
		clear_inode_flag(inode, FI_AUTO_RECOVER);

	f2fs_inode_dirtied(inode, false);
}

static void f2fs_i_callback(struct rcu_head *head)
{
	struct inode *inode = container_of(head, struct inode, i_rcu);
	kmem_cache_free(f2fs_inode_cachep, F2FS_I(inode));
}

static void f2fs_destroy_inode(struct inode *inode)
{
	call_rcu(&inode->i_rcu, f2fs_i_callback);
}

static void destroy_percpu_info(struct f2fs_sb_info *sbi)
{
	percpu_counter_destroy(&sbi->alloc_valid_block_count);
	percpu_counter_destroy(&sbi->total_valid_inode_count);
}

static void destroy_device_list(struct f2fs_sb_info *sbi)
{
	int i;

	for (i = 0; i < sbi->s_ndevs; i++) {
		blkdev_put(FDEV(i).bdev, FMODE_EXCL);
#ifdef CONFIG_BLK_DEV_ZONED
		kfree(FDEV(i).blkz_type);
#endif
	}
	kfree(sbi->devs);
}

static void f2fs_quota_off_umount(struct super_block *sb);
static void f2fs_put_super(struct super_block *sb)
{
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	int i;

	f2fs_quota_off_umount(sb);

	/* prevent remaining shrinker jobs */
	mutex_lock(&sbi->umount_mutex);

	/*
	 * We don't need to do checkpoint when superblock is clean.
	 * But, if the previous checkpoint was not done by umount, we need to
	 * do a clean checkpoint again.
	 */
	if (is_sbi_flag_set(sbi, SBI_IS_DIRTY) ||
			!is_set_ckpt_flags(sbi, CP_UMOUNT_FLAG)) {
		struct cp_control cpc = {
			.reason = CP_UMOUNT,
		};
		write_checkpoint(sbi, &cpc);
	}

	/* be sure to wait for any on-going discard commands */
	f2fs_wait_discard_bios(sbi);

	if (f2fs_discard_en(sbi) && !sbi->discard_blks) {
		struct cp_control cpc = {
			.reason = CP_UMOUNT | CP_TRIMMED,
		};
		write_checkpoint(sbi, &cpc);
	}

	/* write_checkpoint can update stat information */
	f2fs_destroy_stats(sbi);

	/*
	 * Normally the superblock is clean, so we need to release this.
	 * In addition, EIO will skip doing the checkpoint, so we need this
	 * as well.
	 */
	release_ino_entry(sbi, true);

	f2fs_leave_shrinker(sbi);
	mutex_unlock(&sbi->umount_mutex);

	/* in our cp_error case, we can wait for any writeback page */
	f2fs_flush_merged_writes(sbi);

	iput(sbi->node_inode);
	iput(sbi->meta_inode);

	/* destroy f2fs internal modules */
	destroy_node_manager(sbi);
	destroy_segment_manager(sbi);

	kfree(sbi->ckpt);

	f2fs_exit_sysfs(sbi);

	sb->s_fs_info = NULL;
	if (sbi->s_chksum_driver)
		crypto_free_shash(sbi->s_chksum_driver);
	kfree(sbi->raw_super);

	destroy_device_list(sbi);
	mempool_destroy(sbi->write_io_dummy);
	destroy_percpu_info(sbi);
	for (i = 0; i < NR_PAGE_TYPE; i++)
		kfree(sbi->write_io[i]);
	kfree(sbi);
}

int f2fs_sync_fs(struct super_block *sb, int sync)
{
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	int err = 0;

	trace_f2fs_sync_fs(sb, sync);

	if (sync) {
		struct cp_control cpc;

		cpc.reason = __get_cp_reason(sbi);

		mutex_lock(&sbi->gc_mutex);
		err = write_checkpoint(sbi, &cpc);
		mutex_unlock(&sbi->gc_mutex);
	}
	f2fs_trace_ios(NULL, 1);

	return err;
}

static int f2fs_freeze(struct super_block *sb)
{
	if (f2fs_readonly(sb))
		return 0;

	/* IO error happened before */
	if (unlikely(f2fs_cp_error(F2FS_SB(sb))))
		return -EIO;

	/* must be clean, since sync_filesystem() was already called */
	if (is_sbi_flag_set(F2FS_SB(sb), SBI_IS_DIRTY))
		return -EINVAL;
	return 0;
}

static int f2fs_unfreeze(struct super_block *sb)
{
	return 0;
}

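/*
 * Fill in statfs(2) information: f_bfree counts the overprovision area as
 * free, f_bavail additionally subtracts sbi->reserved_blocks, and the
 * inode counts are bounded by both the available node IDs and the
 * available blocks.
 */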
static int f2fs_statfs(struct dentry *dentry, struct kstatfs *buf)
{
	struct super_block *sb = dentry->d_sb;
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	u64 id = huge_encode_dev(sb->s_bdev->bd_dev);
	block_t total_count, user_block_count, start_count, ovp_count;
	u64 avail_node_count;

	total_count = le64_to_cpu(sbi->raw_super->block_count);
	user_block_count = sbi->user_block_count;
	start_count = le32_to_cpu(sbi->raw_super->segment0_blkaddr);
	ovp_count = SM_I(sbi)->ovp_segments << sbi->log_blocks_per_seg;
	buf->f_type = F2FS_SUPER_MAGIC;
	buf->f_bsize = sbi->blocksize;

	buf->f_blocks = total_count - start_count;
	buf->f_bfree = user_block_count - valid_user_blocks(sbi) + ovp_count;
	buf->f_bavail = user_block_count - valid_user_blocks(sbi) -
						sbi->reserved_blocks;

	avail_node_count = sbi->total_node_count - F2FS_RESERVED_NODE_NUM;

	if (avail_node_count > user_block_count) {
		buf->f_files = user_block_count;
		buf->f_ffree = buf->f_bavail;
	} else {
		buf->f_files = avail_node_count;
		buf->f_ffree = min(avail_node_count - valid_node_count(sbi),
					buf->f_bavail);
	}

	buf->f_namelen = F2FS_NAME_LEN;
	buf->f_fsid.val[0] = (u32)id;
	buf->f_fsid.val[1] = (u32)(id >> 32);

	return 0;
}

static int f2fs_show_options(struct seq_file *seq, struct dentry *root)
{
	struct f2fs_sb_info *sbi = F2FS_SB(root->d_sb);

	if (!f2fs_readonly(sbi->sb) && test_opt(sbi, BG_GC)) {
		if (test_opt(sbi, FORCE_FG_GC))
			seq_printf(seq, ",background_gc=%s", "sync");
		else
			seq_printf(seq, ",background_gc=%s", "on");
	} else {
		seq_printf(seq, ",background_gc=%s", "off");
	}
	if (test_opt(sbi, DISABLE_ROLL_FORWARD))
		seq_puts(seq, ",disable_roll_forward");
	if (test_opt(sbi, DISCARD))
		seq_puts(seq, ",discard");
	if (test_opt(sbi, NOHEAP))
		seq_puts(seq, ",no_heap");
	else
		seq_puts(seq, ",heap");
#ifdef CONFIG_F2FS_FS_XATTR
	if (test_opt(sbi, XATTR_USER))
		seq_puts(seq, ",user_xattr");
	else
		seq_puts(seq, ",nouser_xattr");
	if (test_opt(sbi, INLINE_XATTR))
		seq_puts(seq, ",inline_xattr");
	else
		seq_puts(seq, ",noinline_xattr");
#endif
#ifdef CONFIG_F2FS_FS_POSIX_ACL
	if (test_opt(sbi, POSIX_ACL))
		seq_puts(seq, ",acl");
	else
		seq_puts(seq, ",noacl");
#endif
	if (test_opt(sbi, DISABLE_EXT_IDENTIFY))
		seq_puts(seq, ",disable_ext_identify");
	if (test_opt(sbi, INLINE_DATA))
		seq_puts(seq, ",inline_data");
	else
		seq_puts(seq, ",noinline_data");
	if (test_opt(sbi, INLINE_DENTRY))
		seq_puts(seq, ",inline_dentry");
	else
		seq_puts(seq, ",noinline_dentry");
	if (!f2fs_readonly(sbi->sb) && test_opt(sbi, FLUSH_MERGE))
		seq_puts(seq, ",flush_merge");
	if (test_opt(sbi, NOBARRIER))
		seq_puts(seq, ",nobarrier");
	if (test_opt(sbi, FASTBOOT))
		seq_puts(seq, ",fastboot");
	if (test_opt(sbi, EXTENT_CACHE))
		seq_puts(seq, ",extent_cache");
	else
		seq_puts(seq, ",noextent_cache");
	if (test_opt(sbi, DATA_FLUSH))
		seq_puts(seq, ",data_flush");

	seq_puts(seq, ",mode=");
	if (test_opt(sbi, ADAPTIVE))
		seq_puts(seq, "adaptive");
	else if (test_opt(sbi, LFS))
		seq_puts(seq, "lfs");
	seq_printf(seq, ",active_logs=%u", sbi->active_logs);
	if (F2FS_IO_SIZE_BITS(sbi))
		seq_printf(seq, ",io_size=%uKB", F2FS_IO_SIZE_KB(sbi));
#ifdef CONFIG_F2FS_FAULT_INJECTION
	if (test_opt(sbi, FAULT_INJECTION))
		seq_printf(seq, ",fault_injection=%u",
				sbi->fault_info.inject_rate);
#endif
#ifdef CONFIG_QUOTA
	if (test_opt(sbi, USRQUOTA))
		seq_puts(seq, ",usrquota");
	if (test_opt(sbi, GRPQUOTA))
		seq_puts(seq, ",grpquota");
#endif

	return 0;
}

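/*
 * Reset the mount options to their defaults before (re)parsing the
 * user-supplied option string; zoned block devices default to LFS mode
 * with discard enabled.
 */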
static void default_options(struct f2fs_sb_info *sbi)
{
	/* init some FS parameters */
	sbi->active_logs = NR_CURSEG_TYPE;

	set_opt(sbi, BG_GC);
	set_opt(sbi, INLINE_XATTR);
	set_opt(sbi, INLINE_DATA);
	set_opt(sbi, INLINE_DENTRY);
	set_opt(sbi, EXTENT_CACHE);
	set_opt(sbi, NOHEAP);
	sbi->sb->s_flags |= MS_LAZYTIME;
	set_opt(sbi, FLUSH_MERGE);
	if (f2fs_sb_mounted_blkzoned(sbi->sb)) {
		set_opt_mode(sbi, F2FS_MOUNT_LFS);
		set_opt(sbi, DISCARD);
	} else {
		set_opt_mode(sbi, F2FS_MOUNT_ADAPTIVE);
	}

#ifdef CONFIG_F2FS_FS_XATTR
	set_opt(sbi, XATTR_USER);
#endif
#ifdef CONFIG_F2FS_FS_POSIX_ACL
	set_opt(sbi, POSIX_ACL);
#endif

#ifdef CONFIG_F2FS_FAULT_INJECTION
	f2fs_build_fault_attr(sbi, 0);
#endif
}

static int f2fs_remount(struct super_block *sb, int *flags, char *data)
{
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	struct f2fs_mount_info org_mount_opt;
	unsigned long old_sb_flags;
	int err, active_logs;
	bool need_restart_gc = false;
	bool need_stop_gc = false;
	bool no_extent_cache = !test_opt(sbi, EXTENT_CACHE);
#ifdef CONFIG_F2FS_FAULT_INJECTION
	struct f2fs_fault_info ffi = sbi->fault_info;
#endif

	/*
	 * Save the old mount options in case we
	 * need to restore them.
	 */
	org_mount_opt = sbi->mount_opt;
	old_sb_flags = sb->s_flags;
	active_logs = sbi->active_logs;

	/* recover superblocks we couldn't write due to previous RO mount */
	if (!(*flags & MS_RDONLY) && is_sbi_flag_set(sbi, SBI_NEED_SB_WRITE)) {
		err = f2fs_commit_super(sbi, false);
		f2fs_msg(sb, KERN_INFO,
			"Try to recover all the superblocks, ret: %d", err);
		if (!err)
			clear_sbi_flag(sbi, SBI_NEED_SB_WRITE);
	}

	default_options(sbi);

	/* parse mount options */
	err = parse_options(sb, data);
	if (err)
		goto restore_opts;

	/*
	 * Previous and new state of filesystem is RO,
	 * so skip checking GC and FLUSH_MERGE conditions.
	 */
	if (f2fs_readonly(sb) && (*flags & MS_RDONLY))
		goto skip;

	if (!f2fs_readonly(sb) && (*flags & MS_RDONLY)) {
		err = dquot_suspend(sb, -1);
		if (err < 0)
			goto restore_opts;
	} else {
		/* dquot_resume needs RW */
		sb->s_flags &= ~MS_RDONLY;
		dquot_resume(sb, -1);
	}

	/* disallow enable/disable extent_cache dynamically */
	if (no_extent_cache == !!test_opt(sbi, EXTENT_CACHE)) {
		err = -EINVAL;
		f2fs_msg(sbi->sb, KERN_WARNING,
				"switch extent_cache option is not allowed");
		goto restore_opts;
	}

	/*
	 * We stop the GC thread if FS is mounted as RO
	 * or if background_gc = off is passed in mount
	 * option. Also sync the filesystem.
	 */
	if ((*flags & MS_RDONLY) || !test_opt(sbi, BG_GC)) {
		if (sbi->gc_thread) {
			stop_gc_thread(sbi);
			need_restart_gc = true;
		}
	} else if (!sbi->gc_thread) {
		err = start_gc_thread(sbi);
		if (err)
			goto restore_opts;
		need_stop_gc = true;
	}

	if (*flags & MS_RDONLY) {
		writeback_inodes_sb(sb, WB_REASON_SYNC);
		sync_inodes_sb(sb);

		set_sbi_flag(sbi, SBI_IS_DIRTY);
		set_sbi_flag(sbi, SBI_IS_CLOSE);
		f2fs_sync_fs(sb, 1);
		clear_sbi_flag(sbi, SBI_IS_CLOSE);
	}

	/*
	 * We stop the issue_flush thread if FS is mounted as RO
	 * or if flush_merge is not passed in mount option.
	 */
	if ((*flags & MS_RDONLY) || !test_opt(sbi, FLUSH_MERGE)) {
		clear_opt(sbi, FLUSH_MERGE);
		destroy_flush_cmd_control(sbi, false);
	} else {
		err = create_flush_cmd_control(sbi);
		if (err)
			goto restore_gc;
	}
skip:
	/* Update the POSIXACL Flag */
	sb->s_flags = (sb->s_flags & ~MS_POSIXACL) |
		(test_opt(sbi, POSIX_ACL) ? MS_POSIXACL : 0);

	return 0;
restore_gc:
	if (need_restart_gc) {
		if (start_gc_thread(sbi))
			f2fs_msg(sbi->sb, KERN_WARNING,
				"background gc thread has stopped");
	} else if (need_stop_gc) {
		stop_gc_thread(sbi);
	}
restore_opts:
	sbi->mount_opt = org_mount_opt;
	sbi->active_logs = active_logs;
	sb->s_flags = old_sb_flags;
#ifdef CONFIG_F2FS_FAULT_INJECTION
	sbi->fault_info = ffi;
#endif
	return err;
}

#ifdef CONFIG_QUOTA
/* Read data from quotafile */
static ssize_t f2fs_quota_read(struct super_block *sb, int type, char *data,
			       size_t len, loff_t off)
{
	struct inode *inode = sb_dqopt(sb)->files[type];
	struct address_space *mapping = inode->i_mapping;
	block_t blkidx = F2FS_BYTES_TO_BLK(off);
	int offset = off & (sb->s_blocksize - 1);
	int tocopy;
	size_t toread;
	loff_t i_size = i_size_read(inode);
	struct page *page;
	char *kaddr;

	if (off > i_size)
		return 0;

	if (off + len > i_size)
		len = i_size - off;
	toread = len;
	while (toread > 0) {
		tocopy = min_t(unsigned long, sb->s_blocksize - offset, toread);
repeat:
		page = read_mapping_page(mapping, blkidx, NULL);
		if (IS_ERR(page))
			return PTR_ERR(page);

		lock_page(page);

		if (unlikely(page->mapping != mapping)) {
			f2fs_put_page(page, 1);
			goto repeat;
		}
		if (unlikely(!PageUptodate(page))) {
			f2fs_put_page(page, 1);
			return -EIO;
		}

		kaddr = kmap_atomic(page);
		memcpy(data, kaddr + offset, tocopy);
		kunmap_atomic(kaddr);
		f2fs_put_page(page, 1);

		offset = 0;
		toread -= tocopy;
		data += tocopy;
		blkidx++;
	}
	return len;
}

/* Write to quotafile */
static ssize_t f2fs_quota_write(struct super_block *sb, int type,
				const char *data, size_t len, loff_t off)
{
	struct inode *inode = sb_dqopt(sb)->files[type];
	struct address_space *mapping = inode->i_mapping;
	const struct address_space_operations *a_ops = mapping->a_ops;
	int offset = off & (sb->s_blocksize - 1);
	size_t towrite = len;
	struct page *page;
	char *kaddr;
	int err = 0;
	int tocopy;

	while (towrite > 0) {
		tocopy = min_t(unsigned long, sb->s_blocksize - offset,
								towrite);

		err = a_ops->write_begin(NULL, mapping, off, tocopy, 0,
							&page, NULL);
		if (unlikely(err))
			break;

		kaddr = kmap_atomic(page);
		memcpy(kaddr + offset, data, tocopy);
		kunmap_atomic(kaddr);
		flush_dcache_page(page);

		a_ops->write_end(NULL, mapping, off, tocopy, tocopy,
						page, NULL);
		offset = 0;
		towrite -= tocopy;
		off += tocopy;
		data += tocopy;
		cond_resched();
	}

	if (len == towrite)
		return err;
	inode->i_version++;
	inode->i_mtime = inode->i_ctime = current_time(inode);
	f2fs_mark_inode_dirty_sync(inode, false);
	return len - towrite;
}

static struct dquot **f2fs_get_dquots(struct inode *inode)
{
	return F2FS_I(inode)->i_dquot;
}

static qsize_t *f2fs_get_reserved_space(struct inode *inode)
{
	return &F2FS_I(inode)->i_reserved_quota;
}

static int f2fs_quota_sync(struct super_block *sb, int type)
{
	struct quota_info *dqopt = sb_dqopt(sb);
	int cnt;
	int ret;

	ret = dquot_writeback_dquots(sb, type);
	if (ret)
		return ret;

	/*
	 * Now when everything is written we can discard the pagecache so
	 * that userspace sees the changes.
	 */
	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		if (type != -1 && cnt != type)
			continue;
		if (!sb_has_quota_active(sb, cnt))
			continue;

		ret = filemap_write_and_wait(dqopt->files[cnt]->i_mapping);
		if (ret)
			return ret;

		inode_lock(dqopt->files[cnt]);
		truncate_inode_pages(&dqopt->files[cnt]->i_data, 0);
		inode_unlock(dqopt->files[cnt]);
	}
	return 0;
}

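/*
 * Enable quotas on the file at @path: flush pending quota updates, turn
 * quota accounting on via dquot_quota_on(), and mark the quota file
 * NOATIME and IMMUTABLE so it cannot be modified directly from userspace.
 */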
static int f2fs_quota_on(struct super_block *sb, int type, int format_id,
							const struct path *path)
{
	struct inode *inode;
	int err;

	err = f2fs_quota_sync(sb, -1);
	if (err)
		return err;

	err = dquot_quota_on(sb, type, format_id, path);
	if (err)
		return err;

	inode = d_inode(path->dentry);

	inode_lock(inode);
	F2FS_I(inode)->i_flags |= FS_NOATIME_FL | FS_IMMUTABLE_FL;
	inode_set_flags(inode, S_NOATIME | S_IMMUTABLE,
					S_NOATIME | S_IMMUTABLE);
	inode_unlock(inode);
	f2fs_mark_inode_dirty_sync(inode, false);

	return 0;
}

static int f2fs_quota_off(struct super_block *sb, int type)
{
	struct inode *inode = sb_dqopt(sb)->files[type];
	int err;

	if (!inode || !igrab(inode))
		return dquot_quota_off(sb, type);

	f2fs_quota_sync(sb, -1);

	err = dquot_quota_off(sb, type);
	if (err)
		goto out_put;

	inode_lock(inode);
	F2FS_I(inode)->i_flags &= ~(FS_NOATIME_FL | FS_IMMUTABLE_FL);
	inode_set_flags(inode, 0, S_NOATIME | S_IMMUTABLE);
	inode_unlock(inode);
	f2fs_mark_inode_dirty_sync(inode, false);
out_put:
	iput(inode);
	return err;
}

static void f2fs_quota_off_umount(struct super_block *sb)
{
	int type;

	for (type = 0; type < MAXQUOTAS; type++)
		f2fs_quota_off(sb, type);
}

static const struct dquot_operations f2fs_quota_operations = {
	.get_reserved_space = f2fs_get_reserved_space,
	.write_dquot = dquot_commit,
	.acquire_dquot = dquot_acquire,
	.release_dquot = dquot_release,
	.mark_dirty = dquot_mark_dquot_dirty,
	.write_info = dquot_commit_info,
	.alloc_dquot = dquot_alloc,
	.destroy_dquot = dquot_destroy,
	.get_next_id = dquot_get_next_id,
};

static const struct quotactl_ops f2fs_quotactl_ops = {
	.quota_on = f2fs_quota_on,
	.quota_off = f2fs_quota_off,
	.quota_sync = f2fs_quota_sync,
	.get_state = dquot_get_state,
	.set_info = dquot_set_dqinfo,
	.get_dqblk = dquot_get_dqblk,
	.set_dqblk = dquot_set_dqblk,
	.get_nextdqblk = dquot_get_next_dqblk,
};
#else
static inline void f2fs_quota_off_umount(struct super_block *sb)
{
}
#endif

static struct super_operations f2fs_sops = {
	.alloc_inode = f2fs_alloc_inode,
	.drop_inode = f2fs_drop_inode,
	.destroy_inode = f2fs_destroy_inode,
	.write_inode = f2fs_write_inode,
	.dirty_inode = f2fs_dirty_inode,
	.show_options = f2fs_show_options,
#ifdef CONFIG_QUOTA
	.quota_read = f2fs_quota_read,
	.quota_write = f2fs_quota_write,
	.get_dquots = f2fs_get_dquots,
#endif
	.evict_inode = f2fs_evict_inode,
	.put_super = f2fs_put_super,
	.sync_fs = f2fs_sync_fs,
	.freeze_fs = f2fs_freeze,
	.unfreeze_fs = f2fs_unfreeze,
	.statfs = f2fs_statfs,
	.remount_fs = f2fs_remount,
};

#ifdef CONFIG_F2FS_FS_ENCRYPTION
static int f2fs_get_context(struct inode *inode, void *ctx, size_t len)
{
	return f2fs_getxattr(inode, F2FS_XATTR_INDEX_ENCRYPTION,
				F2FS_XATTR_NAME_ENCRYPTION_CONTEXT,
				ctx, len, NULL);
}

static int f2fs_set_context(struct inode *inode, const void *ctx, size_t len,
							void *fs_data)
{
	return f2fs_setxattr(inode, F2FS_XATTR_INDEX_ENCRYPTION,
				F2FS_XATTR_NAME_ENCRYPTION_CONTEXT,
				ctx, len, fs_data, XATTR_CREATE);
}

static unsigned f2fs_max_namelen(struct inode *inode)
{
	return S_ISLNK(inode->i_mode) ?
			inode->i_sb->s_blocksize : F2FS_NAME_LEN;
}

static const struct fscrypt_operations f2fs_cryptops = {
	.key_prefix = "f2fs:",
	.get_context = f2fs_get_context,
	.set_context = f2fs_set_context,
	.is_encrypted = f2fs_encrypted_inode,
	.empty_dir = f2fs_empty_dir,
	.max_namelen = f2fs_max_namelen,
};
#else
static const struct fscrypt_operations f2fs_cryptops = {
	.is_encrypted = f2fs_encrypted_inode,
};
#endif

static struct inode *f2fs_nfs_get_inode(struct super_block *sb,
		u64 ino, u32 generation)
{
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	struct inode *inode;

	if (check_nid_range(sbi, ino))
		return ERR_PTR(-ESTALE);

	/*
	 * f2fs_iget isn't quite right if the inode is currently unallocated!
	 * However f2fs_iget currently does appropriate checks to handle stale
	 * inodes so everything is OK.
	 */
	inode = f2fs_iget(sb, ino);
	if (IS_ERR(inode))
		return ERR_CAST(inode);
	if (unlikely(generation && inode->i_generation != generation)) {
		/* we didn't find the right inode.. */
		iput(inode);
		return ERR_PTR(-ESTALE);
	}
	return inode;
}

static struct dentry *f2fs_fh_to_dentry(struct super_block *sb, struct fid *fid,
		int fh_len, int fh_type)
{
	return generic_fh_to_dentry(sb, fid, fh_len, fh_type,
						f2fs_nfs_get_inode);
}

static struct dentry *f2fs_fh_to_parent(struct super_block *sb, struct fid *fid,
		int fh_len, int fh_type)
{
	return generic_fh_to_parent(sb, fid, fh_len, fh_type,
						f2fs_nfs_get_inode);
}

static const struct export_operations f2fs_export_ops = {
	.fh_to_dentry = f2fs_fh_to_dentry,
	.fh_to_parent = f2fs_fh_to_parent,
	.get_parent = f2fs_get_parent,
};

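/*
 * Compute the maximum file size, in blocks, reachable through the two
 * direct, two indirect and one double-indirect node blocks of an inode.
 */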
static loff_t max_file_blocks(void)
{
	loff_t result = 0;
	loff_t leaf_count = ADDRS_PER_BLOCK;

	/*
	 * note: previously, result was equal to (DEF_ADDRS_PER_INODE -
	 * F2FS_INLINE_XATTR_ADDRS), but now f2fs tries to reserve more
	 * space in inode.i_addr, so it is safer to reassign
	 * result as zero.
	 */

	/* two direct node blocks */
	result += (leaf_count * 2);

	/* two indirect node blocks */
	leaf_count *= NIDS_PER_BLOCK;
	result += (leaf_count * 2);

	/* one double indirect node block */
	leaf_count *= NIDS_PER_BLOCK;
	result += leaf_count;

	return result;
}

static int __f2fs_commit_super(struct buffer_head *bh,
			struct f2fs_super_block *super)
{
	lock_buffer(bh);
	if (super)
		memcpy(bh->b_data + F2FS_SUPER_OFFSET, super, sizeof(*super));
	set_buffer_uptodate(bh);
	set_buffer_dirty(bh);
	unlock_buffer(bh);

	/* it's rare case, we can do fua all the time */
	return __sync_dirty_buffer(bh, REQ_SYNC | REQ_PREFLUSH | REQ_FUA);
}

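/*
 * Cross-check the CP/SIT/NAT/SSA/MAIN area start addresses and segment
 * counts recorded in the raw superblock; returns true if the layout is
 * inconsistent.  When the MAIN area ends before the last segment,
 * segment_count is fixed up in memory (and written back if the device is
 * writable) instead of the mount being rejected.
 */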
static inline bool sanity_check_area_boundary(struct f2fs_sb_info *sbi,
					struct buffer_head *bh)
{
	struct f2fs_super_block *raw_super = (struct f2fs_super_block *)
					(bh->b_data + F2FS_SUPER_OFFSET);
	struct super_block *sb = sbi->sb;
	u32 segment0_blkaddr = le32_to_cpu(raw_super->segment0_blkaddr);
	u32 cp_blkaddr = le32_to_cpu(raw_super->cp_blkaddr);
	u32 sit_blkaddr = le32_to_cpu(raw_super->sit_blkaddr);
	u32 nat_blkaddr = le32_to_cpu(raw_super->nat_blkaddr);
	u32 ssa_blkaddr = le32_to_cpu(raw_super->ssa_blkaddr);
	u32 main_blkaddr = le32_to_cpu(raw_super->main_blkaddr);
	u32 segment_count_ckpt = le32_to_cpu(raw_super->segment_count_ckpt);
	u32 segment_count_sit = le32_to_cpu(raw_super->segment_count_sit);
	u32 segment_count_nat = le32_to_cpu(raw_super->segment_count_nat);
	u32 segment_count_ssa = le32_to_cpu(raw_super->segment_count_ssa);
	u32 segment_count_main = le32_to_cpu(raw_super->segment_count_main);
	u32 segment_count = le32_to_cpu(raw_super->segment_count);
	u32 log_blocks_per_seg = le32_to_cpu(raw_super->log_blocks_per_seg);
	u64 main_end_blkaddr = main_blkaddr +
				(segment_count_main << log_blocks_per_seg);
	u64 seg_end_blkaddr = segment0_blkaddr +
				(segment_count << log_blocks_per_seg);

	if (segment0_blkaddr != cp_blkaddr) {
		f2fs_msg(sb, KERN_INFO,
			"Mismatch start address, segment0(%u) cp_blkaddr(%u)",
			segment0_blkaddr, cp_blkaddr);
		return true;
	}

	if (cp_blkaddr + (segment_count_ckpt << log_blocks_per_seg) !=
							sit_blkaddr) {
		f2fs_msg(sb, KERN_INFO,
			"Wrong CP boundary, start(%u) end(%u) blocks(%u)",
			cp_blkaddr, sit_blkaddr,
			segment_count_ckpt << log_blocks_per_seg);
		return true;
	}

	if (sit_blkaddr + (segment_count_sit << log_blocks_per_seg) !=
							nat_blkaddr) {
		f2fs_msg(sb, KERN_INFO,
			"Wrong SIT boundary, start(%u) end(%u) blocks(%u)",
			sit_blkaddr, nat_blkaddr,
			segment_count_sit << log_blocks_per_seg);
		return true;
	}

	if (nat_blkaddr + (segment_count_nat << log_blocks_per_seg) !=
							ssa_blkaddr) {
		f2fs_msg(sb, KERN_INFO,
			"Wrong NAT boundary, start(%u) end(%u) blocks(%u)",
			nat_blkaddr, ssa_blkaddr,
			segment_count_nat << log_blocks_per_seg);
		return true;
	}

	if (ssa_blkaddr + (segment_count_ssa << log_blocks_per_seg) !=
							main_blkaddr) {
		f2fs_msg(sb, KERN_INFO,
			"Wrong SSA boundary, start(%u) end(%u) blocks(%u)",
			ssa_blkaddr, main_blkaddr,
			segment_count_ssa << log_blocks_per_seg);
		return true;
	}

	if (main_end_blkaddr > seg_end_blkaddr) {
		f2fs_msg(sb, KERN_INFO,
			"Wrong MAIN_AREA boundary, start(%u) end(%u) block(%u)",
			main_blkaddr,
			segment0_blkaddr +
				(segment_count << log_blocks_per_seg),
			segment_count_main << log_blocks_per_seg);
		return true;
	} else if (main_end_blkaddr < seg_end_blkaddr) {
		int err = 0;
		char *res;

		/* fix in-memory information all the time */
		raw_super->segment_count = cpu_to_le32((main_end_blkaddr -
				segment0_blkaddr) >> log_blocks_per_seg);

		if (f2fs_readonly(sb) || bdev_read_only(sb->s_bdev)) {
			set_sbi_flag(sbi, SBI_NEED_SB_WRITE);
			res = "internally";
		} else {
			err = __f2fs_commit_super(bh, NULL);
			res = err ? "failed" : "done";
		}
		f2fs_msg(sb, KERN_INFO,
			"Fix alignment : %s, start(%u) end(%u) block(%u)",
			res, main_blkaddr,
			segment0_blkaddr +
				(segment_count << log_blocks_per_seg),
			segment_count_main << log_blocks_per_seg);
		if (err)
			return true;
	}
	return false;
}

static int sanity_check_raw_super(struct f2fs_sb_info *sbi,
				struct buffer_head *bh)
{
	struct f2fs_super_block *raw_super = (struct f2fs_super_block *)
					(bh->b_data + F2FS_SUPER_OFFSET);
	struct super_block *sb = sbi->sb;
	unsigned int blocksize;

	if (F2FS_SUPER_MAGIC != le32_to_cpu(raw_super->magic)) {
		f2fs_msg(sb, KERN_INFO,
			"Magic Mismatch, valid(0x%x) - read(0x%x)",
			F2FS_SUPER_MAGIC, le32_to_cpu(raw_super->magic));
		return 1;
	}

	/* Currently, support only 4KB page cache size */
	if (F2FS_BLKSIZE != PAGE_SIZE) {
		f2fs_msg(sb, KERN_INFO,
			"Invalid page_cache_size (%lu), supports only 4KB\n",
			PAGE_SIZE);
		return 1;
	}

	/* Currently, support only 4KB block size */
	blocksize = 1 << le32_to_cpu(raw_super->log_blocksize);
	if (blocksize != F2FS_BLKSIZE) {
		f2fs_msg(sb, KERN_INFO,
			"Invalid blocksize (%u), supports only 4KB\n",
			blocksize);
		return 1;
	}

	/* check log blocks per segment */
	if (le32_to_cpu(raw_super->log_blocks_per_seg) != 9) {
		f2fs_msg(sb, KERN_INFO,
			"Invalid log blocks per segment (%u)\n",
			le32_to_cpu(raw_super->log_blocks_per_seg));
		return 1;
	}

	/* Currently, support 512/1024/2048/4096 bytes sector size */
	if (le32_to_cpu(raw_super->log_sectorsize) >
				F2FS_MAX_LOG_SECTOR_SIZE ||
		le32_to_cpu(raw_super->log_sectorsize) <
				F2FS_MIN_LOG_SECTOR_SIZE) {
		f2fs_msg(sb, KERN_INFO, "Invalid log sectorsize (%u)",
			le32_to_cpu(raw_super->log_sectorsize));
		return 1;
	}
	if (le32_to_cpu(raw_super->log_sectors_per_block) +
		le32_to_cpu(raw_super->log_sectorsize) !=
			F2FS_MAX_LOG_SECTOR_SIZE) {
		f2fs_msg(sb, KERN_INFO,
			"Invalid log sectors per block(%u) log sectorsize(%u)",
			le32_to_cpu(raw_super->log_sectors_per_block),
			le32_to_cpu(raw_super->log_sectorsize));
		return 1;
	}

	/* check reserved ino info */
	if (le32_to_cpu(raw_super->node_ino) != 1 ||
		le32_to_cpu(raw_super->meta_ino) != 2 ||
		le32_to_cpu(raw_super->root_ino) != 3) {
		f2fs_msg(sb, KERN_INFO,
			"Invalid Fs Meta Ino: node(%u) meta(%u) root(%u)",
			le32_to_cpu(raw_super->node_ino),
			le32_to_cpu(raw_super->meta_ino),
			le32_to_cpu(raw_super->root_ino));
		return 1;
	}

	if (le32_to_cpu(raw_super->segment_count) > F2FS_MAX_SEGMENT) {
		f2fs_msg(sb, KERN_INFO,
			"Invalid segment count (%u)",
			le32_to_cpu(raw_super->segment_count));
		return 1;
	}

	/* check CP/SIT/NAT/SSA/MAIN_AREA area boundary */
	if (sanity_check_area_boundary(sbi, bh))
		return 1;

	return 0;
}

int sanity_check_ckpt(struct f2fs_sb_info *sbi)
{
	unsigned int total, fsmeta;
	struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi);
	struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
	unsigned int ovp_segments, reserved_segments;
	unsigned int main_segs, blocks_per_seg;
	int i;

	total = le32_to_cpu(raw_super->segment_count);
	fsmeta = le32_to_cpu(raw_super->segment_count_ckpt);
	fsmeta += le32_to_cpu(raw_super->segment_count_sit);
	fsmeta += le32_to_cpu(raw_super->segment_count_nat);
	fsmeta += le32_to_cpu(ckpt->rsvd_segment_count);
	fsmeta += le32_to_cpu(raw_super->segment_count_ssa);

	if (unlikely(fsmeta >= total))
		return 1;

	ovp_segments = le32_to_cpu(ckpt->overprov_segment_count);
	reserved_segments = le32_to_cpu(ckpt->rsvd_segment_count);

	if (unlikely(fsmeta < F2FS_MIN_SEGMENTS ||
			ovp_segments == 0 || reserved_segments == 0)) {
		f2fs_msg(sbi->sb, KERN_ERR,
			"Wrong layout: check mkfs.f2fs version");
		return 1;
	}

	main_segs = le32_to_cpu(raw_super->segment_count_main);
	blocks_per_seg = sbi->blocks_per_seg;

	for (i = 0; i < NR_CURSEG_NODE_TYPE; i++) {
		if (le32_to_cpu(ckpt->cur_node_segno[i]) >= main_segs ||
			le16_to_cpu(ckpt->cur_node_blkoff[i]) >= blocks_per_seg)
			return 1;
	}
	for (i = 0; i < NR_CURSEG_DATA_TYPE; i++) {
		if (le32_to_cpu(ckpt->cur_data_segno[i]) >= main_segs ||
			le16_to_cpu(ckpt->cur_data_blkoff[i]) >= blocks_per_seg)
			return 1;
	}

	if (unlikely(f2fs_cp_error(sbi))) {
		f2fs_msg(sbi->sb, KERN_ERR, "A bug case: need to run fsck");
		return 1;
	}
	return 0;
}

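/*
 * Cache frequently used geometry (block, segment, section and zone sizes)
 * from the raw superblock into the in-memory sb_info and initialize the
 * per-superblock counters, lists and locks.
 */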
static void init_sb_info(struct f2fs_sb_info *sbi)
{
	struct f2fs_super_block *raw_super = sbi->raw_super;
	int i, j;

	sbi->log_sectors_per_block =
		le32_to_cpu(raw_super->log_sectors_per_block);
	sbi->log_blocksize = le32_to_cpu(raw_super->log_blocksize);
	sbi->blocksize = 1 << sbi->log_blocksize;
	sbi->log_blocks_per_seg = le32_to_cpu(raw_super->log_blocks_per_seg);
	sbi->blocks_per_seg = 1 << sbi->log_blocks_per_seg;
	sbi->segs_per_sec = le32_to_cpu(raw_super->segs_per_sec);
	sbi->secs_per_zone = le32_to_cpu(raw_super->secs_per_zone);
	sbi->total_sections = le32_to_cpu(raw_super->section_count);
	sbi->total_node_count =
		(le32_to_cpu(raw_super->segment_count_nat) / 2)
			* sbi->blocks_per_seg * NAT_ENTRY_PER_BLOCK;
	sbi->root_ino_num = le32_to_cpu(raw_super->root_ino);
	sbi->node_ino_num = le32_to_cpu(raw_super->node_ino);
	sbi->meta_ino_num = le32_to_cpu(raw_super->meta_ino);
	sbi->cur_victim_sec = NULL_SECNO;
	sbi->max_victim_search = DEF_MAX_VICTIM_SEARCH;

	sbi->dir_level = DEF_DIR_LEVEL;
	sbi->interval_time[CP_TIME] = DEF_CP_INTERVAL;
	sbi->interval_time[REQ_TIME] = DEF_IDLE_INTERVAL;
	clear_sbi_flag(sbi, SBI_NEED_FSCK);

	for (i = 0; i < NR_COUNT_TYPE; i++)
		atomic_set(&sbi->nr_pages[i], 0);

	atomic_set(&sbi->wb_sync_req, 0);

	INIT_LIST_HEAD(&sbi->s_list);
	mutex_init(&sbi->umount_mutex);
	for (i = 0; i < NR_PAGE_TYPE - 1; i++)
		for (j = HOT; j < NR_TEMP_TYPE; j++)
			mutex_init(&sbi->wio_mutex[i][j]);
	spin_lock_init(&sbi->cp_lock);
}

static int init_percpu_info(struct f2fs_sb_info *sbi)
{
	int err;

	err = percpu_counter_init(&sbi->alloc_valid_block_count, 0, GFP_KERNEL);
	if (err)
		return err;

	return percpu_counter_init(&sbi->total_valid_inode_count, 0,
								GFP_KERNEL);
}

#ifdef CONFIG_BLK_DEV_ZONED
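/*
 * Query the zone layout of device @devi via blkdev_report_zones() and
 * cache the type of each zone in FDEV(devi).blkz_type.
 */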
static int init_blkz_info(struct f2fs_sb_info *sbi, int devi)
{
	struct block_device *bdev = FDEV(devi).bdev;
	sector_t nr_sectors = bdev->bd_part->nr_sects;
	sector_t sector = 0;
	struct blk_zone *zones;
	unsigned int i, nr_zones;
	unsigned int n = 0;
	int err = -EIO;

	if (!f2fs_sb_mounted_blkzoned(sbi->sb))
		return 0;

	if (sbi->blocks_per_blkz && sbi->blocks_per_blkz !=
				SECTOR_TO_BLOCK(bdev_zone_sectors(bdev)))
		return -EINVAL;
	sbi->blocks_per_blkz = SECTOR_TO_BLOCK(bdev_zone_sectors(bdev));
	if (sbi->log_blocks_per_blkz && sbi->log_blocks_per_blkz !=
				__ilog2_u32(sbi->blocks_per_blkz))
		return -EINVAL;
	sbi->log_blocks_per_blkz = __ilog2_u32(sbi->blocks_per_blkz);
	FDEV(devi).nr_blkz = SECTOR_TO_BLOCK(nr_sectors) >>
					sbi->log_blocks_per_blkz;
	if (nr_sectors & (bdev_zone_sectors(bdev) - 1))
		FDEV(devi).nr_blkz++;

	FDEV(devi).blkz_type = kmalloc(FDEV(devi).nr_blkz, GFP_KERNEL);
	if (!FDEV(devi).blkz_type)
		return -ENOMEM;

#define F2FS_REPORT_NR_ZONES   4096

	zones = kcalloc(F2FS_REPORT_NR_ZONES, sizeof(struct blk_zone),
			GFP_KERNEL);
	if (!zones)
		return -ENOMEM;

	/* Get block zones type */
	while (zones && sector < nr_sectors) {

		nr_zones = F2FS_REPORT_NR_ZONES;
		err = blkdev_report_zones(bdev, sector,
					  zones, &nr_zones,
					  GFP_KERNEL);
		if (err)
			break;
		if (!nr_zones) {
			err = -EIO;
			break;
		}

		for (i = 0; i < nr_zones; i++) {
			FDEV(devi).blkz_type[n] = zones[i].type;
			sector += zones[i].len;
			n++;
		}
	}

	kfree(zones);

	return err;
}
#endif

/*
 * Read f2fs raw super block.
 * Because we have two copies of the super block, read both of them
 * to get the first valid one. If either of them is broken, we pass
 * a recovery flag back to the caller.
 */
static int read_raw_super_block(struct f2fs_sb_info *sbi,
			struct f2fs_super_block **raw_super,
			int *valid_super_block, int *recovery)
{
	struct super_block *sb = sbi->sb;
	int block;
	struct buffer_head *bh;
	struct f2fs_super_block *super;
	int err = 0;

	super = kzalloc(sizeof(struct f2fs_super_block), GFP_KERNEL);
	if (!super)
		return -ENOMEM;

	for (block = 0; block < 2; block++) {
		bh = sb_bread(sb, block);
		if (!bh) {
			f2fs_msg(sb, KERN_ERR, "Unable to read %dth superblock",
				block + 1);
			err = -EIO;
			continue;
		}

		/* sanity checking of raw super */
		if (sanity_check_raw_super(sbi, bh)) {
			f2fs_msg(sb, KERN_ERR,
				"Can't find valid F2FS filesystem in %dth superblock",
				block + 1);
			err = -EINVAL;
			brelse(bh);
			continue;
		}

		if (!*raw_super) {
			memcpy(super, bh->b_data + F2FS_SUPER_OFFSET,
							sizeof(*super));
			*valid_super_block = block;
			*raw_super = super;
		}
		brelse(bh);
	}

	/* Failed to read any one of the superblocks */
	if (err < 0)
		*recovery = 1;

	/* No valid superblock */
	if (!*raw_super)
		kfree(super);
	else
		err = 0;

	return err;
}

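/*
 * Write the in-memory superblock back to disk.  The backup copy is
 * written first and the currently valid copy second; on the recovery
 * path only the backup is rewritten, and a read-only device just gets
 * SBI_NEED_SB_WRITE set and -EROFS returned.
 */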
int f2fs_commit_super(struct f2fs_sb_info *sbi, bool recover)
{
	struct buffer_head *bh;
	int err;

	if ((recover && f2fs_readonly(sbi->sb)) ||
				bdev_read_only(sbi->sb->s_bdev)) {
		set_sbi_flag(sbi, SBI_NEED_SB_WRITE);
		return -EROFS;
	}

	/* write back-up superblock first */
	bh = sb_getblk(sbi->sb, sbi->valid_super_block ? 0 : 1);
	if (!bh)
		return -EIO;
	err = __f2fs_commit_super(bh, F2FS_RAW_SUPER(sbi));
	brelse(bh);

	/* if we are in recovery path, skip writing valid superblock */
	if (recover || err)
		return err;

	/* write current valid superblock */
	bh = sb_getblk(sbi->sb, sbi->valid_super_block);
	if (!bh)
		return -EIO;
	err = __f2fs_commit_super(bh, F2FS_RAW_SUPER(sbi));
	brelse(bh);
	return err;
}

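/*
 * Open the block device(s) backing this image.  A plain single-device
 * image returns early; a single zoned device or a multi-device image gets
 * an sbi->devs[] entry per device with its block range and, for zoned
 * devices, its per-zone information.
 */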
1792static int f2fs_scan_devices(struct f2fs_sb_info *sbi)
1793{
1794 struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi);
7bb3a371 1795 unsigned int max_devices = MAX_DEVICES;
3c62be17
JK
1796 int i;
1797
7bb3a371
MS
1798 /* Initialize single device information */
1799 if (!RDEV(0).path[0]) {
1800 if (!bdev_is_zoned(sbi->sb->s_bdev))
3c62be17 1801 return 0;
7bb3a371
MS
1802 max_devices = 1;
1803 }
3c62be17 1804
7bb3a371
MS
1805 /*
1806 * Initialize multiple devices information, or single
1807 * zoned block device information.
1808 */
1809 sbi->devs = kcalloc(max_devices, sizeof(struct f2fs_dev_info),
1810 GFP_KERNEL);
1811 if (!sbi->devs)
1812 return -ENOMEM;
3c62be17 1813
7bb3a371 1814 for (i = 0; i < max_devices; i++) {
3c62be17 1815
7bb3a371
MS
1816 if (i > 0 && !RDEV(i).path[0])
1817 break;
1818
1819 if (max_devices == 1) {
1820 /* Single zoned block device mount */
1821 FDEV(0).bdev =
1822 blkdev_get_by_dev(sbi->sb->s_bdev->bd_dev,
3c62be17 1823 sbi->sb->s_mode, sbi->sb->s_type);
7bb3a371
MS
1824 } else {
1825 /* Multi-device mount */
1826 memcpy(FDEV(i).path, RDEV(i).path, MAX_PATH_LEN);
1827 FDEV(i).total_segments =
1828 le32_to_cpu(RDEV(i).total_segments);
1829 if (i == 0) {
1830 FDEV(i).start_blk = 0;
1831 FDEV(i).end_blk = FDEV(i).start_blk +
1832 (FDEV(i).total_segments <<
1833 sbi->log_blocks_per_seg) - 1 +
1834 le32_to_cpu(raw_super->segment0_blkaddr);
1835 } else {
1836 FDEV(i).start_blk = FDEV(i - 1).end_blk + 1;
1837 FDEV(i).end_blk = FDEV(i).start_blk +
1838 (FDEV(i).total_segments <<
1839 sbi->log_blocks_per_seg) - 1;
1840 }
1841 FDEV(i).bdev = blkdev_get_by_path(FDEV(i).path,
3c62be17 1842 sbi->sb->s_mode, sbi->sb->s_type);
7bb3a371 1843 }
3c62be17
JK
1844 if (IS_ERR(FDEV(i).bdev))
1845 return PTR_ERR(FDEV(i).bdev);
1846
1847 /* to release errored devices */
1848 sbi->s_ndevs = i + 1;
1849
1850#ifdef CONFIG_BLK_DEV_ZONED
1851 if (bdev_zoned_model(FDEV(i).bdev) == BLK_ZONED_HM &&
1852 !f2fs_sb_mounted_blkzoned(sbi->sb)) {
1853 f2fs_msg(sbi->sb, KERN_ERR,
 1854 				"Zoned block device feature not enabled");
1855 return -EINVAL;
1856 }
1857 if (bdev_zoned_model(FDEV(i).bdev) != BLK_ZONED_NONE) {
1858 if (init_blkz_info(sbi, i)) {
1859 f2fs_msg(sbi->sb, KERN_ERR,
1860 "Failed to initialize F2FS blkzone information");
1861 return -EINVAL;
1862 }
7bb3a371
MS
1863 if (max_devices == 1)
1864 break;
3c62be17
JK
1865 f2fs_msg(sbi->sb, KERN_INFO,
1866 "Mount Device [%2d]: %20s, %8u, %8x - %8x (zone: %s)",
1867 i, FDEV(i).path,
1868 FDEV(i).total_segments,
1869 FDEV(i).start_blk, FDEV(i).end_blk,
1870 bdev_zoned_model(FDEV(i).bdev) == BLK_ZONED_HA ?
1871 "Host-aware" : "Host-managed");
1872 continue;
1873 }
1874#endif
1875 f2fs_msg(sbi->sb, KERN_INFO,
1876 "Mount Device [%2d]: %20s, %8u, %8x - %8x",
1877 i, FDEV(i).path,
1878 FDEV(i).total_segments,
1879 FDEV(i).start_blk, FDEV(i).end_blk);
1880 }
0a595eba
JK
1881 f2fs_msg(sbi->sb, KERN_INFO,
1882 "IO Block Size: %8d KB", F2FS_IO_SIZE_KB(sbi));
3c62be17
JK
1883 return 0;
1884}
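The start_blk/end_blk values computed above are nothing more than cumulative segment counts converted to blocks, with segment0_blkaddr added once for the first device. A worked example with assumed numbers (512 blocks per segment, i.e. log_blocks_per_seg = 9, segment0_blkaddr = 512, and two devices of 1024 and 2048 segments) follows; the values are illustrative, not taken from a real superblock.

/* Illustrative arithmetic only: block ranges for a hypothetical two-device
 * layout.  All input values are assumptions documented above.
 */
#include <stdio.h>

int main(void)
{
	unsigned int log_blocks_per_seg = 9;	/* 512 blocks per segment */
	unsigned int segment0_blkaddr = 512;
	unsigned int segs0 = 1024, segs1 = 2048;

	unsigned int dev0_start = 0;
	unsigned int dev0_end = dev0_start + (segs0 << log_blocks_per_seg) - 1
					+ segment0_blkaddr;	/* 524799 */
	unsigned int dev1_start = dev0_end + 1;			/* 524800 */
	unsigned int dev1_end = dev1_start + (segs1 << log_blocks_per_seg) - 1;

	printf("dev0: blocks %u - %u\n", dev0_start, dev0_end);
	printf("dev1: blocks %u - %u\n", dev1_start, dev1_end);	/* 524800 - 1573375 */
	return 0;
}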
1885
aff063e2
JK
1886static int f2fs_fill_super(struct super_block *sb, void *data, int silent)
1887{
1888 struct f2fs_sb_info *sbi;
da554e48 1889 struct f2fs_super_block *raw_super;
aff063e2 1890 struct inode *root;
99e3e858 1891 int err;
2adc3505 1892 bool retry = true, need_fsck = false;
dabc4a5c 1893 char *options = NULL;
e8240f65 1894 int recovery, i, valid_super_block;
8f1dbbbb 1895 struct curseg_info *seg_i;
aff063e2 1896
ed2e621a 1897try_onemore:
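	/* we come back here at most once (see the retry logic at the end of
	 * this function); need_fsck keeps its value across that retry */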
da554e48 1898 err = -EINVAL;
1899 raw_super = NULL;
e8240f65 1900 valid_super_block = -1;
da554e48 1901 recovery = 0;
1902
aff063e2
JK
1903 /* allocate memory for f2fs-specific super block info */
1904 sbi = kzalloc(sizeof(struct f2fs_sb_info), GFP_KERNEL);
1905 if (!sbi)
1906 return -ENOMEM;
1907
df728b0f
JK
1908 sbi->sb = sb;
1909
43b6573b
KM
1910 /* Load the checksum driver */
1911 sbi->s_chksum_driver = crypto_alloc_shash("crc32", 0, 0);
1912 if (IS_ERR(sbi->s_chksum_driver)) {
1913 f2fs_msg(sb, KERN_ERR, "Cannot load crc32 driver.");
1914 err = PTR_ERR(sbi->s_chksum_driver);
1915 sbi->s_chksum_driver = NULL;
1916 goto free_sbi;
1917 }
1918
ff9234ad 1919 /* set a block size */
6bacf52f 1920 if (unlikely(!sb_set_blocksize(sb, F2FS_BLKSIZE))) {
a07ef784 1921 f2fs_msg(sb, KERN_ERR, "unable to set blocksize");
aff063e2 1922 goto free_sbi;
a07ef784 1923 }
aff063e2 1924
df728b0f 1925 err = read_raw_super_block(sbi, &raw_super, &valid_super_block,
e8240f65 1926 &recovery);
9076a75f
GZ
1927 if (err)
1928 goto free_sbi;
1929
5fb08372 1930 sb->s_fs_info = sbi;
52763a4b
JK
1931 sbi->raw_super = raw_super;
1932
d1b959c8
DLM
1933 /*
1934 * The BLKZONED feature indicates that the drive was formatted with
1935 * zone alignment optimization. This is optional for host-aware
1936 * devices, but mandatory for host-managed zoned block devices.
1937 */
1938#ifndef CONFIG_BLK_DEV_ZONED
1939 if (f2fs_sb_mounted_blkzoned(sb)) {
1940 f2fs_msg(sb, KERN_ERR,
 1941 			"Zoned block device support is not enabled");
1727f317 1942 err = -EOPNOTSUPP;
d1b959c8
DLM
1943 goto free_sb_buf;
1944 }
d1b959c8 1945#endif
498c5e9f 1946 default_options(sbi);
aff063e2 1947 /* parse mount options */
dabc4a5c
JK
1948 options = kstrdup((const char *)data, GFP_KERNEL);
1949 if (data && !options) {
1950 err = -ENOMEM;
aff063e2 1951 goto free_sb_buf;
dabc4a5c
JK
1952 }
1953
1954 err = parse_options(sb, options);
1955 if (err)
1956 goto free_options;
aff063e2 1957
e0afc4d6
CY
1958 sbi->max_file_blocks = max_file_blocks();
1959 sb->s_maxbytes = sbi->max_file_blocks <<
1960 le32_to_cpu(raw_super->log_blocksize);
aff063e2
JK
1961 sb->s_max_links = F2FS_LINK_MAX;
1962 get_random_bytes(&sbi->s_next_generation, sizeof(u32));
1963
0abd675e
CY
1964#ifdef CONFIG_QUOTA
1965 sb->dq_op = &f2fs_quota_operations;
1966 sb->s_qcop = &f2fs_quotactl_ops;
1967 sb->s_quota_types = QTYPE_MASK_USR | QTYPE_MASK_GRP;
1968#endif
1969
aff063e2 1970 sb->s_op = &f2fs_sops;
0b81d077 1971 sb->s_cop = &f2fs_cryptops;
aff063e2
JK
1972 sb->s_xattr = f2fs_xattr_handlers;
1973 sb->s_export_op = &f2fs_export_ops;
1974 sb->s_magic = F2FS_SUPER_MAGIC;
aff063e2
JK
1975 sb->s_time_gran = 1;
1976 sb->s_flags = (sb->s_flags & ~MS_POSIXACL) |
1977 (test_opt(sbi, POSIX_ACL) ? MS_POSIXACL : 0);
85787090 1978 memcpy(&sb->s_uuid, raw_super->uuid, sizeof(raw_super->uuid));
aff063e2
JK
1979
1980 /* init f2fs-specific super block info */
e8240f65 1981 sbi->valid_super_block = valid_super_block;
aff063e2 1982 mutex_init(&sbi->gc_mutex);
aff063e2 1983 mutex_init(&sbi->cp_mutex);
b3582c68 1984 init_rwsem(&sbi->node_write);
59c9081b 1985 init_rwsem(&sbi->node_change);
315df839
JK
1986
1987 /* disallow all the data/node/meta page writes */
1988 set_sbi_flag(sbi, SBI_POR_DOING);
aff063e2 1989 spin_lock_init(&sbi->stat_lock);
971767ca 1990
458e6197 1991 for (i = 0; i < NR_PAGE_TYPE; i++) {
a912b54d
JK
 1992 		int n = (i == META) ? 1 : NR_TEMP_TYPE;
1993 int j;
1994
1995 sbi->write_io[i] = kmalloc(n * sizeof(struct f2fs_bio_info),
1996 GFP_KERNEL);
b63def91
CJ
1997 if (!sbi->write_io[i]) {
1998 err = -ENOMEM;
a912b54d 1999 goto free_options;
b63def91 2000 }
a912b54d
JK
2001
2002 for (j = HOT; j < n; j++) {
2003 init_rwsem(&sbi->write_io[i][j].io_rwsem);
2004 sbi->write_io[i][j].sbi = sbi;
2005 sbi->write_io[i][j].bio = NULL;
fb830fc5
CY
2006 spin_lock_init(&sbi->write_io[i][j].io_lock);
2007 INIT_LIST_HEAD(&sbi->write_io[i][j].io_list);
a912b54d 2008 }
458e6197 2009 }
971767ca 2010
b873b798 2011 init_rwsem(&sbi->cp_rwsem);
fb51b5ef 2012 init_waitqueue_head(&sbi->cp_wait);
aff063e2
JK
2013 init_sb_info(sbi);
2014
523be8a6
JK
2015 err = init_percpu_info(sbi);
2016 if (err)
2017 goto free_options;
2018
0a595eba
JK
2019 if (F2FS_IO_SIZE(sbi) > 1) {
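		/* dummy pages are used to pad partial bios up to the write unit
		 * configured via io_size_bits (F2FS_IO_SIZE blocks) */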
2020 sbi->write_io_dummy =
a3ebfe4f 2021 mempool_create_page_pool(2 * (F2FS_IO_SIZE(sbi) - 1), 0);
1727f317
CY
2022 if (!sbi->write_io_dummy) {
2023 err = -ENOMEM;
0a595eba 2024 goto free_options;
1727f317 2025 }
0a595eba
JK
2026 }
2027
aff063e2
JK
2028 /* get an inode for meta space */
2029 sbi->meta_inode = f2fs_iget(sb, F2FS_META_INO(sbi));
2030 if (IS_ERR(sbi->meta_inode)) {
a07ef784 2031 f2fs_msg(sb, KERN_ERR, "Failed to read F2FS meta data inode");
aff063e2 2032 err = PTR_ERR(sbi->meta_inode);
0a595eba 2033 goto free_io_dummy;
aff063e2
JK
2034 }
2035
2036 err = get_valid_checkpoint(sbi);
a07ef784
NJ
2037 if (err) {
2038 f2fs_msg(sb, KERN_ERR, "Failed to get valid F2FS checkpoint");
aff063e2 2039 goto free_meta_inode;
a07ef784 2040 }
aff063e2 2041
3c62be17
JK
2042 /* Initialize device list */
2043 err = f2fs_scan_devices(sbi);
2044 if (err) {
2045 f2fs_msg(sb, KERN_ERR, "Failed to find devices");
2046 goto free_devices;
2047 }
2048
aff063e2
JK
2049 sbi->total_valid_node_count =
2050 le32_to_cpu(sbi->ckpt->valid_node_count);
513c5f37
JK
2051 percpu_counter_set(&sbi->total_valid_inode_count,
2052 le32_to_cpu(sbi->ckpt->valid_inode_count));
aff063e2
JK
2053 sbi->user_block_count = le64_to_cpu(sbi->ckpt->user_block_count);
2054 sbi->total_valid_block_count =
2055 le64_to_cpu(sbi->ckpt->valid_block_count);
2056 sbi->last_valid_block_count = sbi->total_valid_block_count;
daeb433e 2057 sbi->reserved_blocks = 0;
41382ec4 2058
c227f912
CY
2059 for (i = 0; i < NR_INODE_TYPE; i++) {
2060 INIT_LIST_HEAD(&sbi->inode_list[i]);
2061 spin_lock_init(&sbi->inode_lock[i]);
2062 }
aff063e2 2063
1dcc336b
CY
2064 init_extent_cache_info(sbi);
2065
6451e041 2066 init_ino_entry_info(sbi);
aff063e2
JK
2067
2068 /* setup f2fs internal modules */
2069 err = build_segment_manager(sbi);
a07ef784
NJ
2070 if (err) {
2071 f2fs_msg(sb, KERN_ERR,
2072 "Failed to initialize F2FS segment manager");
aff063e2 2073 goto free_sm;
a07ef784 2074 }
aff063e2 2075 err = build_node_manager(sbi);
a07ef784
NJ
2076 if (err) {
2077 f2fs_msg(sb, KERN_ERR,
2078 "Failed to initialize F2FS node manager");
aff063e2 2079 goto free_nm;
a07ef784 2080 }
aff063e2 2081
8f1dbbbb
SL
2082 /* For write statistics */
2083 if (sb->s_bdev->bd_part)
2084 sbi->sectors_written_start =
2085 (u64)part_stat_read(sb->s_bdev->bd_part, sectors[1]);
2086
2087 /* Read accumulated write IO statistics if exists */
2088 seg_i = CURSEG_I(sbi, CURSEG_HOT_NODE);
2089 if (__exist_node_summaries(sbi))
2090 sbi->kbytes_written =
b2dde6fc 2091 le64_to_cpu(seg_i->journal->info.kbytes_written);
8f1dbbbb 2092
aff063e2
JK
2093 build_gc_manager(sbi);
2094
2095 /* get an inode for node space */
2096 sbi->node_inode = f2fs_iget(sb, F2FS_NODE_INO(sbi));
2097 if (IS_ERR(sbi->node_inode)) {
a07ef784 2098 f2fs_msg(sb, KERN_ERR, "Failed to read node inode");
aff063e2
JK
2099 err = PTR_ERR(sbi->node_inode);
2100 goto free_nm;
2101 }
2102
2658e50d
JK
2103 f2fs_join_shrinker(sbi);
2104
aa51d08a
JK
2105 err = f2fs_build_stats(sbi);
2106 if (err)
2107 goto free_nm;
2108
aff063e2 2109 	/* if there are any orphan nodes, free them */
8c14bfad
CY
2110 err = recover_orphan_inodes(sbi);
2111 if (err)
2112 goto free_node_inode;
aff063e2
JK
2113
2114 /* read root inode and dentry */
2115 root = f2fs_iget(sb, F2FS_ROOT_INO(sbi));
2116 if (IS_ERR(root)) {
a07ef784 2117 f2fs_msg(sb, KERN_ERR, "Failed to read root inode");
aff063e2
JK
2118 err = PTR_ERR(root);
2119 goto free_node_inode;
2120 }
8f99a946 2121 if (!S_ISDIR(root->i_mode) || !root->i_blocks || !root->i_size) {
9d847950 2122 iput(root);
8f99a946 2123 err = -EINVAL;
9d847950 2124 goto free_node_inode;
8f99a946 2125 }
aff063e2
JK
2126
2127 sb->s_root = d_make_root(root); /* allocate root dentry */
2128 if (!sb->s_root) {
2129 err = -ENOMEM;
2130 goto free_root_inode;
2131 }
2132
a398101a 2133 err = f2fs_init_sysfs(sbi);
b59d0bae 2134 if (err)
a398101a 2135 goto free_root_inode;
b59d0bae 2136
6437d1b0
JK
2137 /* recover fsynced data */
2138 if (!test_opt(sbi, DISABLE_ROLL_FORWARD)) {
081d78c2
JK
2139 /*
 2140 		 * mount should fail when the device is read-only and the
 2141 		 * previous checkpoint was not done by a clean system shutdown.
2142 */
2143 if (bdev_read_only(sb->s_bdev) &&
aaec2b1d 2144 !is_set_ckpt_flags(sbi, CP_UMOUNT_FLAG)) {
081d78c2 2145 err = -EROFS;
a398101a 2146 goto free_sysfs;
081d78c2 2147 }
2adc3505
CY
2148
2149 if (need_fsck)
2150 set_sbi_flag(sbi, SBI_NEED_FSCK);
2151
a468f0ef
JK
2152 if (!retry)
2153 goto skip_recovery;
2154
6781eabb
JK
2155 err = recover_fsync_data(sbi, false);
2156 if (err < 0) {
2adc3505 2157 need_fsck = true;
6437d1b0 2158 f2fs_msg(sb, KERN_ERR,
99e3e858 2159 "Cannot recover all fsync data errno=%d", err);
a398101a 2160 goto free_sysfs;
ed2e621a 2161 }
6781eabb
JK
2162 } else {
2163 err = recover_fsync_data(sbi, true);
2164
2165 if (!f2fs_readonly(sb) && err > 0) {
2166 err = -EINVAL;
2167 f2fs_msg(sb, KERN_ERR,
2168 "Need to recover fsync data");
a398101a 2169 goto free_sysfs;
6781eabb 2170 }
6437d1b0 2171 }
a468f0ef 2172skip_recovery:
315df839
JK
2173 /* recover_fsync_data() cleared this already */
2174 clear_sbi_flag(sbi, SBI_POR_DOING);
b59d0bae 2175
6437d1b0
JK
2176 /*
 2177 	 * If the filesystem is not mounted read-only, then
 2178 	 * start the gc_thread.
2179 */
6c029932 2180 if (test_opt(sbi, BG_GC) && !f2fs_readonly(sb)) {
6437d1b0
JK
 2181 		/* After POR, we can run background GC thread. */
2182 err = start_gc_thread(sbi);
2183 if (err)
a398101a 2184 goto free_sysfs;
6437d1b0 2185 }
dabc4a5c 2186 kfree(options);
da554e48 2187
2188 /* recover broken superblock */
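	/* i.e. rewrite the copy that could not be read (or validated) in
	 * read_raw_super_block() from the valid in-memory superblock */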
f2353d7b 2189 if (recovery) {
41214b3c
CY
2190 err = f2fs_commit_super(sbi, true);
2191 f2fs_msg(sb, KERN_INFO,
99e3e858 2192 "Try to recover %dth superblock, ret: %d",
41214b3c 2193 sbi->valid_super_block ? 1 : 2, err);
da554e48 2194 }
2195
1200abb2
JK
2196 f2fs_msg(sbi->sb, KERN_NOTICE, "Mounted with checkpoint version = %llx",
2197 cur_cp_version(F2FS_CKPT(sbi)));
6beceb54 2198 f2fs_update_time(sbi, CP_TIME);
d0239e1b 2199 f2fs_update_time(sbi, REQ_TIME);
aff063e2 2200 return 0;
6437d1b0 2201
a398101a 2202free_sysfs:
0f18b462 2203 f2fs_sync_inode_meta(sbi);
a398101a 2204 f2fs_exit_sysfs(sbi);
aff063e2
JK
2205free_root_inode:
2206 dput(sb->s_root);
2207 sb->s_root = NULL;
2208free_node_inode:
bb5dada7 2209 truncate_inode_pages_final(NODE_MAPPING(sbi));
2658e50d 2210 mutex_lock(&sbi->umount_mutex);
d41065e2 2211 release_ino_entry(sbi, true);
2658e50d 2212 f2fs_leave_shrinker(sbi);
09922800
JK
2213 /*
 2214 	 * Some dirty meta pages can be left behind when recover_orphan_inodes()
 2215 	 * fails with EIO. Then, iput(node_inode) can trigger balance_fs_bg()
2216 * followed by write_checkpoint() through f2fs_write_node_pages(), which
2217 * falls into an infinite loop in sync_meta_pages().
2218 */
2219 truncate_inode_pages_final(META_MAPPING(sbi));
aff063e2 2220 iput(sbi->node_inode);
2658e50d 2221 mutex_unlock(&sbi->umount_mutex);
aa51d08a 2222 f2fs_destroy_stats(sbi);
aff063e2
JK
2223free_nm:
2224 destroy_node_manager(sbi);
2225free_sm:
2226 destroy_segment_manager(sbi);
3c62be17
JK
2227free_devices:
2228 destroy_device_list(sbi);
aff063e2
JK
2229 kfree(sbi->ckpt);
2230free_meta_inode:
2231 make_bad_inode(sbi->meta_inode);
2232 iput(sbi->meta_inode);
0a595eba
JK
2233free_io_dummy:
2234 mempool_destroy(sbi->write_io_dummy);
dabc4a5c 2235free_options:
a912b54d
JK
2236 for (i = 0; i < NR_PAGE_TYPE; i++)
2237 kfree(sbi->write_io[i]);
523be8a6 2238 destroy_percpu_info(sbi);
dabc4a5c 2239 kfree(options);
aff063e2 2240free_sb_buf:
b39f0de2 2241 kfree(raw_super);
aff063e2 2242free_sbi:
43b6573b
KM
2243 if (sbi->s_chksum_driver)
2244 crypto_free_shash(sbi->s_chksum_driver);
aff063e2 2245 kfree(sbi);
ed2e621a
JK
2246
2247 /* give only one another chance */
2248 if (retry) {
9df47ba7 2249 retry = false;
ed2e621a
JK
2250 shrink_dcache_sb(sb);
2251 goto try_onemore;
2252 }
aff063e2
JK
2253 return err;
2254}
2255
2256static struct dentry *f2fs_mount(struct file_system_type *fs_type, int flags,
2257 const char *dev_name, void *data)
2258{
2259 return mount_bdev(fs_type, flags, dev_name, data, f2fs_fill_super);
2260}
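For completeness, a mount request reaches f2fs_fill_super() through mount_bdev(), and the option string passed to mount(2) is what parse_options() later sees as "data". A minimal, hypothetical user-space invocation (device and mount point are placeholders) might look like this:

/* Hypothetical usage example; /dev/sdb1 and /mnt/f2fs are placeholders. */
#include <stdio.h>
#include <sys/mount.h>

int main(void)
{
	if (mount("/dev/sdb1", "/mnt/f2fs", "f2fs", 0,
		  "background_gc=on,discard") < 0) {
		perror("mount");
		return 1;
	}
	return 0;
}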
2261
30a5537f
JK
2262static void kill_f2fs_super(struct super_block *sb)
2263{
cce13252 2264 if (sb->s_root) {
caf0047e 2265 set_sbi_flag(F2FS_SB(sb), SBI_IS_CLOSE);
cce13252
CY
2266 stop_gc_thread(F2FS_SB(sb));
2267 stop_discard_thread(F2FS_SB(sb));
2268 }
30a5537f
JK
2269 kill_block_super(sb);
2270}
2271
aff063e2
JK
2272static struct file_system_type f2fs_fs_type = {
2273 .owner = THIS_MODULE,
2274 .name = "f2fs",
2275 .mount = f2fs_mount,
30a5537f 2276 .kill_sb = kill_f2fs_super,
aff063e2
JK
2277 .fs_flags = FS_REQUIRES_DEV,
2278};
7f78e035 2279MODULE_ALIAS_FS("f2fs");
aff063e2 2280
6e6093a8 2281static int __init init_inodecache(void)
aff063e2 2282{
5d097056
VD
2283 f2fs_inode_cachep = kmem_cache_create("f2fs_inode_cache",
2284 sizeof(struct f2fs_inode_info), 0,
2285 SLAB_RECLAIM_ACCOUNT|SLAB_ACCOUNT, NULL);
6bacf52f 2286 if (!f2fs_inode_cachep)
aff063e2
JK
2287 return -ENOMEM;
2288 return 0;
2289}
2290
2291static void destroy_inodecache(void)
2292{
2293 /*
2294 * Make sure all delayed rcu free inodes are flushed before we
2295 * destroy cache.
2296 */
2297 rcu_barrier();
2298 kmem_cache_destroy(f2fs_inode_cachep);
2299}
2300
2301static int __init init_f2fs_fs(void)
2302{
2303 int err;
2304
c0508650
JK
2305 f2fs_build_trace_ios();
2306
aff063e2
JK
2307 err = init_inodecache();
2308 if (err)
2309 goto fail;
2310 err = create_node_manager_caches();
2311 if (err)
9890ff3f 2312 goto free_inodecache;
7fd9e544 2313 err = create_segment_manager_caches();
aff063e2 2314 if (err)
9890ff3f 2315 goto free_node_manager_caches;
aff063e2
JK
2316 err = create_checkpoint_caches();
2317 if (err)
06292073 2318 goto free_segment_manager_caches;
1dcc336b
CY
2319 err = create_extent_cache();
2320 if (err)
2321 goto free_checkpoint_caches;
a398101a
CY
2322 err = f2fs_register_sysfs();
2323 if (err)
1dcc336b 2324 goto free_extent_cache;
2658e50d 2325 err = register_shrinker(&f2fs_shrinker_info);
cfc4d971 2326 if (err)
a398101a 2327 goto free_sysfs;
2658e50d
JK
2328 err = register_filesystem(&f2fs_fs_type);
2329 if (err)
2330 goto free_shrinker;
787c7b8c
CY
2331 err = f2fs_create_root_stats();
2332 if (err)
2333 goto free_filesystem;
9890ff3f
ZH
2334 return 0;
2335
787c7b8c
CY
2336free_filesystem:
2337 unregister_filesystem(&f2fs_fs_type);
2658e50d
JK
2338free_shrinker:
2339 unregister_shrinker(&f2fs_shrinker_info);
a398101a
CY
2340free_sysfs:
2341 f2fs_unregister_sysfs();
1dcc336b
CY
2342free_extent_cache:
2343 destroy_extent_cache();
9890ff3f
ZH
2344free_checkpoint_caches:
2345 destroy_checkpoint_caches();
7fd9e544
JK
2346free_segment_manager_caches:
2347 destroy_segment_manager_caches();
9890ff3f
ZH
2348free_node_manager_caches:
2349 destroy_node_manager_caches();
2350free_inodecache:
2351 destroy_inodecache();
aff063e2
JK
2352fail:
2353 return err;
2354}
2355
2356static void __exit exit_f2fs_fs(void)
2357{
4589d25d 2358 f2fs_destroy_root_stats();
aff063e2 2359 unregister_filesystem(&f2fs_fs_type);
b8bef79d 2360 unregister_shrinker(&f2fs_shrinker_info);
a398101a 2361 f2fs_unregister_sysfs();
fdf6c8be 2362 destroy_extent_cache();
aff063e2 2363 destroy_checkpoint_caches();
5dcd8a71 2364 destroy_segment_manager_caches();
aff063e2
JK
2365 destroy_node_manager_caches();
2366 destroy_inodecache();
351f4fba 2367 f2fs_destroy_trace_ios();
aff063e2
JK
2368}
2369
2370module_init(init_f2fs_fs)
2371module_exit(exit_f2fs_fs)
2372
2373MODULE_AUTHOR("Samsung Electronics's Praesto Team");
2374MODULE_DESCRIPTION("Flash Friendly File System");
2375MODULE_LICENSE("GPL");
b4b9d34c 2376