// SPDX-License-Identifier: GPL-2.0
/*
 * fs/f2fs/gc.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 */
#include <linux/fs.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/f2fs_fs.h>
#include <linux/kthread.h>
#include <linux/delay.h>
#include <linux/freezer.h>
#include <linux/sched/signal.h>
#include <linux/random.h>
#include <linux/sched/mm.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include "gc.h"
#include "iostat.h"
#include <trace/events/f2fs.h>

static struct kmem_cache *victim_entry_slab;

static unsigned int count_bits(const unsigned long *addr,
				unsigned int offset, unsigned int len);

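/*
 * Background GC thread: sleeps for an adaptive interval, skips work while
 * the filesystem is read-only, frozen or busy, and otherwise takes gc_lock
 * and calls f2fs_gc().  When the GC_MERGE mount option is set it also
 * services foreground GC waiters queued on fggc_wq by f2fs_balance_fs().
 */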
static int gc_thread_func(void *data)
{
	struct f2fs_sb_info *sbi = data;
	struct f2fs_gc_kthread *gc_th = sbi->gc_thread;
	wait_queue_head_t *wq = &sbi->gc_thread->gc_wait_queue_head;
	wait_queue_head_t *fggc_wq = &sbi->gc_thread->fggc_wq;
	unsigned int wait_ms;
	struct f2fs_gc_control gc_control = {
		.victim_segno = NULL_SEGNO,
		.should_migrate_blocks = false,
		.err_gc_skipped = false };

	wait_ms = gc_th->min_sleep_time;

	set_freezable();
	do {
		bool sync_mode, foreground = false;

		wait_event_freezable_timeout(*wq,
				kthread_should_stop() ||
				waitqueue_active(fggc_wq) ||
				gc_th->gc_wake,
				msecs_to_jiffies(wait_ms));

		if (test_opt(sbi, GC_MERGE) && waitqueue_active(fggc_wq))
			foreground = true;

		/* give it a try one time */
		if (gc_th->gc_wake)
			gc_th->gc_wake = false;

		if (f2fs_readonly(sbi->sb)) {
			stat_other_skip_bggc_count(sbi);
			continue;
		}
		if (kthread_should_stop())
			break;

		if (sbi->sb->s_writers.frozen >= SB_FREEZE_WRITE) {
			increase_sleep_time(gc_th, &wait_ms);
			stat_other_skip_bggc_count(sbi);
			continue;
		}

		if (time_to_inject(sbi, FAULT_CHECKPOINT))
			f2fs_stop_checkpoint(sbi, false,
					STOP_CP_REASON_FAULT_INJECT);

		if (!sb_start_write_trylock(sbi->sb)) {
			stat_other_skip_bggc_count(sbi);
			continue;
		}

		gc_control.one_time = false;

		/*
		 * [GC triggering condition]
		 * 0. GC is not conducted currently.
		 * 1. There are enough dirty segments.
		 * 2. IO subsystem is idle by checking the # of writeback pages.
		 * 3. IO subsystem is idle by checking the # of requests in
		 *    bdev's request list.
		 *
		 * Note) We have to avoid triggering GCs frequently.
		 * Because it is possible that some segments can be
		 * invalidated soon after by user update or deletion.
		 * So, I'd like to wait some time to collect dirty segments.
		 */
		if (sbi->gc_mode == GC_URGENT_HIGH ||
				sbi->gc_mode == GC_URGENT_MID) {
			wait_ms = gc_th->urgent_sleep_time;
			f2fs_down_write(&sbi->gc_lock);
			goto do_gc;
		}

		if (foreground) {
			f2fs_down_write(&sbi->gc_lock);
			goto do_gc;
		} else if (!f2fs_down_write_trylock(&sbi->gc_lock)) {
			stat_other_skip_bggc_count(sbi);
			goto next;
		}

		if (!is_idle(sbi, GC_TIME)) {
			increase_sleep_time(gc_th, &wait_ms);
			f2fs_up_write(&sbi->gc_lock);
			stat_io_skip_bggc_count(sbi);
			goto next;
		}

		if (f2fs_sb_has_blkzoned(sbi)) {
			if (has_enough_free_blocks(sbi,
					gc_th->no_zoned_gc_percent)) {
				wait_ms = gc_th->no_gc_sleep_time;
				f2fs_up_write(&sbi->gc_lock);
				goto next;
			}
			if (wait_ms == gc_th->no_gc_sleep_time)
				wait_ms = gc_th->max_sleep_time;
		}

		if (need_to_boost_gc(sbi)) {
			decrease_sleep_time(gc_th, &wait_ms);
			if (f2fs_sb_has_blkzoned(sbi))
				gc_control.one_time = true;
		} else {
			increase_sleep_time(gc_th, &wait_ms);
		}
do_gc:
		stat_inc_gc_call_count(sbi, foreground ?
					FOREGROUND : BACKGROUND);

		sync_mode = (F2FS_OPTION(sbi).bggc_mode == BGGC_MODE_SYNC) ||
				gc_control.one_time;

		/* foreground GC was triggered via f2fs_balance_fs() */
		if (foreground)
			sync_mode = false;

		gc_control.init_gc_type = sync_mode ? FG_GC : BG_GC;
		gc_control.no_bg_gc = foreground;
		gc_control.nr_free_secs = foreground ? 1 : 0;

		/* if return value is not zero, no victim was selected */
		if (f2fs_gc(sbi, &gc_control)) {
			/* don't bother wait_ms by foreground gc */
			if (!foreground)
				wait_ms = gc_th->no_gc_sleep_time;
		} else {
			/* reset wait_ms to default sleep time */
			if (wait_ms == gc_th->no_gc_sleep_time)
				wait_ms = gc_th->min_sleep_time;
		}

		if (foreground)
			wake_up_all(&gc_th->fggc_wq);

		trace_f2fs_background_gc(sbi->sb, wait_ms,
				prefree_segments(sbi), free_segments(sbi));

		/* balancing f2fs's metadata periodically */
		f2fs_balance_fs_bg(sbi, true);
next:
		if (sbi->gc_mode != GC_NORMAL) {
			spin_lock(&sbi->gc_remaining_trials_lock);
			if (sbi->gc_remaining_trials) {
				sbi->gc_remaining_trials--;
				if (!sbi->gc_remaining_trials)
					sbi->gc_mode = GC_NORMAL;
			}
			spin_unlock(&sbi->gc_remaining_trials_lock);
		}
		sb_end_write(sbi->sb);

	} while (!kthread_should_stop());
	return 0;
}

int f2fs_start_gc_thread(struct f2fs_sb_info *sbi)
{
	struct f2fs_gc_kthread *gc_th;
	dev_t dev = sbi->sb->s_bdev->bd_dev;

	gc_th = f2fs_kmalloc(sbi, sizeof(struct f2fs_gc_kthread), GFP_KERNEL);
	if (!gc_th)
		return -ENOMEM;

	gc_th->urgent_sleep_time = DEF_GC_THREAD_URGENT_SLEEP_TIME;
	gc_th->valid_thresh_ratio = DEF_GC_THREAD_VALID_THRESH_RATIO;

	if (f2fs_sb_has_blkzoned(sbi)) {
		gc_th->min_sleep_time = DEF_GC_THREAD_MIN_SLEEP_TIME_ZONED;
		gc_th->max_sleep_time = DEF_GC_THREAD_MAX_SLEEP_TIME_ZONED;
		gc_th->no_gc_sleep_time = DEF_GC_THREAD_NOGC_SLEEP_TIME_ZONED;
		gc_th->no_zoned_gc_percent = LIMIT_NO_ZONED_GC;
		gc_th->boost_zoned_gc_percent = LIMIT_BOOST_ZONED_GC;
	} else {
		gc_th->min_sleep_time = DEF_GC_THREAD_MIN_SLEEP_TIME;
		gc_th->max_sleep_time = DEF_GC_THREAD_MAX_SLEEP_TIME;
		gc_th->no_gc_sleep_time = DEF_GC_THREAD_NOGC_SLEEP_TIME;
		gc_th->no_zoned_gc_percent = 0;
		gc_th->boost_zoned_gc_percent = 0;
	}

	gc_th->gc_wake = false;

	sbi->gc_thread = gc_th;
	init_waitqueue_head(&sbi->gc_thread->gc_wait_queue_head);
	init_waitqueue_head(&sbi->gc_thread->fggc_wq);
	sbi->gc_thread->f2fs_gc_task = kthread_run(gc_thread_func, sbi,
			"f2fs_gc-%u:%u", MAJOR(dev), MINOR(dev));
	if (IS_ERR(gc_th->f2fs_gc_task)) {
		int err = PTR_ERR(gc_th->f2fs_gc_task);

		kfree(gc_th);
		sbi->gc_thread = NULL;
		return err;
	}

	return 0;
}

void f2fs_stop_gc_thread(struct f2fs_sb_info *sbi)
{
	struct f2fs_gc_kthread *gc_th = sbi->gc_thread;

	if (!gc_th)
		return;
	kthread_stop(gc_th->f2fs_gc_task);
	wake_up_all(&gc_th->fggc_wq);
	kfree(gc_th);
	sbi->gc_thread = NULL;
}

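/*
 * Pick the victim selection algorithm for this GC pass: background GC
 * prefers age-threshold (if ATGC is enabled) or cost-benefit, foreground
 * GC uses greedy; an explicitly configured gc_mode overrides that choice.
 */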
static int select_gc_type(struct f2fs_sb_info *sbi, int gc_type)
{
	int gc_mode;

	if (gc_type == BG_GC) {
		if (sbi->am.atgc_enabled)
			gc_mode = GC_AT;
		else
			gc_mode = GC_CB;
	} else {
		gc_mode = GC_GREEDY;
	}

	switch (sbi->gc_mode) {
	case GC_IDLE_CB:
	case GC_URGENT_LOW:
	case GC_URGENT_MID:
		gc_mode = GC_CB;
		break;
	case GC_IDLE_GREEDY:
	case GC_URGENT_HIGH:
		gc_mode = GC_GREEDY;
		break;
	case GC_IDLE_AT:
		gc_mode = GC_AT;
		break;
	}

	return gc_mode;
}

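/*
 * Fill in the victim selection policy: which dirty bitmap to scan, how many
 * candidates to examine (max_search), the cost unit (one segment for
 * SSR/AT_SSR, one section for LFS cleaning), and the scan start offset.
 */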
static void select_policy(struct f2fs_sb_info *sbi, int gc_type,
			int type, struct victim_sel_policy *p)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);

	if (p->alloc_mode == SSR) {
		p->gc_mode = GC_GREEDY;
		p->dirty_bitmap = dirty_i->dirty_segmap[type];
		p->max_search = dirty_i->nr_dirty[type];
		p->ofs_unit = 1;
	} else if (p->alloc_mode == AT_SSR) {
		p->gc_mode = GC_GREEDY;
		p->dirty_bitmap = dirty_i->dirty_segmap[type];
		p->max_search = dirty_i->nr_dirty[type];
		p->ofs_unit = 1;
	} else {
		p->gc_mode = select_gc_type(sbi, gc_type);
		p->ofs_unit = SEGS_PER_SEC(sbi);
		if (__is_large_section(sbi)) {
			p->dirty_bitmap = dirty_i->dirty_secmap;
			p->max_search = count_bits(p->dirty_bitmap,
						0, MAIN_SECS(sbi));
		} else {
			p->dirty_bitmap = dirty_i->dirty_segmap[DIRTY];
			p->max_search = dirty_i->nr_dirty[DIRTY];
		}
	}

	/*
	 * adjust candidates range, should select all dirty segments for
	 * foreground GC and urgent GC cases.
	 */
	if (gc_type != FG_GC &&
			(sbi->gc_mode != GC_URGENT_HIGH) &&
			(p->gc_mode != GC_AT && p->alloc_mode != AT_SSR) &&
			p->max_search > sbi->max_victim_search)
		p->max_search = sbi->max_victim_search;

	/* let's select beginning hot/small space first. */
	if (f2fs_need_rand_seg(sbi))
		p->offset = get_random_u32_below(MAIN_SECS(sbi) *
						SEGS_PER_SEC(sbi));
	else if (type == CURSEG_HOT_DATA || IS_NODESEG(type))
		p->offset = 0;
	else
		p->offset = SIT_I(sbi)->last_victim[p->gc_mode];
}

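/*
 * Upper bound used to seed p->min_cost: one segment's worth of blocks for
 * SSR, twice the victim unit for greedy LFS cleaning, and UINT_MAX for the
 * cost-benefit and age-threshold modes.
 */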
static unsigned int get_max_cost(struct f2fs_sb_info *sbi,
				struct victim_sel_policy *p)
{
	/* SSR allocates in a segment unit */
	if (p->alloc_mode == SSR)
		return BLKS_PER_SEG(sbi);
	else if (p->alloc_mode == AT_SSR)
		return UINT_MAX;

	/* LFS */
	if (p->gc_mode == GC_GREEDY)
		return SEGS_TO_BLKS(sbi, 2 * p->ofs_unit);
	else if (p->gc_mode == GC_CB)
		return UINT_MAX;
	else if (p->gc_mode == GC_AT)
		return UINT_MAX;
	else /* No other gc_mode */
		return 0;
}

static unsigned int check_bg_victims(struct f2fs_sb_info *sbi)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	unsigned int secno;

	/*
	 * If the gc_type is FG_GC, we can select victim segments
	 * selected by background GC before.
	 * Those segments guarantee they have small valid blocks.
	 */
	for_each_set_bit(secno, dirty_i->victim_secmap, MAIN_SECS(sbi)) {
		if (sec_usage_check(sbi, secno))
			continue;
		clear_bit(secno, dirty_i->victim_secmap);
		return GET_SEG_FROM_SEC(sbi, secno);
	}
	return NULL_SEGNO;
}

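/*
 * Cost-benefit value of a section: u is the percentage of still-valid
 * blocks and age is the section mtime normalized to 0..100 across all
 * tracked sections.  The returned cost is
 * UINT_MAX - (100 * (100 - u) * age) / (100 + u), so older sections with
 * fewer valid blocks get a lower cost and are picked first.
 */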
static unsigned int get_cb_cost(struct f2fs_sb_info *sbi, unsigned int segno)
{
	struct sit_info *sit_i = SIT_I(sbi);
	unsigned long long mtime = 0;
	unsigned int vblocks;
	unsigned char age = 0;
	unsigned char u;
	unsigned int usable_segs_per_sec = f2fs_usable_segs_in_sec(sbi);

	mtime = f2fs_get_section_mtime(sbi, segno);
	f2fs_bug_on(sbi, mtime == INVALID_MTIME);
	vblocks = get_valid_blocks(sbi, segno, true);
	vblocks = div_u64(vblocks, usable_segs_per_sec);

	u = BLKS_TO_SEGS(sbi, vblocks * 100);

	/* Handle if the system time has changed by the user */
	if (mtime < sit_i->min_mtime)
		sit_i->min_mtime = mtime;
	if (mtime > sit_i->max_mtime)
		sit_i->max_mtime = mtime;
	if (sit_i->max_mtime != sit_i->min_mtime)
		age = 100 - div64_u64(100 * (mtime - sit_i->min_mtime),
				sit_i->max_mtime - sit_i->min_mtime);

	return UINT_MAX - ((100 * (100 - u) * age) / (100 + u));
}

static inline unsigned int get_gc_cost(struct f2fs_sb_info *sbi,
			unsigned int segno, struct victim_sel_policy *p)
{
	if (p->alloc_mode == SSR)
		return get_seg_entry(sbi, segno)->ckpt_valid_blocks;

	if (p->one_time_gc && (get_valid_blocks(sbi, segno, true) >=
		CAP_BLKS_PER_SEC(sbi) * sbi->gc_thread->valid_thresh_ratio /
		100))
		return UINT_MAX;

	/* alloc_mode == LFS */
	if (p->gc_mode == GC_GREEDY)
		return get_valid_blocks(sbi, segno, true);
	else if (p->gc_mode == GC_CB)
		return get_cb_cost(sbi, segno);

	f2fs_bug_on(sbi, 1);
	return 0;
}

static unsigned int count_bits(const unsigned long *addr,
			unsigned int offset, unsigned int len)
{
	unsigned int end = offset + len, sum = 0;

	while (offset < end) {
		if (test_bit(offset++, addr))
			++sum;
	}
	return sum;
}

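/*
 * ATGC keeps candidate victims in an rb-tree ordered by section mtime, so
 * that age-threshold lookups can walk candidates from oldest to youngest.
 * The helpers below maintain and sanity-check that tree.
 */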
static bool f2fs_check_victim_tree(struct f2fs_sb_info *sbi,
				struct rb_root_cached *root)
{
#ifdef CONFIG_F2FS_CHECK_FS
	struct rb_node *cur = rb_first_cached(root), *next;
	struct victim_entry *cur_ve, *next_ve;

	while (cur) {
		next = rb_next(cur);
		if (!next)
			return true;

		cur_ve = rb_entry(cur, struct victim_entry, rb_node);
		next_ve = rb_entry(next, struct victim_entry, rb_node);

		if (cur_ve->mtime > next_ve->mtime) {
			f2fs_info(sbi, "broken victim_rbtree, "
				"cur_mtime(%llu) next_mtime(%llu)",
				cur_ve->mtime, next_ve->mtime);
			return false;
		}
		cur = next;
	}
#endif
	return true;
}

static struct victim_entry *__lookup_victim_entry(struct f2fs_sb_info *sbi,
					unsigned long long mtime)
{
	struct atgc_management *am = &sbi->am;
	struct rb_node *node = am->root.rb_root.rb_node;
	struct victim_entry *ve = NULL;

	while (node) {
		ve = rb_entry(node, struct victim_entry, rb_node);

		if (mtime < ve->mtime)
			node = node->rb_left;
		else
			node = node->rb_right;
	}
	return ve;
}

static struct victim_entry *__create_victim_entry(struct f2fs_sb_info *sbi,
		unsigned long long mtime, unsigned int segno)
{
	struct atgc_management *am = &sbi->am;
	struct victim_entry *ve;

	ve = f2fs_kmem_cache_alloc(victim_entry_slab, GFP_NOFS, true, NULL);

	ve->mtime = mtime;
	ve->segno = segno;

	list_add_tail(&ve->list, &am->victim_list);
	am->victim_count++;

	return ve;
}

static void __insert_victim_entry(struct f2fs_sb_info *sbi,
				unsigned long long mtime, unsigned int segno)
{
	struct atgc_management *am = &sbi->am;
	struct rb_root_cached *root = &am->root;
	struct rb_node **p = &root->rb_root.rb_node;
	struct rb_node *parent = NULL;
	struct victim_entry *ve;
	bool left_most = true;

	/* look up rb tree to find parent node */
	while (*p) {
		parent = *p;
		ve = rb_entry(parent, struct victim_entry, rb_node);

		if (mtime < ve->mtime) {
			p = &(*p)->rb_left;
		} else {
			p = &(*p)->rb_right;
			left_most = false;
		}
	}

	ve = __create_victim_entry(sbi, mtime, segno);

	rb_link_node(&ve->rb_node, parent, p);
	rb_insert_color_cached(&ve->rb_node, root, left_most);
}

static void add_victim_entry(struct f2fs_sb_info *sbi,
				struct victim_sel_policy *p, unsigned int segno)
{
	struct sit_info *sit_i = SIT_I(sbi);
	unsigned long long mtime = 0;

	if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) {
		if (p->gc_mode == GC_AT &&
			get_valid_blocks(sbi, segno, true) == 0)
			return;
	}

	mtime = f2fs_get_section_mtime(sbi, segno);
	f2fs_bug_on(sbi, mtime == INVALID_MTIME);

	/* Handle if the system time has changed by the user */
	if (mtime < sit_i->min_mtime)
		sit_i->min_mtime = mtime;
	if (mtime > sit_i->max_mtime)
		sit_i->max_mtime = mtime;
	if (mtime < sit_i->dirty_min_mtime)
		sit_i->dirty_min_mtime = mtime;
	if (mtime > sit_i->dirty_max_mtime)
		sit_i->dirty_max_mtime = mtime;

	/* don't choose young section as candidate */
	if (sit_i->dirty_max_mtime - mtime < p->age_threshold)
		return;

	__insert_victim_entry(sbi, mtime, segno);
}

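/*
 * ATGC cost of a candidate combines normalized age and free-block ratio,
 * weighted by am->age_weight: cost = UINT_MAX - (age * age_weight +
 * u * (100 - age_weight)), with both terms scaled by the same accuracy
 * factor.  Only up to dirty_threshold candidates are examined per pass.
 */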
static void atgc_lookup_victim(struct f2fs_sb_info *sbi,
						struct victim_sel_policy *p)
{
	struct sit_info *sit_i = SIT_I(sbi);
	struct atgc_management *am = &sbi->am;
	struct rb_root_cached *root = &am->root;
	struct rb_node *node;
	struct victim_entry *ve;
	unsigned long long total_time;
	unsigned long long age, u, accu;
	unsigned long long max_mtime = sit_i->dirty_max_mtime;
	unsigned long long min_mtime = sit_i->dirty_min_mtime;
	unsigned int sec_blocks = CAP_BLKS_PER_SEC(sbi);
	unsigned int vblocks;
	unsigned int dirty_threshold = max(am->max_candidate_count,
					am->candidate_ratio *
					am->victim_count / 100);
	unsigned int age_weight = am->age_weight;
	unsigned int cost;
	unsigned int iter = 0;

	if (max_mtime < min_mtime)
		return;

	max_mtime += 1;
	total_time = max_mtime - min_mtime;

	accu = div64_u64(ULLONG_MAX, total_time);
	accu = min_t(unsigned long long, div_u64(accu, 100),
					DEFAULT_ACCURACY_CLASS);

	node = rb_first_cached(root);
next:
	ve = rb_entry_safe(node, struct victim_entry, rb_node);
	if (!ve)
		return;

	if (ve->mtime >= max_mtime || ve->mtime < min_mtime)
		goto skip;

	/* age = 10000 * x% * 60 */
	age = div64_u64(accu * (max_mtime - ve->mtime), total_time) *
								age_weight;

	vblocks = get_valid_blocks(sbi, ve->segno, true);
	f2fs_bug_on(sbi, !vblocks || vblocks == sec_blocks);

	/* u = 10000 * x% * 40 */
	u = div64_u64(accu * (sec_blocks - vblocks), sec_blocks) *
							(100 - age_weight);

	f2fs_bug_on(sbi, age + u >= UINT_MAX);

	cost = UINT_MAX - (age + u);
	iter++;

	if (cost < p->min_cost ||
			(cost == p->min_cost && age > p->oldest_age)) {
		p->min_cost = cost;
		p->oldest_age = age;
		p->min_segno = ve->segno;
	}
skip:
	if (iter < dirty_threshold) {
		node = rb_next(node);
		goto next;
	}
}

/*
 * select candidates around source section in range of
 * [target - dirty_threshold, target + dirty_threshold]
 */
static void atssr_lookup_victim(struct f2fs_sb_info *sbi,
						struct victim_sel_policy *p)
{
	struct sit_info *sit_i = SIT_I(sbi);
	struct atgc_management *am = &sbi->am;
	struct victim_entry *ve;
	unsigned long long age;
	unsigned long long max_mtime = sit_i->dirty_max_mtime;
	unsigned long long min_mtime = sit_i->dirty_min_mtime;
	unsigned int vblocks;
	unsigned int dirty_threshold = max(am->max_candidate_count,
					am->candidate_ratio *
					am->victim_count / 100);
	unsigned int cost, iter;
	int stage = 0;

	if (max_mtime < min_mtime)
		return;
	max_mtime += 1;
next_stage:
	iter = 0;
	ve = __lookup_victim_entry(sbi, p->age);
next_node:
	if (!ve) {
		if (stage++ == 0)
			goto next_stage;
		return;
	}

	if (ve->mtime >= max_mtime || ve->mtime < min_mtime)
		goto skip_node;

	age = max_mtime - ve->mtime;

	vblocks = get_seg_entry(sbi, ve->segno)->ckpt_valid_blocks;
	f2fs_bug_on(sbi, !vblocks);

	/* rare case */
	if (vblocks == BLKS_PER_SEG(sbi))
		goto skip_node;

	iter++;

	age = max_mtime - abs(p->age - age);
	cost = UINT_MAX - vblocks;

	if (cost < p->min_cost ||
			(cost == p->min_cost && age > p->oldest_age)) {
		p->min_cost = cost;
		p->oldest_age = age;
		p->min_segno = ve->segno;
	}
skip_node:
	if (iter < dirty_threshold) {
		ve = rb_entry(stage == 0 ? rb_prev(&ve->rb_node) :
					rb_next(&ve->rb_node),
					struct victim_entry, rb_node);
		goto next_node;
	}

	if (stage++ == 0)
		goto next_stage;
}

static void lookup_victim_by_age(struct f2fs_sb_info *sbi,
						struct victim_sel_policy *p)
{
	f2fs_bug_on(sbi, !f2fs_check_victim_tree(sbi, &sbi->am.root));

	if (p->gc_mode == GC_AT)
		atgc_lookup_victim(sbi, p);
	else if (p->alloc_mode == AT_SSR)
		atssr_lookup_victim(sbi, p);
	else
		f2fs_bug_on(sbi, 1);
}

static void release_victim_entry(struct f2fs_sb_info *sbi)
{
	struct atgc_management *am = &sbi->am;
	struct victim_entry *ve, *tmp;

	list_for_each_entry_safe(ve, tmp, &am->victim_list, list) {
		list_del(&ve->list);
		kmem_cache_free(victim_entry_slab, ve);
		am->victim_count--;
	}

	am->root = RB_ROOT_CACHED;

	f2fs_bug_on(sbi, am->victim_count);
	f2fs_bug_on(sbi, !list_empty(&am->victim_list));
}

static bool f2fs_pin_section(struct f2fs_sb_info *sbi, unsigned int segno)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	unsigned int secno = GET_SEC_FROM_SEG(sbi, segno);

	if (!dirty_i->enable_pin_section)
		return false;
	if (!test_and_set_bit(secno, dirty_i->pinned_secmap))
		dirty_i->pinned_secmap_cnt++;
	return true;
}

static bool f2fs_pinned_section_exists(struct dirty_seglist_info *dirty_i)
{
	return dirty_i->pinned_secmap_cnt;
}

static bool f2fs_section_is_pinned(struct dirty_seglist_info *dirty_i,
						unsigned int secno)
{
	return dirty_i->enable_pin_section &&
		f2fs_pinned_section_exists(dirty_i) &&
		test_bit(secno, dirty_i->pinned_secmap);
}

static void f2fs_unpin_all_sections(struct f2fs_sb_info *sbi, bool enable)
{
	unsigned int bitmap_size = f2fs_bitmap_size(MAIN_SECS(sbi));

	if (f2fs_pinned_section_exists(DIRTY_I(sbi))) {
		memset(DIRTY_I(sbi)->pinned_secmap, 0, bitmap_size);
		DIRTY_I(sbi)->pinned_secmap_cnt = 0;
	}
	DIRTY_I(sbi)->enable_pin_section = enable;
}

static int f2fs_gc_pinned_control(struct inode *inode, int gc_type,
					unsigned int segno)
{
	if (!f2fs_is_pinned_file(inode))
		return 0;
	if (gc_type != FG_GC)
		return -EBUSY;
	if (!f2fs_pin_section(F2FS_I_SB(inode), segno))
		f2fs_pin_file_control(inode, true);
	return -EAGAIN;
}

/*
 * This function is called from two paths.
 * One is garbage collection and the other is SSR segment selection.
 * When it is called during GC, it just gets a victim segment
 * and it does not remove it from dirty seglist.
 * When it is called from SSR segment selection, it finds a segment
 * which has minimum valid blocks and removes it from dirty seglist.
 */
int f2fs_get_victim(struct f2fs_sb_info *sbi, unsigned int *result,
			int gc_type, int type, char alloc_mode,
			unsigned long long age, bool one_time)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	struct sit_info *sm = SIT_I(sbi);
	struct victim_sel_policy p;
	unsigned int secno, last_victim;
	unsigned int last_segment;
	unsigned int nsearched;
	bool is_atgc;
	int ret = 0;

	mutex_lock(&dirty_i->seglist_lock);
	last_segment = MAIN_SECS(sbi) * SEGS_PER_SEC(sbi);

	p.alloc_mode = alloc_mode;
	p.age = age;
	p.age_threshold = sbi->am.age_threshold;
	p.one_time_gc = one_time;

retry:
	select_policy(sbi, gc_type, type, &p);
	p.min_segno = NULL_SEGNO;
	p.oldest_age = 0;
	p.min_cost = get_max_cost(sbi, &p);

	is_atgc = (p.gc_mode == GC_AT || p.alloc_mode == AT_SSR);
	nsearched = 0;

	if (is_atgc)
		SIT_I(sbi)->dirty_min_mtime = ULLONG_MAX;

	if (*result != NULL_SEGNO) {
		if (!get_valid_blocks(sbi, *result, false)) {
			ret = -ENODATA;
			goto out;
		}

		if (sec_usage_check(sbi, GET_SEC_FROM_SEG(sbi, *result))) {
			ret = -EBUSY;
			goto out;
		}
		if (gc_type == FG_GC)
			clear_bit(GET_SEC_FROM_SEG(sbi, *result), dirty_i->victim_secmap);
		p.min_segno = *result;
		goto got_result;
	}

	ret = -ENODATA;
	if (p.max_search == 0)
		goto out;

	if (__is_large_section(sbi) && p.alloc_mode == LFS) {
		if (sbi->next_victim_seg[BG_GC] != NULL_SEGNO) {
			p.min_segno = sbi->next_victim_seg[BG_GC];
			*result = p.min_segno;
			sbi->next_victim_seg[BG_GC] = NULL_SEGNO;
			goto got_result;
		}
		if (gc_type == FG_GC &&
				sbi->next_victim_seg[FG_GC] != NULL_SEGNO) {
			p.min_segno = sbi->next_victim_seg[FG_GC];
			*result = p.min_segno;
			sbi->next_victim_seg[FG_GC] = NULL_SEGNO;
			goto got_result;
		}
	}

	last_victim = sm->last_victim[p.gc_mode];
	if (p.alloc_mode == LFS && gc_type == FG_GC) {
		p.min_segno = check_bg_victims(sbi);
		if (p.min_segno != NULL_SEGNO)
			goto got_it;
	}

	while (1) {
		unsigned long cost, *dirty_bitmap;
		unsigned int unit_no, segno;

		dirty_bitmap = p.dirty_bitmap;
		unit_no = find_next_bit(dirty_bitmap,
				last_segment / p.ofs_unit,
				p.offset / p.ofs_unit);
		segno = unit_no * p.ofs_unit;
		if (segno >= last_segment) {
			if (sm->last_victim[p.gc_mode]) {
				last_segment =
					sm->last_victim[p.gc_mode];
				sm->last_victim[p.gc_mode] = 0;
				p.offset = 0;
				continue;
			}
			break;
		}

		p.offset = segno + p.ofs_unit;
		nsearched++;

#ifdef CONFIG_F2FS_CHECK_FS
		/*
		 * skip selecting the invalid segno (that is failed due to block
		 * validity check failure during GC) to avoid endless GC loop in
		 * such cases.
		 */
		if (test_bit(segno, sm->invalid_segmap))
			goto next;
#endif

		secno = GET_SEC_FROM_SEG(sbi, segno);

		if (sec_usage_check(sbi, secno))
			goto next;

		/* Don't touch checkpointed data */
		if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) {
			if (p.alloc_mode == LFS) {
				/*
				 * LFS is set to find source section during GC.
				 * The victim should have no checkpointed data.
				 */
				if (get_ckpt_valid_blocks(sbi, segno, true))
					goto next;
			} else {
				/*
				 * SSR | AT_SSR are set to find target segment
				 * for writes which can be full by checkpointed
				 * and newly written blocks.
				 */
				if (!f2fs_segment_has_free_slot(sbi, segno))
					goto next;
			}
		}

		if (gc_type == BG_GC && test_bit(secno, dirty_i->victim_secmap))
			goto next;

		if (gc_type == FG_GC && f2fs_section_is_pinned(dirty_i, secno))
			goto next;

		if (is_atgc) {
			add_victim_entry(sbi, &p, segno);
			goto next;
		}

		cost = get_gc_cost(sbi, segno, &p);

		if (p.min_cost > cost) {
			p.min_segno = segno;
			p.min_cost = cost;
		}
next:
		if (nsearched >= p.max_search) {
			if (!sm->last_victim[p.gc_mode] && segno <= last_victim)
				sm->last_victim[p.gc_mode] =
					last_victim + p.ofs_unit;
			else
				sm->last_victim[p.gc_mode] = segno + p.ofs_unit;
			sm->last_victim[p.gc_mode] %=
				(MAIN_SECS(sbi) * SEGS_PER_SEC(sbi));
			break;
		}
	}

	/* get victim for GC_AT/AT_SSR */
	if (is_atgc) {
		lookup_victim_by_age(sbi, &p);
		release_victim_entry(sbi);
	}

	if (is_atgc && p.min_segno == NULL_SEGNO &&
			sm->elapsed_time < p.age_threshold) {
		p.age_threshold = 0;
		goto retry;
	}

	if (p.min_segno != NULL_SEGNO) {
got_it:
		*result = (p.min_segno / p.ofs_unit) * p.ofs_unit;
got_result:
		if (p.alloc_mode == LFS) {
			secno = GET_SEC_FROM_SEG(sbi, p.min_segno);
			if (gc_type == FG_GC)
				sbi->cur_victim_sec = secno;
			else
				set_bit(secno, dirty_i->victim_secmap);
		}
		ret = 0;

	}
out:
	if (p.min_segno != NULL_SEGNO)
		trace_f2fs_get_victim(sbi->sb, type, gc_type, &p,
				sbi->cur_victim_sec,
				prefree_segments(sbi), free_segments(sbi));
	mutex_unlock(&dirty_i->seglist_lock);

	return ret;
}
969
7dda2af8 970static struct inode *find_gc_inode(struct gc_inode_list *gc_list, nid_t ino)
7bc09003 971{
7bc09003
JK
972 struct inode_entry *ie;
973
7dda2af8
CL
974 ie = radix_tree_lookup(&gc_list->iroot, ino);
975 if (ie)
976 return ie->inode;
7bc09003
JK
977 return NULL;
978}
979
7dda2af8 980static void add_gc_inode(struct gc_inode_list *gc_list, struct inode *inode)
7bc09003 981{
6cc4af56
GZ
982 struct inode_entry *new_ie;
983
7dda2af8 984 if (inode == find_gc_inode(gc_list, inode->i_ino)) {
6cc4af56
GZ
985 iput(inode);
986 return;
7bc09003 987 }
32410577
CY
988 new_ie = f2fs_kmem_cache_alloc(f2fs_inode_entry_slab,
989 GFP_NOFS, true, NULL);
7bc09003 990 new_ie->inode = inode;
f28e5034
CY
991
992 f2fs_radix_tree_insert(&gc_list->iroot, inode->i_ino, new_ie);
7dda2af8 993 list_add_tail(&new_ie->list, &gc_list->ilist);
7bc09003
JK
994}
995
7dda2af8 996static void put_gc_inode(struct gc_inode_list *gc_list)
7bc09003
JK
997{
998 struct inode_entry *ie, *next_ie;
5f029c04 999
7dda2af8
CL
1000 list_for_each_entry_safe(ie, next_ie, &gc_list->ilist, list) {
1001 radix_tree_delete(&gc_list->iroot, ie->inode->i_ino);
7bc09003
JK
1002 iput(ie->inode);
1003 list_del(&ie->list);
4d57b86d 1004 kmem_cache_free(f2fs_inode_entry_slab, ie);
7bc09003
JK
1005 }
1006}
1007
1008static int check_valid_map(struct f2fs_sb_info *sbi,
1009 unsigned int segno, int offset)
1010{
1011 struct sit_info *sit_i = SIT_I(sbi);
1012 struct seg_entry *sentry;
1013 int ret;
1014
3d26fa6b 1015 down_read(&sit_i->sentry_lock);
7bc09003
JK
1016 sentry = get_seg_entry(sbi, segno);
1017 ret = f2fs_test_bit(offset, sentry->cur_valid_map);
3d26fa6b 1018 up_read(&sit_i->sentry_lock);
43727527 1019 return ret;
7bc09003
JK
1020}
1021
0a8165d7 1022/*
7bc09003
JK
1023 * This function compares node address got in summary with that in NAT.
1024 * On validity, copy that node with cold status, otherwise (invalid node)
1025 * ignore that.
1026 */
48018b4c 1027static int gc_node_segment(struct f2fs_sb_info *sbi,
7bc09003
JK
1028 struct f2fs_summary *sum, unsigned int segno, int gc_type)
1029{
7bc09003 1030 struct f2fs_summary *entry;
26d58599 1031 block_t start_addr;
7bc09003 1032 int off;
7ea984b0 1033 int phase = 0;
c29fd0c0 1034 bool fggc = (gc_type == FG_GC);
48018b4c 1035 int submitted = 0;
de881df9 1036 unsigned int usable_blks_in_seg = f2fs_usable_blks_in_seg(sbi, segno);
7bc09003 1037
26d58599
JK
1038 start_addr = START_BLOCK(sbi, segno);
1039
7bc09003
JK
1040next_step:
1041 entry = sum;
c718379b 1042
c29fd0c0
CY
1043 if (fggc && phase == 2)
1044 atomic_inc(&sbi->wb_sync_req[NODE]);
1045
de881df9 1046 for (off = 0; off < usable_blks_in_seg; off++, entry++) {
7bc09003 1047 nid_t nid = le32_to_cpu(entry->nid);
c528defa 1048 struct folio *node_folio;
26d58599 1049 struct node_info ni;
48018b4c 1050 int err;
7bc09003 1051
43727527 1052 /* stop BG_GC if there is not enough free sections. */
7f3037a5 1053 if (gc_type == BG_GC && has_not_enough_free_secs(sbi, 0, 0))
48018b4c 1054 return submitted;
7bc09003 1055
43727527 1056 if (check_valid_map(sbi, segno, off) == 0)
7bc09003
JK
1057 continue;
1058
7ea984b0 1059 if (phase == 0) {
4d57b86d 1060 f2fs_ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nid), 1,
7ea984b0
CY
1061 META_NAT, true);
1062 continue;
1063 }
1064
1065 if (phase == 1) {
4d57b86d 1066 f2fs_ra_node_page(sbi, nid);
7bc09003
JK
1067 continue;
1068 }
7ea984b0
CY
1069
1070 /* phase == 2 */
c528defa
MWO
1071 node_folio = f2fs_get_node_folio(sbi, nid);
1072 if (IS_ERR(node_folio))
7bc09003
JK
1073 continue;
1074
c528defa 1075 /* block may become invalid during f2fs_get_node_folio */
9a01b56b 1076 if (check_valid_map(sbi, segno, off) == 0) {
c528defa 1077 f2fs_folio_put(node_folio, true);
9a01b56b 1078 continue;
26d58599
JK
1079 }
1080
a9419b63 1081 if (f2fs_get_node_info(sbi, nid, &ni, false)) {
c528defa 1082 f2fs_folio_put(node_folio, true);
7735730d
CY
1083 continue;
1084 }
1085
26d58599 1086 if (ni.blk_addr != start_addr + off) {
c528defa 1087 f2fs_folio_put(node_folio, true);
26d58599 1088 continue;
9a01b56b
HY
1089 }
1090
c795d9db 1091 err = f2fs_move_node_folio(node_folio, gc_type);
48018b4c
CY
1092 if (!err && gc_type == FG_GC)
1093 submitted++;
e1235983 1094 stat_inc_node_blk_count(sbi, 1, gc_type);
7bc09003 1095 }
c718379b 1096
7ea984b0 1097 if (++phase < 3)
7bc09003 1098 goto next_step;
c29fd0c0
CY
1099
1100 if (fggc)
1101 atomic_dec(&sbi->wb_sync_req[NODE]);
48018b4c 1102 return submitted;
7bc09003
JK
1103}
1104
0a8165d7 1105/*
9af45ef5
JK
1106 * Calculate start block index indicating the given node offset.
1107 * Be careful, caller should give this node offset only indicating direct node
1108 * blocks. If any node offsets, which point the other types of node blocks such
1109 * as indirect or double indirect node blocks, are given, it must be a caller's
1110 * bug.
7bc09003 1111 */
4d57b86d 1112block_t f2fs_start_bidx_of_node(unsigned int node_ofs, struct inode *inode)
7bc09003 1113{
ce19a5d4
JK
1114 unsigned int indirect_blks = 2 * NIDS_PER_BLOCK + 4;
1115 unsigned int bidx;
7bc09003 1116
ce19a5d4
JK
1117 if (node_ofs == 0)
1118 return 0;
7bc09003 1119
ce19a5d4 1120 if (node_ofs <= 2) {
7bc09003
JK
1121 bidx = node_ofs - 1;
1122 } else if (node_ofs <= indirect_blks) {
ce19a5d4 1123 int dec = (node_ofs - 4) / (NIDS_PER_BLOCK + 1);
5f029c04 1124
7bc09003
JK
1125 bidx = node_ofs - 2 - dec;
1126 } else {
ce19a5d4 1127 int dec = (node_ofs - indirect_blks - 3) / (NIDS_PER_BLOCK + 1);
5f029c04 1128
7bc09003
JK
1129 bidx = node_ofs - 5 - dec;
1130 }
d02a6e61 1131 return bidx * ADDRS_PER_BLOCK(inode) + ADDRS_PER_INODE(inode);
7bc09003
JK
1132}
1133
c1079892 1134static bool is_alive(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
7bc09003
JK
1135 struct node_info *dni, block_t blkaddr, unsigned int *nofs)
1136{
1a116e87 1137 struct folio *node_folio;
7bc09003 1138 nid_t nid;
d3b7b4af 1139 unsigned int ofs_in_node, max_addrs, base;
7bc09003
JK
1140 block_t source_blkaddr;
1141
1142 nid = le32_to_cpu(sum->nid);
1143 ofs_in_node = le16_to_cpu(sum->ofs_in_node);
1144
1a116e87
MWO
1145 node_folio = f2fs_get_node_folio(sbi, nid);
1146 if (IS_ERR(node_folio))
c1079892 1147 return false;
7bc09003 1148
a9419b63 1149 if (f2fs_get_node_info(sbi, nid, dni, false)) {
1a116e87 1150 f2fs_folio_put(node_folio, true);
7735730d
CY
1151 return false;
1152 }
7bc09003
JK
1153
1154 if (sum->version != dni->version) {
dcbb4c10
JP
1155 f2fs_warn(sbi, "%s: valid data with mismatched node version.",
1156 __func__);
c13ff37e 1157 set_sbi_flag(sbi, SBI_NEED_FSCK);
7bc09003
JK
1158 }
1159
6d18762e 1160 if (f2fs_check_nid_range(sbi, dni->ino)) {
1a116e87 1161 f2fs_folio_put(node_folio, true);
77900c45 1162 return false;
6d18762e 1163 }
77900c45 1164
1a116e87
MWO
1165 if (IS_INODE(&node_folio->page)) {
1166 base = offset_in_addr(F2FS_INODE(&node_folio->page));
d3b7b4af
CY
1167 max_addrs = DEF_ADDRS_PER_INODE;
1168 } else {
1169 base = 0;
1170 max_addrs = DEF_ADDRS_PER_BLOCK;
1171 }
1172
1173 if (base + ofs_in_node >= max_addrs) {
1174 f2fs_err(sbi, "Inconsistent blkaddr offset: base:%u, ofs_in_node:%u, max:%u, ino:%u, nid:%u",
1175 base, ofs_in_node, max_addrs, dni->ino, dni->nid);
1a116e87 1176 f2fs_folio_put(node_folio, true);
c6ad7fd1
CY
1177 return false;
1178 }
1179
1a116e87 1180 *nofs = ofs_of_node(&node_folio->page);
6f7ec661 1181 source_blkaddr = data_blkaddr(NULL, node_folio, ofs_in_node);
1a116e87 1182 f2fs_folio_put(node_folio, true);
7bc09003 1183
bbf9f7d9
ST
1184 if (source_blkaddr != blkaddr) {
1185#ifdef CONFIG_F2FS_CHECK_FS
1186 unsigned int segno = GET_SEGNO(sbi, blkaddr);
1187 unsigned long offset = GET_BLKOFF_FROM_SEG0(sbi, blkaddr);
1188
1189 if (unlikely(check_valid_map(sbi, segno, offset))) {
1190 if (!test_and_set_bit(segno, SIT_I(sbi)->invalid_segmap)) {
833dcd35
JP
1191 f2fs_err(sbi, "mismatched blkaddr %u (source_blkaddr %u) in seg %u",
1192 blkaddr, source_blkaddr, segno);
f6db4307 1193 set_sbi_flag(sbi, SBI_NEED_FSCK);
bbf9f7d9
ST
1194 }
1195 }
1196#endif
c1079892 1197 return false;
bbf9f7d9 1198 }
c1079892 1199 return true;
7bc09003
JK
1200}
1201
6aa58d8a
CY
1202static int ra_data_block(struct inode *inode, pgoff_t index)
1203{
1204 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
f18d0076
SJ
1205 struct address_space *mapping = f2fs_is_cow_file(inode) ?
1206 F2FS_I(inode)->atomic_inode->i_mapping : inode->i_mapping;
6aa58d8a 1207 struct dnode_of_data dn;
0d53be23 1208 struct folio *folio;
6aa58d8a
CY
1209 struct f2fs_io_info fio = {
1210 .sbi = sbi,
1211 .ino = inode->i_ino,
1212 .type = DATA,
1213 .temp = COLD,
1214 .op = REQ_OP_READ,
1215 .op_flags = 0,
1216 .encrypted_page = NULL,
2eae077e 1217 .in_list = 0,
6aa58d8a
CY
1218 };
1219 int err;
1220
0d53be23
MWO
1221 folio = f2fs_grab_cache_folio(mapping, index, true);
1222 if (IS_ERR(folio))
1223 return PTR_ERR(folio);
6aa58d8a 1224
04a91ab0
CH
1225 if (f2fs_lookup_read_extent_cache_block(inode, index,
1226 &dn.data_blkaddr)) {
93770ab7
CY
1227 if (unlikely(!f2fs_is_valid_blkaddr(sbi, dn.data_blkaddr,
1228 DATA_GENERIC_ENHANCE_READ))) {
10f966bb 1229 err = -EFSCORRUPTED;
0d53be23 1230 goto put_folio;
93770ab7 1231 }
6aa58d8a
CY
1232 goto got_it;
1233 }
1234
1235 set_new_dnode(&dn, inode, NULL, NULL, 0);
1236 err = f2fs_get_dnode_of_data(&dn, index, LOOKUP_NODE);
1237 if (err)
0d53be23 1238 goto put_folio;
6aa58d8a
CY
1239 f2fs_put_dnode(&dn);
1240
93770ab7
CY
1241 if (!__is_valid_data_blkaddr(dn.data_blkaddr)) {
1242 err = -ENOENT;
0d53be23 1243 goto put_folio;
93770ab7 1244 }
6aa58d8a 1245 if (unlikely(!f2fs_is_valid_blkaddr(sbi, dn.data_blkaddr,
93770ab7 1246 DATA_GENERIC_ENHANCE))) {
10f966bb 1247 err = -EFSCORRUPTED;
0d53be23 1248 goto put_folio;
6aa58d8a
CY
1249 }
1250got_it:
0d53be23
MWO
1251 /* read folio */
1252 fio.page = &folio->page;
6aa58d8a
CY
1253 fio.new_blkaddr = fio.old_blkaddr = dn.data_blkaddr;
1254
9bf1a3f7
YS
1255 /*
1256 * don't cache encrypted data into meta inode until previous dirty
1257 * data were writebacked to avoid racing between GC and flush.
1258 */
0d53be23 1259 f2fs_folio_wait_writeback(folio, DATA, true, true);
9bf1a3f7
YS
1260
1261 f2fs_wait_on_block_writeback(inode, dn.data_blkaddr);
1262
6aa58d8a
CY
1263 fio.encrypted_page = f2fs_pagecache_get_page(META_MAPPING(sbi),
1264 dn.data_blkaddr,
1265 FGP_LOCK | FGP_CREAT, GFP_NOFS);
1266 if (!fio.encrypted_page) {
1267 err = -ENOMEM;
0d53be23 1268 goto put_folio;
6aa58d8a
CY
1269 }
1270
1271 err = f2fs_submit_page_bio(&fio);
1272 if (err)
1273 goto put_encrypted_page;
1274 f2fs_put_page(fio.encrypted_page, 0);
0d53be23 1275 f2fs_folio_put(folio, true);
8b83ac81 1276
34a23525
CY
1277 f2fs_update_iostat(sbi, inode, FS_DATA_READ_IO, F2FS_BLKSIZE);
1278 f2fs_update_iostat(sbi, NULL, FS_GDATA_READ_IO, F2FS_BLKSIZE);
8b83ac81 1279
6aa58d8a
CY
1280 return 0;
1281put_encrypted_page:
1282 f2fs_put_page(fio.encrypted_page, 1);
0d53be23
MWO
1283put_folio:
1284 f2fs_folio_put(folio, true);
6aa58d8a
CY
1285 return err;
1286}
1287
d4c759ee
JK
1288/*
1289 * Move data block via META_MAPPING while keeping locked data page.
1290 * This can be used to move blocks, aka LBAs, directly on disk.
1291 */
48018b4c 1292static int move_data_block(struct inode *inode, block_t bidx,
2ef79ecb 1293 int gc_type, unsigned int segno, int off)
4375a336 1294{
f18d0076
SJ
1295 struct address_space *mapping = f2fs_is_cow_file(inode) ?
1296 F2FS_I(inode)->atomic_inode->i_mapping : inode->i_mapping;
4375a336
JK
1297 struct f2fs_io_info fio = {
1298 .sbi = F2FS_I_SB(inode),
39d787be 1299 .ino = inode->i_ino,
4375a336 1300 .type = DATA,
a912b54d 1301 .temp = COLD,
04d328de 1302 .op = REQ_OP_READ,
70fd7614 1303 .op_flags = 0,
4375a336 1304 .encrypted_page = NULL,
2eae077e 1305 .in_list = 0,
4375a336
JK
1306 };
1307 struct dnode_of_data dn;
1308 struct f2fs_summary sum;
1309 struct node_info ni;
2a96ddcb 1310 struct folio *folio, *mfolio;
4356e48e 1311 block_t newaddr;
48018b4c 1312 int err = 0;
b0332a0f 1313 bool lfs_mode = f2fs_lfs_mode(fio.sbi);
ac2d750b
WG
1314 int type = fio.sbi->am.atgc_enabled && (gc_type == BG_GC) &&
1315 (fio.sbi->gc_mode != GC_URGENT_HIGH) ?
093749e2 1316 CURSEG_ALL_DATA_ATGC : CURSEG_COLD_DATA;
4375a336
JK
1317
1318 /* do not read out */
2a96ddcb
MWO
1319 folio = f2fs_grab_cache_folio(mapping, bidx, false);
1320 if (IS_ERR(folio))
1321 return PTR_ERR(folio);
4375a336 1322
48018b4c
CY
1323 if (!check_valid_map(F2FS_I_SB(inode), segno, off)) {
1324 err = -ENOENT;
20614711 1325 goto out;
48018b4c 1326 }
20614711 1327
71419129
CY
1328 err = f2fs_gc_pinned_control(inode, gc_type, segno);
1329 if (err)
1ad71a27 1330 goto out;
1ad71a27 1331
4375a336 1332 set_new_dnode(&dn, inode, NULL, NULL, 0);
4d57b86d 1333 err = f2fs_get_dnode_of_data(&dn, bidx, LOOKUP_NODE);
4375a336
JK
1334 if (err)
1335 goto out;
1336
08b39fbd 1337 if (unlikely(dn.data_blkaddr == NULL_ADDR)) {
2a96ddcb 1338 folio_clear_uptodate(folio);
48018b4c 1339 err = -ENOENT;
4375a336 1340 goto put_out;
08b39fbd
CY
1341 }
1342
1343 /*
1344 * don't cache encrypted data into meta inode until previous dirty
1345 * data were writebacked to avoid racing between GC and flush.
1346 */
2a96ddcb 1347 f2fs_folio_wait_writeback(folio, DATA, true, true);
4375a336 1348
9bf1a3f7
YS
1349 f2fs_wait_on_block_writeback(inode, dn.data_blkaddr);
1350
a9419b63 1351 err = f2fs_get_node_info(fio.sbi, dn.nid, &ni, false);
7735730d
CY
1352 if (err)
1353 goto put_out;
1354
4375a336 1355 /* read page */
2a96ddcb 1356 fio.page = &folio->page;
7a9d7548 1357 fio.new_blkaddr = fio.old_blkaddr = dn.data_blkaddr;
4375a336 1358
107a805d 1359 if (lfs_mode)
e4544b63 1360 f2fs_down_write(&fio.sbi->io_order_lock);
107a805d 1361
c14b4562 1362 mfolio = f2fs_grab_cache_folio(META_MAPPING(fio.sbi),
543b8c46 1363 fio.old_blkaddr, false);
c14b4562
MWO
1364 if (IS_ERR(mfolio)) {
1365 err = PTR_ERR(mfolio);
543b8c46 1366 goto up_out;
d7cd3702 1367 }
543b8c46 1368
c14b4562 1369 fio.encrypted_page = folio_file_page(mfolio, fio.old_blkaddr);
543b8c46 1370
c14b4562
MWO
1371 /* read source block in mfolio */
1372 if (!folio_test_uptodate(mfolio)) {
543b8c46
JK
1373 err = f2fs_submit_page_bio(&fio);
1374 if (err) {
c14b4562 1375 f2fs_folio_put(mfolio, true);
543b8c46
JK
1376 goto up_out;
1377 }
8b83ac81 1378
34a23525
CY
1379 f2fs_update_iostat(fio.sbi, inode, FS_DATA_READ_IO,
1380 F2FS_BLKSIZE);
1381 f2fs_update_iostat(fio.sbi, NULL, FS_GDATA_READ_IO,
1382 F2FS_BLKSIZE);
8b83ac81 1383
c14b4562 1384 folio_lock(mfolio);
019a8912 1385 if (unlikely(!is_meta_folio(mfolio) ||
c14b4562 1386 !folio_test_uptodate(mfolio))) {
543b8c46 1387 err = -EIO;
c14b4562 1388 f2fs_folio_put(mfolio, true);
543b8c46
JK
1389 goto up_out;
1390 }
1391 }
1392
cf740403
CY
1393 set_summary(&sum, dn.nid, dn.ofs_in_node, ni.version);
1394
1395 /* allocate block address */
7d009e04 1396 err = f2fs_allocate_data_block(fio.sbi, NULL, fio.old_blkaddr, &newaddr,
093749e2 1397 &sum, type, NULL);
7d009e04 1398 if (err) {
c14b4562 1399 f2fs_folio_put(mfolio, true);
7d009e04
CY
1400 /* filesystem should shutdown, no need to recovery block */
1401 goto up_out;
1402 }
4356e48e 1403
01eccef7
CY
1404 fio.encrypted_page = f2fs_pagecache_get_page(META_MAPPING(fio.sbi),
1405 newaddr, FGP_LOCK | FGP_CREAT, GFP_NOFS);
4356e48e
CY
1406 if (!fio.encrypted_page) {
1407 err = -ENOMEM;
c14b4562 1408 f2fs_folio_put(mfolio, true);
543b8c46 1409 goto recover_block;
4356e48e 1410 }
548aedac 1411
543b8c46 1412 /* write target block */
bae0ee7a 1413 f2fs_wait_on_page_writeback(fio.encrypted_page, DATA, true, true);
543b8c46 1414 memcpy(page_address(fio.encrypted_page),
c14b4562
MWO
1415 folio_address(mfolio), PAGE_SIZE);
1416 f2fs_folio_put(mfolio, true);
4e4f1eb9 1417
d217b5ce 1418 f2fs_invalidate_internal_cache(fio.sbi, fio.old_blkaddr, 1);
543b8c46 1419
8d64d365 1420 set_page_dirty(fio.encrypted_page);
6282adbf
JK
1421 if (clear_page_dirty_for_io(fio.encrypted_page))
1422 dec_page_count(fio.sbi, F2FS_DIRTY_META);
1423
548aedac 1424 set_page_writeback(fio.encrypted_page);
4375a336 1425
04d328de 1426 fio.op = REQ_OP_WRITE;
70fd7614 1427 fio.op_flags = REQ_SYNC;
4356e48e 1428 fio.new_blkaddr = newaddr;
fe16efe6 1429 f2fs_submit_page_write(&fio);
4375a336 1430
34a23525 1431 f2fs_update_iostat(fio.sbi, NULL, FS_GC_DATA_IO, F2FS_BLKSIZE);
b0af6d49 1432
f28b3434 1433 f2fs_update_data_blkaddr(&dn, newaddr);
91942321 1434 set_inode_flag(inode, FI_APPEND_WRITE);
87161a2b 1435
4375a336 1436 f2fs_put_page(fio.encrypted_page, 1);
4356e48e
CY
1437recover_block:
1438 if (err)
4d57b86d 1439 f2fs_do_replace_block(fio.sbi, &sum, newaddr, fio.old_blkaddr,
c5d02785 1440 true, true, true);
543b8c46
JK
1441up_out:
1442 if (lfs_mode)
e4544b63 1443 f2fs_up_write(&fio.sbi->io_order_lock);
4375a336
JK
1444put_out:
1445 f2fs_put_dnode(&dn);
1446out:
2a96ddcb 1447 f2fs_folio_put(folio, true);
48018b4c 1448 return err;
4375a336
JK
1449}
1450
48018b4c 1451static int move_data_page(struct inode *inode, block_t bidx, int gc_type,
6d1ba45c 1452 unsigned int segno, int off)
7bc09003 1453{
6d1ba45c 1454 struct folio *folio;
48018b4c 1455 int err = 0;
c879f90d 1456
6d1ba45c
MWO
1457 folio = f2fs_get_lock_data_folio(inode, bidx, true);
1458 if (IS_ERR(folio))
1459 return PTR_ERR(folio);
63a0b7cb 1460
48018b4c
CY
1461 if (!check_valid_map(F2FS_I_SB(inode), segno, off)) {
1462 err = -ENOENT;
20614711 1463 goto out;
48018b4c 1464 }
20614711 1465
71419129
CY
1466 err = f2fs_gc_pinned_control(inode, gc_type, segno);
1467 if (err)
1ad71a27 1468 goto out;
5fe45743 1469
7bc09003 1470 if (gc_type == BG_GC) {
6d1ba45c 1471 if (folio_test_writeback(folio)) {
48018b4c 1472 err = -EAGAIN;
4ebefc44 1473 goto out;
48018b4c 1474 }
6d1ba45c
MWO
1475 folio_mark_dirty(folio);
1476 set_page_private_gcing(&folio->page);
7bc09003 1477 } else {
c879f90d
JK
1478 struct f2fs_io_info fio = {
1479 .sbi = F2FS_I_SB(inode),
39d787be 1480 .ino = inode->i_ino,
c879f90d 1481 .type = DATA,
a912b54d 1482 .temp = COLD,
04d328de 1483 .op = REQ_OP_WRITE,
70fd7614 1484 .op_flags = REQ_SYNC,
e959c8f5 1485 .old_blkaddr = NULL_ADDR,
6d1ba45c 1486 .page = &folio->page,
4375a336 1487 .encrypted_page = NULL,
cc15620b 1488 .need_lock = LOCK_REQ,
b0af6d49 1489 .io_type = FS_GC_DATA_IO,
c879f90d 1490 };
6d1ba45c 1491 bool is_dirty = folio_test_dirty(folio);
72e1c797
CY
1492
1493retry:
6d1ba45c 1494 f2fs_folio_wait_writeback(folio, DATA, true, true);
8d64d365 1495
6d1ba45c
MWO
1496 folio_mark_dirty(folio);
1497 if (folio_clear_dirty_for_io(folio)) {
a7ffdbe2 1498 inode_dec_dirty_pages(inode);
4d57b86d 1499 f2fs_remove_dirty_inode(inode);
933439c8 1500 }
72e1c797 1501
6d1ba45c 1502 set_page_private_gcing(&folio->page);
72e1c797 1503
4d57b86d 1504 err = f2fs_do_write_data_page(&fio);
14a28559 1505 if (err) {
6d1ba45c 1506 clear_page_private_gcing(&folio->page);
14a28559 1507 if (err == -ENOMEM) {
4034247a 1508 memalloc_retry_wait(GFP_NOFS);
14a28559
CY
1509 goto retry;
1510 }
1511 if (is_dirty)
6d1ba45c 1512 folio_mark_dirty(folio);
72e1c797 1513 }
7bc09003
JK
1514 }
1515out:
6d1ba45c 1516 f2fs_folio_put(folio, true);
48018b4c 1517 return err;
7bc09003
JK
1518}
1519
0a8165d7 1520/*
7bc09003
JK
1521 * This function tries to get parent node of victim data block, and identifies
1522 * data block validity. If the block is valid, copy that with cold status and
1523 * modify parent node.
1524 * If the parent node is not valid or the data block address is different,
1525 * the victim data block is ignored.
1526 */
48018b4c 1527static int gc_data_segment(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
7dede886
CY
1528 struct gc_inode_list *gc_list, unsigned int segno, int gc_type,
1529 bool force_migrate)
7bc09003
JK
1530{
1531 struct super_block *sb = sbi->sb;
1532 struct f2fs_summary *entry;
1533 block_t start_addr;
43727527 1534 int off;
7bc09003 1535 int phase = 0;
48018b4c 1536 int submitted = 0;
de881df9 1537 unsigned int usable_blks_in_seg = f2fs_usable_blks_in_seg(sbi, segno);
7bc09003
JK
1538
1539 start_addr = START_BLOCK(sbi, segno);
1540
1541next_step:
1542 entry = sum;
c718379b 1543
de881df9 1544 for (off = 0; off < usable_blks_in_seg; off++, entry++) {
7bc09003
JK
1545 struct inode *inode;
1546 struct node_info dni; /* dnode info for the data */
1547 unsigned int ofs_in_node, nofs;
1548 block_t start_bidx;
7ea984b0 1549 nid_t nid = le32_to_cpu(entry->nid);
7bc09003 1550
803e74be
JK
1551 /*
1552 * stop BG_GC if there is not enough free sections.
1553 * Or, stop GC if the segment becomes fully valid caused by
1554 * race condition along with SSR block allocation.
1555 */
1556 if ((gc_type == BG_GC && has_not_enough_free_secs(sbi, 0, 0)) ||
7dede886 1557 (!force_migrate && get_valid_blocks(sbi, segno, true) ==
074b5ea2 1558 CAP_BLKS_PER_SEC(sbi)))
48018b4c 1559 return submitted;
7bc09003 1560
43727527 1561 if (check_valid_map(sbi, segno, off) == 0)
7bc09003
JK
1562 continue;
1563
1564 if (phase == 0) {
4d57b86d 1565 f2fs_ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nid), 1,
7ea984b0
CY
1566 META_NAT, true);
1567 continue;
1568 }
1569
1570 if (phase == 1) {
4d57b86d 1571 f2fs_ra_node_page(sbi, nid);
7bc09003
JK
1572 continue;
1573 }
1574
1575 /* Get an inode by ino with checking validity */
c1079892 1576 if (!is_alive(sbi, entry, &dni, start_addr + off, &nofs))
7bc09003
JK
1577 continue;
1578
7ea984b0 1579 if (phase == 2) {
4d57b86d 1580 f2fs_ra_node_page(sbi, dni.ino);
7bc09003
JK
1581 continue;
1582 }
1583
7bc09003
JK
1584 ofs_in_node = le16_to_cpu(entry->ofs_in_node);
1585
7ea984b0 1586 if (phase == 3) {
a86e109e 1587 struct folio *data_folio;
71419129
CY
1588 int err;
1589
d4686d56 1590 inode = f2fs_iget(sb, dni.ino);
a798ff17 1591 if (IS_ERR(inode))
7bc09003
JK
1592 continue;
1593
a798ff17
CY
1594 if (is_bad_inode(inode) ||
1595 special_file(inode->i_mode)) {
1596 iput(inode);
1597 continue;
1598 }
1599
fc01008c
CY
1600 if (f2fs_has_inline_data(inode)) {
1601 iput(inode);
1602 set_sbi_flag(sbi, SBI_NEED_FSCK);
1603 f2fs_err_ratelimited(sbi,
1604 "inode %lx has both inline_data flag and "
1605 "data block, nid=%u, ofs_in_node=%u",
1606 inode->i_ino, dni.nid, ofs_in_node);
1607 continue;
1608 }
1609
71419129
CY
1610 err = f2fs_gc_pinned_control(inode, gc_type, segno);
1611 if (err == -EAGAIN) {
a22bb552
CY
1612 iput(inode);
1613 return submitted;
1614 }
1615
e4544b63 1616 if (!f2fs_down_write_trylock(
b2532c69 1617 &F2FS_I(inode)->i_gc_rwsem[WRITE])) {
bb06664a 1618 iput(inode);
6f8d4455 1619 sbi->skipped_gc_rwsem++;
bb06664a
CY
1620 continue;
1621 }
1622
6aa58d8a
CY
1623 start_bidx = f2fs_start_bidx_of_node(nofs, inode) +
1624 ofs_in_node;
1625
b40a2b00 1626 if (f2fs_meta_inode_gc_required(inode)) {
6aa58d8a
CY
1627 int err = ra_data_block(inode, start_bidx);
1628
e4544b63 1629 f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
6aa58d8a
CY
1630 if (err) {
1631 iput(inode);
1632 continue;
1633 }
1634 add_gc_inode(gc_list, inode);
1635 continue;
1636 }
1637
a86e109e 1638 data_folio = f2fs_get_read_data_folio(inode, start_bidx,
59237a21 1639 REQ_RAHEAD, true, NULL);
e4544b63 1640 f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
a86e109e 1641 if (IS_ERR(data_folio)) {
31a32688
CL
1642 iput(inode);
1643 continue;
1644 }
7bc09003 1645
a86e109e 1646 f2fs_folio_put(data_folio, false);
7dda2af8 1647 add_gc_inode(gc_list, inode);
31a32688
CL
1648 continue;
1649 }
1650
7ea984b0 1651 /* phase 4 */
7dda2af8 1652 inode = find_gc_inode(gc_list, dni.ino);
31a32688 1653 if (inode) {
82e0a5aa
CY
1654 struct f2fs_inode_info *fi = F2FS_I(inode);
1655 bool locked = false;
48018b4c 1656 int err;
82e0a5aa
CY
1657
1658 if (S_ISREG(inode->i_mode)) {
6fd257cb 1659 if (!f2fs_down_write_trylock(&fi->i_gc_rwsem[WRITE])) {
ad126ebd 1660 sbi->skipped_gc_rwsem++;
82e0a5aa 1661 continue;
ad126ebd 1662 }
e4544b63 1663 if (!f2fs_down_write_trylock(
6fd257cb 1664 &fi->i_gc_rwsem[READ])) {
6f8d4455 1665 sbi->skipped_gc_rwsem++;
6fd257cb 1666 f2fs_up_write(&fi->i_gc_rwsem[WRITE]);
82e0a5aa
CY
1667 continue;
1668 }
1669 locked = true;
73ac2f4e
CY
1670
1671 /* wait for all inflight aio data */
1672 inode_dio_wait(inode);
82e0a5aa
CY
1673 }
1674
4d57b86d 1675 start_bidx = f2fs_start_bidx_of_node(nofs, inode)
c879f90d 1676 + ofs_in_node;
b40a2b00 1677 if (f2fs_meta_inode_gc_required(inode))
48018b4c
CY
1678 err = move_data_block(inode, start_bidx,
1679 gc_type, segno, off);
4375a336 1680 else
48018b4c 1681 err = move_data_page(inode, start_bidx, gc_type,
d4c759ee 1682 segno, off);
82e0a5aa 1683
48018b4c 1684 if (!err && (gc_type == FG_GC ||
b40a2b00 1685 f2fs_meta_inode_gc_required(inode)))
48018b4c
CY
1686 submitted++;
1687
82e0a5aa 1688 if (locked) {
e4544b63 1689 f2fs_up_write(&fi->i_gc_rwsem[READ]);
6fd257cb 1690 f2fs_up_write(&fi->i_gc_rwsem[WRITE]);
82e0a5aa
CY
1691 }
1692
e1235983 1693 stat_inc_data_blk_count(sbi, 1, gc_type);
7bc09003 1694 }
7bc09003 1695 }
c718379b 1696
7ea984b0 1697 if (++phase < 5)
7bc09003 1698 goto next_step;
48018b4c
CY
1699
1700 return submitted;
7bc09003
JK
1701}
1702
1703static int __get_victim(struct f2fs_sb_info *sbi, unsigned int *victim,
e791d00b 1704 int gc_type, bool one_time)
7bc09003
JK
1705{
1706 struct sit_info *sit_i = SIT_I(sbi);
1707 int ret;
8a2d0ace 1708
3d26fa6b 1709 down_write(&sit_i->sentry_lock);
e791d00b
DJ
1710 ret = f2fs_get_victim(sbi, victim, gc_type, NO_CHECK_TYPE,
1711 LFS, 0, one_time);
3d26fa6b 1712 up_write(&sit_i->sentry_lock);
7bc09003
JK
1713 return ret;
1714}
1715
718e53fa
CY
1716static int do_garbage_collect(struct f2fs_sb_info *sbi,
1717 unsigned int start_segno,
7dede886 1718 struct gc_inode_list *gc_list, int gc_type,
9748c2dd 1719 bool force_migrate, bool one_time)
7bc09003 1720{
c718379b 1721 struct blk_plug plug;
718e53fa 1722 unsigned int segno = start_segno;
a60108f7 1723 unsigned int end_segno = start_segno + SEGS_PER_SEC(sbi);
8c890c4c 1724 unsigned int sec_end_segno;
e3080b01 1725 int seg_freed = 0, migrated = 0;
718e53fa
CY
1726 unsigned char type = IS_DATASEG(get_seg_entry(sbi, segno)->type) ?
1727 SUM_TYPE_DATA : SUM_TYPE_NODE;
9bf1dcbd 1728 unsigned char data_type = (type == SUM_TYPE_DATA) ? DATA : NODE;
48018b4c 1729 int submitted = 0;
7bc09003 1730
8c890c4c
DJ
1731 if (__is_large_section(sbi)) {
1732 sec_end_segno = rounddown(end_segno, SEGS_PER_SEC(sbi));
e3080b01 1733
8c890c4c
DJ
1734 /*
1735 * zone-capacity can be less than zone-size in zoned devices,
1736 * resulting in less than expected usable segments in the zone,
1737 * calculate the end segno in the zone which can be garbage
1738 * collected
1739 */
1740 if (f2fs_sb_has_blkzoned(sbi))
1741 sec_end_segno -= SEGS_PER_SEC(sbi) -
2af583af 1742 f2fs_usable_segs_in_sec(sbi);
de881df9 1743
9748c2dd 1744 if (gc_type == BG_GC || one_time) {
2223fe65 1745 unsigned int window_granularity =
8c890c4c
DJ
1746 sbi->migration_window_granularity;
1747
2223fe65
DJ
1748 if (f2fs_sb_has_blkzoned(sbi) &&
1749 !has_enough_free_blocks(sbi,
9a481a1c
DJ
1750 sbi->gc_thread->boost_zoned_gc_percent))
1751 window_granularity *=
1752 BOOST_GC_MULTIPLE;
2223fe65
DJ
1753
1754 end_segno = start_segno + window_granularity;
1755 }
1756
8c890c4c
DJ
1757 if (end_segno > sec_end_segno)
1758 end_segno = sec_end_segno;
1759 }
1760
093749e2
CY
1761 sanity_check_seg_type(sbi, get_seg_entry(sbi, segno)->type);
1762
718e53fa 1763	/* readahead multiple SSA blocks that have contiguous addresses */
2c70c5e3 1764 if (__is_large_section(sbi))
4d57b86d 1765 f2fs_ra_meta_pages(sbi, GET_SUM_BLOCK(sbi, segno),
e3080b01 1766 end_segno - segno, META_SSA, true);
718e53fa
CY
1767
 1768	/* reference all summary folios */
1769 while (segno < end_segno) {
5d895f7b
MWO
1770 struct folio *sum_folio = f2fs_get_sum_folio(sbi, segno++);
1771 if (IS_ERR(sum_folio)) {
1772 int err = PTR_ERR(sum_folio);
edc55aaf
JK
1773
1774 end_segno = segno - 1;
1775 for (segno = start_segno; segno < end_segno; segno++) {
5d895f7b 1776 sum_folio = filemap_get_folio(META_MAPPING(sbi),
edc55aaf 1777 GET_SUM_BLOCK(sbi, segno));
5d895f7b 1778 folio_put_refs(sum_folio, 2);
edc55aaf
JK
1779 }
1780 return err;
1781 }
5d895f7b 1782 folio_unlock(sum_folio);
718e53fa 1783 }
7bc09003 1784
c718379b
JK
1785 blk_start_plug(&plug);
1786
718e53fa 1787 for (segno = start_segno; segno < end_segno; segno++) {
5d895f7b 1788 struct f2fs_summary_block *sum;
aa987273 1789
718e53fa 1790 /* find segment summary of victim */
5d895f7b 1791 struct folio *sum_folio = filemap_get_folio(META_MAPPING(sbi),
718e53fa 1792 GET_SUM_BLOCK(sbi, segno));
7bc09003 1793
d6c66cd1
YS
1794 if (get_valid_blocks(sbi, segno, false) == 0)
1795 goto freed;
dabfbbc8 1796 if (gc_type == BG_GC && __is_large_section(sbi) &&
e3080b01
CY
1797 migrated >= sbi->migration_granularity)
1798 goto skip;
5d895f7b
MWO
1799 if (!folio_test_uptodate(sum_folio) ||
1800 unlikely(f2fs_cp_error(sbi)))
e3080b01 1801 goto skip;
de0dcc40 1802
5d895f7b 1803 sum = folio_address(sum_folio);
10d255c3 1804 if (type != GET_SUM_TYPE((&sum->footer))) {
dcbb4c10
JP
1805 f2fs_err(sbi, "Inconsistent segment (%u) type [%d, %d] in SSA and SIT",
1806 segno, type, GET_SUM_TYPE((&sum->footer)));
a9cfee0e
CY
1807 f2fs_stop_checkpoint(sbi, false,
1808 STOP_CP_REASON_CORRUPTED_SUMMARY);
e3080b01 1809 goto skip;
10d255c3 1810 }
718e53fa
CY
1811
1812 /*
1813 * this is to avoid deadlock:
1814 * - lock_page(sum_page) - f2fs_replace_block
3d26fa6b
CY
1815 * - check_valid_map() - down_write(sentry_lock)
1816 * - down_read(sentry_lock) - change_curseg()
718e53fa
CY
1817 * - lock_page(sum_page)
1818 */
718e53fa 1819 if (type == SUM_TYPE_NODE)
48018b4c 1820 submitted += gc_node_segment(sbi, sum->entries, segno,
718e53fa 1821 gc_type);
48018b4c
CY
1822 else
1823 submitted += gc_data_segment(sbi, sum->entries, gc_list,
7dede886
CY
1824 segno, gc_type,
1825 force_migrate);
718e53fa 1826
9bf1dcbd 1827 stat_inc_gc_seg_count(sbi, data_type, gc_type);
07c6b593 1828 sbi->gc_reclaimed_segs[sbi->gc_mode]++;
8c7b9ac1 1829 migrated++;
c56f16da 1830
d6c66cd1 1831freed:
c56f16da
CY
1832 if (gc_type == FG_GC &&
1833 get_valid_blocks(sbi, segno, false) == 0)
1834 seg_freed++;
e3080b01 1835
e219aecf
YS
1836 if (__is_large_section(sbi))
1837 sbi->next_victim_seg[gc_type] =
8c890c4c
DJ
1838 (segno + 1 < sec_end_segno) ?
1839 segno + 1 : NULL_SEGNO;
e3080b01 1840skip:
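		/*
		 * Drop both references on the summary folio: the one taken in
		 * the reference-all loop above and the one just taken by
		 * filemap_get_folio() in this loop.
		 */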
5d895f7b 1841 folio_put_refs(sum_folio, 2);
718e53fa
CY
1842 }
1843
48018b4c 1844 if (submitted)
9bf1dcbd 1845 f2fs_submit_merged_write(sbi, data_type);
c718379b 1846
718e53fa 1847 blk_finish_plug(&plug);
7bc09003 1848
9bf1dcbd
CY
1849 if (migrated)
1850 stat_inc_gc_sec_count(sbi, data_type, gc_type);
17d899df 1851
c56f16da 1852 return seg_freed;
7bc09003
JK
1853}
1854
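/*
 * Main entry point for both background and foreground GC.  The caller
 * must hold sbi->gc_lock for write; it is released on the stop: path
 * before returning.  Foreground GC keeps looping (gc_more) until enough
 * sections are free, writing checkpoints to reclaim prefree segments or
 * to make progress when victims are repeatedly skipped because
 * i_gc_rwsem could not be taken.
 */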
d147ea4a 1855int f2fs_gc(struct f2fs_sb_info *sbi, struct f2fs_gc_control *gc_control)
7bc09003 1856{
d147ea4a
JK
1857 int gc_type = gc_control->init_gc_type;
1858 unsigned int segno = gc_control->victim_segno;
36ded4c1 1859 int sec_freed = 0, seg_freed = 0, total_freed = 0, total_sec_freed = 0;
c56f16da 1860 int ret = 0;
d5053a34 1861 struct cp_control cpc;
7dda2af8
CL
1862 struct gc_inode_list gc_list = {
1863 .ilist = LIST_HEAD_INIT(gc_list.ilist),
f6bb2a2c 1864 .iroot = RADIX_TREE_INIT(gc_list.iroot, GFP_NOFS),
7dda2af8 1865 };
2ef79ecb 1866 unsigned int skipped_round = 0, round = 0;
d11cef14 1867 unsigned int upper_secs;
d5053a34 1868
d147ea4a 1869 trace_f2fs_gc_begin(sbi->sb, gc_type, gc_control->no_bg_gc,
c81d5bae 1870 gc_control->nr_free_secs,
c56f16da
CY
1871 get_pages(sbi, F2FS_DIRTY_NODES),
1872 get_pages(sbi, F2FS_DIRTY_DENTS),
1873 get_pages(sbi, F2FS_DIRTY_IMETA),
1874 free_sections(sbi),
1875 free_segments(sbi),
1876 reserved_segments(sbi),
1877 prefree_segments(sbi));
1878
119ee914 1879 cpc.reason = __get_cp_reason(sbi);
7bc09003 1880gc_more:
c17caf0b 1881 sbi->skipped_gc_rwsem = 0;
1751e8a6 1882 if (unlikely(!(sbi->sb->s_flags & SB_ACTIVE))) {
e5dbd956 1883 ret = -EINVAL;
408e9375 1884 goto stop;
e5dbd956 1885 }
6d5a1495
CY
1886 if (unlikely(f2fs_cp_error(sbi))) {
1887 ret = -EIO;
203681f6 1888 goto stop;
6d5a1495 1889 }
7bc09003 1890
2d3f197b
JK
1891 /* Let's run FG_GC, if we don't have enough space. */
1892 if (has_not_enough_free_secs(sbi, 0, 0)) {
1893 gc_type = FG_GC;
1894
6e17bfbc 1895 /*
19f4e688
HP
 1896		 * For example, if there are many prefree_segments below the given
 1897		 * threshold, we can make them free by writing a checkpoint. Then, we
 1898		 * secure free segments which don't need fggc any more.
6e17bfbc 1899 */
d147ea4a 1900 if (prefree_segments(sbi)) {
eb61c2cc 1901 stat_inc_cp_call_count(sbi, TOTAL_CALL);
4d57b86d 1902 ret = f2fs_write_checkpoint(sbi, &cpc);
8fd5a37e
JK
1903 if (ret)
1904 goto stop;
36ded4c1
YS
1905 /* Reset due to checkpoint */
1906 sec_freed = 0;
8fd5a37e 1907 }
d64f8047 1908 }
7bc09003 1909
19f4e688 1910 /* f2fs_balance_fs doesn't need to do BG_GC in critical path. */
d147ea4a 1911 if (gc_type == BG_GC && gc_control->no_bg_gc) {
c56f16da 1912 ret = -EINVAL;
19f4e688 1913 goto stop;
c56f16da 1914 }
71419129 1915retry:
e791d00b 1916 ret = __get_victim(sbi, &segno, gc_type, gc_control->one_time);
71419129
CY
1917 if (ret) {
 1918		/* allow searching for a victim in sections that have pinned data */
1919 if (ret == -ENODATA && gc_type == FG_GC &&
1920 f2fs_pinned_section_exists(DIRTY_I(sbi))) {
1921 f2fs_unpin_all_sections(sbi, false);
1922 goto retry;
1923 }
408e9375 1924 goto stop;
71419129 1925 }
7bc09003 1926
d147ea4a 1927 seg_freed = do_garbage_collect(sbi, segno, &gc_list, gc_type,
9748c2dd
DJ
1928 gc_control->should_migrate_blocks,
1929 gc_control->one_time);
19ec1d31
YY
1930 if (seg_freed < 0)
1931 goto stop;
1932
c56f16da 1933 total_freed += seg_freed;
43727527 1934
2af583af 1935 if (seg_freed == f2fs_usable_segs_in_sec(sbi)) {
d147ea4a 1936 sec_freed++;
36ded4c1
YS
1937 total_sec_freed++;
1938 }
2ef79ecb 1939
9748c2dd
DJ
1940 if (gc_control->one_time)
1941 goto stop;
1942
2d3f197b 1943 if (gc_type == FG_GC) {
5ec4e49f 1944 sbi->cur_victim_sec = NULL_SEGNO;
43727527 1945
c1660d88 1946 if (has_enough_free_secs(sbi, sec_freed, 0)) {
2d3f197b 1947 if (!gc_control->no_bg_gc &&
36ded4c1 1948 total_sec_freed < gc_control->nr_free_secs)
2d3f197b
JK
1949 goto go_gc_more;
1950 goto stop;
1951 }
d147ea4a
JK
1952 if (sbi->skipped_gc_rwsem)
1953 skipped_round++;
1954 round++;
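		/*
		 * If more than MAX_SKIP_GC_COUNT rounds were skipped, and at
		 * least half of all rounds were skipped because i_gc_rwsem
		 * could not be taken, write a checkpoint instead of retrying
		 * GC indefinitely.
		 */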
1955 if (skipped_round > MAX_SKIP_GC_COUNT &&
1956 skipped_round * 2 >= round) {
eb61c2cc 1957 stat_inc_cp_call_count(sbi, TOTAL_CALL);
4d57b86d 1958 ret = f2fs_write_checkpoint(sbi, &cpc);
d147ea4a 1959 goto stop;
a9163b94 1960 }
c1660d88 1961 } else if (has_enough_free_secs(sbi, 0, 0)) {
2d3f197b 1962 goto stop;
a9163b94 1963 }
d147ea4a 1964
d11cef14
YS
1965 __get_secs_required(sbi, NULL, &upper_secs, NULL);
1966
1967 /*
1968 * Write checkpoint to reclaim prefree segments.
 1969	 * We need three more sections for the writer's data/node/dentry.
1970 */
1971 if (free_sections(sbi) <= upper_secs + NR_GC_CHECKPOINT_SECS &&
d147ea4a 1972 prefree_segments(sbi)) {
eb61c2cc 1973 stat_inc_cp_call_count(sbi, TOTAL_CALL);
a9163b94 1974 ret = f2fs_write_checkpoint(sbi, &cpc);
d147ea4a
JK
1975 if (ret)
1976 goto stop;
36ded4c1
YS
1977 /* Reset due to checkpoint */
1978 sec_freed = 0;
d147ea4a 1979 }
c81d5bae 1980go_gc_more:
d147ea4a
JK
1981 segno = NULL_SEGNO;
1982 goto gc_more;
1983
408e9375 1984stop:
e066b83c 1985 SIT_I(sbi)->last_victim[ALLOC_NEXT] = 0;
d147ea4a 1986 SIT_I(sbi)->last_victim[FLUSH_DEVICE] = gc_control->victim_segno;
c56f16da 1987
71419129
CY
1988 if (gc_type == FG_GC)
1989 f2fs_unpin_all_sections(sbi, true);
1990
36ded4c1 1991 trace_f2fs_gc_end(sbi->sb, ret, total_freed, total_sec_freed,
c56f16da
CY
1992 get_pages(sbi, F2FS_DIRTY_NODES),
1993 get_pages(sbi, F2FS_DIRTY_DENTS),
1994 get_pages(sbi, F2FS_DIRTY_IMETA),
1995 free_sections(sbi),
1996 free_segments(sbi),
1997 reserved_segments(sbi),
1998 prefree_segments(sbi));
1999
e4544b63 2000 f2fs_up_write(&sbi->gc_lock);
7bc09003 2001
7dda2af8 2002 put_gc_inode(&gc_list);
d530d4d8 2003
d147ea4a 2004 if (gc_control->err_gc_skipped && !ret)
36ded4c1 2005 ret = total_sec_freed ? 0 : -EAGAIN;
43727527 2006 return ret;
7bc09003
JK
2007}
2008
093749e2
CY
2009int __init f2fs_create_garbage_collection_cache(void)
2010{
2011 victim_entry_slab = f2fs_kmem_cache_create("f2fs_victim_entry",
2012 sizeof(struct victim_entry));
870af777 2013 return victim_entry_slab ? 0 : -ENOMEM;
093749e2
CY
2014}
2015
2016void f2fs_destroy_garbage_collection_cache(void)
2017{
2018 kmem_cache_destroy(victim_entry_slab);
2019}
2020
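/*
 * ATGC is only armed when the mount option is set and the filesystem has
 * aged past DEF_GC_THREAD_AGE_THRESHOLD; the victim tree/list and default
 * candidate/age parameters are initialized here in either case.
 */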
2021static void init_atgc_management(struct f2fs_sb_info *sbi)
2022{
2023 struct atgc_management *am = &sbi->am;
2024
2025 if (test_opt(sbi, ATGC) &&
2026 SIT_I(sbi)->elapsed_time >= DEF_GC_THREAD_AGE_THRESHOLD)
2027 am->atgc_enabled = true;
2028
2029 am->root = RB_ROOT_CACHED;
2030 INIT_LIST_HEAD(&am->victim_list);
2031 am->victim_count = 0;
2032
2033 am->candidate_ratio = DEF_GC_THREAD_CANDIDATE_RATIO;
2034 am->max_candidate_count = DEF_GC_THREAD_MAX_CANDIDATE_COUNT;
2035 am->age_weight = DEF_GC_THREAD_AGE_WEIGHT;
89e53ff1 2036 am->age_threshold = DEF_GC_THREAD_AGE_THRESHOLD;
093749e2
CY
2037}
2038
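/*
 * One-time GC manager setup: the pinned-file GC failure threshold, the
 * starting point of the ALLOC_NEXT victim search on multi-device layouts,
 * and the ATGC state.
 */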
4d57b86d 2039void f2fs_build_gc_manager(struct f2fs_sb_info *sbi)
7bc09003 2040{
1ad71a27 2041 sbi->gc_pin_file_threshold = DEF_GC_FAILED_PINNED_FILES;
d5793249
JK
2042
 2043	/* serve the warm/cold data area from the slower device */
0916878d 2044 if (f2fs_is_multi_device(sbi) && !__is_large_section(sbi))
d5793249
JK
2045 SIT_I(sbi)->last_victim[ALLOC_NEXT] =
2046 GET_SEGNO(sbi, FDEV(0).end_blk) + 1;
093749e2
CY
2047
2048 init_atgc_management(sbi);
7bc09003 2049}
04f0b2ea 2050
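/*
 * Foreground-GC every section in [start_seg, end_seg], skipping the
 * currently open sections.  In dry_run mode, stop once dry_run_sections
 * sections have been found fully clean; otherwise fail with -EAGAIN if a
 * section still holds valid blocks after GC.
 */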
9703d69d
DJ
2051int f2fs_gc_range(struct f2fs_sb_info *sbi,
2052 unsigned int start_seg, unsigned int end_seg,
2053 bool dry_run, unsigned int dry_run_sections)
2f0209f5
DJ
2054{
2055 unsigned int segno;
9703d69d 2056 unsigned int gc_secs = dry_run_sections;
2f0209f5 2057
22af1b8c
ZN
2058 if (unlikely(f2fs_cp_error(sbi)))
2059 return -EIO;
2060
2f0209f5
DJ
2061 for (segno = start_seg; segno <= end_seg; segno += SEGS_PER_SEC(sbi)) {
2062 struct gc_inode_list gc_list = {
2063 .ilist = LIST_HEAD_INIT(gc_list.ilist),
2064 .iroot = RADIX_TREE_INIT(gc_list.iroot, GFP_NOFS),
2065 };
2066
773704c1
CY
2067 if (IS_CURSEC(sbi, GET_SEC_FROM_SEG(sbi, segno)))
2068 continue;
2069
5cc69a27 2070 do_garbage_collect(sbi, segno, &gc_list, FG_GC, true, false);
2f0209f5
DJ
2071 put_gc_inode(&gc_list);
2072
2073 if (!dry_run && get_valid_blocks(sbi, segno, true))
2074 return -EAGAIN;
9703d69d
DJ
2075 if (dry_run && dry_run_sections &&
2076 !get_valid_blocks(sbi, segno, true) && --gc_secs == 0)
2077 break;
2f0209f5
DJ
2078
2079 if (fatal_signal_pending(current))
2080 return -ERESTARTSYS;
2081 }
2082
2083 return 0;
2084}
2085
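/*
 * Evacuate the last @secs sections of the main area so resize can release
 * them.  MAIN_SECS() is temporarily reduced so that new allocations and
 * victim hints stay below the target range, the open segments are
 * reallocated out of the range, and the range is then garbage collected.
 * Unless @dry_run, a checkpoint is written and the range is verified to
 * contain no in-use segments.
 */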
b4b10061 2086static int free_segment_range(struct f2fs_sb_info *sbi,
2f0209f5 2087 unsigned int secs, bool dry_run)
04f0b2ea 2088{
2f0209f5 2089 unsigned int next_inuse, start, end;
b4b10061
JK
2090 struct cp_control cpc = { CP_RESIZE, 0, 0, 0 };
2091 int gc_mode, gc_type;
04f0b2ea 2092 int err = 0;
b4b10061
JK
2093 int type;
2094
2095 /* Force block allocation for GC */
2096 MAIN_SECS(sbi) -= secs;
a60108f7 2097 start = MAIN_SECS(sbi) * SEGS_PER_SEC(sbi);
b4b10061
JK
2098 end = MAIN_SEGS(sbi) - 1;
2099
2100 mutex_lock(&DIRTY_I(sbi)->seglist_lock);
2101 for (gc_mode = 0; gc_mode < MAX_GC_POLICY; gc_mode++)
2102 if (SIT_I(sbi)->last_victim[gc_mode] >= start)
2103 SIT_I(sbi)->last_victim[gc_mode] = 0;
2104
2105 for (gc_type = BG_GC; gc_type <= FG_GC; gc_type++)
2106 if (sbi->next_victim_seg[gc_type] >= start)
2107 sbi->next_victim_seg[gc_type] = NULL_SEGNO;
2108 mutex_unlock(&DIRTY_I(sbi)->seglist_lock);
04f0b2ea
QS
2109
2110 /* Move out cursegs from the target range */
24593061
ZN
2111 for (type = CURSEG_HOT_DATA; type < NR_CURSEG_PERSIST_TYPE; type++) {
2112 err = f2fs_allocate_segment_for_resize(sbi, type, start, end);
2113 if (err)
2114 goto out;
2115 }
04f0b2ea
QS
2116
2117 /* do GC to move out valid blocks in the range */
9703d69d 2118 err = f2fs_gc_range(sbi, start, end, dry_run, 0);
2f0209f5 2119 if (err || dry_run)
b4b10061 2120 goto out;
04f0b2ea 2121
eb61c2cc 2122 stat_inc_cp_call_count(sbi, TOTAL_CALL);
b4b10061 2123 err = f2fs_write_checkpoint(sbi, &cpc);
04f0b2ea 2124 if (err)
b4b10061 2125 goto out;
04f0b2ea
QS
2126
2127 next_inuse = find_next_inuse(FREE_I(sbi), end + 1, start);
2128 if (next_inuse <= end) {
dcbb4c10
JP
2129 f2fs_err(sbi, "segno %u should be free but still inuse!",
2130 next_inuse);
04f0b2ea
QS
2131 f2fs_bug_on(sbi, 1);
2132 }
b4b10061
JK
2133out:
2134 MAIN_SECS(sbi) += secs;
04f0b2ea
QS
2135 return err;
2136}
2137
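/*
 * Adjust the on-disk superblock counters by @secs sections (negative when
 * shrinking).  Section, segment and block counts are updated under
 * sb_lock, along with the last device's segment count on multi-device
 * setups.
 */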
2138static void update_sb_metadata(struct f2fs_sb_info *sbi, int secs)
2139{
2140 struct f2fs_super_block *raw_sb = F2FS_RAW_SUPER(sbi);
a4ba5dfc
CY
2141 int section_count;
2142 int segment_count;
2143 int segment_count_main;
2144 long long block_count;
a60108f7 2145 int segs = secs * SEGS_PER_SEC(sbi);
04f0b2ea 2146
e4544b63 2147 f2fs_down_write(&sbi->sb_lock);
a4ba5dfc
CY
2148
2149 section_count = le32_to_cpu(raw_sb->section_count);
2150 segment_count = le32_to_cpu(raw_sb->segment_count);
2151 segment_count_main = le32_to_cpu(raw_sb->segment_count_main);
2152 block_count = le64_to_cpu(raw_sb->block_count);
2153
04f0b2ea
QS
2154 raw_sb->section_count = cpu_to_le32(section_count + secs);
2155 raw_sb->segment_count = cpu_to_le32(segment_count + segs);
2156 raw_sb->segment_count_main = cpu_to_le32(segment_count_main + segs);
2157 raw_sb->block_count = cpu_to_le64(block_count +
45809cd3 2158 (long long)SEGS_TO_BLKS(sbi, segs));
46d9ce19
QS
2159 if (f2fs_is_multi_device(sbi)) {
2160 int last_dev = sbi->s_ndevs - 1;
2161 int dev_segs =
2162 le32_to_cpu(raw_sb->devs[last_dev].total_segments);
2163
2164 raw_sb->devs[last_dev].total_segments =
2165 cpu_to_le32(dev_segs + segs);
2166 }
a4ba5dfc 2167
e4544b63 2168 f2fs_up_write(&sbi->sb_lock);
04f0b2ea
QS
2169}
2170
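/*
 * Mirror the same adjustment in the in-memory counters: segment/section
 * totals, free counts, the checkpoint's user_block_count, and the last
 * device's geometry (including the zone count on zoned devices).
 */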
2171static void update_fs_metadata(struct f2fs_sb_info *sbi, int secs)
2172{
a60108f7 2173 int segs = secs * SEGS_PER_SEC(sbi);
45809cd3 2174 long long blks = SEGS_TO_BLKS(sbi, segs);
04f0b2ea
QS
2175 long long user_block_count =
2176 le64_to_cpu(F2FS_CKPT(sbi)->user_block_count);
2177
2178 SM_I(sbi)->segment_count = (int)SM_I(sbi)->segment_count + segs;
2179 MAIN_SEGS(sbi) = (int)MAIN_SEGS(sbi) + segs;
b4b10061 2180 MAIN_SECS(sbi) += secs;
04f0b2ea
QS
2181 FREE_I(sbi)->free_sections = (int)FREE_I(sbi)->free_sections + secs;
2182 FREE_I(sbi)->free_segments = (int)FREE_I(sbi)->free_segments + segs;
46d9ce19
QS
2183 F2FS_CKPT(sbi)->user_block_count = cpu_to_le64(user_block_count + blks);
2184
2185 if (f2fs_is_multi_device(sbi)) {
2186 int last_dev = sbi->s_ndevs - 1;
2187
2188 FDEV(last_dev).total_segments =
2189 (int)FDEV(last_dev).total_segments + segs;
2190 FDEV(last_dev).end_blk =
2191 (long long)FDEV(last_dev).end_blk + blks;
2192#ifdef CONFIG_BLK_DEV_ZONED
2e2c6e9b
JK
2193 FDEV(last_dev).nr_blkz = FDEV(last_dev).nr_blkz +
2194 div_u64(blks, sbi->blocks_per_blkz);
46d9ce19
QS
2195#endif
2196 }
04f0b2ea
QS
2197}
2198
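/*
 * Shrink the filesystem to @block_count blocks (growing is not supported
 * here); the new size must be section-aligned.  The shrink is first
 * dry-run under f2fs_lock_op() with gc_lock held, then done for real with
 * the superblock frozen and both gc_lock and cp_global_sem held: the tail
 * sections are freed, the superblock and in-memory metadata are updated,
 * and a checkpoint is written.  Any failure rolls the metadata back and
 * flags the filesystem for fsck.
 */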
d8189834 2199int f2fs_resize_fs(struct file *filp, __u64 block_count)
04f0b2ea 2200{
d8189834 2201 struct f2fs_sb_info *sbi = F2FS_I_SB(file_inode(filp));
04f0b2ea 2202 __u64 old_block_count, shrunk_blocks;
b4b10061 2203 struct cp_control cpc = { CP_RESIZE, 0, 0, 0 };
04f0b2ea 2204 unsigned int secs;
04f0b2ea
QS
2205 int err = 0;
2206 __u32 rem;
2207
2208 old_block_count = le64_to_cpu(F2FS_RAW_SUPER(sbi)->block_count);
2209 if (block_count > old_block_count)
2210 return -EINVAL;
2211
46d9ce19
QS
2212 if (f2fs_is_multi_device(sbi)) {
2213 int last_dev = sbi->s_ndevs - 1;
2214 __u64 last_segs = FDEV(last_dev).total_segments;
2215
45809cd3 2216 if (block_count + SEGS_TO_BLKS(sbi, last_segs) <=
46d9ce19
QS
2217 old_block_count)
2218 return -EINVAL;
2219 }
2220
04f0b2ea
QS
2221 /* new fs size should align to section size */
2222 div_u64_rem(block_count, BLKS_PER_SEC(sbi), &rem);
2223 if (rem)
2224 return -EINVAL;
2225
2226 if (block_count == old_block_count)
2227 return 0;
2228
2229 if (is_sbi_flag_set(sbi, SBI_NEED_FSCK)) {
dcbb4c10 2230 f2fs_err(sbi, "Should run fsck to repair first.");
10f966bb 2231 return -EFSCORRUPTED;
04f0b2ea
QS
2232 }
2233
2234 if (test_opt(sbi, DISABLE_CHECKPOINT)) {
dcbb4c10 2235 f2fs_err(sbi, "Checkpoint should be enabled.");
04f0b2ea
QS
2236 return -EINVAL;
2237 }
2238
d8189834
CY
2239 err = mnt_want_write_file(filp);
2240 if (err)
2241 return err;
2242
04f0b2ea
QS
2243 shrunk_blocks = old_block_count - block_count;
2244 secs = div_u64(shrunk_blocks, BLKS_PER_SEC(sbi));
b4b10061
JK
2245
2246 /* stop other GC */
d8189834
CY
2247 if (!f2fs_down_write_trylock(&sbi->gc_lock)) {
2248 err = -EAGAIN;
2249 goto out_drop_write;
2250 }
b4b10061
JK
2251
2252 /* stop CP to protect MAIN_SEC in free_segment_range */
2253 f2fs_lock_op(sbi);
3ab0598e
CY
2254
2255 spin_lock(&sbi->stat_lock);
2256 if (shrunk_blocks + valid_user_blocks(sbi) +
2257 sbi->current_reserved_blocks + sbi->unusable_block_count +
2258 F2FS_OPTION(sbi).root_reserved_blocks > sbi->user_block_count)
2259 err = -ENOSPC;
2260 spin_unlock(&sbi->stat_lock);
2261
2262 if (err)
2263 goto out_unlock;
2264
b4b10061 2265 err = free_segment_range(sbi, secs, true);
3ab0598e
CY
2266
2267out_unlock:
b4b10061 2268 f2fs_unlock_op(sbi);
e4544b63 2269 f2fs_up_write(&sbi->gc_lock);
d8189834
CY
2270out_drop_write:
2271 mnt_drop_write_file(filp);
b4b10061
JK
2272 if (err)
2273 return err;
2274
1afe9e7d 2275 err = freeze_super(sbi->sb, FREEZE_HOLDER_KERNEL, NULL);
8bec7dd1
CY
2276 if (err)
2277 return err;
d8189834
CY
2278
2279 if (f2fs_readonly(sbi->sb)) {
1afe9e7d 2280 err = thaw_super(sbi->sb, FREEZE_HOLDER_KERNEL, NULL);
880b9577
DW
2281 if (err)
2282 return err;
d8189834
CY
2283 return -EROFS;
2284 }
2285
e4544b63
TM
2286 f2fs_down_write(&sbi->gc_lock);
2287 f2fs_down_write(&sbi->cp_global_sem);
b4b10061 2288
04f0b2ea
QS
2289 spin_lock(&sbi->stat_lock);
2290 if (shrunk_blocks + valid_user_blocks(sbi) +
2291 sbi->current_reserved_blocks + sbi->unusable_block_count +
2292 F2FS_OPTION(sbi).root_reserved_blocks > sbi->user_block_count)
2293 err = -ENOSPC;
2294 else
2295 sbi->user_block_count -= shrunk_blocks;
2296 spin_unlock(&sbi->stat_lock);
b4b10061
JK
2297 if (err)
2298 goto out_err;
04f0b2ea 2299
28fc4e90 2300 set_sbi_flag(sbi, SBI_IS_RESIZEFS);
b4b10061 2301 err = free_segment_range(sbi, secs, false);
04f0b2ea 2302 if (err)
b4b10061 2303 goto recover_out;
04f0b2ea
QS
2304
2305 update_sb_metadata(sbi, -secs);
2306
2307 err = f2fs_commit_super(sbi, false);
2308 if (err) {
2309 update_sb_metadata(sbi, secs);
b4b10061 2310 goto recover_out;
04f0b2ea
QS
2311 }
2312
2313 update_fs_metadata(sbi, -secs);
2314 clear_sbi_flag(sbi, SBI_IS_RESIZEFS);
68275682 2315 set_sbi_flag(sbi, SBI_IS_DIRTY);
68275682 2316
eb61c2cc 2317 stat_inc_cp_call_count(sbi, TOTAL_CALL);
b4b10061 2318 err = f2fs_write_checkpoint(sbi, &cpc);
04f0b2ea
QS
2319 if (err) {
2320 update_fs_metadata(sbi, secs);
2321 update_sb_metadata(sbi, secs);
2322 f2fs_commit_super(sbi, false);
2323 }
b4b10061 2324recover_out:
28fc4e90 2325 clear_sbi_flag(sbi, SBI_IS_RESIZEFS);
04f0b2ea
QS
2326 if (err) {
2327 set_sbi_flag(sbi, SBI_NEED_FSCK);
dcbb4c10 2328 f2fs_err(sbi, "resize_fs failed, should run fsck to repair!");
04f0b2ea 2329
04f0b2ea
QS
2330 spin_lock(&sbi->stat_lock);
2331 sbi->user_block_count += shrunk_blocks;
2332 spin_unlock(&sbi->stat_lock);
2333 }
b4b10061 2334out_err:
e4544b63
TM
2335 f2fs_up_write(&sbi->cp_global_sem);
2336 f2fs_up_write(&sbi->gc_lock);
1afe9e7d 2337 thaw_super(sbi->sb, FREEZE_HOLDER_KERNEL, NULL);
04f0b2ea
QS
2338 return err;
2339}