// SPDX-License-Identifier: GPL-2.0
/*
 * fs/f2fs/gc.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 */
#include <linux/fs.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/f2fs_fs.h>
#include <linux/kthread.h>
#include <linux/delay.h>
#include <linux/freezer.h>
#include <linux/sched/signal.h>
#include <linux/random.h>
#include <linux/sched/mm.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include "gc.h"
#include "iostat.h"
#include <trace/events/f2fs.h>

static struct kmem_cache *victim_entry_slab;

static unsigned int count_bits(const unsigned long *addr,
				unsigned int offset, unsigned int len);

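/*
 * Background GC thread: it wakes up periodically, or on demand when
 * gc_wake is set or a foreground waiter sleeps on fggc_wq, checks whether
 * the filesystem is idle, and then runs one round of GC while tuning its
 * sleep time based on how much invalid space is available.
 */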
static int gc_thread_func(void *data)
{
	struct f2fs_sb_info *sbi = data;
	struct f2fs_gc_kthread *gc_th = sbi->gc_thread;
	wait_queue_head_t *wq = &sbi->gc_thread->gc_wait_queue_head;
	wait_queue_head_t *fggc_wq = &sbi->gc_thread->fggc_wq;
	unsigned int wait_ms;
	struct f2fs_gc_control gc_control = {
		.victim_segno = NULL_SEGNO,
		.should_migrate_blocks = false,
		.err_gc_skipped = false };

	wait_ms = gc_th->min_sleep_time;

	set_freezable();
	do {
		bool sync_mode, foreground = false;

		wait_event_freezable_timeout(*wq,
				kthread_should_stop() ||
				waitqueue_active(fggc_wq) ||
				gc_th->gc_wake,
				msecs_to_jiffies(wait_ms));

		if (test_opt(sbi, GC_MERGE) && waitqueue_active(fggc_wq))
			foreground = true;

		/* give it a try one time */
		if (gc_th->gc_wake)
			gc_th->gc_wake = false;

		if (f2fs_readonly(sbi->sb)) {
			stat_other_skip_bggc_count(sbi);
			continue;
		}
		if (kthread_should_stop())
			break;

		if (sbi->sb->s_writers.frozen >= SB_FREEZE_WRITE) {
			increase_sleep_time(gc_th, &wait_ms);
			stat_other_skip_bggc_count(sbi);
			continue;
		}

		if (time_to_inject(sbi, FAULT_CHECKPOINT))
			f2fs_stop_checkpoint(sbi, false,
					STOP_CP_REASON_FAULT_INJECT);

		if (!sb_start_write_trylock(sbi->sb)) {
			stat_other_skip_bggc_count(sbi);
			continue;
		}

		/*
		 * [GC triggering condition]
		 * 0. GC is not conducted currently.
		 * 1. There are enough dirty segments.
		 * 2. IO subsystem is idle by checking the # of writeback pages.
		 * 3. IO subsystem is idle by checking the # of requests in
		 *    bdev's request list.
		 *
		 * Note) We have to avoid triggering GCs frequently.
		 * Because it is possible that some segments can be
		 * invalidated soon after by user update or deletion.
		 * So, I'd like to wait some time to collect dirty segments.
		 */
		if (sbi->gc_mode == GC_URGENT_HIGH ||
				sbi->gc_mode == GC_URGENT_MID) {
			wait_ms = gc_th->urgent_sleep_time;
			f2fs_down_write(&sbi->gc_lock);
			goto do_gc;
		}

		if (foreground) {
			f2fs_down_write(&sbi->gc_lock);
			goto do_gc;
		} else if (!f2fs_down_write_trylock(&sbi->gc_lock)) {
			stat_other_skip_bggc_count(sbi);
			goto next;
		}

		if (!is_idle(sbi, GC_TIME)) {
			increase_sleep_time(gc_th, &wait_ms);
			f2fs_up_write(&sbi->gc_lock);
			stat_io_skip_bggc_count(sbi);
			goto next;
		}

		if (has_enough_invalid_blocks(sbi))
			decrease_sleep_time(gc_th, &wait_ms);
		else
			increase_sleep_time(gc_th, &wait_ms);
do_gc:
		stat_inc_gc_call_count(sbi, foreground ?
					FOREGROUND : BACKGROUND);

		sync_mode = F2FS_OPTION(sbi).bggc_mode == BGGC_MODE_SYNC;

		/* foreground GC was triggered via f2fs_balance_fs() */
		if (foreground)
			sync_mode = false;

		gc_control.init_gc_type = sync_mode ? FG_GC : BG_GC;
		gc_control.no_bg_gc = foreground;
		gc_control.nr_free_secs = foreground ? 1 : 0;

		/* if return value is not zero, no victim was selected */
		if (f2fs_gc(sbi, &gc_control)) {
			/* don't bother wait_ms by foreground gc */
			if (!foreground)
				wait_ms = gc_th->no_gc_sleep_time;
		} else {
			/* reset wait_ms to default sleep time */
			if (wait_ms == gc_th->no_gc_sleep_time)
				wait_ms = gc_th->min_sleep_time;
		}

		if (foreground)
			wake_up_all(&gc_th->fggc_wq);

		trace_f2fs_background_gc(sbi->sb, wait_ms,
				prefree_segments(sbi), free_segments(sbi));

		/* balancing f2fs's metadata periodically */
		f2fs_balance_fs_bg(sbi, true);
next:
		if (sbi->gc_mode != GC_NORMAL) {
			spin_lock(&sbi->gc_remaining_trials_lock);
			if (sbi->gc_remaining_trials) {
				sbi->gc_remaining_trials--;
				if (!sbi->gc_remaining_trials)
					sbi->gc_mode = GC_NORMAL;
			}
			spin_unlock(&sbi->gc_remaining_trials_lock);
		}
		sb_end_write(sbi->sb);

	} while (!kthread_should_stop());
	return 0;
}

int f2fs_start_gc_thread(struct f2fs_sb_info *sbi)
{
	struct f2fs_gc_kthread *gc_th;
	dev_t dev = sbi->sb->s_bdev->bd_dev;

	gc_th = f2fs_kmalloc(sbi, sizeof(struct f2fs_gc_kthread), GFP_KERNEL);
	if (!gc_th)
		return -ENOMEM;

	gc_th->urgent_sleep_time = DEF_GC_THREAD_URGENT_SLEEP_TIME;
	gc_th->min_sleep_time = DEF_GC_THREAD_MIN_SLEEP_TIME;
	gc_th->max_sleep_time = DEF_GC_THREAD_MAX_SLEEP_TIME;
	gc_th->no_gc_sleep_time = DEF_GC_THREAD_NOGC_SLEEP_TIME;

	gc_th->gc_wake = false;

	sbi->gc_thread = gc_th;
	init_waitqueue_head(&sbi->gc_thread->gc_wait_queue_head);
	init_waitqueue_head(&sbi->gc_thread->fggc_wq);
	sbi->gc_thread->f2fs_gc_task = kthread_run(gc_thread_func, sbi,
			"f2fs_gc-%u:%u", MAJOR(dev), MINOR(dev));
	if (IS_ERR(gc_th->f2fs_gc_task)) {
		int err = PTR_ERR(gc_th->f2fs_gc_task);

		kfree(gc_th);
		sbi->gc_thread = NULL;
		return err;
	}

	return 0;
}

void f2fs_stop_gc_thread(struct f2fs_sb_info *sbi)
{
	struct f2fs_gc_kthread *gc_th = sbi->gc_thread;

	if (!gc_th)
		return;
	kthread_stop(gc_th->f2fs_gc_task);
	wake_up_all(&gc_th->fggc_wq);
	kfree(gc_th);
	sbi->gc_thread = NULL;
}

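/*
 * Map the GC type and the tunable gc_mode to a victim selection algorithm:
 * GC_GREEDY (fewest valid blocks), GC_CB (cost-benefit), or GC_AT
 * (age-threshold, only when ATGC is enabled).
 */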
static int select_gc_type(struct f2fs_sb_info *sbi, int gc_type)
{
	int gc_mode;

	if (gc_type == BG_GC) {
		if (sbi->am.atgc_enabled)
			gc_mode = GC_AT;
		else
			gc_mode = GC_CB;
	} else {
		gc_mode = GC_GREEDY;
	}

	switch (sbi->gc_mode) {
	case GC_IDLE_CB:
		gc_mode = GC_CB;
		break;
	case GC_IDLE_GREEDY:
	case GC_URGENT_HIGH:
		gc_mode = GC_GREEDY;
		break;
	case GC_IDLE_AT:
		gc_mode = GC_AT;
		break;
	}

	return gc_mode;
}

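/*
 * Set up the victim selection policy: which dirty bitmap to scan, how many
 * candidates to examine (max_search), and the scan unit, which is a single
 * segment for SSR/AT_SSR and a whole section for LFS allocation.
 */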
static void select_policy(struct f2fs_sb_info *sbi, int gc_type,
			int type, struct victim_sel_policy *p)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);

	if (p->alloc_mode == SSR) {
		p->gc_mode = GC_GREEDY;
		p->dirty_bitmap = dirty_i->dirty_segmap[type];
		p->max_search = dirty_i->nr_dirty[type];
		p->ofs_unit = 1;
	} else if (p->alloc_mode == AT_SSR) {
		p->gc_mode = GC_GREEDY;
		p->dirty_bitmap = dirty_i->dirty_segmap[type];
		p->max_search = dirty_i->nr_dirty[type];
		p->ofs_unit = 1;
	} else {
		p->gc_mode = select_gc_type(sbi, gc_type);
		p->ofs_unit = SEGS_PER_SEC(sbi);
		if (__is_large_section(sbi)) {
			p->dirty_bitmap = dirty_i->dirty_secmap;
			p->max_search = count_bits(p->dirty_bitmap,
						0, MAIN_SECS(sbi));
		} else {
			p->dirty_bitmap = dirty_i->dirty_segmap[DIRTY];
			p->max_search = dirty_i->nr_dirty[DIRTY];
		}
	}

	/*
	 * adjust the candidate range: all dirty segments should be
	 * selectable for foreground GC and urgent GC.
	 */
	if (gc_type != FG_GC &&
			(sbi->gc_mode != GC_URGENT_HIGH) &&
			(p->gc_mode != GC_AT && p->alloc_mode != AT_SSR) &&
			p->max_search > sbi->max_victim_search)
		p->max_search = sbi->max_victim_search;

	/* let's select beginning hot/small space first. */
	if (f2fs_need_rand_seg(sbi))
		p->offset = get_random_u32_below(MAIN_SECS(sbi) *
						SEGS_PER_SEC(sbi));
	else if (type == CURSEG_HOT_DATA || IS_NODESEG(type))
		p->offset = 0;
	else
		p->offset = SIT_I(sbi)->last_victim[p->gc_mode];
}

static unsigned int get_max_cost(struct f2fs_sb_info *sbi,
				struct victim_sel_policy *p)
{
	/* SSR allocates in a segment unit */
	if (p->alloc_mode == SSR)
		return BLKS_PER_SEG(sbi);
	else if (p->alloc_mode == AT_SSR)
		return UINT_MAX;

	/* LFS */
	if (p->gc_mode == GC_GREEDY)
		return SEGS_TO_BLKS(sbi, 2 * p->ofs_unit);
	else if (p->gc_mode == GC_CB)
		return UINT_MAX;
	else if (p->gc_mode == GC_AT)
		return UINT_MAX;
	else /* No other gc_mode */
		return 0;
}

static unsigned int check_bg_victims(struct f2fs_sb_info *sbi)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	unsigned int secno;

	/*
	 * If the gc_type is FG_GC, we can select victim segments
	 * selected by background GC before.
	 * Those segments guarantee they have small valid blocks.
	 */
	for_each_set_bit(secno, dirty_i->victim_secmap, MAIN_SECS(sbi)) {
		if (sec_usage_check(sbi, secno))
			continue;
		clear_bit(secno, dirty_i->victim_secmap);
		return GET_SEG_FROM_SEC(sbi, secno);
	}
	return NULL_SEGNO;
}

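/*
 * Classic cost-benefit victim cost: with section utilization u (percent)
 * and age normalized to [0, 100], the benefit is (100 - u) * age and the
 * cost of migration is (100 + u), so a smaller return value marks a more
 * profitable victim.
 */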
static unsigned int get_cb_cost(struct f2fs_sb_info *sbi, unsigned int segno)
{
	struct sit_info *sit_i = SIT_I(sbi);
	unsigned int secno = GET_SEC_FROM_SEG(sbi, segno);
	unsigned int start = GET_SEG_FROM_SEC(sbi, secno);
	unsigned long long mtime = 0;
	unsigned int vblocks;
	unsigned char age = 0;
	unsigned char u;
	unsigned int i;
	unsigned int usable_segs_per_sec = f2fs_usable_segs_in_sec(sbi, segno);

	for (i = 0; i < usable_segs_per_sec; i++)
		mtime += get_seg_entry(sbi, start + i)->mtime;
	vblocks = get_valid_blocks(sbi, segno, true);

	mtime = div_u64(mtime, usable_segs_per_sec);
	vblocks = div_u64(vblocks, usable_segs_per_sec);

	u = BLKS_TO_SEGS(sbi, vblocks * 100);

	/* Handle the case where the system time was changed by the user */
	if (mtime < sit_i->min_mtime)
		sit_i->min_mtime = mtime;
	if (mtime > sit_i->max_mtime)
		sit_i->max_mtime = mtime;
	if (sit_i->max_mtime != sit_i->min_mtime)
		age = 100 - div64_u64(100 * (mtime - sit_i->min_mtime),
				sit_i->max_mtime - sit_i->min_mtime);

	return UINT_MAX - ((100 * (100 - u) * age) / (100 + u));
}

static inline unsigned int get_gc_cost(struct f2fs_sb_info *sbi,
			unsigned int segno, struct victim_sel_policy *p)
{
	if (p->alloc_mode == SSR)
		return get_seg_entry(sbi, segno)->ckpt_valid_blocks;

	/* alloc_mode == LFS */
	if (p->gc_mode == GC_GREEDY)
		return get_valid_blocks(sbi, segno, true);
	else if (p->gc_mode == GC_CB)
		return get_cb_cost(sbi, segno);

	f2fs_bug_on(sbi, 1);
	return 0;
}

static unsigned int count_bits(const unsigned long *addr,
				unsigned int offset, unsigned int len)
{
	unsigned int end = offset + len, sum = 0;

	while (offset < end) {
		if (test_bit(offset++, addr))
			++sum;
	}
	return sum;
}

static bool f2fs_check_victim_tree(struct f2fs_sb_info *sbi,
				struct rb_root_cached *root)
{
#ifdef CONFIG_F2FS_CHECK_FS
	struct rb_node *cur = rb_first_cached(root), *next;
	struct victim_entry *cur_ve, *next_ve;

	while (cur) {
		next = rb_next(cur);
		if (!next)
			return true;

		cur_ve = rb_entry(cur, struct victim_entry, rb_node);
		next_ve = rb_entry(next, struct victim_entry, rb_node);

		if (cur_ve->mtime > next_ve->mtime) {
			f2fs_info(sbi, "broken victim_rbtree, "
				"cur_mtime(%llu) next_mtime(%llu)",
				cur_ve->mtime, next_ve->mtime);
			return false;
		}
		cur = next;
	}
#endif
	return true;
}

static struct victim_entry *__lookup_victim_entry(struct f2fs_sb_info *sbi,
					unsigned long long mtime)
{
	struct atgc_management *am = &sbi->am;
	struct rb_node *node = am->root.rb_root.rb_node;
	struct victim_entry *ve = NULL;

	while (node) {
		ve = rb_entry(node, struct victim_entry, rb_node);

		if (mtime < ve->mtime)
			node = node->rb_left;
		else
			node = node->rb_right;
	}
	return ve;
}

static struct victim_entry *__create_victim_entry(struct f2fs_sb_info *sbi,
		unsigned long long mtime, unsigned int segno)
{
	struct atgc_management *am = &sbi->am;
	struct victim_entry *ve;

	ve = f2fs_kmem_cache_alloc(victim_entry_slab, GFP_NOFS, true, NULL);

	ve->mtime = mtime;
	ve->segno = segno;

	list_add_tail(&ve->list, &am->victim_list);
	am->victim_count++;

	return ve;
}

static void __insert_victim_entry(struct f2fs_sb_info *sbi,
				unsigned long long mtime, unsigned int segno)
{
	struct atgc_management *am = &sbi->am;
	struct rb_root_cached *root = &am->root;
	struct rb_node **p = &root->rb_root.rb_node;
	struct rb_node *parent = NULL;
	struct victim_entry *ve;
	bool left_most = true;

	/* look up rb tree to find parent node */
	while (*p) {
		parent = *p;
		ve = rb_entry(parent, struct victim_entry, rb_node);

		if (mtime < ve->mtime) {
			p = &(*p)->rb_left;
		} else {
			p = &(*p)->rb_right;
			left_most = false;
		}
	}

	ve = __create_victim_entry(sbi, mtime, segno);

	rb_link_node(&ve->rb_node, parent, p);
	rb_insert_color_cached(&ve->rb_node, root, left_most);
}

static void add_victim_entry(struct f2fs_sb_info *sbi,
				struct victim_sel_policy *p, unsigned int segno)
{
	struct sit_info *sit_i = SIT_I(sbi);
	unsigned int secno = GET_SEC_FROM_SEG(sbi, segno);
	unsigned int start = GET_SEG_FROM_SEC(sbi, secno);
	unsigned long long mtime = 0;
	unsigned int i;

	if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) {
		if (p->gc_mode == GC_AT &&
			get_valid_blocks(sbi, segno, true) == 0)
			return;
	}

	for (i = 0; i < SEGS_PER_SEC(sbi); i++)
		mtime += get_seg_entry(sbi, start + i)->mtime;
	mtime = div_u64(mtime, SEGS_PER_SEC(sbi));

	/* Handle the case where the system time was changed by the user */
	if (mtime < sit_i->min_mtime)
		sit_i->min_mtime = mtime;
	if (mtime > sit_i->max_mtime)
		sit_i->max_mtime = mtime;
	if (mtime < sit_i->dirty_min_mtime)
		sit_i->dirty_min_mtime = mtime;
	if (mtime > sit_i->dirty_max_mtime)
		sit_i->dirty_max_mtime = mtime;

	/* don't choose young section as candidate */
	if (sit_i->dirty_max_mtime - mtime < p->age_threshold)
		return;

	__insert_victim_entry(sbi, mtime, segno);
}

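/*
 * Age-threshold GC: walk the candidates in mtime order and score each one
 * by a weighted sum of its normalized age and free-block ratio; the entry
 * with the minimum resulting cost (the older one on a tie) wins. At most
 * dirty_threshold candidates are examined per lookup.
 */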
static void atgc_lookup_victim(struct f2fs_sb_info *sbi,
						struct victim_sel_policy *p)
{
	struct sit_info *sit_i = SIT_I(sbi);
	struct atgc_management *am = &sbi->am;
	struct rb_root_cached *root = &am->root;
	struct rb_node *node;
	struct victim_entry *ve;
	unsigned long long total_time;
	unsigned long long age, u, accu;
	unsigned long long max_mtime = sit_i->dirty_max_mtime;
	unsigned long long min_mtime = sit_i->dirty_min_mtime;
	unsigned int sec_blocks = CAP_BLKS_PER_SEC(sbi);
	unsigned int vblocks;
	unsigned int dirty_threshold = max(am->max_candidate_count,
					am->candidate_ratio *
					am->victim_count / 100);
	unsigned int age_weight = am->age_weight;
	unsigned int cost;
	unsigned int iter = 0;

	if (max_mtime < min_mtime)
		return;

	max_mtime += 1;
	total_time = max_mtime - min_mtime;

	accu = div64_u64(ULLONG_MAX, total_time);
	accu = min_t(unsigned long long, div_u64(accu, 100),
					DEFAULT_ACCURACY_CLASS);

	node = rb_first_cached(root);
next:
	ve = rb_entry_safe(node, struct victim_entry, rb_node);
	if (!ve)
		return;

	if (ve->mtime >= max_mtime || ve->mtime < min_mtime)
		goto skip;

	/* age = 10000 * x% * 60 */
	age = div64_u64(accu * (max_mtime - ve->mtime), total_time) *
								age_weight;

	vblocks = get_valid_blocks(sbi, ve->segno, true);
	f2fs_bug_on(sbi, !vblocks || vblocks == sec_blocks);

	/* u = 10000 * x% * 40 */
	u = div64_u64(accu * (sec_blocks - vblocks), sec_blocks) *
							(100 - age_weight);

	f2fs_bug_on(sbi, age + u >= UINT_MAX);

	cost = UINT_MAX - (age + u);
	iter++;

	if (cost < p->min_cost ||
			(cost == p->min_cost && age > p->oldest_age)) {
		p->min_cost = cost;
		p->oldest_age = age;
		p->min_segno = ve->segno;
	}
skip:
	if (iter < dirty_threshold) {
		node = rb_next(node);
		goto next;
	}
}

/*
 * select candidates around source section in range of
 * [target - dirty_threshold, target + dirty_threshold]
 */
static void atssr_lookup_victim(struct f2fs_sb_info *sbi,
						struct victim_sel_policy *p)
{
	struct sit_info *sit_i = SIT_I(sbi);
	struct atgc_management *am = &sbi->am;
	struct victim_entry *ve;
	unsigned long long age;
	unsigned long long max_mtime = sit_i->dirty_max_mtime;
	unsigned long long min_mtime = sit_i->dirty_min_mtime;
	unsigned int vblocks;
	unsigned int dirty_threshold = max(am->max_candidate_count,
					am->candidate_ratio *
					am->victim_count / 100);
	unsigned int cost, iter;
	int stage = 0;

	if (max_mtime < min_mtime)
		return;
	max_mtime += 1;
next_stage:
	iter = 0;
	ve = __lookup_victim_entry(sbi, p->age);
next_node:
	if (!ve) {
		if (stage++ == 0)
			goto next_stage;
		return;
	}

	if (ve->mtime >= max_mtime || ve->mtime < min_mtime)
		goto skip_node;

	age = max_mtime - ve->mtime;

	vblocks = get_seg_entry(sbi, ve->segno)->ckpt_valid_blocks;
	f2fs_bug_on(sbi, !vblocks);

	/* rare case */
	if (vblocks == BLKS_PER_SEG(sbi))
		goto skip_node;

	iter++;

	age = max_mtime - abs(p->age - age);
	cost = UINT_MAX - vblocks;

	if (cost < p->min_cost ||
			(cost == p->min_cost && age > p->oldest_age)) {
		p->min_cost = cost;
		p->oldest_age = age;
		p->min_segno = ve->segno;
	}
skip_node:
	if (iter < dirty_threshold) {
		ve = rb_entry(stage == 0 ? rb_prev(&ve->rb_node) :
					rb_next(&ve->rb_node),
					struct victim_entry, rb_node);
		goto next_node;
	}

	if (stage++ == 0)
		goto next_stage;
}

static void lookup_victim_by_age(struct f2fs_sb_info *sbi,
						struct victim_sel_policy *p)
{
	f2fs_bug_on(sbi, !f2fs_check_victim_tree(sbi, &sbi->am.root));

	if (p->gc_mode == GC_AT)
		atgc_lookup_victim(sbi, p);
	else if (p->alloc_mode == AT_SSR)
		atssr_lookup_victim(sbi, p);
	else
		f2fs_bug_on(sbi, 1);
}

static void release_victim_entry(struct f2fs_sb_info *sbi)
{
	struct atgc_management *am = &sbi->am;
	struct victim_entry *ve, *tmp;

	list_for_each_entry_safe(ve, tmp, &am->victim_list, list) {
		list_del(&ve->list);
		kmem_cache_free(victim_entry_slab, ve);
		am->victim_count--;
	}

	am->root = RB_ROOT_CACHED;

	f2fs_bug_on(sbi, am->victim_count);
	f2fs_bug_on(sbi, !list_empty(&am->victim_list));
}

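/*
 * Pinned-file handling: sections holding blocks of pinned files are
 * recorded in pinned_secmap so that foreground GC can skip them until
 * f2fs_unpin_all_sections() clears the bitmap again.
 */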
static bool f2fs_pin_section(struct f2fs_sb_info *sbi, unsigned int segno)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	unsigned int secno = GET_SEC_FROM_SEG(sbi, segno);

	if (!dirty_i->enable_pin_section)
		return false;
	if (!test_and_set_bit(secno, dirty_i->pinned_secmap))
		dirty_i->pinned_secmap_cnt++;
	return true;
}

static bool f2fs_pinned_section_exists(struct dirty_seglist_info *dirty_i)
{
	return dirty_i->pinned_secmap_cnt;
}

static bool f2fs_section_is_pinned(struct dirty_seglist_info *dirty_i,
						unsigned int secno)
{
	return dirty_i->enable_pin_section &&
		f2fs_pinned_section_exists(dirty_i) &&
		test_bit(secno, dirty_i->pinned_secmap);
}

static void f2fs_unpin_all_sections(struct f2fs_sb_info *sbi, bool enable)
{
	unsigned int bitmap_size = f2fs_bitmap_size(MAIN_SECS(sbi));

	if (f2fs_pinned_section_exists(DIRTY_I(sbi))) {
		memset(DIRTY_I(sbi)->pinned_secmap, 0, bitmap_size);
		DIRTY_I(sbi)->pinned_secmap_cnt = 0;
	}
	DIRTY_I(sbi)->enable_pin_section = enable;
}

static int f2fs_gc_pinned_control(struct inode *inode, int gc_type,
							unsigned int segno)
{
	if (!f2fs_is_pinned_file(inode))
		return 0;
	if (gc_type != FG_GC)
		return -EBUSY;
	if (!f2fs_pin_section(F2FS_I_SB(inode), segno))
		f2fs_pin_file_control(inode, true);
	return -EAGAIN;
}

/*
 * This function is called from two paths.
 * One is garbage collection and the other is SSR segment selection.
 * When it is called during GC, it just gets a victim segment
 * and it does not remove it from dirty seglist.
 * When it is called from SSR segment selection, it finds a segment
 * which has minimum valid blocks and removes it from dirty seglist.
 */
int f2fs_get_victim(struct f2fs_sb_info *sbi, unsigned int *result,
			int gc_type, int type, char alloc_mode,
			unsigned long long age)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	struct sit_info *sm = SIT_I(sbi);
	struct victim_sel_policy p;
	unsigned int secno, last_victim;
	unsigned int last_segment;
	unsigned int nsearched;
	bool is_atgc;
	int ret = 0;

	mutex_lock(&dirty_i->seglist_lock);
	last_segment = MAIN_SECS(sbi) * SEGS_PER_SEC(sbi);

	p.alloc_mode = alloc_mode;
	p.age = age;
	p.age_threshold = sbi->am.age_threshold;

retry:
	select_policy(sbi, gc_type, type, &p);
	p.min_segno = NULL_SEGNO;
	p.oldest_age = 0;
	p.min_cost = get_max_cost(sbi, &p);

	is_atgc = (p.gc_mode == GC_AT || p.alloc_mode == AT_SSR);
	nsearched = 0;

	if (is_atgc)
		SIT_I(sbi)->dirty_min_mtime = ULLONG_MAX;

	if (*result != NULL_SEGNO) {
		if (!get_valid_blocks(sbi, *result, false)) {
			ret = -ENODATA;
			goto out;
		}

		if (sec_usage_check(sbi, GET_SEC_FROM_SEG(sbi, *result)))
			ret = -EBUSY;
		else
			p.min_segno = *result;
		goto out;
	}

	ret = -ENODATA;
	if (p.max_search == 0)
		goto out;

	if (__is_large_section(sbi) && p.alloc_mode == LFS) {
		if (sbi->next_victim_seg[BG_GC] != NULL_SEGNO) {
			p.min_segno = sbi->next_victim_seg[BG_GC];
			*result = p.min_segno;
			sbi->next_victim_seg[BG_GC] = NULL_SEGNO;
			goto got_result;
		}
		if (gc_type == FG_GC &&
				sbi->next_victim_seg[FG_GC] != NULL_SEGNO) {
			p.min_segno = sbi->next_victim_seg[FG_GC];
			*result = p.min_segno;
			sbi->next_victim_seg[FG_GC] = NULL_SEGNO;
			goto got_result;
		}
	}

	last_victim = sm->last_victim[p.gc_mode];
	if (p.alloc_mode == LFS && gc_type == FG_GC) {
		p.min_segno = check_bg_victims(sbi);
		if (p.min_segno != NULL_SEGNO)
			goto got_it;
	}

	while (1) {
		unsigned long cost, *dirty_bitmap;
		unsigned int unit_no, segno;

		dirty_bitmap = p.dirty_bitmap;
		unit_no = find_next_bit(dirty_bitmap,
				last_segment / p.ofs_unit,
				p.offset / p.ofs_unit);
		segno = unit_no * p.ofs_unit;
		if (segno >= last_segment) {
			if (sm->last_victim[p.gc_mode]) {
				last_segment =
					sm->last_victim[p.gc_mode];
				sm->last_victim[p.gc_mode] = 0;
				p.offset = 0;
				continue;
			}
			break;
		}

		p.offset = segno + p.ofs_unit;
		nsearched++;

#ifdef CONFIG_F2FS_CHECK_FS
		/*
		 * skip selecting an invalid segno (one that failed the block
		 * validity check during GC) to avoid an endless GC loop in
		 * such cases.
		 */
		if (test_bit(segno, sm->invalid_segmap))
			goto next;
#endif

		secno = GET_SEC_FROM_SEG(sbi, segno);

		if (sec_usage_check(sbi, secno))
			goto next;

		/* Don't touch checkpointed data */
		if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) {
			if (p.alloc_mode == LFS) {
				/*
				 * LFS is set to find source section during GC.
				 * The victim should have no checkpointed data.
				 */
				if (get_ckpt_valid_blocks(sbi, segno, true))
					goto next;
			} else {
				/*
				 * SSR | AT_SSR are set to find target segment
				 * for writes which can be full by checkpointed
				 * and newly written blocks.
				 */
				if (!f2fs_segment_has_free_slot(sbi, segno))
					goto next;
			}
		}

		if (gc_type == BG_GC && test_bit(secno, dirty_i->victim_secmap))
			goto next;

		if (gc_type == FG_GC && f2fs_section_is_pinned(dirty_i, secno))
			goto next;

		if (is_atgc) {
			add_victim_entry(sbi, &p, segno);
			goto next;
		}

		cost = get_gc_cost(sbi, segno, &p);

		if (p.min_cost > cost) {
			p.min_segno = segno;
			p.min_cost = cost;
		}
next:
		if (nsearched >= p.max_search) {
			if (!sm->last_victim[p.gc_mode] && segno <= last_victim)
				sm->last_victim[p.gc_mode] =
					last_victim + p.ofs_unit;
			else
				sm->last_victim[p.gc_mode] = segno + p.ofs_unit;
			sm->last_victim[p.gc_mode] %=
				(MAIN_SECS(sbi) * SEGS_PER_SEC(sbi));
			break;
		}
	}

	/* get victim for GC_AT/AT_SSR */
	if (is_atgc) {
		lookup_victim_by_age(sbi, &p);
		release_victim_entry(sbi);
	}

	if (is_atgc && p.min_segno == NULL_SEGNO &&
			sm->elapsed_time < p.age_threshold) {
		p.age_threshold = 0;
		goto retry;
	}

	if (p.min_segno != NULL_SEGNO) {
got_it:
		*result = (p.min_segno / p.ofs_unit) * p.ofs_unit;
got_result:
		if (p.alloc_mode == LFS) {
			secno = GET_SEC_FROM_SEG(sbi, p.min_segno);
			if (gc_type == FG_GC)
				sbi->cur_victim_sec = secno;
			else
				set_bit(secno, dirty_i->victim_secmap);
		}
		ret = 0;

	}
out:
	if (p.min_segno != NULL_SEGNO)
		trace_f2fs_get_victim(sbi->sb, type, gc_type, &p,
				sbi->cur_victim_sec,
				prefree_segments(sbi), free_segments(sbi));
	mutex_unlock(&dirty_i->seglist_lock);

	return ret;
}

static struct inode *find_gc_inode(struct gc_inode_list *gc_list, nid_t ino)
{
	struct inode_entry *ie;

	ie = radix_tree_lookup(&gc_list->iroot, ino);
	if (ie)
		return ie->inode;
	return NULL;
}

static void add_gc_inode(struct gc_inode_list *gc_list, struct inode *inode)
{
	struct inode_entry *new_ie;

	if (inode == find_gc_inode(gc_list, inode->i_ino)) {
		iput(inode);
		return;
	}
	new_ie = f2fs_kmem_cache_alloc(f2fs_inode_entry_slab,
					GFP_NOFS, true, NULL);
	new_ie->inode = inode;

	f2fs_radix_tree_insert(&gc_list->iroot, inode->i_ino, new_ie);
	list_add_tail(&new_ie->list, &gc_list->ilist);
}

static void put_gc_inode(struct gc_inode_list *gc_list)
{
	struct inode_entry *ie, *next_ie;

	list_for_each_entry_safe(ie, next_ie, &gc_list->ilist, list) {
		radix_tree_delete(&gc_list->iroot, ie->inode->i_ino);
		iput(ie->inode);
		list_del(&ie->list);
		kmem_cache_free(f2fs_inode_entry_slab, ie);
	}
}

static int check_valid_map(struct f2fs_sb_info *sbi,
				unsigned int segno, int offset)
{
	struct sit_info *sit_i = SIT_I(sbi);
	struct seg_entry *sentry;
	int ret;

	down_read(&sit_i->sentry_lock);
	sentry = get_seg_entry(sbi, segno);
	ret = f2fs_test_bit(offset, sentry->cur_valid_map);
	up_read(&sit_i->sentry_lock);
	return ret;
}

/*
 * This function compares the node address recorded in the summary with that
 * in the NAT. If the address is valid, the node is copied with cold status;
 * otherwise (an invalid node), it is ignored.
 */
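/*
 * It makes three passes over the segment: readahead of the NAT blocks,
 * readahead of the node pages themselves, and the actual migration of
 * every node block that is still valid.
 */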
static int gc_node_segment(struct f2fs_sb_info *sbi,
		struct f2fs_summary *sum, unsigned int segno, int gc_type)
{
	struct f2fs_summary *entry;
	block_t start_addr;
	int off;
	int phase = 0;
	bool fggc = (gc_type == FG_GC);
	int submitted = 0;
	unsigned int usable_blks_in_seg = f2fs_usable_blks_in_seg(sbi, segno);

	start_addr = START_BLOCK(sbi, segno);

next_step:
	entry = sum;

	if (fggc && phase == 2)
		atomic_inc(&sbi->wb_sync_req[NODE]);

	for (off = 0; off < usable_blks_in_seg; off++, entry++) {
		nid_t nid = le32_to_cpu(entry->nid);
		struct page *node_page;
		struct node_info ni;
		int err;

		/* stop BG_GC if there are not enough free sections. */
		if (gc_type == BG_GC && has_not_enough_free_secs(sbi, 0, 0))
			return submitted;

		if (check_valid_map(sbi, segno, off) == 0)
			continue;

		if (phase == 0) {
			f2fs_ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nid), 1,
							META_NAT, true);
			continue;
		}

		if (phase == 1) {
			f2fs_ra_node_page(sbi, nid);
			continue;
		}

		/* phase == 2 */
		node_page = f2fs_get_node_page(sbi, nid);
		if (IS_ERR(node_page))
			continue;

		/* block may become invalid during f2fs_get_node_page */
		if (check_valid_map(sbi, segno, off) == 0) {
			f2fs_put_page(node_page, 1);
			continue;
		}

		if (f2fs_get_node_info(sbi, nid, &ni, false)) {
			f2fs_put_page(node_page, 1);
			continue;
		}

		if (ni.blk_addr != start_addr + off) {
			f2fs_put_page(node_page, 1);
			continue;
		}

		err = f2fs_move_node_page(node_page, gc_type);
		if (!err && gc_type == FG_GC)
			submitted++;
		stat_inc_node_blk_count(sbi, 1, gc_type);
	}

	if (++phase < 3)
		goto next_step;

	if (fggc)
		atomic_dec(&sbi->wb_sync_req[NODE]);
	return submitted;
}

/*
 * Calculate the start block index for the given node offset.
 * Be careful: the caller must pass a node offset that refers to a direct
 * node block. Passing an offset that points to any other node block type,
 * such as an indirect or double indirect node block, is a caller's bug.
 */
block_t f2fs_start_bidx_of_node(unsigned int node_ofs, struct inode *inode)
{
	unsigned int indirect_blks = 2 * NIDS_PER_BLOCK + 4;
	unsigned int bidx;

	if (node_ofs == 0)
		return 0;

	if (node_ofs <= 2) {
		bidx = node_ofs - 1;
	} else if (node_ofs <= indirect_blks) {
		int dec = (node_ofs - 4) / (NIDS_PER_BLOCK + 1);

		bidx = node_ofs - 2 - dec;
	} else {
		int dec = (node_ofs - indirect_blks - 3) / (NIDS_PER_BLOCK + 1);

		bidx = node_ofs - 5 - dec;
	}
	return bidx * ADDRS_PER_BLOCK(inode) + ADDRS_PER_INODE(inode);
}

static bool is_alive(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
		struct node_info *dni, block_t blkaddr, unsigned int *nofs)
{
	struct page *node_page;
	nid_t nid;
	unsigned int ofs_in_node, max_addrs, base;
	block_t source_blkaddr;

	nid = le32_to_cpu(sum->nid);
	ofs_in_node = le16_to_cpu(sum->ofs_in_node);

	node_page = f2fs_get_node_page(sbi, nid);
	if (IS_ERR(node_page))
		return false;

	if (f2fs_get_node_info(sbi, nid, dni, false)) {
		f2fs_put_page(node_page, 1);
		return false;
	}

	if (sum->version != dni->version) {
		f2fs_warn(sbi, "%s: valid data with mismatched node version.",
			  __func__);
		set_sbi_flag(sbi, SBI_NEED_FSCK);
	}

	if (f2fs_check_nid_range(sbi, dni->ino)) {
		f2fs_put_page(node_page, 1);
		return false;
	}

	if (IS_INODE(node_page)) {
		base = offset_in_addr(F2FS_INODE(node_page));
		max_addrs = DEF_ADDRS_PER_INODE;
	} else {
		base = 0;
		max_addrs = DEF_ADDRS_PER_BLOCK;
	}

	if (base + ofs_in_node >= max_addrs) {
		f2fs_err(sbi, "Inconsistent blkaddr offset: base:%u, ofs_in_node:%u, max:%u, ino:%u, nid:%u",
			base, ofs_in_node, max_addrs, dni->ino, dni->nid);
		f2fs_put_page(node_page, 1);
		return false;
	}

	*nofs = ofs_of_node(node_page);
	source_blkaddr = data_blkaddr(NULL, node_page, ofs_in_node);
	f2fs_put_page(node_page, 1);

	if (source_blkaddr != blkaddr) {
#ifdef CONFIG_F2FS_CHECK_FS
		unsigned int segno = GET_SEGNO(sbi, blkaddr);
		unsigned long offset = GET_BLKOFF_FROM_SEG0(sbi, blkaddr);

		if (unlikely(check_valid_map(sbi, segno, offset))) {
			if (!test_and_set_bit(segno, SIT_I(sbi)->invalid_segmap)) {
				f2fs_err(sbi, "mismatched blkaddr %u (source_blkaddr %u) in seg %u",
					 blkaddr, source_blkaddr, segno);
				set_sbi_flag(sbi, SBI_NEED_FSCK);
			}
		}
#endif
		return false;
	}
	return true;
}

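/*
 * Readahead one data block of a post-read (e.g. encrypted) inode into the
 * meta inode's page cache, so that the follow-up move_data_block() can
 * copy it via META_MAPPING; called with i_gc_rwsem[WRITE] held.
 */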
static int ra_data_block(struct inode *inode, pgoff_t index)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct address_space *mapping = inode->i_mapping;
	struct dnode_of_data dn;
	struct page *page;
	struct f2fs_io_info fio = {
		.sbi = sbi,
		.ino = inode->i_ino,
		.type = DATA,
		.temp = COLD,
		.op = REQ_OP_READ,
		.op_flags = 0,
		.encrypted_page = NULL,
		.in_list = 0,
	};
	int err;

	page = f2fs_grab_cache_page(mapping, index, true);
	if (!page)
		return -ENOMEM;

	if (f2fs_lookup_read_extent_cache_block(inode, index,
						&dn.data_blkaddr)) {
		if (unlikely(!f2fs_is_valid_blkaddr(sbi, dn.data_blkaddr,
						DATA_GENERIC_ENHANCE_READ))) {
			err = -EFSCORRUPTED;
			goto put_page;
		}
		goto got_it;
	}

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = f2fs_get_dnode_of_data(&dn, index, LOOKUP_NODE);
	if (err)
		goto put_page;
	f2fs_put_dnode(&dn);

	if (!__is_valid_data_blkaddr(dn.data_blkaddr)) {
		err = -ENOENT;
		goto put_page;
	}
	if (unlikely(!f2fs_is_valid_blkaddr(sbi, dn.data_blkaddr,
						DATA_GENERIC_ENHANCE))) {
		err = -EFSCORRUPTED;
		goto put_page;
	}
got_it:
	/* read page */
	fio.page = page;
	fio.new_blkaddr = fio.old_blkaddr = dn.data_blkaddr;

	/*
	 * don't cache encrypted data into the meta inode until previous dirty
	 * data has been written back, to avoid racing between GC and flush.
	 */
	f2fs_wait_on_page_writeback(page, DATA, true, true);

	f2fs_wait_on_block_writeback(inode, dn.data_blkaddr);

	fio.encrypted_page = f2fs_pagecache_get_page(META_MAPPING(sbi),
					dn.data_blkaddr,
					FGP_LOCK | FGP_CREAT, GFP_NOFS);
	if (!fio.encrypted_page) {
		err = -ENOMEM;
		goto put_page;
	}

	err = f2fs_submit_page_bio(&fio);
	if (err)
		goto put_encrypted_page;
	f2fs_put_page(fio.encrypted_page, 0);
	f2fs_put_page(page, 1);

	f2fs_update_iostat(sbi, inode, FS_DATA_READ_IO, F2FS_BLKSIZE);
	f2fs_update_iostat(sbi, NULL, FS_GDATA_READ_IO, F2FS_BLKSIZE);

	return 0;
put_encrypted_page:
	f2fs_put_page(fio.encrypted_page, 1);
put_page:
	f2fs_put_page(page, 1);
	return err;
}

/*
 * Move data block via META_MAPPING while keeping locked data page.
 * This can be used to move blocks, aka LBAs, directly on disk.
 */
static int move_data_block(struct inode *inode, block_t bidx,
				int gc_type, unsigned int segno, int off)
{
	struct f2fs_io_info fio = {
		.sbi = F2FS_I_SB(inode),
		.ino = inode->i_ino,
		.type = DATA,
		.temp = COLD,
		.op = REQ_OP_READ,
		.op_flags = 0,
		.encrypted_page = NULL,
		.in_list = 0,
	};
	struct dnode_of_data dn;
	struct f2fs_summary sum;
	struct node_info ni;
	struct page *page, *mpage;
	block_t newaddr;
	int err = 0;
	bool lfs_mode = f2fs_lfs_mode(fio.sbi);
	int type = fio.sbi->am.atgc_enabled && (gc_type == BG_GC) &&
				(fio.sbi->gc_mode != GC_URGENT_HIGH) ?
				CURSEG_ALL_DATA_ATGC : CURSEG_COLD_DATA;

	/* do not read out */
	page = f2fs_grab_cache_page(inode->i_mapping, bidx, false);
	if (!page)
		return -ENOMEM;

	if (!check_valid_map(F2FS_I_SB(inode), segno, off)) {
		err = -ENOENT;
		goto out;
	}

	err = f2fs_gc_pinned_control(inode, gc_type, segno);
	if (err)
		goto out;

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = f2fs_get_dnode_of_data(&dn, bidx, LOOKUP_NODE);
	if (err)
		goto out;

	if (unlikely(dn.data_blkaddr == NULL_ADDR)) {
		ClearPageUptodate(page);
		err = -ENOENT;
		goto put_out;
	}

	/*
	 * don't cache encrypted data into the meta inode until previous dirty
	 * data has been written back, to avoid racing between GC and flush.
	 */
	f2fs_wait_on_page_writeback(page, DATA, true, true);

	f2fs_wait_on_block_writeback(inode, dn.data_blkaddr);

	err = f2fs_get_node_info(fio.sbi, dn.nid, &ni, false);
	if (err)
		goto put_out;

	/* read page */
	fio.page = page;
	fio.new_blkaddr = fio.old_blkaddr = dn.data_blkaddr;

	if (lfs_mode)
		f2fs_down_write(&fio.sbi->io_order_lock);

	mpage = f2fs_grab_cache_page(META_MAPPING(fio.sbi),
					fio.old_blkaddr, false);
	if (!mpage) {
		err = -ENOMEM;
		goto up_out;
	}

	fio.encrypted_page = mpage;

	/* read source block in mpage */
	if (!PageUptodate(mpage)) {
		err = f2fs_submit_page_bio(&fio);
		if (err) {
			f2fs_put_page(mpage, 1);
			goto up_out;
		}

		f2fs_update_iostat(fio.sbi, inode, FS_DATA_READ_IO,
							F2FS_BLKSIZE);
		f2fs_update_iostat(fio.sbi, NULL, FS_GDATA_READ_IO,
							F2FS_BLKSIZE);

		lock_page(mpage);
		if (unlikely(mpage->mapping != META_MAPPING(fio.sbi) ||
						!PageUptodate(mpage))) {
			err = -EIO;
			f2fs_put_page(mpage, 1);
			goto up_out;
		}
	}

	set_summary(&sum, dn.nid, dn.ofs_in_node, ni.version);

	/* allocate block address */
	err = f2fs_allocate_data_block(fio.sbi, NULL, fio.old_blkaddr, &newaddr,
				&sum, type, NULL);
	if (err) {
		f2fs_put_page(mpage, 1);
		/* filesystem should shut down, no need to recover the block */
		goto up_out;
	}

	fio.encrypted_page = f2fs_pagecache_get_page(META_MAPPING(fio.sbi),
				newaddr, FGP_LOCK | FGP_CREAT, GFP_NOFS);
	if (!fio.encrypted_page) {
		err = -ENOMEM;
		f2fs_put_page(mpage, 1);
		goto recover_block;
	}

	/* write target block */
	f2fs_wait_on_page_writeback(fio.encrypted_page, DATA, true, true);
	memcpy(page_address(fio.encrypted_page),
				page_address(mpage), PAGE_SIZE);
	f2fs_put_page(mpage, 1);

	f2fs_invalidate_internal_cache(fio.sbi, fio.old_blkaddr);

	set_page_dirty(fio.encrypted_page);
	if (clear_page_dirty_for_io(fio.encrypted_page))
		dec_page_count(fio.sbi, F2FS_DIRTY_META);

	set_page_writeback(fio.encrypted_page);

	fio.op = REQ_OP_WRITE;
	fio.op_flags = REQ_SYNC;
	fio.new_blkaddr = newaddr;
	f2fs_submit_page_write(&fio);

	f2fs_update_iostat(fio.sbi, NULL, FS_GC_DATA_IO, F2FS_BLKSIZE);

	f2fs_update_data_blkaddr(&dn, newaddr);
	set_inode_flag(inode, FI_APPEND_WRITE);

	f2fs_put_page(fio.encrypted_page, 1);
recover_block:
	if (err)
		f2fs_do_replace_block(fio.sbi, &sum, newaddr, fio.old_blkaddr,
							true, true, true);
up_out:
	if (lfs_mode)
		f2fs_up_write(&fio.sbi->io_order_lock);
put_out:
	f2fs_put_dnode(&dn);
out:
	f2fs_put_page(page, 1);
	return err;
}

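/*
 * Move one plaintext data block: background GC just redirties the page and
 * tags it as GC-ing so the flusher migrates it later, while foreground GC
 * writes it out synchronously right here.
 */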
static int move_data_page(struct inode *inode, block_t bidx, int gc_type,
						unsigned int segno, int off)
{
	struct page *page;
	int err = 0;

	page = f2fs_get_lock_data_page(inode, bidx, true);
	if (IS_ERR(page))
		return PTR_ERR(page);

	if (!check_valid_map(F2FS_I_SB(inode), segno, off)) {
		err = -ENOENT;
		goto out;
	}

	err = f2fs_gc_pinned_control(inode, gc_type, segno);
	if (err)
		goto out;

	if (gc_type == BG_GC) {
		if (folio_test_writeback(page_folio(page))) {
			err = -EAGAIN;
			goto out;
		}
		set_page_dirty(page);
		set_page_private_gcing(page);
	} else {
		struct f2fs_io_info fio = {
			.sbi = F2FS_I_SB(inode),
			.ino = inode->i_ino,
			.type = DATA,
			.temp = COLD,
			.op = REQ_OP_WRITE,
			.op_flags = REQ_SYNC,
			.old_blkaddr = NULL_ADDR,
			.page = page,
			.encrypted_page = NULL,
			.need_lock = LOCK_REQ,
			.io_type = FS_GC_DATA_IO,
		};
		bool is_dirty = PageDirty(page);

retry:
		f2fs_wait_on_page_writeback(page, DATA, true, true);

		set_page_dirty(page);
		if (clear_page_dirty_for_io(page)) {
			inode_dec_dirty_pages(inode);
			f2fs_remove_dirty_inode(inode);
		}

		set_page_private_gcing(page);

		err = f2fs_do_write_data_page(&fio);
		if (err) {
			clear_page_private_gcing(page);
			if (err == -ENOMEM) {
				memalloc_retry_wait(GFP_NOFS);
				goto retry;
			}
			if (is_dirty)
				set_page_dirty(page);
		}
	}
out:
	f2fs_put_page(page, 1);
	return err;
}

/*
 * This function tries to get the parent node of a victim data block, and
 * checks the block's validity. If the block is valid, it is copied with
 * cold status and the parent node is updated.
 * If the parent node is not valid or the data block address is different,
 * the victim data block is ignored.
 */
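/*
 * It makes five passes over the segment: readahead of NAT blocks, readahead
 * of node pages, a liveness check plus inode readahead, inode lookup with
 * data page readahead, and finally the migration of each live block.
 */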
static int gc_data_segment(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
		struct gc_inode_list *gc_list, unsigned int segno, int gc_type,
		bool force_migrate)
{
	struct super_block *sb = sbi->sb;
	struct f2fs_summary *entry;
	block_t start_addr;
	int off;
	int phase = 0;
	int submitted = 0;
	unsigned int usable_blks_in_seg = f2fs_usable_blks_in_seg(sbi, segno);

	start_addr = START_BLOCK(sbi, segno);

next_step:
	entry = sum;

	for (off = 0; off < usable_blks_in_seg; off++, entry++) {
		struct page *data_page;
		struct inode *inode;
		struct node_info dni; /* dnode info for the data */
		unsigned int ofs_in_node, nofs;
		block_t start_bidx;
		nid_t nid = le32_to_cpu(entry->nid);

		/*
		 * stop BG_GC if there are not enough free sections.
		 * Or, stop GC if the segment became fully valid as a result
		 * of a race with SSR block allocation.
		 */
		if ((gc_type == BG_GC && has_not_enough_free_secs(sbi, 0, 0)) ||
			(!force_migrate && get_valid_blocks(sbi, segno, true) ==
							CAP_BLKS_PER_SEC(sbi)))
			return submitted;

		if (check_valid_map(sbi, segno, off) == 0)
			continue;

		if (phase == 0) {
			f2fs_ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nid), 1,
							META_NAT, true);
			continue;
		}

		if (phase == 1) {
			f2fs_ra_node_page(sbi, nid);
			continue;
		}

		/* Get an inode by ino, checking validity */
		if (!is_alive(sbi, entry, &dni, start_addr + off, &nofs))
			continue;

		if (phase == 2) {
			f2fs_ra_node_page(sbi, dni.ino);
			continue;
		}

		ofs_in_node = le16_to_cpu(entry->ofs_in_node);

		if (phase == 3) {
			int err;

			inode = f2fs_iget(sb, dni.ino);
			if (IS_ERR(inode))
				continue;

			if (is_bad_inode(inode) ||
					special_file(inode->i_mode)) {
				iput(inode);
				continue;
			}

			err = f2fs_gc_pinned_control(inode, gc_type, segno);
			if (err == -EAGAIN) {
				iput(inode);
				return submitted;
			}

			if (!f2fs_down_write_trylock(
				&F2FS_I(inode)->i_gc_rwsem[WRITE])) {
				iput(inode);
				sbi->skipped_gc_rwsem++;
				continue;
			}

			start_bidx = f2fs_start_bidx_of_node(nofs, inode) +
								ofs_in_node;

			if (f2fs_post_read_required(inode)) {
				int err = ra_data_block(inode, start_bidx);

				f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
				if (err) {
					iput(inode);
					continue;
				}
				add_gc_inode(gc_list, inode);
				continue;
			}

			data_page = f2fs_get_read_data_page(inode, start_bidx,
							REQ_RAHEAD, true, NULL);
			f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
			if (IS_ERR(data_page)) {
				iput(inode);
				continue;
			}

			f2fs_put_page(data_page, 0);
			add_gc_inode(gc_list, inode);
			continue;
		}

		/* phase 4 */
		inode = find_gc_inode(gc_list, dni.ino);
		if (inode) {
			struct f2fs_inode_info *fi = F2FS_I(inode);
			bool locked = false;
			int err;

			if (S_ISREG(inode->i_mode)) {
				if (!f2fs_down_write_trylock(&fi->i_gc_rwsem[WRITE])) {
					sbi->skipped_gc_rwsem++;
					continue;
				}
				if (!f2fs_down_write_trylock(
						&fi->i_gc_rwsem[READ])) {
					sbi->skipped_gc_rwsem++;
					f2fs_up_write(&fi->i_gc_rwsem[WRITE]);
					continue;
				}
				locked = true;

				/* wait for all inflight aio data */
				inode_dio_wait(inode);
			}

			start_bidx = f2fs_start_bidx_of_node(nofs, inode)
								+ ofs_in_node;
			if (f2fs_post_read_required(inode))
				err = move_data_block(inode, start_bidx,
							gc_type, segno, off);
			else
				err = move_data_page(inode, start_bidx, gc_type,
								segno, off);

			if (!err && (gc_type == FG_GC ||
					f2fs_post_read_required(inode)))
				submitted++;

			if (locked) {
				f2fs_up_write(&fi->i_gc_rwsem[READ]);
				f2fs_up_write(&fi->i_gc_rwsem[WRITE]);
			}

			stat_inc_data_blk_count(sbi, 1, gc_type);
		}
	}

	if (++phase < 5)
		goto next_step;

	return submitted;
}

static int __get_victim(struct f2fs_sb_info *sbi, unsigned int *victim,
			int gc_type)
{
	struct sit_info *sit_i = SIT_I(sbi);
	int ret;

	down_write(&sit_i->sentry_lock);
	ret = f2fs_get_victim(sbi, victim, gc_type, NO_CHECK_TYPE, LFS, 0);
	up_write(&sit_i->sentry_lock);
	return ret;
}

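/*
 * Garbage-collect all segments in the section containing @start_segno,
 * one segment at a time, using the summary blocks to locate the owner
 * of every live block.
 */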
718e53fa
CY
1671static int do_garbage_collect(struct f2fs_sb_info *sbi,
1672 unsigned int start_segno,
7dede886
CY
1673 struct gc_inode_list *gc_list, int gc_type,
1674 bool force_migrate)
7bc09003
JK
1675{
1676 struct page *sum_page;
1677 struct f2fs_summary_block *sum;
c718379b 1678 struct blk_plug plug;
718e53fa 1679 unsigned int segno = start_segno;
a60108f7 1680 unsigned int end_segno = start_segno + SEGS_PER_SEC(sbi);
e3080b01 1681 int seg_freed = 0, migrated = 0;
718e53fa
CY
1682 unsigned char type = IS_DATASEG(get_seg_entry(sbi, segno)->type) ?
1683 SUM_TYPE_DATA : SUM_TYPE_NODE;
9bf1dcbd 1684 unsigned char data_type = (type == SUM_TYPE_DATA) ? DATA : NODE;
48018b4c 1685 int submitted = 0;
7bc09003 1686
e3080b01 1687 if (__is_large_section(sbi))
a60108f7 1688 end_segno = rounddown(end_segno, SEGS_PER_SEC(sbi));
e3080b01 1689
de881df9
AR
1690 /*
1691 * zone-capacity can be less than zone-size in zoned devices,
1692 * resulting in less than expected usable segments in the zone,
1693 * calculate the end segno in the zone which can be garbage collected
1694 */
1695 if (f2fs_sb_has_blkzoned(sbi))
a60108f7 1696 end_segno -= SEGS_PER_SEC(sbi) -
de881df9
AR
1697 f2fs_usable_segs_in_sec(sbi, segno);
1698
093749e2
CY
1699 sanity_check_seg_type(sbi, get_seg_entry(sbi, segno)->type);
1700
718e53fa 1701 /* readahead multi ssa blocks those have contiguous address */
2c70c5e3 1702 if (__is_large_section(sbi))
4d57b86d 1703 f2fs_ra_meta_pages(sbi, GET_SUM_BLOCK(sbi, segno),
e3080b01 1704 end_segno - segno, META_SSA, true);
718e53fa
CY
1705
1706 /* reference all summary page */
1707 while (segno < end_segno) {
4d57b86d 1708 sum_page = f2fs_get_sum_page(sbi, segno++);
edc55aaf
JK
1709 if (IS_ERR(sum_page)) {
1710 int err = PTR_ERR(sum_page);
1711
1712 end_segno = segno - 1;
1713 for (segno = start_segno; segno < end_segno; segno++) {
1714 sum_page = find_get_page(META_MAPPING(sbi),
1715 GET_SUM_BLOCK(sbi, segno));
1716 f2fs_put_page(sum_page, 0);
1717 f2fs_put_page(sum_page, 0);
1718 }
1719 return err;
1720 }
718e53fa
CY
1721 unlock_page(sum_page);
1722 }
7bc09003 1723
c718379b
JK
1724 blk_start_plug(&plug);
1725
718e53fa 1726 for (segno = start_segno; segno < end_segno; segno++) {
aa987273 1727
718e53fa
CY
1728 /* find segment summary of victim */
1729 sum_page = find_get_page(META_MAPPING(sbi),
1730 GET_SUM_BLOCK(sbi, segno));
718e53fa 1731 f2fs_put_page(sum_page, 0);
7bc09003 1732
d6c66cd1
YS
1733 if (get_valid_blocks(sbi, segno, false) == 0)
1734 goto freed;
dabfbbc8 1735 if (gc_type == BG_GC && __is_large_section(sbi) &&
e3080b01
CY
1736 migrated >= sbi->migration_granularity)
1737 goto skip;
d6c66cd1 1738 if (!PageUptodate(sum_page) || unlikely(f2fs_cp_error(sbi)))
e3080b01 1739 goto skip;
de0dcc40 1740
718e53fa 1741 sum = page_address(sum_page);
10d255c3 1742 if (type != GET_SUM_TYPE((&sum->footer))) {
dcbb4c10
JP
1743 f2fs_err(sbi, "Inconsistent segment (%u) type [%d, %d] in SSA and SIT",
1744 segno, type, GET_SUM_TYPE((&sum->footer)));
10d255c3 1745 set_sbi_flag(sbi, SBI_NEED_FSCK);
a9cfee0e
CY
1746 f2fs_stop_checkpoint(sbi, false,
1747 STOP_CP_REASON_CORRUPTED_SUMMARY);
e3080b01 1748 goto skip;
10d255c3 1749 }
718e53fa
CY
1750
1751 /*
1752 * this is to avoid deadlock:
1753 * - lock_page(sum_page) - f2fs_replace_block
3d26fa6b
CY
1754 * - check_valid_map() - down_write(sentry_lock)
1755 * - down_read(sentry_lock) - change_curseg()
718e53fa
CY
1756 * - lock_page(sum_page)
1757 */
718e53fa 1758 if (type == SUM_TYPE_NODE)
48018b4c 1759 submitted += gc_node_segment(sbi, sum->entries, segno,
718e53fa 1760 gc_type);
48018b4c
CY
1761 else
1762 submitted += gc_data_segment(sbi, sum->entries, gc_list,
7dede886
CY
1763 segno, gc_type,
1764 force_migrate);
718e53fa 1765
		stat_inc_gc_seg_count(sbi, data_type, gc_type);
		sbi->gc_reclaimed_segs[sbi->gc_mode]++;
		migrated++;

freed:
		if (gc_type == FG_GC &&
				get_valid_blocks(sbi, segno, false) == 0)
			seg_freed++;

		if (__is_large_section(sbi))
			sbi->next_victim_seg[gc_type] =
				(segno + 1 < end_segno) ? segno + 1 : NULL_SEGNO;
skip:
		f2fs_put_page(sum_page, 0);
	}

	if (submitted)
		f2fs_submit_merged_write(sbi, data_type);

	blk_finish_plug(&plug);

	if (migrated)
		stat_inc_gc_sec_count(sbi, data_type, gc_type);

	return seg_freed;
}

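/*
 * Note: f2fs_gc() is entered with sbi->gc_lock held for write and releases
 * it on the way out (see the 'stop' path below); it may also write a
 * checkpoint to reclaim prefree segments while trying to free sections.
 */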
int f2fs_gc(struct f2fs_sb_info *sbi, struct f2fs_gc_control *gc_control)
{
	int gc_type = gc_control->init_gc_type;
	unsigned int segno = gc_control->victim_segno;
	int sec_freed = 0, seg_freed = 0, total_freed = 0, total_sec_freed = 0;
	int ret = 0;
	struct cp_control cpc;
	struct gc_inode_list gc_list = {
		.ilist = LIST_HEAD_INIT(gc_list.ilist),
		.iroot = RADIX_TREE_INIT(gc_list.iroot, GFP_NOFS),
	};
	unsigned int skipped_round = 0, round = 0;
	unsigned int upper_secs;

	trace_f2fs_gc_begin(sbi->sb, gc_type, gc_control->no_bg_gc,
				gc_control->nr_free_secs,
				get_pages(sbi, F2FS_DIRTY_NODES),
				get_pages(sbi, F2FS_DIRTY_DENTS),
				get_pages(sbi, F2FS_DIRTY_IMETA),
				free_sections(sbi),
				free_segments(sbi),
				reserved_segments(sbi),
				prefree_segments(sbi));

	cpc.reason = __get_cp_reason(sbi);
gc_more:
	sbi->skipped_gc_rwsem = 0;
	if (unlikely(!(sbi->sb->s_flags & SB_ACTIVE))) {
		ret = -EINVAL;
		goto stop;
	}
	if (unlikely(f2fs_cp_error(sbi))) {
		ret = -EIO;
		goto stop;
	}

	/* Let's run FG_GC, if we don't have enough space. */
	if (has_not_enough_free_secs(sbi, 0, 0)) {
		gc_type = FG_GC;

		/*
		 * For example, if there are many prefree_segments below the
		 * given threshold, we can make them free by checkpoint. Then,
		 * we secure free segments which don't need FG_GC any more.
		 */
		if (prefree_segments(sbi)) {
			stat_inc_cp_call_count(sbi, TOTAL_CALL);
			ret = f2fs_write_checkpoint(sbi, &cpc);
			if (ret)
				goto stop;
			/* Reset due to checkpoint */
			sec_freed = 0;
		}
	}

	/* f2fs_balance_fs doesn't need to do BG_GC in critical path. */
	if (gc_type == BG_GC && gc_control->no_bg_gc) {
		ret = -EINVAL;
		goto stop;
	}
retry:
	ret = __get_victim(sbi, &segno, gc_type);
	if (ret) {
		/* allow searching victims in sections that have pinned data */
		if (ret == -ENODATA && gc_type == FG_GC &&
				f2fs_pinned_section_exists(DIRTY_I(sbi))) {
			f2fs_unpin_all_sections(sbi, false);
			goto retry;
		}
		goto stop;
	}

	seg_freed = do_garbage_collect(sbi, segno, &gc_list, gc_type,
				gc_control->should_migrate_blocks);
	if (seg_freed < 0)
		goto stop;

	total_freed += seg_freed;

	if (seg_freed == f2fs_usable_segs_in_sec(sbi, segno)) {
		sec_freed++;
		total_sec_freed++;
	}

	if (gc_type == FG_GC) {
		sbi->cur_victim_sec = NULL_SEGNO;

		if (has_enough_free_secs(sbi, sec_freed, 0)) {
			if (!gc_control->no_bg_gc &&
			    total_sec_freed < gc_control->nr_free_secs)
				goto go_gc_more;
			goto stop;
		}
		if (sbi->skipped_gc_rwsem)
			skipped_round++;
		round++;
		/*
		 * If too many rounds were skipped due to contended rwsems,
		 * fall back to a checkpoint instead of looping on GC.
		 */
		if (skipped_round > MAX_SKIP_GC_COUNT &&
				skipped_round * 2 >= round) {
			stat_inc_cp_call_count(sbi, TOTAL_CALL);
			ret = f2fs_write_checkpoint(sbi, &cpc);
			goto stop;
		}
	} else if (has_enough_free_secs(sbi, 0, 0)) {
		goto stop;
	}

	__get_secs_required(sbi, NULL, &upper_secs, NULL);

	/*
	 * Write checkpoint to reclaim prefree segments.
	 * We need three extra sections for the writer's data/node/dentry.
	 */
	if (free_sections(sbi) <= upper_secs + NR_GC_CHECKPOINT_SECS &&
				prefree_segments(sbi)) {
		stat_inc_cp_call_count(sbi, TOTAL_CALL);
		ret = f2fs_write_checkpoint(sbi, &cpc);
		if (ret)
			goto stop;
		/* Reset due to checkpoint */
		sec_freed = 0;
	}
go_gc_more:
	segno = NULL_SEGNO;
	goto gc_more;

stop:
	SIT_I(sbi)->last_victim[ALLOC_NEXT] = 0;
	SIT_I(sbi)->last_victim[FLUSH_DEVICE] = gc_control->victim_segno;

	if (gc_type == FG_GC)
		f2fs_unpin_all_sections(sbi, true);

	trace_f2fs_gc_end(sbi->sb, ret, total_freed, total_sec_freed,
				get_pages(sbi, F2FS_DIRTY_NODES),
				get_pages(sbi, F2FS_DIRTY_DENTS),
				get_pages(sbi, F2FS_DIRTY_IMETA),
				free_sections(sbi),
				free_segments(sbi),
				reserved_segments(sbi),
				prefree_segments(sbi));

	f2fs_up_write(&sbi->gc_lock);

	put_gc_inode(&gc_list);

	if (gc_control->err_gc_skipped && !ret)
		ret = total_sec_freed ? 0 : -EAGAIN;
	return ret;
}

int __init f2fs_create_garbage_collection_cache(void)
{
	victim_entry_slab = f2fs_kmem_cache_create("f2fs_victim_entry",
					sizeof(struct victim_entry));
	return victim_entry_slab ? 0 : -ENOMEM;
}

void f2fs_destroy_garbage_collection_cache(void)
{
	kmem_cache_destroy(victim_entry_slab);
}

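/*
 * ATGC (age-threshold GC) takes effect only when the ATGC mount option is
 * set and the filesystem has aged past DEF_GC_THREAD_AGE_THRESHOLD;
 * otherwise only the default tunables below are initialized.
 */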
static void init_atgc_management(struct f2fs_sb_info *sbi)
{
	struct atgc_management *am = &sbi->am;

	if (test_opt(sbi, ATGC) &&
	    SIT_I(sbi)->elapsed_time >= DEF_GC_THREAD_AGE_THRESHOLD)
		am->atgc_enabled = true;

	am->root = RB_ROOT_CACHED;
	INIT_LIST_HEAD(&am->victim_list);
	am->victim_count = 0;

	am->candidate_ratio = DEF_GC_THREAD_CANDIDATE_RATIO;
	am->max_candidate_count = DEF_GC_THREAD_MAX_CANDIDATE_COUNT;
	am->age_weight = DEF_GC_THREAD_AGE_WEIGHT;
	am->age_threshold = DEF_GC_THREAD_AGE_THRESHOLD;
}

void f2fs_build_gc_manager(struct f2fs_sb_info *sbi)
{
	sbi->gc_pin_file_threshold = DEF_GC_FAILED_PINNED_FILES;

	/* give warm/cold data area from slower device */
	if (f2fs_is_multi_device(sbi) && !__is_large_section(sbi))
		SIT_I(sbi)->last_victim[ALLOC_NEXT] =
				GET_SEGNO(sbi, FDEV(0).end_blk) + 1;

	init_atgc_management(sbi);
}

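/*
 * Run foreground GC over [start_seg, end_seg], one section at a time.
 * A real run fails with -EAGAIN as soon as a section still holds valid
 * blocks after GC; a dry run stops early once dry_run_sections sections
 * have been emptied.
 */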
int f2fs_gc_range(struct f2fs_sb_info *sbi,
		unsigned int start_seg, unsigned int end_seg,
		bool dry_run, unsigned int dry_run_sections)
{
	unsigned int segno;
	unsigned int gc_secs = dry_run_sections;

	if (unlikely(f2fs_cp_error(sbi)))
		return -EIO;

	for (segno = start_seg; segno <= end_seg; segno += SEGS_PER_SEC(sbi)) {
		struct gc_inode_list gc_list = {
			.ilist = LIST_HEAD_INIT(gc_list.ilist),
			.iroot = RADIX_TREE_INIT(gc_list.iroot, GFP_NOFS),
		};

		do_garbage_collect(sbi, segno, &gc_list, FG_GC,
				dry_run_sections == 0);
		put_gc_inode(&gc_list);

		if (!dry_run && get_valid_blocks(sbi, segno, true))
			return -EAGAIN;
		if (dry_run && dry_run_sections &&
		    !get_valid_blocks(sbi, segno, true) && --gc_secs == 0)
			break;

		if (fatal_signal_pending(current))
			return -ERESTARTSYS;
	}

	return 0;
}

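/*
 * Evacuate the last 'secs' sections of the main area so the filesystem
 * can be shrunk: MAIN_SECS() is trimmed temporarily to keep the allocator
 * and GC out of the target range, cursegs are moved out, and the range is
 * garbage collected. A dry run only verifies the range can be emptied.
 */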
static int free_segment_range(struct f2fs_sb_info *sbi,
				unsigned int secs, bool dry_run)
{
	unsigned int next_inuse, start, end;
	struct cp_control cpc = { CP_RESIZE, 0, 0, 0 };
	int gc_mode, gc_type;
	int err = 0;
	int type;

	/* Force block allocation for GC */
	MAIN_SECS(sbi) -= secs;
	start = MAIN_SECS(sbi) * SEGS_PER_SEC(sbi);
	end = MAIN_SEGS(sbi) - 1;

	mutex_lock(&DIRTY_I(sbi)->seglist_lock);
	for (gc_mode = 0; gc_mode < MAX_GC_POLICY; gc_mode++)
		if (SIT_I(sbi)->last_victim[gc_mode] >= start)
			SIT_I(sbi)->last_victim[gc_mode] = 0;

	for (gc_type = BG_GC; gc_type <= FG_GC; gc_type++)
		if (sbi->next_victim_seg[gc_type] >= start)
			sbi->next_victim_seg[gc_type] = NULL_SEGNO;
	mutex_unlock(&DIRTY_I(sbi)->seglist_lock);

	/* Move out cursegs from the target range */
	for (type = CURSEG_HOT_DATA; type < NR_CURSEG_PERSIST_TYPE; type++) {
		err = f2fs_allocate_segment_for_resize(sbi, type, start, end);
		if (err)
			goto out;
	}

	/* do GC to move out valid blocks in the range */
	err = f2fs_gc_range(sbi, start, end, dry_run, 0);
	if (err || dry_run)
		goto out;

	stat_inc_cp_call_count(sbi, TOTAL_CALL);
	err = f2fs_write_checkpoint(sbi, &cpc);
	if (err)
		goto out;

	next_inuse = find_next_inuse(FREE_I(sbi), end + 1, start);
	if (next_inuse <= end) {
		f2fs_err(sbi, "segno %u should be free but still inuse!",
			 next_inuse);
		f2fs_bug_on(sbi, 1);
	}
out:
	MAIN_SECS(sbi) += secs;
	return err;
}

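/*
 * Apply a resize of 'secs' sections (negative to shrink) to the on-disk
 * superblock counters; sb_lock serializes the update.
 */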
static void update_sb_metadata(struct f2fs_sb_info *sbi, int secs)
{
	struct f2fs_super_block *raw_sb = F2FS_RAW_SUPER(sbi);
	int section_count;
	int segment_count;
	int segment_count_main;
	long long block_count;
	int segs = secs * SEGS_PER_SEC(sbi);

	f2fs_down_write(&sbi->sb_lock);

	section_count = le32_to_cpu(raw_sb->section_count);
	segment_count = le32_to_cpu(raw_sb->segment_count);
	segment_count_main = le32_to_cpu(raw_sb->segment_count_main);
	block_count = le64_to_cpu(raw_sb->block_count);

	raw_sb->section_count = cpu_to_le32(section_count + secs);
	raw_sb->segment_count = cpu_to_le32(segment_count + segs);
	raw_sb->segment_count_main = cpu_to_le32(segment_count_main + segs);
	raw_sb->block_count = cpu_to_le64(block_count +
					(long long)SEGS_TO_BLKS(sbi, segs));
	if (f2fs_is_multi_device(sbi)) {
		int last_dev = sbi->s_ndevs - 1;
		int dev_segs =
			le32_to_cpu(raw_sb->devs[last_dev].total_segments);

		raw_sb->devs[last_dev].total_segments =
						cpu_to_le32(dev_segs + segs);
	}

	f2fs_up_write(&sbi->sb_lock);
}

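/*
 * In-memory counterpart of update_sb_metadata(): adjust the cached
 * segment/section/block counters and, for multi-device setups, the last
 * device's geometry.
 */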
static void update_fs_metadata(struct f2fs_sb_info *sbi, int secs)
{
	int segs = secs * SEGS_PER_SEC(sbi);
	long long blks = SEGS_TO_BLKS(sbi, segs);
	long long user_block_count =
			le64_to_cpu(F2FS_CKPT(sbi)->user_block_count);

	SM_I(sbi)->segment_count = (int)SM_I(sbi)->segment_count + segs;
	MAIN_SEGS(sbi) = (int)MAIN_SEGS(sbi) + segs;
	MAIN_SECS(sbi) += secs;
	FREE_I(sbi)->free_sections = (int)FREE_I(sbi)->free_sections + secs;
	FREE_I(sbi)->free_segments = (int)FREE_I(sbi)->free_segments + segs;
	F2FS_CKPT(sbi)->user_block_count = cpu_to_le64(user_block_count + blks);

	if (f2fs_is_multi_device(sbi)) {
		int last_dev = sbi->s_ndevs - 1;

		FDEV(last_dev).total_segments =
				(int)FDEV(last_dev).total_segments + segs;
		FDEV(last_dev).end_blk =
				(long long)FDEV(last_dev).end_blk + blks;
#ifdef CONFIG_BLK_DEV_ZONED
		FDEV(last_dev).nr_blkz = FDEV(last_dev).nr_blkz +
					div_u64(blks, sbi->blocks_per_blkz);
#endif
	}
}

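/*
 * Shrink the filesystem to 'block_count' blocks; growing is rejected and
 * the new size must be section-aligned. A dry-run pass under f2fs_lock_op()
 * verifies the tail sections can be emptied before the filesystem is
 * frozen for the real evacuation, superblock commit, and checkpoint.
 * 'filp' comes from the ioctl caller and is used for write-access
 * accounting via mnt_want_write_file().
 */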
int f2fs_resize_fs(struct file *filp, __u64 block_count)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(file_inode(filp));
	__u64 old_block_count, shrunk_blocks;
	struct cp_control cpc = { CP_RESIZE, 0, 0, 0 };
	unsigned int secs;
	int err = 0;
	__u32 rem;

	old_block_count = le64_to_cpu(F2FS_RAW_SUPER(sbi)->block_count);
	if (block_count > old_block_count)
		return -EINVAL;

	if (f2fs_is_multi_device(sbi)) {
		int last_dev = sbi->s_ndevs - 1;
		__u64 last_segs = FDEV(last_dev).total_segments;

		if (block_count + SEGS_TO_BLKS(sbi, last_segs) <=
								old_block_count)
			return -EINVAL;
	}

	/* new fs size should align to section size */
	div_u64_rem(block_count, BLKS_PER_SEC(sbi), &rem);
	if (rem)
		return -EINVAL;

	if (block_count == old_block_count)
		return 0;

	if (is_sbi_flag_set(sbi, SBI_NEED_FSCK)) {
		f2fs_err(sbi, "Should run fsck to repair first.");
		return -EFSCORRUPTED;
	}

	if (test_opt(sbi, DISABLE_CHECKPOINT)) {
		f2fs_err(sbi, "Checkpoint should be enabled.");
		return -EINVAL;
	}

	err = mnt_want_write_file(filp);
	if (err)
		return err;

	shrunk_blocks = old_block_count - block_count;
	secs = div_u64(shrunk_blocks, BLKS_PER_SEC(sbi));

	/* stop other GC */
	if (!f2fs_down_write_trylock(&sbi->gc_lock)) {
		err = -EAGAIN;
		goto out_drop_write;
	}

	/* stop CP to protect MAIN_SEC in free_segment_range */
	f2fs_lock_op(sbi);

	spin_lock(&sbi->stat_lock);
	if (shrunk_blocks + valid_user_blocks(sbi) +
		sbi->current_reserved_blocks + sbi->unusable_block_count +
		F2FS_OPTION(sbi).root_reserved_blocks > sbi->user_block_count)
		err = -ENOSPC;
	spin_unlock(&sbi->stat_lock);

	if (err)
		goto out_unlock;

	/* dry run: check that the tail sections can actually be emptied */
	err = free_segment_range(sbi, secs, true);

out_unlock:
	f2fs_unlock_op(sbi);
	f2fs_up_write(&sbi->gc_lock);
out_drop_write:
	mnt_drop_write_file(filp);
	if (err)
		return err;

	err = freeze_super(sbi->sb, FREEZE_HOLDER_USERSPACE);
	if (err)
		return err;

	if (f2fs_readonly(sbi->sb)) {
		err = thaw_super(sbi->sb, FREEZE_HOLDER_USERSPACE);
		if (err)
			return err;
		return -EROFS;
	}

	f2fs_down_write(&sbi->gc_lock);
	f2fs_down_write(&sbi->cp_global_sem);

	spin_lock(&sbi->stat_lock);
	if (shrunk_blocks + valid_user_blocks(sbi) +
		sbi->current_reserved_blocks + sbi->unusable_block_count +
		F2FS_OPTION(sbi).root_reserved_blocks > sbi->user_block_count)
		err = -ENOSPC;
	else
		sbi->user_block_count -= shrunk_blocks;
	spin_unlock(&sbi->stat_lock);
	if (err)
		goto out_err;

	set_sbi_flag(sbi, SBI_IS_RESIZEFS);
	err = free_segment_range(sbi, secs, false);
	if (err)
		goto recover_out;

	update_sb_metadata(sbi, -secs);

	err = f2fs_commit_super(sbi, false);
	if (err) {
		update_sb_metadata(sbi, secs);
		goto recover_out;
	}

	update_fs_metadata(sbi, -secs);
	clear_sbi_flag(sbi, SBI_IS_RESIZEFS);
	set_sbi_flag(sbi, SBI_IS_DIRTY);

	stat_inc_cp_call_count(sbi, TOTAL_CALL);
	err = f2fs_write_checkpoint(sbi, &cpc);
	if (err) {
		update_fs_metadata(sbi, secs);
		update_sb_metadata(sbi, secs);
		f2fs_commit_super(sbi, false);
	}
recover_out:
	clear_sbi_flag(sbi, SBI_IS_RESIZEFS);
	if (err) {
		set_sbi_flag(sbi, SBI_NEED_FSCK);
		f2fs_err(sbi, "resize_fs failed, should run fsck to repair!");

		spin_lock(&sbi->stat_lock);
		sbi->user_block_count += shrunk_blocks;
		spin_unlock(&sbi->stat_lock);
	}
out_err:
	f2fs_up_write(&sbi->cp_global_sem);
	f2fs_up_write(&sbi->gc_lock);
	thaw_super(sbi->sb, FREEZE_HOLDER_USERSPACE);
	return err;
}